[
  {
    "path": ".commit-template",
    "content": "# Please use the following format for the commit message:\n#\n# <module>: short description of the change.\n#\n# More detailed description about the issue\n# and the way it is addressed.\n#\n# Implementation details.\n#\n\n"
  },
  {
    "path": ".gitignore",
    "content": ".deps\n.libs\n.dirstamp\nMakefile.in\nMakefile\n*.o\n*.lo\n*.la\n*.diff\n*.orig\n*.rej\n*.gcda\n*.gcno\n*~\n\\#*\\#\nTAGS\ncscope.*\n\n/autotools/depcomp\n/autotools/install-sh\n/autotools/ltmain.sh\n/autotools/missing\n/autotools/compile\n/autotools/config.guess\n/autotools/config.sub\n/autotools/ar-lib\n/autotools/m4/libtool.m4\n/autotools/m4/ltoptions.m4\n/autotools/m4/ltsugar.m4\n/autotools/m4/ltversion.m4\n/autotools/m4/lt~obsolete.m4\n/autotools/ylwrap\n/autotools/test-driver\n/autom4te.cache\n/config.status\n/libtool\n/Makefile\n/aclocal.m4\n/config.log\n/configure\n/doc/doxygen/doxygen.cfg\n/man/Makefile\n/robinhood-*.tar.gz\n/robinhood.spec\n/rpms/\n/scripts/Makefile\n/scripts/robinhood.init\n/scripts/robinhood.init.sles\n/scripts/robinhood.service\n/scripts/robinhood@.service\n/scripts/sysconfig_robinhood\n/scripts/ld.so.robinhood.conf\n/scripts/rbh_cksum.sh\n/src/Makefile\n/src/todo\n/src/robinhood/todo\n/src/cfg_parsing/Makefile\n/src/chglog_reader/Makefile\n/src/common/Makefile\n/src/entry_processor/Makefile\n/src/fs_scan/Makefile\n/src/include/Makefile\n/src/include/config.h\n/src/include/config.h.in\n/src/include/stamp-h1\n/src/list_mgr/Makefile\n/src/logs/Makefile\n/src/policy_matching/Makefile\n/src/policy_modules/Makefile\n/src/policy_modules/hsm_remove/Makefile\n/src/policy_modules/migration/Makefile\n/src/policy_modules/purge/Makefile\n/src/policy_modules/rmdir/Makefile\n/src/robinhood/Makefile\n/src/tests/Makefile\n/src/tests/test_uidgidcache\n/src/tests/test_params\n/src/tests/test_confparam\n/src/tests/test_parse\n/src/tests/create_nostripe\n/src/tests/test_forcestripe\n/src/backend_ext/Makefile\n/tests/Makefile\n/web_gui/Makefile\n/src/tools/Makefile\n/src/tools/gen_lov_objid\n/src/tools/read_lovea\n/src/tools/set_lovea\n/src/tools/ost_fids_remap\n/src/tools/lhsmtool_cmd\n/src/robinhood/rbh-du\n/src/robinhood/rbh-find\n/src/robinhood/rbh-report\n/src/robinhood/rbh-diff\n/src/robinhood/rbh-undelete\n/src/robinhood/robinhood\n/t
ests/lustre_fs/*.log\n/tests/posix_fs/*.log\n/tests/test_suite/create-random\n/TODO\n/src/include/db_schema.h\n/scripts/make_mans.sh\n/web_gui/gui_v3/config_local.php\n"
  },
  {
    "path": "ChangeLog",
    "content": "Summary of changes in versions 3.x:\n\n3.2:\n- Add features for Lustre's project quota:\n\t- Retrieve project id when scanning and reading changelogs\n\t- Add report: rbh-report --project-info\n\t- New filtering options in rbh-report: --filter-project\n\t- New option --split-user-projects to split user's usage per project\n\t- Display project-info with 'rbh-find --printf %RP'\n\t- Filter project with 'rbh-find -projid num'\n\t- New 'projid' trigger target on command line\n- Implement policy sort order by size, e.g. lru_sort_attr = size;\n- Implement asc/desc modifiers for sort order, e.g. lru_sort_attr = size(desc);\n- Implement policy trigger thresholds as percentage of available inodes:\n    high/low_threshold_cntpct = xx%;\n- policy optimization: no DB update when pre_sched_match and post_sched_match\n  are set to \"none\" or \"cache_only\".\n- Fix errors \"Out of range value for size columns\" due to DB triggers\n- Make top-user --by-count take all entry types into account\n- Adaptations for Lustre 2.15\n- Adaptations for RHEL9.4 OS family\n\n3.1.7:\n- Fix pool_usage triggers.\n- Fix partial application of policies when run on multiple OSTs.\n\n3.1.6:\n- fix build on Lustre 2.12.4\n- check the filesystem returns consistent statfs values\n- chglog_reader: reopen changelog in case of unexpected error\n- chglog_reader: de-duplicate HSM STATE events (keep the latest)\n- policy run time stat now includes initial DB request time\n- fix the use of archive_id parameter in lhsm\n- port to CentOS8 and lustre master (2.13.52)\n\n3.1.5:\n- Lustre: compatibility with FPL and DoM (no full-featured support)\n- Fix and improvement of ratelimit scheduler\n- Stop retrying SQL requests on SIGTERM\n- Faster remove policies (disable sorting)\n\n3.1.4:\n* Web gui:\n    - new access control criteria: IP address, hostname;\n    - tasks: scheduled requests, and possibility to keep result history;\n    - custom graphs.\n* Performance improvements:\n    - Improved 
rules-to-SQL conversion engine. Enable faster policy runs.\n    - Auto-tune hash table sizes (changelog reader and entry processor).\n    - Pipeline tuning to reduce CPU usage and improve ingestion rate.\n    - Default linking to jemalloc memory allocator for better performance.\n      Note: it is advised to start MariaDB with this allocator too\n      to achieve maximal performance.\n    - Fix major performance bug in changelog reader (appeared in v3.1.3).\n* Configuration files: support %include directives in sub-blocks.\n* New parameter to 'common.copy': \"mkdir=yes\" creates target directories\n  prior to the copy operation.\n* New action 'common.move' (move an entry from one path/name to another).\n* Command copytool 'lhsmtool_cmd': add man page.\n\n3.1.3:\n* Policies: add matching modes \"auto_update_attrs\" and \"auto_update_all\"\n* Faster changelog reader shutdown when the process is terminated by a signal\n* Changelog optimization: drop CREATE/UNLINK and MKDIR/RMDIR changelog pairs\n* rbh-find:\n     add \"-links\" criterion\n     \"-ost\" accepts OST sets\n     add \"-iname\" option for case-insensitive name matching\n     support \"-not\" for links, size and dates\n* FS scan optimization (use openat() to walk through the filesystem instead\n  of full paths).\n* Fix rules-to-SQL conversion\n\n3.1.2:\n* Implement command rbh-rebind command to assign an archived entry to a newly\n  created fid (e.g. 
used for undelete operation).\n* Make lhsm undelete more resilient to error cases\n* lhsm: add tunable to allow custom/smaller UUID\n* REST API:\n     add nagios plugin\n     add graph preview in console plugin\n     add new filters\n\nMain changes in robinhood 3.1:\n* Ingest rate optimizations: up to x10\n* New plugin type: action schedulers\n  These plugins make it possible to reorder policy actions, or smooth policy execution.\n  In this release, two of such plugins are provided: 'common.max_per_run' and\n  'common.rate_limit'.\n* Policy optimizations:\n  - Improved pre-filtering: convert all policy rules to SQL for better\n    pre-filtering.\n  - Configurable attribute matching before/after scheduling\n    (pre/post_sched_match parameters).\n* FS scan: make it possible to restrict scan to several directories\n  ('scan_only' configuration parameter).\n  - Especially useful with Lustre's static DNE partitioning.\n* Pre/post policy run commands\n* Modeguard status manager\n  - Enforce or clear permission bits on entries of the filesystem\n* Fixes for Lustre 2.10\n  - Note: this robinhood version does not support Progressive File Layouts yet\n* REST API and web interface enhancements:\n\t- Plugin mechanism to add custom charts/fields/requests...\n\t- Finer access control (limit user views only to their stats)\n\t- All robinhood DB available through the REST API\n\nMain changes in robinhood 3.0:\n* Policies at will:\n    - Define new custom policies at will, just by writting just a few lines of configuration.\n    - Schedule all imaginable actions on filesystem entries.\n    - Templates are provided to implement: old entries cleanup, directory cleanup, data corruption checks (by regularly checking file's checksum), Lustre/HSM policies.\n* Fully configurable policies:\n    - All policy aspects made configurable (scope, actions, action parameters, sort order...)\n    - Fine-grained specification of policy actions and parameters.\n* Plugin-based architecture:\n    - 
Missing a specific feature in Robinhood core? Implement your own plugin to manage new kind of policies, maintain specific info in robinhood DB, enable interactions with specific site ecosystem (job scheduler, system load...), ...\n* Enabling the Robinhood community:\n    - Share your custom policy definitions and plugins, so that other community members can benefit from them and enrich them!\n* All Robinhood features in a single instance:\n    - No more distinct and incompatible robinhood \"flavors\" (tmpfs, lhsm, backup...)\n    - All policies can be implemented in a single robinhood instance.\n* Get a better overlook of your filesystem contents than ever!\n    - New web interface\n    - REST interface to query filesystem stats\n    - Tag your filesystem entries based on arbitrary criteria using the new fileclass implementation\n    - Flexible reporting with \"rbh-find -printf\"\n* RedHat 7 integration:\n    - systemd support.\n    - Per filesystem service: robinhood@<fsname>\n* New features for Lustre/HSM:\n    - Undelete: recover accidentally deleted files.\n    - Implement Lustre/HSM with any backend, using the generic command copytool shipped with robinhood: 'lhsmtool_cmd'.\n    - UUID support for Lustre/HSM copytools.\n* Convenient:\n    - Automatic DB conversion after major upgrades.\n* Even more robust & Improved code quality.\n\nChanges between version 2.5.4 and 2.5.5:\n* [lustre] Support Lustre versions up to 2.7.\n* [DB performance] Allow both batching and parallelizing DB operations when\n  accounting is OFF. 
Benchmarks show a x3~x4 speedup of DB ingest rate in this\n  case.\n* [DB performance] Reduce lock contention on indexes of STRIPE_ITEMS table.\n* [acct] Avoid accounting stats inconsistency when this feature is enabled/\n  disabled/re-enabled.\n* [policy performance] Disable ignored class re-matching by default.\n* [policy performance] Disable sort by mtime by default.\n* [policy performance] Allow disabling sort by atime for purge policies.\n* [fix] Fix segfault when log rotation occurs under heavy logging.\n* [fix] When processing a RENAME changelog record, match the new path in\n  filesystem instead of building it from DB (which caused 'incomplete path'\n  errors).\n* [fix] Fix sendfile copy operation for backup mode.\n* [fix] Fix various requests about directories: --topdirs with --filter-path\n  option, --toprmdir, ...\n* [fix] Fix possible duplicate insert error when updated entry attributes are\n  only in ANNEX_INFO table (rare).\n* [fix] Some purge parameters may not be taken into account.\n* [packaging] clean permissions in distribution tarball.\n* [report performance] Improve performance of path matching when running\n  directory reports with --filter-path option.\n\nChanges between version 2.5.3 and 2.5.4:\n* [lustre] Lustre 2.4+: detect all stripe changes and update the DB accordingly.\n* [scan] Prevent from dropping entries from DB when opendir or stat fail.\n* [DB] Optimization to batch more DB requests.\n* [DB] Allow using any MySQL engine (new config parameter: listmanager::mysql::engine)\n* [rbh-config] Avoid backup_db to lock the whole tables with innodb.\n* [bugfix] Improve robustness to corrupted mtimes.\n* [bugfix] Fix possible crash in db_exec_sql.\n* [bugfix] Fix possible overflow when executing a custom archive command.\n* [backup mode] Clean all non-printable characters in backend path.\n* [pkg] libattr-devel is now mandatory to build robinhood.\n\n2.5.4 release notes:\n* \"innodb = enabled/disabled\": this parameter will be deprecated.\n  
It has to be replaced by \"engine = innodb\".\n* (Lustre only) one of the field types changed in the DB.\n  It will be automatically converted the first time rbh 2.5.4 starts.\n  The conversion takes about 5s per million entries.\n* Admin guides are now available online (http://robinhood.sf.net > \"Online documentation\")\n  \"odt\" and \"pdf\" versions are no longer maintained.\n\nChanges between version 2.5.2 and 2.5.3:\n- custom purge_command (fix): fixed vulnerability to malicious file names.\n- changelog processing (fix): fixed errors 'Entry has incomplete path in DB'\n  in some case of rm/create patterns.\n- migration policy (fix): don't trigger copy of files that no longer exist.\n- rbh-config (feature): new option 'reset_acct' to rebuild accounting info.\n- changelog reader (feature): new parameter 'dump_file' to dump all incoming\n  changelog records to a file.\n- Port to Lustre 2.6.\n\nChanges between version 2.5.1 and 2.5.2:\n\n- rbh-du: fixed major performance regression (since v2.5.0).\n- rbh-find: fixed occasional crash.\n- HSM and backup modes: fixed a risk of removing an existing entry from the backend\n  (in some situations of hardlink/rename+unlink).\n- backup mode: optimized sendfile()-based copy (Linux kernel >= 2.6.33).\n- logs: avoid flood of log messages in case of DB connection error.\n- alerts: added host name to alert mail title.\n- rbh-config empty_db/repair_db: also manage/fix stored procedures.\n- cosmetic: fix wrong display of purged blocks for count-based triggers.\n- cosmetic: fix migration counter display.\n- init script: check that 'ulimit -s' is reasonable.\n- fixed build dependancies on Fedora19 and Fedora20.\n- code sanity: fixed many 'coverity' warnings + a couple of minor memleaks.\n- doc: details about RPM installation locations.\n- doc: detail of 'backend' paramaters for backup mode.\n\nChanges between version 2.5.0 and 2.5.1:\n\n- entry processing (major fix): fixed deadlock when the pipeline is full\n  and an entry with an 
unknown parent is encountered.\n- purge (enhancement): start purging data from the most used OSTs.\n- rbh-find (features): new options: -pool, -exec, -print, -nouser, -nogroup, -lsost\n- rbh-find (optimization): automatically switch to bulk DB request mode when\n  command argument is filesystem root (+new option -nobulk to disable it).\n- logging (enhancement): new config parameters to control log header format\n- backup (feature): allow compressing data in archive.\n- backup (fix): wrong path in archive when robinhood root directory != mount point.\n- backup (fix): fix segfault when importing a single file with a FID-ending name.\n\nChanges in version 2.5.0:\nSummary:\n- filesystem disaster recovery features\n- new namespace management (new DB schema to properly handle hardlinks, renames...)\n- scanning and changelog processing optimizations\n- database optimizations (requests batching)\n- many other changes, improvements and code cleaning...\n\nDetails:\n- rbh-diff:\n    * new command to detect differences between the filesystem and the information\n      in robinhood database.\n    * option \"--apply=fs\" for disaster recovery purpose: restore the filesystem\n      metadata from robinhood DB.\n    * makes it possible to rebuild a Lustre MDT from scratch, or from a LVM snapshot\n      (see \"Robinhood Lustre disaster recovery guide\" for more details).\n- database:\n    * new namespace implementation in database with new NAMES table (Cray contribution)\n        - fixes/improves hardlink support\n        - fixes/improves Lustre ChangeLog hardlink/rename/unlink support\n        - saves DB storage space\n    * database request batching: significantly increase database ingest rate.\n      No longer needs innodb_flush_log_at_tx_commit != 1 to speed up DB operations.\n    * additional information in DB that can help for disaster recovery:\n      symlink info, access rights, stripe object indexes, stripe order, nlink...\n    * set default commit behavior to transaction 
(prevent from DB inconsistencies)\n    * optimized multi-table requests\n    * optimization: minimized attribute set in DB update operations\n      (don't update attributes that didn't change)\n    * Fix: deal with mysql case insensitivity for string matching\n    * triggers and stored procedures versioning mechanism\n    * prevent from overflows for large INSERT requests, wide stripes...\n    * prevent from DB deadlocks\n- scanning:\n    * --partial-scan option is deprecated and replaced by an optional argument to --scan (e.g. --scan=/fs/subdir).\n    * better management of partial scans:\n        - better detection of removed entries vs. entries moved from a directory to another.\n        - partial scans can be used for initial DB population (even if the DB is initially empty).\n    * garbage collection of removed entries in DB is a long operation when terminating a scan (and even more\n      when terminating a partial scan). Added --no-gc option to skip it (recommanded for partial scans).\n    * automatically enabling --no-gc if the DB is initially empty (eg. 
for initial scan).\n    * optimization: use *at() functions (openat, fstatat) and readdir by chunk (using getdents) instead of POSIX lstat() and readdir_r().\n    * optimization: use NOATIME flag to access entries as much as possible\n    * optimizations of get_stripe and get_fid operations.\n    * new --diff option for robinhood --scan and --readlog: output detected changes in a diff-like format.\n- Lustre changelogs:\n    * changelog batching (Cray contribution): to speed up changelog processing,\n      robinhood retains changelog records in memory a short time,\n      to aggregate similar/redundant Changelog records on the same entry before\n      updating its database.\n    * support multiple changelog readers (for DNE) as multiple threads (default)\n      or as multiple processes, possibly on different hosts, by giving a MDT index\n      to --readlog option.\n    * resilience to filesystem umount/mount.\n- rbh-report:\n    * new option --entry-info to get all the stored information about an entry\n    * option --dump-ost can now list multiple OSTs and support ranges notation (e.g. 
3,5-8,12-23).\n    * --dump-ost output indicates if a file has data on a given OST (could be striped on the OST but have no data on it).\n- rbh-find:\n    * new option -crtime to filter entries on creation time.\n    * output ordering closer to find output\n    * added missing info in 'rbh-find -ls' output (nlink, mode, symlink info...)\n- robinhood-backup:\n    * by default, use a built-in copy function to avoid the cost of forking copy commands.\n    * rbh-backup-rebind: tool to rebind an entry in the backend if its fid changed in the filesystem\n      for any reason (file copied to a new one to change its stripe, etc...)\n    * rbh-backup-recov new features and options:\n        --list (list information about entries to be recovered)\n        --ost <ost_set> to only recover entries for a given set of OSTs (support range notation):\n            the basic use-case is OST disaster recovery.\n        --since <time> to only recover entries modified since a given date:\n            the basic use case is after restoring an OST snapshot.\n    * symlinks archiving to backend made optional (new parameter 'archive_symlinks')\n      as they can now be restored using robinhood database information.\n- configuration:\n    * can specify environment variables in config file (e.g. 
fs_path = $ROOT_DIR ;)\n    * prevent from using a wrong config file (Cray contribution):\n        - only check files in /etc/robinhood.d/<purpose>, no longer in the current directory\n        - fails if to many config files are available.\n\nChanges between version 2.4.2 and 2.4.3:\n* [lustre] support of Lustre 2.4\n    - DNE not fully supported yet: if running multiple MDS,\n      run 1 instance of changelog reader per MDT.\n    - Detect file layout changes (new changelog record CL_LAYOUT).\n* [lustre] added statistics about changelog processing speed.\n* [policies] new parameter 'recheck_ignored_classes' to allow/avoid\n    rematching entries from ignored classes in migration and purge policies.\n* [web ui] security patch to prevent from SQL injection.\n* [lustre] fix stack overflow when handling files with wide stripes.\n* [DB] better handling of ER_QUERY_INTERRUPTED MySQL error.\n* [DB] fixed DB connection leaks.\n* Backup & HSM modes:\n   - [fix] fix segfault in import command when uid/gid can't be resolved.\n   - [rbh-report] fix bad display of total volume with -u or -g.\n* Migration policy features and optimizations:\n   - [feature] new parameter 'lru_sort_attr' to select LRU sort criteria for policy application.\n        Previously based on last modification time, it can now be one of:\n        creation, last_archive, last_mod, last_access.\n   - [feature] special meaning for condition 'last_archive == 0':\n        matches entries that have never been archived.\n   - [feature] suspend migration if copy error rate exceed a threshold.\n        This is controled by 'suspend_error_pct' and 'suspend_error_min' parameters.\n   - [stats] migration stats while migration is running: added skipped and error counters.\n   - [optim] avoid rechecking ignored entries at each pass\n   - [optim] smoother feeding of migration workers queue\n* Code & environment:\n   - [build] can specify a path to alternative lustre source tree in ./configure\n   - [tests] allow 
specifying an alternative path to lfs command\n\nChanges between version 2.4.1 and 2.4.2:\n* [general] immediate exit on ctrl+C: don't process all queued operations, just finish current.\n* [general] LSB compliance if daemon is already started.\n* [DB] validation with MariaDB (replacement for MySQL in Fedora19).\n* [config] can set default config file using RBH_CFG_DEFAULT environment variable.\n* [config] more precise message if no config file is found.\n* [rbh-find] added -not/-! option to rbh-find.\n* [lustre] fix for 16 chars pool names.\n* [bugfix] fixed memleak in rbh-find.\n* [bugfix] fixed segfault if checking scan deadline occured exactly when scan ended.\n* [logs] display bandwidth and rate stats during migration run.\n* [logs] fix: DB get operations were counted twice in stats.\n* [cosmetic] removed \"connection failed\" warning for one shot commands.\n* [cosmetic] fix typos in logs.\n* [devel] port to automake 1.12 (since Fedora18).\n\nChanges between version 2.4.0 and 2.4.1:\n* [lustre] better file size change detection using CLOSE events\n  from MDT ChangeLog (requires Lustre 2.2 or +)\n* [scan] optimization: using fstatat and getdents\n* [rbh-find] added -atime/-amin options\n* [logs] add'l information in logs (DB operations, HSM_rm details)\n* [logs] log to stderr if opening of the log file fails\n* [fix] scan blocked if final DB operation failed\n* [fix] avoid DB lock exhaustion for huge requests\n* [backup] manage cross device rename in backend\n* [backup] rebind an entry in backend after fid change (e.g. 
restripe)\n\n\nNew features in robinhood 2.4:\n* rbh-du and rbh-find: \"du\" and \"find\" clones querying robinhood's database\n  Faster way to search for entries in a filesystem!\n\n  Performance comparison for a 1 million entries Lustre v2 filesystem\n        find /lustre -user foo -type f -size -32M -ls\n        (no possible criteria on OST index)\n        > 58m13s\n\n        lfs find /lustre -user foo -type f --obd lustre-OST0001\n        (no possible criteria on size)\n        > 20m46s\n\n        rbh-find /lustre -user foo -type f -size -32M -ost 1 -ls\n        > 1.2s\n\n* Directory reporting: top directories per dirent count, per avg file size\n  (useful for small file hunting)\n* File size profiling: global, per user, per group, per fileclass...\n  (+additional section in webUI)\n* Sorting user/groups by size range (eg. percentage of files < 1G)\n  (useful for small file hunting)\n\n* Partial scans to update only a subset of the filesystem.\n  => allow distributed scans by splitting the namespace into\n     partial scans running on multiple clients.\n\nOther changes in 2.4.0:\n* [packaging] rpm name 'robinhood-tmp_fs_mgr' changed to 'robinhood-tmpfs'\n* [packaging] 'rbh-config' command moved to new RPM 'robinhood-adm'\n* [report] refurbished rbh-report output format\n* [policies] new criteria on file creation time\n* [database] use innodb by default for MySQL engine\n* [system] ability to detect \"fake mtime\" (mtime != actual modification time)\n* [system] improved filesystem detection,\n           using fsname or devid as FS identifier (config driven)\n* [scan] can trigger external completion command when a scan ends\n* [misc.] can use short config name instead of full path\n          (eg. 
\"-f <name>\" instead of \"-f /full/path/to/name.conf\")\n* [backup] directory and symlink recovery\n* [lustre] port to Lustre 2.2 and 2.3\n* [lustre] support for new Changelog record struct (lu-1331)\n* [fix] max_rm_count=0 resulted in no rm (instead of unlimited)\n* [fix] segfault in realpath() on Ubuntu\n* [fix] unsigned arithmetic issue with MySQL 5.5\n\nChanges between version 2.3.3 and 2.3.4:\n- Faster and safer shutdown on SIGINT/SIGTERM\n- Can use short config name instead of full config file path.\n  E.g. \"-f myconf\" instead of \"-f /etc/robinhood.d/tmp_fs/myconf.cfg\"\n- Consider all non-dirs for classinfo (instead of files only)\n- Implemented max_rm_count in hsm remove policy\n- clearer messages about DB connection and retries\n- added lu543 configure option (must be enabled if this patch\n  is integrated to your Lustre distribution)\n- Better block counting for purges\n- backup/shook modes:\n    - import of existing files from backend\n    - entry state set to 'archive_running' during migration\n    - recovery for entries with 'release pending' or 'restore running' state on startup\n      (new parameter: check_purge_status_on_startup)\n    - enable DB rebuild if it is lost\n    - fix: symlink recovery\n    - improvements of rbhext_tool_clnt/svr (timeout, traces, ...)\n    - user.shook_state xattr changed to security.shook_state\n      (to avoid users to change it)\n- Fix: Don't consider 'released' entries for quota-like purge triggers\n- Fix: migrate-group did migrate user\n- Generate up-to-date template automatically at RPM installation\n- 72 new regression tests (all policies conditions and config file parameters are tested)\n\nChanges between version 2.3.2 and 2.3.3:\n[webgui]\n    - added FS name to page title and page header\n    - added missing file in RPM (.htaccess)\n[reports]\n    - new options for top-users/top-groups: --by-avgsize, --count-min, --reverse\n    - Lustre changelog stats in 'rbh-report -a'\n[policies]\n    - fix: 'tree' 
condition must match root entry\n    - fix: migration class matching at scan time\n[config]\n    - simpler parameter 'scan_interval'\n    - fix: don't reload config of disabled modules on SIGHUP\n    - fix: on SIGHUP, don't reload parameters specified on cmd line\n[database]\n    - retry on connection failure\n[stats]\n    - dump process stats on SIGUSR1\n[backup]\n    - clean special chars in archive names\n    - fix issues in symlink archiving\n[lustre]\n    - specific compilation option for jira's LU-543\n[misc]\n    - code cleaning, sanity checks, improved traces...\n\n\nChanges between version 2.3.1 and 2.3.2:\n- [webgui] Web interface (beta)\n- [quota/alerts] Implemented quota alerts on inode count (users and groups)\n- [reporting] New option --by-count for --top-users, to sort users by entry count\n- [database] Support of InnoDB MySQL engine\n- [database] MySQL 4 compatibility fix\n- [bugfix](minor) handling DB deadlock error\n- [bugfix](tweak) added acct parameters to default and template outputs\n- [testing] big tests with 1M entries\n- [backup] about backup mode (beta):\n    - [bugfix](major) fixed error determining symlink status\n    - [bugfix](minor) don't consider 'new' entries in deferred removal\n    - [trace] display warning if mtime in FS < mtime in backend\n\nChanges between version 2.3.0 and 2.3.1:\n- [bugfix](major) Wrong accounting values if file owner changes\n- [bugfix](major) SQL error for widely striped files\n- [compat] Compatibility fix for MySQL servers between 5.0.0 and 5.0.32\n\nChanges between version 2.2.3 and 2.3.0:\n- [optim.] instant accounting reports (user/group usage, fs content summary, ...)\n- [reporting] split user usage per group (--split-user-groups option)\n- [reporting] split group usage per user (--split-user-groups option)\n- [feature] new policy criteria for Lustre FileSystems: ost_index\n- [reporting] detailed FS scan statistics in \"rbh-report -a\"\n- [misc.] 
fast and clean abort on ctrl^c (during scan, migration and purge)\n- [admin.] automatically disables features that are not defined in config file\n- [admin.] \"rbh-config backup_db\" helper to create a robinhood DB backup\n- [misc.] -V option displays Lustre version and release number\n- [tweak] changed 'watermark' parameters to 'threshold'\n- [tweak] changed 'notify_lw' and 'alert_hw' parameters to 'alert_low' and 'alert_high'\n- [database] alternative port or socket file can be used for MySQL connection\n- [database] limiting DB access rights for reporting command\n- [bugfix](major) fixed inconsistent pool names\n- [bugfix](minor) kill -HUP terminated the process if no trigger was defined\n- [bugfix](minor) 'unknown' status not correctly filtered in '--dump-status' report\n- [bugfix](tweak) added 'reload' in short help of SLES init script\n- [misc.] code cleaning, error message cleaning, removed some obsolete code\n- [feature] new robinhood flavor to track modifications in a Lustre v2 filesystem, and backup data to an external storage (current status: Alpha testing only).\n  As part of this feature:\n        - soft rm + command to retrieve removed files\n        - disaster recovery command\n        - \"--migrate-file\" option to archive a single file\n        - pre-maintenance mode to smoothly backup the whole filesystem content before a due date.\n\n\nChanges between version 2.2.2 and 2.2.3:\n\n- [feature] periodic purge trigger\n- [feature] options for controlling trigger notifications\n- [doc] pdf documentation updated\n\nChanges between version 2.2.1 and 2.2.2:\n\n- [bugfix] (major) fixed \"duplicate key\" errors\n- [bugfix] (major) FS scan sometimes blocks on Lustre 2\n- [misc.] 
integration to automatic testing suite (Hudson)\n\nChanges between version 2.2.0 and 2.2.1:\n\n- [feature] new purge command: --purge-class to apply purge policy on files in a given class\n- [feature] new migration command: --migrate-class to apply migration policy on files in a given class\n- [feature] support of syslog for logging\n- [report cmd] Added summary line to all reports, with total nbr entries and volume.\n- [report cmd] Added '-q' option to hide headers and footers in reports.\n- [optim.] changed primary key format to reduce DB requests\n- [misc.] new command 'repair_db' in rbh-config, to fix tables after a MySQL server crash.\n- [compat.] Support for Lustre MDT changelogs on Lustre v2.0 final\n- [compat.] port to FreeBSD\n- [admin] added 'reload' action to init.d script\n- [misc.] a gap in OST index list should displays a warning, not an error\n- [pkg] common spec file for both el4, el5 and el6\n- [bugfix] handling large UNIX groups (>4k) and long lists of alt groups.\n- [bugfix] retrieving Lustre pool fails with error \"Unsupported Lustre magic number\"\n- [bugfix] wrong class matching on OST pools when scanning\n- [bugfix] unescaped SQL strings caused error for filenames with single quotes\n- [bugfix] error in init script when RBH_OPT contains several options\n\nChanges between version 2.1.5 and 2.2.0:\n\n- [feature] fileclass union/intersection/negation\n- [feature] rbh-report displays last matched fileclass\n- [feature] new reporting command '--class-info' generates fileclass summary\n- [feature] new reporting option '--filter-class' to dump entries per fileclass\n- [feature] alert batching: send a mail summary instead of 1 mail per matching entry\n- [feature] alert improvements: named alerts, tweak changes\n- [feature] special wildcard '**' in 'path ' or 'tree' conditions matches any count of directory levels\n- [feature] quota-like purge triggers fully implemented (on group or user)\n- [feature] triggers on used inode count in filesystem\n- 
[feature] '--check-triggers' option to check triggers without purging files\n- [feature] notification can be sent when a high watermark is reached (for triggers)\n- [feature] rbh-config helper now supports batch commands\n- [feature] Lustre 2.0 ready\n- [optim.] configurable fileclass periodic matching to reduce calls to filesystem\n- [optim.] configurable attr/path periodic update in DB to reduce calls to filesystem\n- [bugfix] explicit trace when readdir fails\n- [bugfix] issue when filtering on fields with NULL values in DB\n- [bugfix] check migration timeout on last effective action, not on last queued entry\n- [bugfix] name-based conditions complaining about missing auto-generated fields\n- [bugfix] race condition when appplying policy lead to handle the same entry several times\n- [bugfix] removing removed directories from database for recursive rmdir policies\n- [misc.] added documented file in /etc/sysconfig for robinhood service parameters\n- [misc.] changing source directory layout\n- [misc.] 
documentation update\n\nChanges between version 2.1.4 and 2.1.5:\n\n- Major bug fix: incomplete database content after scan\n\nChanges between version 2.1.3 and 2.1.4:\n\n- New recursive rmdir policy (for TMP_FS_MGR purpose)\n- changed default value for max_pending_operations\n  (unlimited value could result in excessive memory usage)\n- removing useless fields and redundant information in database\n- rh-* commands renamed to rbh-*, to avoid conflicts and confusions\n  with RedHat commands.\n- check conflicting flags in configure\n\nChanges between version 2.1.2 and 2.1.3:\n\n- SQLite support (should only be used for testing purpose or small filesystems)\n- Support of relative paths in 'path' and 'tree' conditions\n- Migration timeout mechanism\n- Prompting for database admin password in rh-config script\n\nChanges between version 2.1.1 and 2.1.2:\n\n- New reporting commands: Dump all files (--dump-all) and dump files\n  by status (--dump-status).\n- New configuration helper script: \"rh-config\"\n- Made RPM relocatable\n- BUG FIX: wrong scan duration when using volume-based purge triggers\n- Lustre-HSM: Checking previous migrations status when restarting\n- Lustre-HSM: CL_TIME record support (bz 19505)\n- Lustre-HSM: multi-archive support (archive_num)\n- Lustre-HSM: new --sync option (immediately archive all modified files)\n- Lustre-HSM: changed --handle-events action switch to --readlog\n- Fixed SLES portability issues\n\nChanges between version 2.1.0.beta2 and 2.1.1:\n\n- Added new report options: --dump-ost, --dump-user, --dump-group\n- Added --filter-path option to reporting tool.\n- TMP FS MGR purpose ported to Lustre 2.0-alpha5 (including changelog\n  support).\n- documentation updates (in doc/admin_guides)\n- Each purpose has its own service and binary names,\n  to make it possible to install and run several robinhood with\n  differents purposes on the same machine.\n- Added '--disable-lustre' compilation switch for disabling Lustre specific\n  
features\n- Added '--disable-fid-support' compilation switch, to force addressing\n  entries by path, not by their Lustre fid.\n- Integration of new purpose \"SHERPA\" (software suite for cache management)\n- Generated RPM name includes lustre version it was built for.\n- report command displays help if is started without option.\n\nChanges between 2.1.0.beta1 and 2.1.0.beta2:\n\n- Extended attributes support in policy definition\n- 32 bits plateforms compatibility fixes\n- Fixed bug when using mysql4\n- Added parameter to force changelog polling\n- Fixed minor compilation warning\n- commands now search for config file in /etc/robinhood.d if no config file is\n  given on command line\n\nChanges from v2.0.1 to 2.1.0.beta1:\n\n- added '--dry-run' option instead of \"simulation_mode\" parameter in config file\n- added '--once' option, to perform an single pass of a given policy or action\n  and exit (same as '--one-shot' option).\n- Compatibility fixes for MySQL 4 and 5\n- Fixed dependencies on lustre include files.\n- Compatibility fixes for 32 bits platforms\n\nLustre-HSM specific features:\n- Porting to the new changelog interface (handling changelog records as\n  structures instead of text, and using CHANGELOG_FLAG_FOLLOW\n  and CHANGELOG_FLAG_BLOCK options)\n- Adapting to changes in changelog timestamp (secs+nano instead of jiffies)\n- Use fid as primary key in database schema (for better performance)\n- Added calls to llapi_hsm_request() to trigger migration, release, removal\n  in HSM.\n- Customizable migration hints to be passed to the copytool\n- Command line options to trigger manual migrations (by user, by OST...)\n- Deferred removal in HSM\n- Taking HSM file status into account (dirty, released, ...)\n- HSM event support\n- Changelog flag support (for UNLINK and HSM event)\n- Added '--ignore-policies' option to perform migration/purge to all eligible\n  files without checking policy conditions.\n\nChanges between v2.0-beta2 and v2.0.1:\n\n- New policy 
definition semantics, using filesets\n- Multiple fileset/policy associations\n- Several changes in configuration syntax, to avoid confusions\n- Support of OST pool names (on Lustre) for fileset definition and policies\n- Optimizations of policy application\n- Added features for Lustre-HSM\n\n"
  },
  {
    "path": "LICENSE.en.txt",
    "content": "Copyright CEA/DAM  (2004-2016)\n\nCeCILL-C FREE SOFTWARE LICENSE AGREEMENT\n\n\n    Notice\n\nThis Agreement is a Free Software license agreement that is the result\nof discussions between its authors in order to ensure compliance with\nthe two main principles guiding its drafting:\n\n    * firstly, compliance with the principles governing the distribution\n      of Free Software: access to source code, broad rights granted to\n      users,\n    * secondly, the election of a governing law, French law, with which\n      it is conformant, both as regards the law of torts and\n      intellectual property law, and the protection that it offers to\n      both authors and holders of the economic rights over software.\n\nThe authors of the CeCILL-C (for Ce[a] C[nrs] I[nria] L[ogiciel] L[ibre])\nlicense are:\n\nCommissariat  l'Energie Atomique - CEA, a public scientific, technical\nand industrial research establishment, having its principal place of\nbusiness at 25 rue Leblanc, immeuble Le Ponant D, 75015 Paris, France.\n\nCentre National de la Recherche Scientifique - CNRS, a public scientific\nand technological establishment, having its principal place of business\nat 3 rue Michel-Ange, 75794 Paris cedex 16, France.\n\nInstitut National de Recherche en Informatique et en Automatique -\nINRIA, a public scientific and technological establishment, having its\nprincipal place of business at Domaine de Voluceau, Rocquencourt, BP\n105, 78153 Le Chesnay cedex, France.\n\n\n    Preamble\n\nThe purpose of this Free Software license agreement is to grant users\nthe right to modify and re-use the software governed by this license.\n\nThe exercising of this right is conditional upon the obligation to make\navailable to the community the modifications made to the source code of\nthe software so as to contribute to its evolution.\n\nIn consideration of access to the source code and the rights to copy,\nmodify and redistribute granted by the license, users are provided 
only\nwith a limited warranty and the software's author, the holder of the\neconomic rights, and the successive licensors only have limited liability.\n\nIn this respect, the risks associated with loading, using, modifying\nand/or developing or reproducing the software by the user are brought to\nthe user's attention, given its Free Software status, which may make it\ncomplicated to use, with the result that its use is reserved for\ndevelopers and experienced professionals having in-depth computer\nknowledge. Users are therefore encouraged to load and test the\nsuitability of the software as regards their requirements in conditions\nenabling the security of their systems and/or data to be ensured and,\nmore generally, to use and operate it in the same conditions of\nsecurity. This Agreement may be freely reproduced and published,\nprovided it is not altered, and that no provisions are either added or\nremoved herefrom.\n\nThis Agreement may apply to any or all software for which the holder of\nthe economic rights decides to submit the use thereof to its provisions.\n\n\n    Article 1 - DEFINITIONS\n\nFor the purpose of this Agreement, when the following expressions\ncommence with a capital letter, they shall have the following meaning:\n\nAgreement: means this license agreement, and its possible subsequent\nversions and annexes.\n\nSoftware: means the software in its Object Code and/or Source Code form\nand, where applicable, its documentation, \"as is\" when the Licensee\naccepts the Agreement.\n\nInitial Software: means the Software in its Source Code and possibly its\nObject Code form and, where applicable, its documentation, \"as is\" when\nit is first distributed under the terms and conditions of the Agreement.\n\nModified Software: means the Software modified by at least one\nIntegrated Contribution.\n\nSource Code: means all the Software's instructions and program lines to\nwhich access is required so as to modify the Software.\n\nObject Code: means the 
binary files originating from the compilation of\nthe Source Code.\n\nHolder: means the holder(s) of the economic rights over the Initial\nSoftware.\n\nLicensee: means the Software user(s) having accepted the Agreement.\n\nContributor: means a Licensee having made at least one Integrated\nContribution.\n\nLicensor: means the Holder, or any other individual or legal entity, who\ndistributes the Software under the Agreement.\n\nIntegrated Contribution: means any or all modifications, corrections,\ntranslations, adaptations and/or new functions integrated into the\nSource Code by any or all Contributors.\n\nRelated Module: means a set of sources files including their\ndocumentation that, without modification to the Source Code, enables\nsupplementary functions or services in addition to those offered by the\nSoftware.\n\nDerivative Software: means any combination of the Software, modified or\nnot, and of a Related Module.\n\nParties: mean both the Licensee and the Licensor.\n\nThese expressions may be used both in singular and plural form.\n\n\n    Article 2 - PURPOSE\n\nThe purpose of the Agreement is the grant by the Licensor to the\nLicensee of a non-exclusive, transferable and worldwide license for the\nSoftware as set forth in Article 5 hereinafter for the whole term of the\nprotection granted by the rights over said Software. 
\n\n\n    Article 3 - ACCEPTANCE\n\n3.1 The Licensee shall be deemed as having accepted the terms and\nconditions of this Agreement upon the occurrence of the first of the\nfollowing events:\n\n    * (i) loading the Software by any or all means, notably, by\n      downloading from a remote server, or by loading from a physical\n      medium;\n    * (ii) the first time the Licensee exercises any of the rights\n      granted hereunder.\n\n3.2 One copy of the Agreement, containing a notice relating to the\ncharacteristics of the Software, to the limited warranty, and to the\nfact that its use is restricted to experienced users has been provided\nto the Licensee prior to its acceptance as set forth in Article 3.1\nhereinabove, and the Licensee hereby acknowledges that it has read and\nunderstood it.\n\n\n    Article 4 - EFFECTIVE DATE AND TERM\n\n\n      4.1 EFFECTIVE DATE\n\nThe Agreement shall become effective on the date when it is accepted by\nthe Licensee as set forth in Article 3.1.\n\n\n      4.2 TERM\n\nThe Agreement shall remain in force for the entire legal term of\nprotection of the economic rights over the Software.\n\n\n    Article 5 - SCOPE OF RIGHTS GRANTED\n\nThe Licensor hereby grants to the Licensee, who accepts, the following\nrights over the Software for any or all use, and for the term of the\nAgreement, on the basis of the terms and conditions set forth hereinafter.\n\nBesides, if the Licensor owns or comes to own one or more patents\nprotecting all or part of the functions of the Software or of its\ncomponents, the Licensor undertakes not to enforce the rights granted by\nthese patents against successive Licensees using, exploiting or\nmodifying the Software. 
If these patents are transferred, the Licensor\nundertakes to have the transferees subscribe to the obligations set\nforth in this paragraph.\n\n\n      5.1 RIGHT OF USE\n\nThe Licensee is authorized to use the Software, without any limitation\nas to its fields of application, with it being hereinafter specified\nthat this comprises:\n\n   1. permanent or temporary reproduction of all or part of the Software\n      by any or all means and in any or all form.\n\n   2. loading, displaying, running, or storing the Software on any or\n      all medium.\n\n   3. entitlement to observe, study or test its operation so as to\n      determine the ideas and principles behind any or all constituent\n      elements of said Software. This shall apply when the Licensee\n      carries out any or all loading, displaying, running, transmission\n      or storage operation as regards the Software, that it is entitled\n      to carry out hereunder.\n\n\n      5.2 RIGHT OF MODIFICATION\n\nThe right of modification includes the right to translate, adapt,\narrange, or make any or all modifications to the Software, and the right\nto reproduce the resulting software. 
It includes, in particular, the\nright to create a Derivative Software.\n\nThe Licensee is authorized to make any or all modification to the\nSoftware provided that it includes an explicit notice that it is the\nauthor of said modification and indicates the date of the creation thereof.\n\n\n      5.3 RIGHT OF DISTRIBUTION\n\nIn particular, the right of distribution includes the right to publish,\ntransmit and communicate the Software to the general public on any or\nall medium, and by any or all means, and the right to market, either in\nconsideration of a fee, or free of charge, one or more copies of the\nSoftware by any means.\n\nThe Licensee is further authorized to distribute copies of the modified\nor unmodified Software to third parties according to the terms and\nconditions set forth hereinafter.\n\n\n        5.3.1 DISTRIBUTION OF SOFTWARE WITHOUT MODIFICATION\n\nThe Licensee is authorized to distribute true copies of the Software in\nSource Code or Object Code form, provided that said distribution\ncomplies with all the provisions of the Agreement and is accompanied by:\n\n   1. a copy of the Agreement,\n\n   2. 
a notice relating to the limitation of both the Licensor's\n      warranty and liability as set forth in Articles 8 and 9,\n\nand that, in the event that only the Object Code of the Software is\nredistributed, the Licensee allows effective access to the full Source\nCode of the Software at a minimum during the entire period of its\ndistribution of the Software, it being understood that the additional\ncost of acquiring the Source Code shall not exceed the cost of\ntransferring the data.\n\n\n        5.3.2 DISTRIBUTION OF MODIFIED SOFTWARE\n\nWhen the Licensee makes an Integrated Contribution to the Software, the\nterms and conditions for the distribution of the resulting Modified\nSoftware become subject to all the provisions of this Agreement.\n\nThe Licensee is authorized to distribute the Modified Software, in\nsource code or object code form, provided that said distribution\ncomplies with all the provisions of the Agreement and is accompanied by:\n\n   1. a copy of the Agreement,\n\n   2. a notice relating to the limitation of both the Licensor's\n      warranty and liability as set forth in Articles 8 and 9,\n\nand that, in the event that only the object code of the Modified\nSoftware is redistributed, the Licensee allows effective access to the\nfull source code of the Modified Software at a minimum during the entire\nperiod of its distribution of the Modified Software, it being understood\nthat the additional cost of acquiring the source code shall not exceed\nthe cost of transferring the data.\n\n\n        5.3.3 DISTRIBUTION OF DERIVATIVE SOFTWARE\n\nWhen the Licensee creates Derivative Software, this Derivative Software\nmay be distributed under a license agreement other than this Agreement,\nsubject to compliance with the requirement to include a notice\nconcerning the rights over the Software as defined in Article 6.4.\nIn the event the creation of the Derivative Software required modification \nof the Source Code, the Licensee undertakes that:\n\n   1. 
the resulting Modified Software will be governed by this Agreement,\n   2. the Integrated Contributions in the resulting Modified Software\n      will be clearly identified and documented,\n   3. the Licensee will allow effective access to the source code of the\n      Modified Software, at a minimum during the entire period of\n      distribution of the Derivative Software, such that such\n      modifications may be carried over in a subsequent version of the\n      Software; it being understood that the additional cost of\n      purchasing the source code of the Modified Software shall not\n      exceed the cost of transferring the data.\n\n\n        5.3.4 COMPATIBILITY WITH THE CeCILL LICENSE\n\nWhen a Modified Software contains an Integrated Contribution subject to\nthe CeCILL license agreement, or when a Derivative Software contains a\nRelated Module subject to the CeCILL license agreement, the provisions\nset forth in the third item of Article 6.4 are optional.\n\n\n    Article 6 - INTELLECTUAL PROPERTY\n\n\n      6.1 OVER THE INITIAL SOFTWARE\n\nThe Holder owns the economic rights over the Initial Software. 
Any or\nall use of the Initial Software is subject to compliance with the terms\nand conditions under which the Holder has elected to distribute its work\nand no one shall be entitled to modify the terms and conditions for the\ndistribution of said Initial Software.\n\nThe Holder undertakes that the Initial Software will remain ruled at\nleast by this Agreement, for the duration set forth in Article 4.2.\n\n\n      6.2 OVER THE INTEGRATED CONTRIBUTIONS\n\nThe Licensee who develops an Integrated Contribution is the owner of the\nintellectual property rights over this Contribution as defined by\napplicable law.\n\n\n      6.3 OVER THE RELATED MODULES\n\nThe Licensee who develops a Related Module is the owner of the\nintellectual property rights over this Related Module as defined by\napplicable law and is free to choose the type of agreement that shall\ngovern its distribution under the conditions defined in Article 5.3.3.\n\n\n      6.4 NOTICE OF RIGHTS\n\nThe Licensee expressly undertakes:\n\n   1. not to remove, or modify, in any manner, the intellectual property\n      notices attached to the Software;\n\n   2. to reproduce said notices, in an identical manner, in the copies\n      of the Software modified or not;\n\n   3. 
to ensure that use of the Software, its intellectual property\n      notices and the fact that it is governed by the Agreement is\n      indicated in a text that is easily accessible, specifically from\n      the interface of any Derivative Software.\n\nThe Licensee undertakes not to directly or indirectly infringe the\nintellectual property rights of the Holder and/or Contributors on the\nSoftware and to take, where applicable, vis--vis its staff, any and all\nmeasures required to ensure respect of said intellectual property rights\nof the Holder and/or Contributors.\n\n\n    Article 7 - RELATED SERVICES\n\n7.1 Under no circumstances shall the Agreement oblige the Licensor to\nprovide technical assistance or maintenance services for the Software.\n\nHowever, the Licensor is entitled to offer this type of services. The\nterms and conditions of such technical assistance, and/or such\nmaintenance, shall be set forth in a separate instrument. Only the\nLicensor offering said maintenance and/or technical assistance services\nshall incur liability therefor.\n\n7.2 Similarly, any Licensor is entitled to offer to its licensees, under\nits sole responsibility, a warranty, that shall only be binding upon\nitself, for the redistribution of the Software and/or the Modified\nSoftware, under terms and conditions that it is free to decide. 
Said\nwarranty, and the financial terms and conditions of its application,\nshall be subject of a separate instrument executed between the Licensor\nand the Licensee.\n\n\n    Article 8 - LIABILITY\n\n8.1 Subject to the provisions of Article 8.2, the Licensee shall be\nentitled to claim compensation for any direct loss it may have suffered\nfrom the Software as a result of a fault on the part of the relevant\nLicensor, subject to providing evidence thereof.\n\n8.2 The Licensor's liability is limited to the commitments made under\nthis Agreement and shall not be incurred as a result of in particular:\n(i) loss due the Licensee's total or partial failure to fulfill its\nobligations, (ii) direct or consequential loss that is suffered by the\nLicensee due to the use or performance of the Software, and (iii) more\ngenerally, any consequential loss. In particular the Parties expressly\nagree that any or all pecuniary or business loss (i.e. loss of data,\nloss of profits, operating loss, loss of customers or orders,\nopportunity cost, any disturbance to business activities) or any or all\nlegal proceedings instituted against the Licensee by a third party,\nshall constitute consequential loss and shall not provide entitlement to\nany or all compensation from the Licensor.\n\n\n    Article 9 - WARRANTY\n\n9.1 The Licensee acknowledges that the scientific and technical\nstate-of-the-art when the Software was distributed did not enable all\npossible uses to be tested and verified, nor for the presence of\npossible defects to be detected. 
In this respect, the Licensee's\nattention has been drawn to the risks associated with loading, using,\nmodifying and/or developing and reproducing the Software which are\nreserved for experienced users.\n\nThe Licensee shall be responsible for verifying, by any or all means,\nthe suitability of the product for its requirements, its good working\norder, and for ensuring that it shall not cause damage to either persons\nor properties.\n\n9.2 The Licensor hereby represents, in good faith, that it is entitled\nto grant all the rights over the Software (including in particular the\nrights set forth in Article 5).\n\n9.3 The Licensee acknowledges that the Software is supplied \"as is\" by\nthe Licensor without any other express or tacit warranty, other than\nthat provided for in Article 9.2 and, in particular, without any warranty\nas to its commercial value, its secured, safe, innovative or relevant\nnature.\n\nSpecifically, the Licensor does not warrant that the Software is free\nfrom any error, that it will operate without interruption, that it will\nbe compatible with the Licensee's own equipment and software\nconfiguration, nor that it will meet the Licensee's requirements.\n\n9.4 The Licensor does not either expressly or tacitly warrant that the\nSoftware does not infringe any third party intellectual property right\nrelating to a patent, software or any other property right. Therefore,\nthe Licensor disclaims any and all liability towards the Licensee\narising out of any or all proceedings for infringement that may be\ninstituted in respect of the use, modification and redistribution of the\nSoftware. Nevertheless, should such proceedings be instituted against\nthe Licensee, the Licensor shall provide it with technical and legal\nassistance for its defense. Such technical and legal assistance shall be\ndecided on a case-by-case basis between the relevant Licensor and the\nLicensee pursuant to a memorandum of understanding. 
The Licensor\ndisclaims any and all liability as regards the Licensee's use of the\nname of the Software. No warranty is given as regards the existence of\nprior rights over the name of the Software or as regards the existence\nof a trademark.\n\n\n    Article 10 - TERMINATION\n\n10.1 In the event of a breach by the Licensee of its obligations\nhereunder, the Licensor may automatically terminate this Agreement\nthirty (30) days after notice has been sent to the Licensee and has\nremained ineffective.\n\n10.2 A Licensee whose Agreement is terminated shall no longer be\nauthorized to use, modify or distribute the Software. However, any\nlicenses that it may have granted prior to termination of the Agreement\nshall remain valid subject to their having been granted in compliance\nwith the terms and conditions hereof.\n\n\n    Article 11 - MISCELLANEOUS\n\n\n      11.1 EXCUSABLE EVENTS\n\nNeither Party shall be liable for any or all delay, or failure to\nperform the Agreement, that may be attributable to an event of force\nmajeure, an act of God or an outside cause, such as defective\nfunctioning or interruptions of the electricity or telecommunications\nnetworks, network paralysis following a virus attack, intervention by\ngovernment authorities, natural disasters, water damage, earthquakes,\nfire, explosions, strikes and labor unrest, war, etc.\n\n11.2 Any failure by either Party, on one or more occasions, to invoke\none or more of the provisions hereof, shall under no circumstances be\ninterpreted as being a waiver by the interested Party of its right to\ninvoke said provision(s) subsequently.\n\n11.3 The Agreement cancels and replaces any or all previous agreements,\nwhether written or oral, between the Parties and having the same\npurpose, and constitutes the entirety of the agreement between said\nParties concerning said purpose. 
No supplement or modification to the\nterms and conditions hereof shall be effective as between the Parties\nunless it is made in writing and signed by their duly authorized\nrepresentatives.\n\n11.4 In the event that one or more of the provisions hereof were to\nconflict with a current or future applicable act or legislative text,\nsaid act or legislative text shall prevail, and the Parties shall make\nthe necessary amendments so as to comply with said act or legislative\ntext. All other provisions shall remain effective. Similarly, invalidity\nof a provision of the Agreement, for any reason whatsoever, shall not\ncause the Agreement as a whole to be invalid.\n\n\n      11.5 LANGUAGE\n\nThe Agreement is drafted in both French and English and both versions\nare deemed authentic.\n\n\n    Article 12 - NEW VERSIONS OF THE AGREEMENT\n\n12.1 Any person is authorized to duplicate and distribute copies of this\nAgreement.\n\n12.2 So as to ensure coherence, the wording of this Agreement is\nprotected and may only be modified by the authors of the License, who\nreserve the right to periodically publish updates or new versions of the\nAgreement, each with a separate number. These subsequent versions may\naddress new issues encountered by Free Software.\n\n12.3 Any Software distributed under a given version of the Agreement may\nonly be subsequently distributed under the same version of the Agreement\nor a subsequent version.\n\n\n    Article 13 - GOVERNING LAW AND JURISDICTION\n\n13.1 The Agreement is governed by French law. The Parties agree to\nendeavor to seek an amicable solution to any disagreements or disputes\nthat may arise during the performance of the Agreement.\n\n13.2 Failing an amicable solution within two (2) months as from their\noccurrence, and unless emergency proceedings are necessary, the\ndisagreements or disputes shall be referred to the Paris Courts having\njurisdiction, by the more diligent Party.\n\n\nVersion 1.0 dated 2006-09-05.\n"
  },
  {
    "path": "LICENSE.fr.txt",
    "content": "Copyright CEA/DAM  (2004-2013)\n\nCONTRAT DE LICENCE DE LOGICIEL LIBRE CeCILL-C\n\n\n    Avertissement\n\nCe contrat est une licence de logiciel libre issue d'une concertation\nentre ses auteurs afin que le respect de deux grands principes prside \nsa rdaction:\n\n    * d'une part, le respect des principes de diffusion des logiciels\n      libres: accs au code source, droits tendus confrs aux\n      utilisateurs,\n    * d'autre part, la dsignation d'un droit applicable, le droit\n      franais, auquel elle est conforme, tant au regard du droit de la\n      responsabilit civile que du droit de la proprit intellectuelle\n      et de la protection qu'il offre aux auteurs et titulaires des\n      droits patrimoniaux sur un logiciel.\n\nLes auteurs de la licence CeCILL-C (pour Ce[a] C[nrs] I[nria] L[ogiciel]\nL[ibre]) sont:\n\nCommissariat  l'Energie Atomique - CEA, tablissement public de\nrecherche  caractre scientifique, technique et industriel, dont le\nsige est situ 25 rue Leblanc, immeuble Le Ponant D, 75015 Paris.\n\nCentre National de la Recherche Scientifique - CNRS, tablissement\npublic  caractre scientifique et technologique, dont le sige est\nsitu 3 rue Michel-Ange, 75794 Paris cedex 16.\n\nInstitut National de Recherche en Informatique et en Automatique -\nINRIA, tablissement public  caractre scientifique et technologique,\ndont le sige est situ Domaine de Voluceau, Rocquencourt, BP 105, 78153\nLe Chesnay cedex.\n\n\n    Prambule\n\nCe contrat est une licence de logiciel libre dont l'objectif est de\nconfrer aux utilisateurs la libert de modifier et de rutiliser le\nlogiciel rgi par cette licence.\n\nL'exercice de cette libert est assorti d'une obligation de remettre \nla disposition de la communaut les modifications apportes au code\nsource du logiciel afin de contribuer  son volution.\n\nL'accessibilit au code source et les droits de copie, de modification\net de redistribution qui dcoulent de ce contrat ont pour contrepartie\nde n'offrir 
aux utilisateurs qu'une garantie limite et de ne faire\npeser sur l'auteur du logiciel, le titulaire des droits patrimoniaux et\nles concdants successifs qu'une responsabilit restreinte.\n\nA cet gard l'attention de l'utilisateur est attire sur les risques\nassocis au chargement,  l'utilisation,  la modification et/ou au\ndveloppement et  la reproduction du logiciel par l'utilisateur tant\ndonn sa spcificit de logiciel libre, qui peut le rendre complexe \nmanipuler et qui le rserve donc  des dveloppeurs ou des\nprofessionnels avertis possdant des connaissances informatiques\napprofondies. Les utilisateurs sont donc invits  charger et tester\nl'adquation du logiciel  leurs besoins dans des conditions permettant\nd'assurer la scurit de leurs systmes et/ou de leurs donnes et, plus\ngnralement,  l'utiliser et l'exploiter dans les mmes conditions de\nscurit. Ce contrat peut tre reproduit et diffus librement, sous\nrserve de le conserver en l'tat, sans ajout ni suppression de clauses.\n\nCe contrat est susceptible de s'appliquer  tout logiciel dont le\ntitulaire des droits patrimoniaux dcide de soumettre l'exploitation aux\ndispositions qu'il contient.\n\n\n    Article 1 - DEFINITIONS\n\nDans ce contrat, les termes suivants, lorsqu'ils seront crits avec une\nlettre capitale, auront la signification suivante:\n\nContrat: dsigne le prsent contrat de licence, ses ventuelles versions\npostrieures et annexes.\n\nLogiciel: dsigne le logiciel sous sa forme de Code Objet et/ou de Code\nSource et le cas chant sa documentation, dans leur tat au moment de\nl'acceptation du Contrat par le Licenci.\n\nLogiciel Initial: dsigne le Logiciel sous sa forme de Code Source et\nventuellement de Code Objet et le cas chant sa documentation, dans\nleur tat au moment de leur premire diffusion sous les termes du Contrat.\n\nLogiciel Modifi: dsigne le Logiciel modifi par au moins une\nContribution Intgre.\n\nCode Source: dsigne l'ensemble des instructions et des lignes de\nprogramme du Logiciel et 
auquel l'accs est ncessaire en vue de\nmodifier le Logiciel.\n\nCode Objet: dsigne les fichiers binaires issus de la compilation du\nCode Source.\n\nTitulaire: dsigne le ou les dtenteurs des droits patrimoniaux d'auteur\nsur le Logiciel Initial.\n\nLicenci: dsigne le ou les utilisateurs du Logiciel ayant accept le\nContrat.\n\nContributeur: dsigne le Licenci auteur d'au moins une Contribution\nIntgre.\n\nConcdant: dsigne le Titulaire ou toute personne physique ou morale\ndistribuant le Logiciel sous le Contrat.\n\nContribution Intgre: dsigne l'ensemble des modifications,\ncorrections, traductions, adaptations et/ou nouvelles fonctionnalits\nintgres dans le Code Source par tout Contributeur.\n\nModule Li: dsigne un ensemble de fichiers sources y compris leur\ndocumentation qui, sans modification du Code Source, permet de raliser\ndes fonctionnalits ou services supplmentaires  ceux fournis par le\nLogiciel.\n\nLogiciel Driv: dsigne toute combinaison du Logiciel, modifi ou non,\net d'un Module Li.\n\nParties: dsigne collectivement le Licenci et le Concdant.\n\nCes termes s'entendent au singulier comme au pluriel.\n\n\n    Article 2 - OBJET\n\nLe Contrat a pour objet la concession par le Concdant au Licenci d'une\nlicence non exclusive, cessible et mondiale du Logiciel telle que\ndfinie ci-aprs  l'article 5 pour toute la dure de protection des droits\nportant sur ce Logiciel.\n\n\n    Article 3 - ACCEPTATION\n\n3.1 L'acceptation par le Licenci des termes du Contrat est rpute\nacquise du fait du premier des faits suivants:\n\n    * (i) le chargement du Logiciel par tout moyen notamment par\n      tlchargement  partir d'un serveur distant ou par chargement \n      partir d'un support physique;\n    * (ii) le premier exercice par le Licenci de l'un quelconque des\n      droits concds par le Contrat.\n\n3.2 Un exemplaire du Contrat, contenant notamment un avertissement\nrelatif aux spcificits du Logiciel,  la restriction de garantie et \nla limitation  un usage par des 
utilisateurs expriments a t mis \ndisposition du Licenci pralablement  son acceptation telle que\ndfinie  l'article 3.1 ci dessus et le Licenci reconnat en avoir pris\nconnaissance.\n\n\n    Article 4 - ENTREE EN VIGUEUR ET DUREE\n\n\n      4.1 ENTREE EN VIGUEUR\n\nLe Contrat entre en vigueur  la date de son acceptation par le Licenci\ntelle que dfinie en 3.1.\n\n\n      4.2 DUREE\n\nLe Contrat produira ses effets pendant toute la dure lgale de\nprotection des droits patrimoniaux portant sur le Logiciel.\n\n\n    Article 5 - ETENDUE DES DROITS CONCEDES\n\nLe Concdant concde au Licenci, qui accepte, les droits suivants sur\nle Logiciel pour toutes destinations et pour la dure du Contrat dans\nles conditions ci-aprs dtailles.\n\nPar ailleurs, si le Concdant dtient ou venait  dtenir un ou\nplusieurs brevets d'invention protgeant tout ou partie des\nfonctionnalits du Logiciel ou de ses composants, il s'engage  ne pas\nopposer les ventuels droits confrs par ces brevets aux Licencis\nsuccessifs qui utiliseraient, exploiteraient ou modifieraient le\nLogiciel. En cas de cession de ces brevets, le Concdant s'engage \nfaire reprendre les obligations du prsent alina aux cessionnaires.\n\n\n      5.1 DROIT D'UTILISATION\n\nLe Licenci est autoris  utiliser le Logiciel, sans restriction quant\naux domaines d'application, tant ci-aprs prcis que cela comporte:\n\n   1. la reproduction permanente ou provisoire du Logiciel en tout ou\n      partie par tout moyen et sous toute forme.\n\n   2. le chargement, l'affichage, l'excution, ou le stockage du\n      Logiciel sur tout support.\n\n   3. 
la possibilit d'en observer, d'en tudier, ou d'en tester le\n      fonctionnement afin de dterminer les ides et principes qui sont\n       la base de n'importe quel lment de ce Logiciel; et ceci,\n      lorsque le Licenci effectue toute opration de chargement,\n      d'affichage, d'excution, de transmission ou de stockage du\n      Logiciel qu'il est en droit d'effectuer en vertu du Contrat.\n\n\n      5.2 DROIT DE MODIFICATION\n\nLe droit de modification comporte le droit de traduire, d'adapter,\nd'arranger ou d'apporter toute autre modification au Logiciel et le\ndroit de reproduire le logiciel en rsultant. Il comprend en particulier\nle droit de crer un Logiciel Driv.\n\nLe Licenci est autoris  apporter toute modification au Logiciel sous\nrserve de mentionner, de faon explicite, son nom en tant qu'auteur de\ncette modification et la date de cration de celle-ci.\n\n\n      5.3 DROIT DE DISTRIBUTION\n\nLe droit de distribution comporte notamment le droit de diffuser, de\ntransmettre et de communiquer le Logiciel au public sur tout support et\npar tout moyen ainsi que le droit de mettre sur le march  titre\nonreux ou gratuit, un ou des exemplaires du Logiciel par tout procd.\n\nLe Licenci est autoris  distribuer des copies du Logiciel, modifi ou\nnon,  des tiers dans les conditions ci-aprs dtailles.\n\n\n        5.3.1 DISTRIBUTION DU LOGICIEL SANS MODIFICATION\n\nLe Licenci est autoris  distribuer des copies conformes du Logiciel,\nsous forme de Code Source ou de Code Objet,  condition que cette\ndistribution respecte les dispositions du Contrat dans leur totalit et\nsoit accompagne:\n\n   1. d'un exemplaire du Contrat,\n\n   2. 
d'un avertissement relatif  la restriction de garantie et de\n      responsabilit du Concdant telle que prvue aux articles 8\n      et 9,\n\net que, dans le cas o seul le Code Objet du Logiciel est redistribu,\nle Licenci permette un accs effectif au Code Source complet du\nLogiciel pendant au moins toute la dure de sa distribution du Logiciel,\ntant entendu que le cot additionnel d'acquisition du Code Source ne\ndevra pas excder le simple cot de transfert des donnes.\n\n\n        5.3.2 DISTRIBUTION DU LOGICIEL MODIFIE\n\nLorsque le Licenci apporte une Contribution Intgre au Logiciel, les\nconditions de distribution du Logiciel Modifi en rsultant sont alors\nsoumises  l'intgralit des dispositions du Contrat.\n\nLe Licenci est autoris  distribuer le Logiciel Modifi sous forme de\ncode source ou de code objet,  condition que cette distribution\nrespecte les dispositions du Contrat dans leur totalit et soit\naccompagne:\n\n   1. d'un exemplaire du Contrat,\n\n   2. d'un avertissement relatif  la restriction de garantie et de\n      responsabilit du Concdant telle que prvue aux articles 8\n      et 9,\n\net que, dans le cas o seul le code objet du Logiciel Modifi est\nredistribu, le Licenci permette un accs effectif  son code source\ncomplet pendant au moins toute la dure de sa distribution du Logiciel\nModifi, tant entendu que le cot additionnel d'acquisition du code\nsource ne devra pas excder le simple cot de transfert des donnes.\n\n\n        5.3.3 DISTRIBUTION DU LOGICIEL DERIVE\n\nLorsque le Licenci cre un Logiciel Driv, ce Logiciel Driv peut\ntre distribu sous un contrat de licence autre que le prsent Contrat \ncondition de respecter les obligations de mention des droits sur le\nLogiciel telles que dfinies  l'article 6.4. Dans le cas o la cration du\nLogiciel Driv a ncessit une modification du Code Source le licenci\ns'engage  ce que: \n\n   1. le Logiciel Modifi correspondant  cette modification soit rgi\n      par le prsent Contrat,\n   2. 
les Contributions Intgres dont le Logiciel Modifi rsulte\n      soient clairement identifies et documentes,\n   3. le Licenci permette un accs effectif au code source du Logiciel\n      Modifi, pendant au moins toute la dure de la distribution du\n      Logiciel Driv, de telle sorte que ces modifications puissent\n      tre reprises dans une version ultrieure du Logiciel, tant\n      entendu que le cot additionnel d'acquisition du code source du\n      Logiciel Modifi ne devra pas excder le simple cot du transfert\n      des donnes.\n\n\n        5.3.4 COMPATIBILITE AVEC LA LICENCE CeCILL\n\nLorsqu'un Logiciel Modifi contient une Contribution Intgre soumise au\ncontrat de licence CeCILL, ou lorsqu'un Logiciel Driv contient un\nModule Li soumis au contrat de licence CeCILL, les stipulations prvues\nau troisime item de l'article 6.4 sont facultatives.\n\n\n    Article 6 - PROPRIETE INTELLECTUELLE\n\n\n      6.1 SUR LE LOGICIEL INITIAL\n\nLe Titulaire est dtenteur des droits patrimoniaux sur le Logiciel\nInitial. Toute utilisation du Logiciel Initial est soumise au respect\ndes conditions dans lesquelles le Titulaire a choisi de diffuser son\noeuvre et nul autre n'a la facult de modifier les conditions de\ndiffusion de ce Logiciel Initial.\n\nLe Titulaire s'engage  ce que le Logiciel Initial reste au moins rgi\npar le Contrat et ce, pour la dure vise  l'article 4.2.\n\n\n      6.2 SUR LES CONTRIBUTIONS INTEGREES\n\nLe Licenci qui a dvelopp une Contribution Intgre est titulaire sur\ncelle-ci des droits de proprit intellectuelle dans les conditions\ndfinies par la lgislation applicable.\n\n\n      6.3 SUR LES MODULES LIES\n\nLe Licenci qui a dvelopp un Module Li est titulaire sur celui-ci des\ndroits de proprit intellectuelle dans les conditions dfinies par la\nlgislation applicable et reste libre du choix du contrat rgissant sa\ndiffusion dans les conditions dfinies  l'article 5.3.3.\n\n\n      6.4 MENTIONS DES DROITS\n\nLe Licenci s'engage expressment:\n\n   1.  
ne pas supprimer ou modifier de quelque manire que ce soit les\n      mentions de proprit intellectuelle apposes sur le Logiciel;\n\n   2.  reproduire  l'identique lesdites mentions de proprit\n      intellectuelle sur les copies du Logiciel modifi ou non;\n\n   3.  faire en sorte que l'utilisation du Logiciel, ses mentions de\n      proprit intellectuelle et le fait qu'il est rgi par le Contrat\n      soient indiqus dans un texte facilement accessible notamment\n      depuis l'interface de tout Logiciel Driv.\n\nLe Licenci s'engage  ne pas porter atteinte, directement ou\nindirectement, aux droits de proprit intellectuelle du Titulaire et/ou\ndes Contributeurs sur le Logiciel et  prendre, le cas chant, \nl'gard de son personnel toutes les mesures ncessaires pour assurer le\nrespect des dits droits de proprit intellectuelle du Titulaire et/ou\ndes Contributeurs.\n\n\n    Article 7 - SERVICES ASSOCIES\n\n7.1 Le Contrat n'oblige en aucun cas le Concdant  la ralisation de\nprestations d'assistance technique ou de maintenance du Logiciel.\n\nCependant le Concdant reste libre de proposer ce type de services. Les\ntermes et conditions d'une telle assistance technique et/ou d'une telle\nmaintenance seront alors dtermins dans un acte spar. Ces actes de\nmaintenance et/ou assistance technique n'engageront que la seule\nresponsabilit du Concdant qui les propose.\n\n7.2 De mme, tout Concdant est libre de proposer, sous sa seule\nresponsabilit,  ses licencis une garantie, qui n'engagera que lui,\nlors de la redistribution du Logiciel et/ou du Logiciel Modifi et ce,\ndans les conditions qu'il souhaite. 
Cette garantie et les modalits\nfinancires de son application feront l'objet d'un acte spar entre le\nConcdant et le Licenci.\n\n\n    Article 8 - RESPONSABILITE\n\n8.1 Sous rserve des dispositions de l'article 8.2, le Licenci a la \nfacult, sous rserve de prouver la faute du Concdant concern, de\nsolliciter la rparation du prjudice direct qu'il subirait du fait du\nLogiciel et dont il apportera la preuve.\n\n8.2 La responsabilit du Concdant est limite aux engagements pris en\napplication du Contrat et ne saurait tre engage en raison notamment:\n(i) des dommages dus  l'inexcution, totale ou partielle, de ses\nobligations par le Licenci, (ii) des dommages directs ou indirects\ndcoulant de l'utilisation ou des performances du Logiciel subis par le\nLicenci et (iii) plus gnralement d'un quelconque dommage indirect. En\nparticulier, les Parties conviennent expressment que tout prjudice\nfinancier ou commercial (par exemple perte de donnes, perte de\nbnfices, perte d'exploitation, perte de clientle ou de commandes,\nmanque  gagner, trouble commercial quelconque) ou toute action dirige\ncontre le Licenci par un tiers, constitue un dommage indirect et\nn'ouvre pas droit  rparation par le Concdant.\n\n\n    Article 9 - GARANTIE\n\n9.1 Le Licenci reconnat que l'tat actuel des connaissances\nscientifiques et techniques au moment de la mise en circulation du\nLogiciel ne permet pas d'en tester et d'en vrifier toutes les\nutilisations ni de dtecter l'existence d'ventuels dfauts. 
L'attention\ndu Licenci a t attire sur ce point sur les risques associs au\nchargement,  l'utilisation, la modification et/ou au dveloppement et \nla reproduction du Logiciel qui sont rservs  des utilisateurs avertis.\n\nIl relve de la responsabilit du Licenci de contrler, par tous\nmoyens, l'adquation du produit  ses besoins, son bon fonctionnement et\nde s'assurer qu'il ne causera pas de dommages aux personnes et aux biens.\n\n9.2 Le Concdant dclare de bonne foi tre en droit de concder\nl'ensemble des droits attachs au Logiciel (comprenant notamment les\ndroits viss  l'article 5).\n\n9.3 Le Licenci reconnat que le Logiciel est fourni \"en l'tat\" par le\nConcdant sans autre garantie, expresse ou tacite, que celle prvue \nl'article 9.2 et notamment sans aucune garantie sur sa valeur commerciale,\nson caractre scuris, innovant ou pertinent.\n\nEn particulier, le Concdant ne garantit pas que le Logiciel est exempt\nd'erreur, qu'il fonctionnera sans interruption, qu'il sera compatible\navec l'quipement du Licenci et sa configuration logicielle ni qu'il\nremplira les besoins du Licenci.\n\n9.4 Le Concdant ne garantit pas, de manire expresse ou tacite, que le\nLogiciel ne porte pas atteinte  un quelconque droit de proprit\nintellectuelle d'un tiers portant sur un brevet, un logiciel ou sur tout\nautre droit de proprit. Ainsi, le Concdant exclut toute garantie au\nprofit du Licenci contre les actions en contrefaon qui pourraient tre\ndiligentes au titre de l'utilisation, de la modification, et de la\nredistribution du Logiciel. Nanmoins, si de telles actions sont\nexerces contre le Licenci, le Concdant lui apportera son aide\ntechnique et juridique pour sa dfense. Cette aide technique et\njuridique est dtermine au cas par cas entre le Concdant concern et\nle Licenci dans le cadre d'un protocole d'accord. Le Concdant dgage\ntoute responsabilit quant  l'utilisation de la dnomination du\nLogiciel par le Licenci. 
Aucune garantie n'est apporte quant \nl'existence de droits antrieurs sur le nom du Logiciel et sur\nl'existence d'une marque.\n\n\n    Article 10 - RESILIATION\n\n10.1 En cas de manquement par le Licenci aux obligations mises  sa\ncharge par le Contrat, le Concdant pourra rsilier de plein droit le\nContrat trente (30) jours aprs notification adresse au Licenci et\nreste sans effet.\n\n10.2 Le Licenci dont le Contrat est rsili n'est plus autoris \nutiliser, modifier ou distribuer le Logiciel. Cependant, toutes les\nlicences qu'il aura concdes antrieurement  la rsiliation du Contrat\nresteront valides sous rserve qu'elles aient t effectues en\nconformit avec le Contrat.\n\n\n    Article 11 - DISPOSITIONS DIVERSES\n\n\n      11.1 CAUSE EXTERIEURE\n\nAucune des Parties ne sera responsable d'un retard ou d'une dfaillance\nd'excution du Contrat qui serait d  un cas de force majeure, un cas\nfortuit ou une cause extrieure, telle que, notamment, le mauvais\nfonctionnement ou les interruptions du rseau lectrique ou de\ntlcommunication, la paralysie du rseau lie  une attaque\ninformatique, l'intervention des autorits gouvernementales, les\ncatastrophes naturelles, les dgts des eaux, les tremblements de terre,\nle feu, les explosions, les grves et les conflits sociaux, l'tat de\nguerre...\n\n11.2 Le fait, par l'une ou l'autre des Parties, d'omettre en une ou\nplusieurs occasions de se prvaloir d'une ou plusieurs dispositions du\nContrat, ne pourra en aucun cas impliquer renonciation par la Partie\nintresse  s'en prvaloir ultrieurement.\n\n11.3 Le Contrat annule et remplace toute convention antrieure, crite\nou orale, entre les Parties sur le mme objet et constitue l'accord\nentier entre les Parties sur cet objet. 
Aucune addition ou modification\naux termes du Contrat n'aura d'effet  l'gard des Parties  moins\nd'tre faite par crit et signe par leurs reprsentants dment habilits.\n\n11.4 Dans l'hypothse o une ou plusieurs des dispositions du Contrat\ns'avrerait contraire  une loi ou  un texte applicable, existants ou\nfuturs, cette loi ou ce texte prvaudrait, et les Parties feraient les\namendements ncessaires pour se conformer  cette loi ou  ce texte.\nToutes les autres dispositions resteront en vigueur. De mme, la\nnullit, pour quelque raison que ce soit, d'une des dispositions du\nContrat ne saurait entraner la nullit de l'ensemble du Contrat.\n\n\n      11.5 LANGUE\n\nLe Contrat est rdig en langue franaise et en langue anglaise, ces\ndeux versions faisant galement foi.\n\n\n    Article 12 - NOUVELLES VERSIONS DU CONTRAT\n\n12.1 Toute personne est autorise  copier et distribuer des copies de\nce Contrat.\n\n12.2 Afin d'en prserver la cohrence, le texte du Contrat est protg\net ne peut tre modifi que par les auteurs de la licence, lesquels se\nrservent le droit de publier priodiquement des mises  jour ou de\nnouvelles versions du Contrat, qui possderont chacune un numro\ndistinct. Ces versions ultrieures seront susceptibles de prendre en\ncompte de nouvelles problmatiques rencontres par les logiciels libres.\n\n12.3 Tout Logiciel diffus sous une version donne du Contrat ne pourra\nfaire l'objet d'une diffusion ultrieure que sous la mme version du\nContrat ou une version postrieure.\n\n\n    Article 13 - LOI APPLICABLE ET COMPETENCE TERRITORIALE\n\n13.1 Le Contrat est rgi par la loi franaise. 
Les Parties conviennent\nde tenter de rgler  l'amiable les diffrends ou litiges qui\nviendraient  se produire par suite ou  l'occasion du Contrat.\n\n13.2 A dfaut d'accord amiable dans un dlai de deux (2) mois  compter\nde leur survenance et sauf situation relevant d'une procdure d'urgence,\nles diffrends ou litiges seront ports par la Partie la plus diligente\ndevant les Tribunaux comptents de Paris.\n\n\nVersion 1.0 du 2006-09-05.\n"
  },
  {
    "path": "Makefile.am",
    "content": "ACLOCAL_AMFLAGS = -I autotools/m4\n\ncheck-valgrind:\n\tmake -C src/tests check-valgrind\n\nSUBDIRS=src scripts tests web_gui man doc\n\nrpm_dir=`pwd`/rpms\n\nif LUSTRE\n  rpmbuild_opt=--with lustre @LDEFINES@\nif USER_LOVEA\n  # Lustre 2.x only\n  rpmbuild_opt += --with recovtools\nendif\n\nelse\n  rpmbuild_opt=--without lustre\nendif\n\nif !COMMON_RPMS\n  rpmbuild_opt+=--without common_rpms\nendif\n\nif USE_MYSQL_DB\n  rpmbuild_opt += --with mysql\n  dbname=mysql\n  dbversion=`mysql_config --version | cut -d \".\" -f 1-2`\nelse\n  rpmbuild_opt += --with sqlite\n  dbname=sqlite\n  dbversion=`rpm -qa \"sqlite-devel*\" --qf \"%{Version}\\n\" | tail -1 | cut -d \".\" -f 1-2`\nendif\n\nif LUSTRE_HSM\n  rpmbuild_opt += --with lhsm\nelse\n  rpmbuild_opt += --without lhsm\nendif\nif SHOOK\n  rpmbuild_opt += --with shook\nelse\n  rpmbuild_opt += --without shook\nendif\nif HSM_LITE\n  rpmbuild_opt += --with backup\nelse\n  rpmbuild_opt += --without backup\nendif\nif USE_JEMALLOC\n  rpmbuild_opt += --with jemalloc\nelse\n  rpmbuild_opt += --without jemalloc\nendif\n\nrpmbuild_opt += --define=\"configure_flags @ac_configure_args@\"\n\nnew: clean all\n\nmydist:\n\tumask 022; \\\n\tTAR_OPTIONS='--owner=0 --group=0 --numeric-owner --mode=u+rw,go+r-ws' $(MAKE) dist-gzip\n\nrpms: robinhood.spec mydist\n\tmkdir -p $(rpm_dir)/BUILD $(rpm_dir)/SPECS $(rpm_dir)/SRPMS $(rpm_dir)/RPMS\n\trpmbuild $(rpmbuild_opt) --define=\"_topdir $(rpm_dir)\" \\\n\t\t--define=\"dbversion $(dbversion)\" --define=\"_prefix $(prefix)\" \\\n\t\t--define=\"_sysconfdir @CONFDIR@\" \\\n\t\t-ta $(distdir).tar.gz\n\nrpm: robinhood.spec mydist\n\tmkdir -p $(rpm_dir)/BUILD $(rpm_dir)/SPECS $(rpm_dir)/SRPMS $(rpm_dir)/RPMS\n\trpmbuild $(rpmbuild_opt) --define=\"_topdir $(rpm_dir)\" --define=\"_prefix $(prefix)\" \\\n\t\t--define=\"dbversion $(dbversion)\" --define=\"_sysconfdir @CONFDIR@\" \\\n\t\t--define=\"_prefix $(prefix)\" -tb $(distdir).tar.gz\n\nsrpm: robinhood.spec mydist\n\tmkdir -p 
$(rpm_dir)/SRPMS $(rpm_dir)/SOURCES\n\tcp -f $(distdir).tar.gz $(rpm_dir)/SOURCES/.\n\trpmbuild --without lustre --define=\"_topdir $(rpm_dir)\" -bs robinhood.spec\n\ncppcheck:\n\tcppcheck -j12 -v --force --enable=all -I`pwd`/src/include -DHAVE_CONFIG_H @PURPOSE_CFLAGS@ @DB_CFLAGS@ src/\n\nlcov:\n\tlcov --capture --directory . --output-file rbh_cov.info && genhtml rbh_cov.info --output-directory rbh_cov\nmans:\n\tchmod +x ./scripts/make_mans.sh && ./scripts/make_mans.sh\n\n# Wildcard paths need to be prefixed with $(srcdir) for out-of-tree make dist\nEXTRA_DIST= robinhood.spec robinhood.spec.in \\\n\tautogen.sh \\\n\tLICENSE.fr.txt LICENSE.en.txt README.md \\\n\tdoc/admin_guides \\\n\tdoc/install_webgui.txt \\\n\tChangeLog \\\n\t$(srcdir)/man/*.1\n\n\n\n#\tgoodies/vim/robinhood.vim\n"
  },
  {
    "path": "README.md",
    "content": "## ![robinhood logo](http://robinhood.sourceforge.net/images/logo_rh.gif) Robinhood Policy Engine\n                                                                                 \nRobinhood Policy Engine is a versatile tool to manage contents of large file systems. It maintains a replicate of filesystem medatada in a database that can be queried at will. It makes it possible to schedule mass action on filesystem entries by defining attribute-based policies, provides fast 'find' and 'du' enhanced clones, gives to administrators an overall view of filesystem contents through its web UI and command line tools.\nIt supports any POSIX filesystem and implements advanced features for  [Lustre](https://www.lustre.org) filesystems (list/purge files per OST or pool, read MDT changelogs...)\n                                                                                 \nOriginally developped for HPC, it has been designed to perform all its tasks in parallel, so it is particularly adapted for running on large filesystems with millions of entries and petabytes of data. But of course, you can take benefits of all its features for managing smaller filesystems.\n                                                                                 \nRobinhood is distributed under the [CeCILL-C](http://www.cecill.info/licences/Licence_CeCILL-C_V1-en.html) license, which is a French transposition of the [GNU LGPL](http://www.gnu.org/licenses/lgpl.html) and is fully LGPL-compatible.\n                                                                                 \n### Main features                                                                \n* Policy Engine: schedule actions on filesystem entries according to admin-defined criteria, based on entry attributes.\n* User/group usage accounting, including file size profiling.                    \n* Fast 'du' and 'find' clones.                                                   
\n* Customizable alerts on filesystem entries.                                     \n* Aware of Lustre OSTs, pools and projects.                                      \n                                                                                 \nFor more information, refer to the [online documentation](https://github.com/cea-hpc/robinhood/wiki).\n\nI - License\n===========\n\nCopyright (C) 2004-2017 CEA/DAM.\nCopyright 2013-2016 Cray Inc. All Rights Reserved.\n\nThis program is free software; you can redistribute it and/or modify\nit under the terms of the CeCILL-C License.\n\nThe fact that you are presently reading this means that you have had\nknowledge of the CeCILL-C license (http://www.cecill.info) and that you\naccept its terms.\n\nII - Compiling\n==============\n\n2.1 - From source tarball\n-------------------------\n\nIt is advised to build RobinHood on your target system, to ensure the best\ncompatibility with your Lustre and MySQL versions.\n\nBuild requirements: glib2-devel, libattr-devel, mysql-devel or mariadb-devel,\ns-nail, bison, flex, jemalloc, jemalloc-devel.\nFor lustre support: lustre or lustre-client, lustre-devel (Lustre >= 2.15)\nFor running RobinHood's CI: lustre-tests (Lustre >= 2.15)\n\nUnzip and untar the source distribution:\n```\ntar zxvf robinhood-3.x.x.tar.gz\ncd robinhood-3.x.x\n```\n\nConfigure and build:\n```\n./configure\nmake rpm\n```\n\nRPMs are generated in the 'rpms/RPMS/*arch*' directory.\n\n2.2 - From git repository\n-------------------------\n\nInstall git, and autotools stuff:\n```\nyum install git automake autoconf libtool\n```\n\nRetrieve robinhood sources\n```\ngit clone https://github.com/cea-hpc/robinhood.git\ncd robinhood\ngit checkout master *(or other branch)*\nsh autogen.sh\n```\n\nThen refer to section 2.1 for next compilation steps.\n\n2.3 - Build using src rpm\n---------------------------\n\n### 2.3.1 - Creating src rpm\nOnly requirements: rpm-build, gcc, flex, bison\n\nTo create src rpm on a host with 
minimal requirements, run:\n```\n./configure --enable-dist\nmake srpm\n```\n\n### 2.3.2 - Building from src rpm\nBy default, the src rpm builds robinhood for POSIX filesystems:\n```\nrpmbuild --rebuild robinhood-3.0*.src.rpm\n```\n\nTo build robinhood binary RPM for a Lustre filsystem, run:\n```\nrpmbuild --rebuild robinhood-3.0*.src.rpm --with lustre --define \"lversion x.y\"\n```\n\nwhere x.y is your target lustre version (e.g. 2.7).\n\nNote: if lustre-client is not installed on your target system, you can specify\nan alternative lustre package by defining \"lpackage\", e.g.\n```\nrpmbuild [...] --define \"lpackage lustre\"\n```\n\nNote: 'lversion' can be omitted, by it is strongly recommanded to prevent\nincompatibility issues between lustreapi versions.\n\nIII - Install\n=============\n\nFor installing robinhood on your target system, install **robinhood-adm** RPM.\nIt includes configuration helper for DB, changelogs, ...\n\n* For lustre filesystems install **robinhood-lustre** RPM.\n* For other filesystems install **robinhood-posix** RPM.\n\nIV - Database Configuration\n===========================\n\nRobinhood needs a MySQL database for storing information about files.\nThis database can run on a different node from Robinhood daemon.\n\n* Install MySQL server on the machine (mysql-server and mysql packages).\n* Start the DB engine:\n    * systemctl start mariadb\n* Run the configuration helper script as root on the database host to create\nthe database:\n```\nrbh-config create_db\n```\n\n* Write the DB password to a file with read access for root only (600)\n  e.g. to /etc/robinhood.d/.dbpassword\n\nNote: initially, the database schema is empty. 
Robinhood will create it the first time it is launched.\n\nV - Lustre 2.x Filesystems only: enabling changelogs\n====================================================\nFor Lustre, you must register robinhood as a MDT changelog consumer.\n\n* Run the configuration helper script on Lustre MDS:\n```\nrbh-config enable_chglogs\n```\n\n  This registers a changelog consumer and activate required changelog records.\n\nNote: by default, the script checks for a 'cl1' consumer.\nIf you need to register several changelog consumers on your file system,\nrefer to lustre documentation.\n\nVI - Configuration file\n=======================\nExamples of config files are installed in /etc/robinhood.d/templates/\n\nYou can also use the '--template' option to generate a documented configuration file template:\n```\nrobinhood --template=<template_file>\n```\n\nFor more details, refer to [Robinhood v3 admin guide](https://github.com/cea-hpc/robinhood/wiki/robinhood_v3_admin_doc).\n\nNote: by default, robinhood searches configuration files in \"/etc/robinhood.d\".\n\nVII - First run\n===============\n\nEven if your filesystem is empty, you need to perform an initial scan in order to initialize robinhood database.\nThis prevents from having entries in filesystem that it wouldn't know about.\n```\nrobinhood --scan --once\n```\n\nVIII - Start the daemon\n=======================\n* Configure per-filesystem daemon options in **/etc/sysconfig/robinhood.\\<fsname\\>**\n\nExample: to read lustre changelogs and run all policies:\n```\nRBH_OPT=\"--readlog --run=all\"\n```\n\nExample: to regularly scan filesystem and run all policies:\n```\nRBH_OPT=\"--scan --run=all\"\n```\n\nStart the daemon:\n```\nsystemctl start robinhood@*fsname*\n```\n"
  },
  {
    "path": "autogen.sh",
    "content": "#!/bin/bash\n\nfunction install_hook\n{\n    local src_file=\"$1\"\n    local tgt_file=\"$2\"\n\n    test ! -d \"$wdir/.git/\" && return 0\n\n    if [ ! -e \"$wdir/.git/hooks/$tgt_file\" ]; then\n        echo \"installing git hook: $tgt_file\"\n        ln -s \"../../scripts/$src_file\" \"$wdir/.git/hooks/$tgt_file\"\n    fi\n    if [ ! -x \"$wdir/.git/hooks/$tgt_file\" ]; then\n        chmod +x \"$wdir/.git/hooks/$tgt_file\"\n    fi\n}\n\nwdir=$(dirname $(readlink -m \"$0\"))\ninstall_hook git_prepare_hook prepare-commit-msg\ninstall_hook pre-commit pre-commit\ninstall_hook commit-msg commit-msg\n\nautoreconf --install\n"
  },
  {
    "path": "autotools/m4/args_mgmt.m4",
    "content": "\n#\n# This macro is for features that are disabled by default\n# and we want a CFLAG to be set if it is explicitely enabled\n# on \"configure\" command line (with --enable-...)\n#\n# AX_ENABLE_FLAG( FEATURE_NAME, HELP_STRING, CFLAGS_IF_ENABLED )\n#\n# Example:\n# AX_ENABLE_FLAG( [debug-memalloc], [enable debug traces for memory allocator], [-D_DEBUG_MEMALLOC] )\n#\nAC_DEFUN([AX_ENABLE_FLAG],\n[\n\n\tAC_MSG_CHECKING($1 option)\n\tAC_ARG_ENABLE( [$1], AS_HELP_STRING([--enable-$1],[$2]),\n\t\t       [enable_]m4_bpatsubst([$1], -, _)=$enableval, [enable_]m4_bpatsubst([$1], -, _)='no' )\n\n\tif test \"[$enable_]m4_bpatsubst([$1], -, _)\" == yes ; then\n\t\tCFLAGS=\"$CFLAGS $3\"\n\t    AC_MSG_RESULT(enabled)\n    else\n\t    AC_MSG_RESULT(disabled)\n\tfi\n])\n\n#\n# This macro is for features that are disabled by default\n# and we want a CFLAG to be set if it is explicitely enabled\n# on \"configure\" command line (with --enable-...)\n#\n# AX_ENABLE_FLAG_COND( FEATURE_NAME, HELP_STRING, CFLAGS_IF_ENABLED, COND )\n#\n# Example:\n# AX_ENABLE_FLAG_COND( [debug-memalloc], [enable debug traces for memory allocator], [-D_DEBUG_MEMALLOC], DEBUG_MEMALLOC )\n#\nAC_DEFUN([AX_ENABLE_FLAG_COND],\n[\n\n\tAC_MSG_CHECKING($1 option)\n\tAC_ARG_ENABLE( [$1], AS_HELP_STRING([--enable-$1],[$2]),\n\t\t       [enable_]m4_bpatsubst([$1], -, _)=$enableval, [enable_]m4_bpatsubst([$1], -, _)='no' )\n\n\tAM_CONDITIONAL( $4, test \"[$enable_]m4_bpatsubst([$1], -, _)\" == \"yes\" )\n\n\tif test \"[$enable_]m4_bpatsubst([$1], -, _)\" == yes ; then\n\t\tCFLAGS=\"$CFLAGS $3\"\n\t    AC_MSG_RESULT(enabled)\n        else\n\t    AC_MSG_RESULT(disabled)\n\tfi\n])\n\n\n\n#\n# This macro is for features that are enabled by default\n# and we want a CFLAG to be set if it is explicitely disabled\n# on \"configure\" command line (with --disable-...)\n#\n# AX_DISABLE_FLAG( FEATURE_NAME, HELP_STRING, CFLAGS_IF_DISABLED )\n#\n# Example:\n# AX_DISABLE_FLAG( [tcp-register], [disable registration of 
tcp services on portmapper], [-D_NO_TCP_REGISTER] )\n#\nAC_DEFUN([AX_DISABLE_FLAG],\n[\n\tAC_MSG_CHECKING($1 option)\n\tAC_ARG_ENABLE( [$1], AS_HELP_STRING([--disable-$1],[$2]),\n\t\t       [enable_]m4_bpatsubst([$1], -, _)=$enableval, [enable_]m4_bpatsubst([$1], -, _)='yes' )\n\n\tif test \"[$enable_]m4_bpatsubst([$1], -, _)\" != yes ; then\n\t\tCFLAGS=\"$CFLAGS $3\"\n\t    AC_MSG_RESULT(disabled)\n    else\n\t    AC_MSG_RESULT(enabled)\n\tfi\n])\n"
  },
  {
    "path": "autotools/m4/as-ac-expand.m4",
    "content": "dnl as-ac-expand.m4 0.2.0\ndnl autostars m4 macro for expanding directories using configure's prefix\ndnl thomas@apestaart.org\ndnl\n \ndnl AS_AC_EXPAND(VAR, CONFIGURE_VAR)\ndnl example\ndnl AS_AC_EXPAND(SYSCONFDIR, $sysconfdir)\ndnl will set SYSCONFDIR to /usr/local/etc if prefix=/usr/local\n \nAC_DEFUN([AS_AC_EXPAND],\n[\n    EXP_VAR=[$1]\n    FROM_VAR=[$2]\n \n    dnl first expand prefix and exec_prefix if necessary\n    prefix_save=$prefix\n    exec_prefix_save=$exec_prefix\n \n    dnl if no prefix given, then use /usr/local, the default prefix\n    if test \"x$prefix\" = \"xNONE\"; then\n        prefix=\"$ac_default_prefix\"\n    fi\n    dnl if no exec_prefix given, then use prefix\n    if test \"x$exec_prefix\" = \"xNONE\"; then\n        exec_prefix=$prefix\n    fi\n \n    full_var=\"$FROM_VAR\"\n    dnl loop until it doesn't change anymore\n    while true; do\n        new_full_var=\"`eval echo $full_var`\"\n        if test \"x$new_full_var\" = \"x$full_var\"; then break; fi\n        full_var=$new_full_var\n    done\n \n    dnl clean up\n    full_var=$new_full_var\n    AC_SUBST([$1], \"$full_var\")\n \n    dnl restore prefix and exec_prefix\n    prefix=$prefix_save\n    exec_prefix=$exec_prefix_save\n])\n"
  },
  {
    "path": "autotools/m4/ax_valgrind_check.m4",
    "content": "# ===========================================================================\n#     http://www.gnu.org/software/autoconf-archive/ax_valgrind_check.html\n# ===========================================================================\n#\n# SYNOPSIS\n#\n#   AX_VALGRIND_CHECK()\n#\n# DESCRIPTION\n#\n#   Checks whether Valgrind is present and, if so, allows running `make\n#   check` under a variety of Valgrind tools to check for memory and\n#   threading errors.\n#\n#   Defines VALGRIND_CHECK_RULES which should be substituted in your\n#   Makefile; and $enable_valgrind which can be used in subsequent configure\n#   output. VALGRIND_ENABLED is defined and substituted, and corresponds to\n#   the value of the --enable-valgrind option, which defaults to being\n#   enabled if Valgrind is installed and disabled otherwise.\n#\n#   If unit tests are written using a shell script and automake's\n#   LOG_COMPILER system, the $(VALGRIND) variable can be used within the\n#   shell scripts to enable Valgrind, as described here:\n#\n#     https://www.gnu.org/software/gnulib/manual/html_node/Running-self_002dtests-under-valgrind.html\n#\n#   Usage example:\n#\n#   configure.ac:\n#\n#     AX_VALGRIND_CHECK\n#\n#   Makefile.am:\n#\n#     @VALGRIND_CHECK_RULES@\n#     VALGRIND_SUPPRESSIONS_FILES = my-project.supp\n#     EXTRA_DIST = my-project.supp\n#\n#   This results in a \"check-valgrind\" rule being added to any Makefile.am\n#   which includes \"@VALGRIND_CHECK_RULES@\" (assuming the module has been\n#   configured with --enable-valgrind). 
Running `make check-valgrind` in\n#   that directory will run the module's test suite (`make check`) once for\n#   each of the available Valgrind tools (out of memcheck, helgrind, drd and\n#   sgcheck), and will output results to test-suite-$toolname.log for each.\n#   The target will succeed if there are zero errors and fail otherwise.\n#\n#   The macro supports running with and without libtool.\n#\n# LICENSE\n#\n#   Copyright (c) 2014, 2015 Philip Withnall <philip.withnall@collabora.co.uk>\n#\n#   Copying and distribution of this file, with or without modification, are\n#   permitted in any medium without royalty provided the copyright notice\n#   and this notice are preserved.  This file is offered as-is, without any\n#   warranty.\n\n#serial 3\n\nAC_DEFUN([AX_VALGRIND_CHECK],[\n\tdnl Check for --enable-valgrind\n\tAC_MSG_CHECKING([whether to enable Valgrind on the unit tests])\n\tAC_ARG_ENABLE([valgrind],\n\t              [AS_HELP_STRING([--enable-valgrind], [Whether to enable Valgrind on the unit tests])],\n\t              [enable_valgrind=$enableval],[enable_valgrind=])\n\n\t# Check for Valgrind.\n\tAC_CHECK_PROG([VALGRIND],[valgrind],[valgrind])\n\n\tAS_IF([test \"$enable_valgrind\" = \"yes\" -a \"$VALGRIND\" = \"\"],[\n\t\tAC_MSG_ERROR([Could not find valgrind; either install it or reconfigure with --disable-valgrind])\n\t])\n\tAS_IF([test \"$enable_valgrind\" != \"no\"],[enable_valgrind=yes])\n\n\tAM_CONDITIONAL([VALGRIND_ENABLED],[test \"$enable_valgrind\" = \"yes\"])\n\tAC_SUBST([VALGRIND_ENABLED],[$enable_valgrind])\n\tAC_MSG_RESULT([$enable_valgrind])\n\n\t# Check for Valgrind tools we care about.\n\tm4_define([valgrind_tool_list],[[memcheck], [helgrind], [drd], [exp-sgcheck]])\n\n\tAS_IF([test \"$VALGRIND\" != \"\"],[\n\t\tm4_foreach([vgtool],[valgrind_tool_list],[\n\t\t\tm4_define([vgtooln],AS_TR_SH(vgtool))\n\t\t\tm4_define([ax_cv_var],[ax_cv_valgrind_tool_]vgtooln)\n\t\t\tAC_CACHE_CHECK([for Valgrind tool 
]vgtool,ax_cv_var,[\n\t\t\t\tax_cv_var=\n\t\t\t\tAS_IF([`$VALGRIND --tool=vgtool --help 2&>/dev/null`],[\n\t\t\t\t\tax_cv_var=\"vgtool\"\n\t\t\t\t])\n\t\t\t])\n\n\t\t\tAC_SUBST([VALGRIND_HAVE_TOOL_]vgtooln,[$ax_cv_var])\n\t\t])\n\t])\n\nVALGRIND_CHECK_RULES='\n# Valgrind check\n#\n# Optional:\n#  - VALGRIND_SUPPRESSIONS_FILES: Space-separated list of Valgrind suppressions\n#    files to load. (Default: empty)\n#  - VALGRIND_FLAGS: General flags to pass to all Valgrind tools.\n#    (Default: --num-callers=30)\n#  - VALGRIND_$toolname_FLAGS: Flags to pass to Valgrind $toolname (one of:\n#    memcheck, helgrind, drd, sgcheck). (Default: various)\n\n# Optional variables\nVALGRIND_SUPPRESSIONS ?= $(addprefix --suppressions=,$(VALGRIND_SUPPRESSIONS_FILES))\nVALGRIND_FLAGS ?= --num-callers=30\nVALGRIND_memcheck_FLAGS ?= --leak-check=full --show-reachable=no\nVALGRIND_helgrind_FLAGS ?= --history-level=approx\nVALGRIND_drd_FLAGS ?=\nVALGRIND_sgcheck_FLAGS ?=\n\n# Internal use\nvalgrind_tools = memcheck helgrind drd sgcheck\nvalgrind_log_files = $(addprefix test-suite-,$(addsuffix .log,$(valgrind_tools)))\n\nvalgrind_memcheck_flags = --tool=memcheck $(VALGRIND_memcheck_FLAGS)\nvalgrind_helgrind_flags = --tool=helgrind $(VALGRIND_helgrind_FLAGS)\nvalgrind_drd_flags = --tool=drd $(VALGRIND_drd_FLAGS)\nvalgrind_sgcheck_flags = --tool=exp-sgcheck $(VALGRIND_sgcheck_FLAGS)\n\nvalgrind_quiet = $(valgrind_quiet_$(V))\nvalgrind_quiet_ = $(valgrind_quiet_$(AM_DEFAULT_VERBOSITY))\nvalgrind_quiet_0 = --quiet\n\n# Support running with and without libtool.\nifneq ($(LIBTOOL),)\nvalgrind_lt = $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=execute\nelse\nvalgrind_lt =\nendif\n\n# Use recursive makes in order to ignore errors during check\ncheck-valgrind:\nifeq ($(VALGRIND_ENABLED),yes)\n\t-$(foreach tool,$(valgrind_tools), \\\n\t\t$(if $(VALGRIND_HAVE_TOOL_$(tool))$(VALGRIND_HAVE_TOOL_exp_$(tool)), \\\n\t\t\t$(MAKE) $(AM_MAKEFLAGS) -k check-valgrind-tool VALGRIND_TOOL=$(tool); 
\\\n\t\t) \\\n\t)\nelse\n\t@echo \"Need to reconfigure with --enable-valgrind\"\nendif\n\n# Valgrind running\nVALGRIND_TESTS_ENVIRONMENT = \\\n\t$(TESTS_ENVIRONMENT) \\\n\tenv VALGRIND=$(VALGRIND) \\\n\tG_SLICE=always-malloc,debug-blocks \\\n\tG_DEBUG=fatal-warnings,fatal-criticals,gc-friendly\n\nVALGRIND_LOG_COMPILER = \\\n\t$(valgrind_lt) \\\n\t$(VALGRIND) $(VALGRIND_SUPPRESSIONS) --error-exitcode=1 $(VALGRIND_FLAGS)\n\ncheck-valgrind-tool:\nifeq ($(VALGRIND_ENABLED),yes)\n\t$(MAKE) check-TESTS \\\n\t\tTESTS_ENVIRONMENT=\"$(VALGRIND_TESTS_ENVIRONMENT)\" \\\n\t\tLOG_COMPILER=\"$(VALGRIND_LOG_COMPILER)\" \\\n\t\tLOG_FLAGS=\"$(valgrind_$(VALGRIND_TOOL)_flags)\" \\\n\t\tTEST_SUITE_LOG=test-suite-$(VALGRIND_TOOL).log\nelse\n\t@echo \"Need to reconfigure with --enable-valgrind\"\nendif\n\nDISTCHECK_CONFIGURE_FLAGS ?=\nDISTCHECK_CONFIGURE_FLAGS += --disable-valgrind\n\nMOSTLYCLEANFILES ?=\nMOSTLYCLEANFILES += $(valgrind_log_files)\n\n.PHONY: check-valgrind check-valgrind-tool\n'\n\n\tAC_SUBST([VALGRIND_CHECK_RULES])\n\tm4_ifdef([_AM_SUBST_NOTMAKE], [_AM_SUBST_NOTMAKE([VALGRIND_CHECK_RULES])])\n])\n"
  },
  {
    "path": "autotools/m4/db.m4",
    "content": "#\n# This macro tests for MySQL config program and version\n#\nAC_DEFUN([AX_MYSQL_INFO],\n[\n        AC_CHECK_PROGS(MYSQL_CONFIG, mysql_config)\n\n        if test -z \"$MYSQL_CONFIG\"; then\n                AC_MSG_ERROR(MySQL must be installed)\n        fi\n\n        AC_MSG_CHECKING(for MySQL version)\n        MYSQL_VERSION=`$MYSQL_CONFIG --version 2>/dev/null | cut -d \".\" -f 1`\n\n        if test -z \"$MYSQL_VERSION\"; then\n                MYSQL_VERSION=\"none\"\n        fi\n\n        AC_MSG_RESULT($MYSQL_VERSION)\n])\n\n"
  },
  {
    "path": "autotools/m4/lustre.m4",
    "content": "#\n# This macro test installed lustre version and package name\n#\nAC_DEFUN([AX_LUSTRE_VERSION],\n[\n        # Check if any package provides 'lustre-client'\n        # Since lustre 2.8, this symbol is provided by both 'lustre' and 'lustre-client'\n        # so robinhood only needs to require it.\n        AC_MSG_CHECKING(if any package provides lustre-client)\n\n        if rpm -q --whatprovides lustre-client >/dev/null 2>/dev/null; then\n            AC_MSG_RESULT(yes)\n            LPACKAGE=lustre-client\n            # Assume we want the same version as this package,\n            # whatever 'lustre' or 'lustre-client'\n            #\n            # Added pipe to `head -1` to properly handle cases of multiple packages; lustre-client and lustre-client-dkms\n            #\n            AC_MSG_CHECKING(Lustre version)\n            LVERSION=`rpm -q --whatprovides lustre-client --qf \"%{Version}\\n\" 2>/dev/null | grep -v \"no package\" | cut -d \".\" -f 1-2 | head -1`\n            AC_MSG_RESULT($LVERSION)\n        else\n            AC_MSG_RESULT(no)\n            AC_MSG_CHECKING(if lustre is installed)\n\n            # fallback to lustre package\n            LPACKAGE=`rpm -q --whatprovides lustre --qf \"%{Name}\\n\" 2>/dev/null | grep -v \"no package\"`\n            if test -n \"$LPACKAGE\"; then\n                LVERSION=`rpm -q $LPACKAGE --qf \"%{Version}\\n\" 2>/dev/null | cut -d \".\" -f 1-2`\n                AC_MSG_RESULT(found version $LVERSION)\n            else\n                AC_MSG_RESULT(no)\n            fi\n        fi\n])\n\n# Get lustre version from sources\n# AX_LUSTRE_SRC_VERSION(LUSTRE_SRC_DIR)\nAC_DEFUN([AX_LUSTRE_SRC_VERSION],\n[\n        AC_MSG_CHECKING(Lustre source version)\n\n        if test -f $1/config.h ; then\n            LVERSION=`grep \"define VERSION \" $1/config.h | awk '{print $(NF)}' | sed -e 's/\"//g' | cut -d \".\" -f 1-2`\n            # default RPM dependancy to lustre-client\n            LPACKAGE=\"lustre-client\"\n \n     
       if test -z \"$LVERSION\"; then\n                AC_MSG_RESULT(none installed)\n            else\n                AC_MSG_RESULT(source version $LVERSION)\n            fi\n        else\n           AX_LUSTRE_EXPORT_VERSION([$1])\n        fi\n])\n\n# Get lustre version from exported src directory\n# AX_LUSTRE_EXPORT_VERSION(LUSTRE_SRC_DIR)\nAC_DEFUN([AX_LUSTRE_EXPORT_VERSION],\n[\n        AC_MSG_CHECKING(Lustre exported src version)\n\n        LVERSION=$([awk -F',' '/m._define\\(\\[LUSTRE_MINOR\\]/ { minver=gensub( /[\\[\\]\\)]/, \"\", \"g\", $(NF)) ; } /m._define\\(\\[LUSTRE_MAJOR\\]/ { majver=gensub( /[\\[\\]\\)]/, \"\", \"g\", $(NF)) ; } END { print majver \".\" minver ; }' $$1/usr/src/lustre-*/lustre/autoconf/lustre-version.ac])\n        # default RPM dependancy to lustre-client\n        LPACKAGE=\"lustre-client\"\n\n        if test -z \"$LVERSION\"; then\n            AC_MSG_RESULT(none installed)\n        else\n            AC_MSG_RESULT(exported src version $LVERSION)\n        fi\n\n])\n\n# -*- mode: shell; sh-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n"
  },
  {
    "path": "configure.ac",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n# Process this file with autoconf to produce a configure script.\n\nAC_PREREQ(2.59)\n\nAC_INIT( [robinhood], [3.2.0], [robinhood-support@lists.sourceforge.net])\nRELEASE=\"3\"\n\nAC_DEFINE_UNQUOTED(RELEASE, \"$RELEASE\", [release info])\nAC_SUBST(RELEASE)\n\nAC_CONFIG_AUX_DIR([autotools])\nAC_CONFIG_MACRO_DIR([autotools/m4])\nAC_CONFIG_HEADER([src/include/config.h])\nAC_CONFIG_SRCDIR([src/list_mgr/listmgr_init.c])\n\nAM_INIT_AUTOMAKE([-Wall -Werror tar-pax foreign subdir-objects])\nm4_ifdef([AM_SILENT_RULES], [AM_SILENT_RULES([yes])])\n\nAC_PREFIX_DEFAULT([/usr])\n\nAC_GNU_SOURCE\n\nAC_CONFIG_FILES([Makefile\n         src/Makefile\n         src/include/Makefile\n         src/common/Makefile\n         src/cfg_parsing/Makefile\n         src/list_mgr/Makefile\n         src/entry_processor/Makefile\n         src/fs_scan/Makefile\n         src/chglog_reader/Makefile\n         src/modules/Makefile\n         src/policies/Makefile\n         src/robinhood/Makefile\n         src/tools/Makefile\n         src/tests/Makefile\n         doc/Makefile\n         doc/templates/Makefile\n         scripts/Makefile\n         scripts/robinhood.init\n         scripts/robinhood.init.sles\n         scripts/robinhood.service\n         scripts/robinhood@.service\n         scripts/sysconfig_robinhood\n         scripts/ld.so.robinhood.conf\n         scripts/rbh_cksum.sh\n         tests/Makefile\n         tests/test_suite/Makefile\n         web_gui/Makefile\n         man/Makefile\n         robinhood.spec\n         doc/doxygen/doxygen.cfg])\n\nAC_ARG_ENABLE([dist],\n              AC_HELP_STRING([--enable-dist], [Only configure enough for make dist]),\n              [],\n              [enable_dist=\"no\"])\n\nAM_PROG_LEX\nAC_PATH_PROG(LEX_INST, $LEX)\nif test -z \"$LEX_INST\" -a \"x$enable_dist\" = xyes; then\n  AC_MSG_ERROR([lex/flex not 
found])\nfi\n\nAC_PROG_YACC\nAC_PATH_PROG(YACC_INST, $YACC)\nif test -z \"$YACC_INST\" -a \"x$enable_dist\" = xyes; then\n  AC_MSG_ERROR([yacc/bison not found])\nfi\n\nAS_IF([test \"x$enable_dist\" = xyes], [\nAC_OUTPUT\nexit\n])\n\n# required for automake 1.12 (since fedora 18)\nm4_ifdef([AM_PROG_AR], [AM_PROG_AR])\n\nAC_PROG_LIBTOOL\nAC_PROG_CC\n\n# required for automake 1.12 (since fedora 18)\nm4_ifdef([AM_PROG_CC_C_O], [AM_PROG_CC_C_O])\n\nAC_C_INLINE\n\n# define everything necessary for accessing large files (64bits offset)\nAC_SYS_LARGEFILE\n\n# gcc options\nCC_OPT=\"-Wall -Werror -Wstrict-prototypes -Wmissing-prototypes -Wmissing-declarations -std=gnu99 -Wno-variadic-macros\"\nAC_SUBST(CC_OPT)\n\nAC_CHECK_SIZEOF([nlink_t])\nAC_CHECK_SIZEOF([off_t])\nAC_CHECK_SIZEOF([ino_t])\nAC_CHECK_SIZEOF([dev_t])\nAC_CHECK_SIZEOF([time_t])\nAC_CHECK_SIZEOF([size_t])\nAC_CHECK_SIZEOF([pthread_t])\n\nAX_ENABLE_FLAG([strict], [Compile with -Wall -Werror], [-Wall -Werror])\n\nAC_CHECK_LIB(z, gzflush,\n  [LIBS=\"-lz $LIBS\"\n   AC_DEFINE(HAVE_LIBZ, 1, [define if you have zlib])],\n  [AC_MSG_WARN([zlib is required])])\n\nPKG_CHECK_MODULES(GLIB2, [glib-2.0 >= 2.16])\nPKG_CHECK_MODULES(GTHREAD2, [gthread-2.0])\n\nCFLAGS=\"$CFLAGS $GLIB2_CFLAGS $GTHREAD2_CFLAGS\"\nLDFLAGS=\"$LDFLAGS $GLIB2_LIBS $GTHREAD2_LIBS\"\n\n# debug flags\nAX_ENABLE_FLAG([debug-db], [enables debug traces for database operations], [-D_DEBUG_DB])\nAX_ENABLE_FLAG([debug-parsing], [enables debug traces for configuration file parsing], [-D_DEBUG_PARSING])\nAM_CONDITIONAL(DEBUG_PARSING, test \"x$enable_debug_parsing\" == \"xyes\" )\n\nAX_ENABLE_FLAG([debug-pipeline], [enables debug traces for entry processor pipeline], [-D_DEBUG_ENTRYPROC])\nAX_ENABLE_FLAG([debug-policies], [enables debug traces for entry processor pipeline], [-D_DEBUG_POLICIES])\nAX_ENABLE_FLAG([debug-hash], [enables debug traces internal hash tables], [-D_DEBUG_HASH])\n\nAX_ENABLE_FLAG([bench-scan], [test only: build special version for scan 
benchmarking], [-D_BENCH_SCAN])\nAX_ENABLE_FLAG([bench-db], [test only: build special version for DB benchmarking], [-D_BENCH_DB])\nAX_ENABLE_FLAG([bench-pipeline], [test only: build special version for pipeline benchmarking], [-D_BENCH_PIPELINE -D_BENCH_DB])\nAX_ENABLE_FLAG([gprof], [test only: add gprof info to the binaries], [-g -pg])\nAX_ENABLE_FLAG([gcov], [test only: add gcov info to the binaries], [--coverage])\nAX_VALGRIND_CHECK\n\n# behavior flags\nAX_DISABLE_FLAG([atfunc], [Don't use 'at' functions for scanning], [-D_NO_AT_FUNC])\n\nAX_ENABLE_FLAG( [fid2path-leading-slash], [must be enabled if fid2path() returns a leading slash], [-D_FID2PATH_LEADING_SLASH] )\n\nAC_ARG_ENABLE( [data-version], AS_HELP_STRING([--disable-data-version],\n               [Disable the use of llapi_get_data_version()]),\n               [support_dv=\"$enableval\"],[support_dv=\"yes\"] )\nDV_CMD=\"stat -c %Y-%s\"\n\nAC_ARG_ENABLE([common-rpms], AS_HELP_STRING([--disable-common-rpms],\n              [Disable build of common RPMs, only build FS specific packages]),\n              [common_rpms=\"$enableval\"],[common_rpms=\"yes\"])\nAM_CONDITIONAL(COMMON_RPMS,test  \"x$common_rpms\" = \"xyes\")\n\nbuild_lustre=\"OFF\"\nbuild_backup=\"OFF\"\nbuild_lhsm=\"OFF\"\nbuild_shook=\"OFF\"\n\nAC_ARG_ENABLE([lustre], AS_HELP_STRING([--disable-lustre],\n              [Disable all lustre specific features]),\n              [support_lustre=\"$enableval\"],[support_lustre=\"yes\"])\nAC_ARG_ENABLE([shook], AS_HELP_STRING([--disable-shook],\n              [Disable build of shook specific modules]),\n              [support_shook=\"$enableval\"],[support_shook=\"yes\"])\n\n# default input option is --scan\nINPUT_OPT=\"--scan\"\n\n# shook requires Lustre + FID support + shook library\n# hsm_lite requires Lustre + FID support\n# lustre_hsm requires Lustre >= 2.5\n\nif test \"x$support_lustre\" = \"xyes\" ; then\n    # Lustre location\n    AC_ARG_WITH( [lustre], 
AS_HELP_STRING([--with-lustre=<lustre_src_dir>],[indicate alternative location for lustre]),\n                 LDIR=\"$withval\")\n\n    if test -n \"$LDIR\"; then\n        CFLAGS=\"$CFLAGS -I$LDIR/lustre/include\"\n        LDFLAGS=\"$LDFLAGS -L$LDIR/lustre/utils\"\n    fi\n\n    AC_CHECK_LIB([lustreapi], [llapi_obd_statfs], [have_liblustre_api=\"yes\"])\n    AM_CONDITIONAL( LUSTRE, test  \"x$have_liblustre_api\" = \"xyes\" )\n    AC_SUBST(LUSTRE)\n\n    # lustre relative information and checks\n    if test \"x$have_liblustre_api\" = \"xyes\" ; then\n\n        AC_DEFINE(_LUSTRE, 1, [liblustreapi is available])\n        build_lustre=\"ON\"\n\n        AC_CHECK_HEADER([lustre/lustreapi.h], have_new_lustre_header=\"true\", have_new_lustre_header=\"false\")\n        if test \"$have_new_lustre_header\" = \"true\"; then\n            AC_DEFINE(_LUSTRE_API_HEADER, 1, [New lustreapi header])\n            # this comes with Lustre 2.4, so disable features of Lustre < 2.4 at the same time\n            AC_DEFINE(_MDT_SPECIFIC_LOVEA, 1, [MDT LOV EA is no longer the same as lov_user_md])\n        fi\n\n        AC_CHECK_HEADER([lustre/lustre_idl.h], have_lustre_idl=\"true\", have_lustre_idl=\"false\", [\n            #define LPX64 \"%#llx\"\n            #include <sys/types.h>\n            #include <asm/types.h>\n            #include <lustre/lustre_user.h>\n        ])\n        if test \"$have_lustre_idl\" = \"true\"; then\n            AC_DEFINE(_LUSTRE_IDL_HEADER, 1, [lustre_idl header exists])\n        fi\n\n        # this defines LVERSION variable\n        if test -z \"$LDIR\" ; then\n            AX_LUSTRE_VERSION\n        else\n            # get version from sources\n            AX_LUSTRE_SRC_VERSION(\"$LDIR\")\n        fi\n\n        LDEFINES=\"\"\n        if test -n \"$LVERSION\"; then\n            LDEFINES=\"--define=\\\"lversion $LVERSION\\\"\"\n            AC_DEFINE_UNQUOTED(LUSTRE_VERSION, \"$LVERSION\",  [Lustre version])\n        fi\n        if test -n \"$LPACKAGE\"; then\n 
           LDEFINES=\"$LDEFINES --define=\\\"lpackage $LPACKAGE\\\"\"\n        fi\n        AC_SUBST(LDEFINES)\n\n        FS_LDFLAGS=\"-llustreapi\"\n        AC_SUBST(FS_LDFLAGS)\n\n        # log functions\n        AC_CHECK_LIB([lustreapi], [llapi_msg_set_level], [have_llapi_msg_level=\"yes\"])\n        test \"x$have_llapi_msg_level\" = \"xyes\" && AC_DEFINE(HAVE_LLAPI_MSG_LEVEL, 1, [llapi_msg_set_level is available])\n\n        AC_CHECK_LIB([lustreapi], [llapi_error_callback_set],\n                     [have_llapi_error_callback_set=\"yes\"])\n        test \"x$have_llapi_error_callback_set\" = \"xyes\" &&\n            AC_DEFINE(HAVE_LLAPI_LOG_CALLBACKS, 1, [llapi log callbacks are available])\n\n        AC_CHECK_LIB([lustreapi], [llapi_get_mdt_index_by_fid],\n                     [have_llapi_get_mdt_index_by_fid=\"yes\"])\n        test \"x$have_llapi_get_mdt_index_by_fid\" = \"xyes\" &&\n            AC_DEFINE(HAVE_LLAPI_GET_MDT_INDEX_BY_FID, 1,\n                      [llapi_get_mdt_index_by_fid available])\n\n        # check if struct statfs is defined in lustre user\n        AC_CHECK_TYPE(struct obd_statfs,[have_obd_statfs=\"yes\"],[have_obd_statfs=\"no\"],[\n            #define LPX64 \"%#llx\"\n            #include <sys/types.h>\n            #include <asm/types.h>\n            #include <lustre/lustre_user.h>\n        ])\n        test \"x$have_obd_statfs\" = \"xyes\" && AC_DEFINE(HAVE_OBD_STATFS, 1, [struct obd_stafs is defined])\n\n        AC_ARG_ENABLE( [fid-support], AS_HELP_STRING([--disable-fid-support],\n                       [Don't address files by fid]),\n                       fid_support=\"$enableval\", fid_support=\"yes\" )\n\n            # only check for fid if fid support is not disabled\n        if test \"x$fid_support\" = \"xyes\" ; then\n                AC_CHECK_LIB([lustreapi], [llapi_fid2path], [have_fid=\"yes\"])\n                if test \"x$have_fid\" = \"xyes\"; then\n                    AC_DEFINE(_HAVE_FID, 1, [lustre supports fids])\n       
             AC_DEFINE(_HSM_LITE, 1, [HSM lite support])\n                    build_backup=\"ON\"\n                fi\n        fi\n\n        # check if fd2fid function exists\n        AC_CHECK_LIB([lustreapi], [llapi_fd2fid], [have_fd2fid=\"yes\"])\n        test \"x$have_fd2fid\" = \"xyes\" && AC_DEFINE(HAVE_FD2FID, 1, [llapi_fd2fid function is available])\n\n        # check for DNE support\n        AC_CHECK_LIB([lustreapi], [llapi_file_fget_mdtidx], [have_dne_support=\"yes\"])\n        test \"x$have_dne_support\" = \"xyes\" && AC_DEFINE(HAVE_DNE, 1, [this version of Lustre supports DNE]) # FIXME wrong test (works with 2.1!)\n\n        AC_ARG_ENABLE( [changelogs], AS_HELP_STRING([--disable-changelogs],[Don't use ChangeLogs]),\n                   use_changelogs=$enableval, use_changelogs='yes' )\n\n    \tif test \"x$use_changelogs\" == \"xyes\" ; then\n\n    \t\t# test changelog functions\n\n\t    \tAC_CHECK_LIB([lustreapi],[llapi_changelog_start],[have_llapi_changelog_start=\"yes\"])\n\t\t    if test \"x$have_llapi_changelog_start\" = \"xyes\"; then\n\t\t        AC_DEFINE(HAVE_CHANGELOGS, 1, [Lustre changelogs records are structures])\n                AC_CHECK_DECL([CL_IOCTL],AC_DEFINE(_HAVE_CL_IOCTL,1,[CL_IOCTL is defined]),[],[\n                    #define LPX64 \"%#llx\"\n                    #include <sys/types.h>\n                    #include <asm/types.h>\n                    #include <lustre/lustre_user.h>\n                ])\n                AC_CHECK_DECL([CL_LAYOUT],AC_DEFINE(HAVE_CL_LAYOUT,1,[Layout change emit changelog records]),[],[\n                    #define LPX64 \"%#llx\"\n                    #include <sys/types.h>\n                    #include <asm/types.h>\n                    #include <lustre/lustre_user.h>\n                ])\n\n                # changelog_ext_rec was removed by commit 0f22e4,\n                # which added the flexible changelog format. 
That\n                # commit added CLF_RENAME, so check for that symbol.\n                AC_CHECK_DECLS([CLF_RENAME],[],[],[\n                    #define LPX64 \"%#llx\"\n                    #include <sys/types.h>\n                    #include <asm/types.h>\n                    #include <lustre/lustre_user.h>\n                ])\n                # default option for sysconfig\n                INPUT_OPT=\"--readlog\"\n            fi\n\n            if test \"x$have_llapi_changelog_start\" = \"xyes\" && \\\n               test \"x$have_fid\" != \"xyes\"; then\n                AC_MSG_ERROR([Conflicting options: fid must be enabled for enabling MDT changelogs management])\n            fi\n    \tfi\n        if test \"x$support_dv\" = \"xyes\" ; then\n            AC_CHECK_LIB([lustreapi], [llapi_get_data_version], [have_data_version=\"yes\"])\n        fi\n        test \"x$have_data_version\" = \"xyes\" && DV_CMD=\"lfs data_version\"\n\n        # llapi_swap_layouts exists since Lustre2.4 release\n        # since then, entry striping can change so scanning must update file stripe information\n        AC_CHECK_LIB([lustreapi], [llapi_fswap_layouts], [have_llapi_fswap_layouts=\"yes\"])\n        test \"x$have_llapi_fswap_layouts\" = \"xyes\" && AC_DEFINE(HAVE_LLAPI_FSWAP_LAYOUTS, 1, [llapi_fswap_layouts is available])\n\n        AX_ENABLE_FLAG( [llapi-fork-support], [Must be enabled if liblustreapi uses fork()], [-D_LLAPI_FORKS] )\n        AX_ENABLE_FLAG( [mds-stat], [ioctl() to MDC instead of POSIX stat()], [-D_MDS_STAT_SUPPORT] )\n\n        # pool functions\n        AC_CHECK_LIB([lustreapi], [llapi_get_poollist], [have_llapi_poollist=\"yes\"])\n        AC_CHECK_LIB([lustreapi], [llapi_get_poolmembers], [have_llapi_poolmembers=\"yes\"])\n        test \"x$have_llapi_poollist\" = \"xyes\" && test \"x$have_llapi_poolmembers\" = \"xyes\"  && \\\n            AC_DEFINE(HAVE_LLAPI_GETPOOL_INFO, 1, [llapi_getpool functions are available])\n\n\n        AC_CHECK_MEMBER([struct 
lov_user_ost_data_v1.l_object_seq], [have_obj_seq=\"yes\"],  [have_obj_seq=\"no\"], [\n                    #define LPX64 \"%#llx\"\n                    #include <sys/types.h>\n                    #include <asm/types.h>\n                    #include <lustre/lustre_user.h>\n        ])\n        test \"x$have_obj_seq\" = \"xyes\" && AC_DEFINE(HAVE_OBJ_SEQ, 1, [lov_user_ost_data_v1 has l_object_seq field])\n\n        AC_CHECK_MEMBER([struct lov_user_ost_data_v1.l_object_id], [have_obj_id=\"yes\"],  [have_obj_id=\"no\"], [\n                    #define LPX64 \"%#llx\"\n                    #include <sys/types.h>\n                    #include <asm/types.h>\n                    #include <lustre/lustre_user.h>\n        ])\n        test \"x$have_obj_id\" = \"xyes\" && AC_DEFINE(HAVE_OBJ_ID, 1, [lov_user_ost_data_v1 has l_object_id field])\n\n        # Lustre/HSM feature needs Lustre 2.5.0. As some hsm calls were already landed as empty nutshells in 2.4\n        # we rely on this new call of 2.5.0: llapi_hsm_state_get_fd().\n        AC_CHECK_LIB([lustreapi], llapi_hsm_state_get_fd, [have_lustre_hsm=\"yes\"])\n        if test \"x$have_lustre_hsm\" = \"xyes\"; then\n            AC_DEFINE([_LUSTRE_HSM], [1], [Lustre/HSM feature is present])\n            build_lhsm=\"ON\"\n        fi\n\n        if test \"x$support_shook\" = \"xyes\" ; then\n            # test if library is available for shook mode\n            AC_CHECK_LIB([shooksvr], [shook_release], [have_shook=\"yes\"])\n            # fix with dynamic module management\n            if test \"x$have_shook\" = \"xyes\"; then\n                build_shook=\"ON\"\n            fi\n            # check shook to lhsm conversion functions\n            AC_CHECK_LIB([shooksvr], [shook_lhsmify], [have_shook_lhsmify=\"yes\"])\n            test \"x$have_shook_lhsmify\" = \"xyes\" && \\\n                AC_DEFINE([HAVE_SHOOK_LHSMIFY], [1], [shook_lhsmify function available])\n        fi\n    fi\n\n    AM_CONDITIONAL(CHANGELOGS,  test  
\"x$have_llapi_changelog_start\" = \"xyes\" )\n    AM_CONDITIONAL(USER_LOVEA, [test  \"x$have_new_lustre_header\" != \"xtrue\" && test \"x$have_llapi_changelog_start\" = \"xyes\"])\n    AM_CONDITIONAL(LUSTRE_HSM,  test \"x$have_lustre_hsm\" = \"xyes\")\n    AM_CONDITIONAL(SHOOK,       test  \"x$have_shook\" = \"xyes\")\n    # XXX HSM-LITE tagged sections of the code require Lustre 2.x\n    AM_CONDITIONAL(HSM_LITE,    test \"x$have_fid\" = \"xyes\")\nelse\n    AM_CONDITIONAL(LUSTRE, test 0 = 1 )\n    AM_CONDITIONAL(CHANGELOGS, test 0 = 1 )\n    AM_CONDITIONAL(USER_LOVEA, test  0 = 1 )\n    AM_CONDITIONAL(LUSTRE_HSM,     test 0 = 1)\n    AM_CONDITIONAL(SHOOK,       test  0 = 1)\n    AM_CONDITIONAL(HSM_LITE,    test 0 = 1)\nfi # end of Lustre support\n\nAC_SUBST(DV_CMD)\nAC_SUBST(INPUT_OPT)\n\nAC_CHECK_HEADERS([sys/xattr.h],[],[AC_MSG_ERROR([glibc-devel is not installed.])])\n\nAC_CHECK_LIB([pthread], [pthread_getsequence_np], [have_pthread_getsequence_np=\"yes\"],[have_pthread_getsequence_np=\"no\"])\n    test \"x$have_pthread_getsequence_np\" = \"xyes\" && AC_DEFINE(HAVE_PTHREAD_GETSEQUENCE_NP, 1, [pthread_getsequence_np function exists])\n\nAC_ARG_ENABLE([jemalloc], AS_HELP_STRING([--disable-jemalloc],\n              [Use standard memory allocator instead of jemalloc]),\n              [use_jemalloc=\"$enableval\"],[use_jemalloc=\"yes\"])\nAM_CONDITIONAL(USE_JEMALLOC, test \"x$use_jemalloc\" == \"xyes\" )\n\nif test \"x$use_jemalloc\" = \"xyes\" ; then\n    AC_CHECK_LIB([jemalloc], [malloc_stats_print], LDFLAGS=\"$LDFLAGS -ljemalloc\", \\\n        AC_MSG_ERROR([jemalloc library not found (needs jemalloc and jemalloc-devel)]))\nfi\n\nAC_SUBST(PURPOSE_CFLAGS)\nAC_SUBST(PURPOSE_LDFLAGS)\n\nCFLAGS=\"$CFLAGS -I\\$(top_srcdir)/src/include\"\n\n# Db ?\n#AC_ARG_WITH( [db], AS_HELP_STRING([--with-db=MYSQL|SQLITE (default=MYSQL)],[type of database engine] ),\n#             DB=\"$withval\", DB=\"MYSQL\")\n# SQLITE support is 
deprecated\nDB=\"MYSQL\"\n\nAM_CONDITIONAL(USE_MYSQL_DB,    test \"$DB\" = \"MYSQL\")\nAM_CONDITIONAL(USE_SQLITE_DB,   test \"$DB\" = \"SQLITE\")\n\n# @TODO check database libraries and path\n\n# Db dependent checks and flags\ncase $DB in\n    MYSQL)\n\n        # check mysql version and mysql_config program\n        AX_MYSQL_INFO\n\n        AC_CHECK_HEADERS([mysql/mysql.h])\n        AC_CHECK_HEADER([mysql/mysql.h], HAVE_MYSQLCLNT=\"true\",\n                        AC_MSG_ERROR([MySQL client header not found (mysql/mysql.h). mysql-devel may not be installed.]))\n\n        DB_CFLAGS=\"-D_MYSQL `$MYSQL_CONFIG --include`\"\n        DB_LDFLAGS=`mysql_config --libs_r`\n\n        if test \"$MYSQL_VERSION\" -lt \"5\" ; then\n            AC_MSG_WARN([MySQL version is too old (<5), optimized accounting won't be supported.])\n        else\n            DB_CFLAGS=\"$DB_CFLAGS -D_MYSQL5\"\n        fi\n        ;;\n\n    SQLITE)\n        # check lib and headers\n        AC_CHECK_HEADER([sqlite3.h], HAVE_SQLITE_HEADER=\"true\",\n                    AC_MSG_ERROR([sqlite-devel not installed]))\n        AC_CHECK_LIB([sqlite3], [sqlite3_exec], HAVE_SQLITE_LIB=\"true\",\n                    AC_MSG_ERROR([sqlite3 library not found]))\n        DB_CFLAGS=\"-D_SQLITE\"\n        DB_LDFLAGS=\"-lsqlite3\"\n        ;;\n    *)\n        AC_MSG_ERROR([This Database is not supported yet])\n        ;;\nesac\n\nAC_SUBST(DB_CFLAGS)\nAC_SUBST(DB_LDFLAGS)\n\n# Checks for header files.\nAC_HEADER_STDC\nAC_CHECK_HEADERS([string.h sys/param.h])\n\n# Checks for typedefs, structures, and compiler characteristics.\nAC_C_CONST\nAC_TYPE_UID_T\nAC_TYPE_SIZE_T\n\n# Check if getmntent_r exists\nAC_CHECK_FUNC([getmntent_r],[getmntent_r=yes],[getmntent_r=no])\ntest \"$getmntent_r\" = \"yes\" && AC_DEFINE(HAVE_GETMNTENT_R, 1, [Reentrant version of getmntent exists])\nAM_CONDITIONAL(MNTENTCOMPAT, test \"$getmntent_r\" = \"no\" )\n\n# Check if fallocate(2) 
exists.\nAC_CHECK_FUNC([fallocate],[fallocate=yes],[fallocate=no])\ntest \"$fallocate\" = \"yes\" && AC_DEFINE(HAVE_FALLOCATE, 1, [File preallocation available])\n\nAS_AC_EXPAND(CONFDIR, $sysconfdir)\nif test $prefix = NONE && test \"$CONFDIR\" = \"/usr/etc\"  ; then\n    CONFDIR=\"/etc\"\nfi\nAC_MSG_NOTICE([Using config dir $CONFDIR])\nAC_DEFINE_UNQUOTED([SYSCONFDIR],\"$CONFDIR\", [Configuration directory])\nAC_SUBST(CONFDIR)\n\nAS_AC_EXPAND(SBINDIR, $sbindir)\nAC_SUBST(SBINDIR)\n\nAS_AC_EXPAND(LIBDIR, $libdir)\nAC_SUBST(LIBDIR)\n\nif test \"$ac_configure_args\" = \"\" ; then\n    ac_configure_args=\"''\"\nfi\n# for exporting to Makefile.in\nAC_SUBST(ac_configure_args)\n\nAC_OUTPUT\n\nAC_MSG_NOTICE([Summary:])\nAC_MSG_NOTICE([Lustre     support is $build_lustre])\nAC_MSG_NOTICE([Backup     support is $build_backup])\nAC_MSG_NOTICE([Lustre/HSM support is $build_lhsm])\ntest -d \"/ccc\" && AC_MSG_NOTICE([Shook      support is $build_shook])\n\nexit 0\n"
  },
  {
    "path": "doc/Makefile.am",
    "content": "\nSUBDIRS = templates\n"
  },
  {
    "path": "doc/admin_guides/MOVED_TO",
    "content": "Documentation is now managed as wiki pages on project web site.\nIt is available here:\n    https://github.com/cea-hpc/robinhood/wiki/Documentation\n\nTo edit it:\n    git clone https://github.com/cea-hpc/robinhood.wiki.git\n"
  },
  {
    "path": "doc/doxygen/doxygen.cfg.in",
    "content": "# Doxyfile 1.4.7\n\n# This file describes the settings to be used by the documentation system\n# doxygen (www.doxygen.org) for a project\n#\n# All text after a hash (#) is considered a comment and will be ignored\n# The format is:\n#       TAG = value [value, ...]\n# For lists items can also be appended using:\n#       TAG += value [value, ...]\n# Values that contain spaces should be placed between quotes (\" \")\n\n#---------------------------------------------------------------------------\n# Project related configuration options\n#---------------------------------------------------------------------------\n\n# The PROJECT_NAME tag is a single word (or a sequence of words surrounded \n# by quotes) that should identify the project.\n\nPROJECT_NAME           = @PACKAGE@\n\n# The PROJECT_NUMBER tag can be used to enter a project or revision number. \n# This could be handy for archiving the generated documentation or \n# if some version control system is used.\n\nPROJECT_NUMBER         = @VERSION@\n\n# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) \n# base path where the generated documentation will be put. \n# If a relative path is entered, it will be relative to the location \n# where doxygen was started. If left blank the current directory will be used.\n\nOUTPUT_DIRECTORY       = .\n\n# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create \n# 4096 sub-directories (in 2 levels) under the output directory of each output \n# format and will distribute the generated files over these directories. \n# Enabling this option can be useful when feeding doxygen a huge amount of \n# source files, where putting all generated files in the same directory would \n# otherwise cause performance problems for the file system.\n\nCREATE_SUBDIRS         = NO\n\n# The OUTPUT_LANGUAGE tag is used to specify the language in which all \n# documentation generated by doxygen is written. 
Doxygen will use this \n# information to generate all constant output in the proper language. \n# The default language is English, other supported languages are: \n# Brazilian, Catalan, Chinese, Chinese-Traditional, Croatian, Czech, Danish, \n# Dutch, Finnish, French, German, Greek, Hungarian, Italian, Japanese, \n# Japanese-en (Japanese with English messages), Korean, Korean-en, Norwegian, \n# Polish, Portuguese, Romanian, Russian, Serbian, Slovak, Slovene, Spanish, \n# Swedish, and Ukrainian.\n\nOUTPUT_LANGUAGE        = English\n\n# This tag can be used to specify the encoding used in the generated output. \n# The encoding is not always determined by the language that is chosen, \n# but also whether or not the output is meant for Windows or non-Windows users. \n# In case there is a difference, setting the USE_WINDOWS_ENCODING tag to YES \n# forces the Windows encoding (this is the default for the Windows binary), \n# whereas setting the tag to NO uses a Unix-style encoding (the default for \n# all platforms other than Windows).\n\nUSE_WINDOWS_ENCODING   = NO\n\n# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will \n# include brief member descriptions after the members that are listed in \n# the file and class documentation (similar to JavaDoc). \n# Set to NO to disable this.\n\nBRIEF_MEMBER_DESC      = YES\n\n# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend \n# the brief description of a member or function before the detailed description. \n# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the \n# brief descriptions will be completely suppressed.\n\nREPEAT_BRIEF           = YES\n\n# This tag implements a quasi-intelligent brief description abbreviator \n# that is used to form the text in various listings. 
Each string \n# in this list, if found as the leading text of the brief description, will be \n# stripped from the text and the result after processing the whole list, is \n# used as the annotated text. Otherwise, the brief description is used as-is. \n# If left blank, the following values are used (\"$name\" is automatically \n# replaced with the name of the entity): \"The $name class\" \"The $name widget\" \n# \"The $name file\" \"is\" \"provides\" \"specifies\" \"contains\" \n# \"represents\" \"a\" \"an\" \"the\"\n\nABBREVIATE_BRIEF       = \n\n# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then \n# Doxygen will generate a detailed section even if there is only a brief \n# description.\n\nALWAYS_DETAILED_SEC    = NO\n\n# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all \n# inherited members of a class in the documentation of that class as if those \n# members were ordinary class members. Constructors, destructors and assignment \n# operators of the base classes will not be shown.\n\nINLINE_INHERITED_MEMB  = NO\n\n# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full \n# path before files name in the file list and in the header files. If set \n# to NO the shortest path that makes the file name unique will be used.\n\nFULL_PATH_NAMES        = NO\n\n# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag \n# can be used to strip a user-defined part of the path. Stripping is \n# only done if one of the specified strings matches the left-hand part of \n# the path. The tag can be used to show relative paths in the file list. \n# If left blank the directory from which doxygen is run is used as the \n# path to strip.\n\nSTRIP_FROM_PATH        = \n\n# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of \n# the path mentioned in the documentation of a class, which tells \n# the reader which header file to include in order to use a class. 
\n# If left blank only the name of the header file containing the class \n# definition is used. Otherwise one should specify the include paths that \n# are normally passed to the compiler using the -I flag.\n\nSTRIP_FROM_INC_PATH    = \n\n# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter \n# (but less readable) file names. This can be useful if your file system \n# doesn't support long names like on DOS, Mac, or CD-ROM.\n\nSHORT_NAMES            = NO\n\n# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen \n# will interpret the first line (until the first dot) of a JavaDoc-style \n# comment as the brief description. If set to NO, the JavaDoc \n# comments will behave just like the Qt-style comments (thus requiring an \n# explicit @brief command for a brief description.)\n\nJAVADOC_AUTOBRIEF      = YES\n\n# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen \n# treat a multi-line C++ special comment block (i.e. a block of //! or /// \n# comments) as a brief description. This used to be the default behaviour. \n# The new default is to treat a multi-line C++ comment block as a detailed \n# description. Set this tag to YES if you prefer the old behaviour instead.\n\nMULTILINE_CPP_IS_BRIEF = NO\n\n# If the DETAILS_AT_TOP tag is set to YES then Doxygen \n# will output the detailed description near the top, like JavaDoc.\n# If set to NO, the detailed description appears after the member \n# documentation.\n\nDETAILS_AT_TOP         = NO\n\n# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented \n# member inherits the documentation from any documented member that it \n# re-implements.\n\nINHERIT_DOCS           = YES\n\n# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce \n# a new page for each member. 
If set to NO, the documentation of a member will \n# be part of the file/class/namespace that contains it.\n\nSEPARATE_MEMBER_PAGES  = NO\n\n# The TAB_SIZE tag can be used to set the number of spaces in a tab. \n# Doxygen uses this value to replace tabs by spaces in code fragments.\n\nTAB_SIZE               = 4\n\n# This tag can be used to specify a number of aliases that acts \n# as commands in the documentation. An alias has the form \"name=value\". \n# For example adding \"sideeffect=\\par Side Effects:\\n\" will allow you to \n# put the command \\sideeffect (or @sideeffect) in the documentation, which \n# will result in a user-defined paragraph with heading \"Side Effects:\". \n# You can put \\n's in the value part of an alias to insert newlines.\n\nALIASES                = \"TODO=@todo\"\n\n# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C \n# sources only. Doxygen will then generate output that is more tailored for C. \n# For instance, some of the names that are used will be different. The list \n# of all members will be omitted, etc.\n\nOPTIMIZE_OUTPUT_FOR_C  = YES\n\n# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java \n# sources only. Doxygen will then generate output that is more tailored for Java. \n# For instance, namespaces will be presented as packages, qualified scopes \n# will look different, etc.\n\nOPTIMIZE_OUTPUT_JAVA   = NO\n\n# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want to \n# include (a tag file for) the STL sources as input, then you should \n# set this tag to YES in order to let doxygen match functions declarations and \n# definitions whose arguments contain STL classes (e.g. func(std::string); v.s. \n# func(std::string) {}). 
This also make the inheritance and collaboration \n# diagrams that involve STL classes more complete and accurate.\n\nBUILTIN_STL_SUPPORT    = NO\n\n# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC \n# tag is set to YES, then doxygen will reuse the documentation of the first \n# member in the group (if any) for the other members of the group. By default \n# all members of a group must be documented explicitly.\n\nDISTRIBUTE_GROUP_DOC   = NO\n\n# Set the SUBGROUPING tag to YES (the default) to allow class member groups of \n# the same type (for instance a group of public functions) to be put as a \n# subgroup of that type (e.g. under the Public Functions section). Set it to \n# NO to prevent subgrouping. Alternatively, this can be done per class using \n# the \\nosubgrouping command.\n\nSUBGROUPING            = YES\n\n#---------------------------------------------------------------------------\n# Build related configuration options\n#---------------------------------------------------------------------------\n\n# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in \n# documentation are documented, even if no documentation was available. \n# Private class members and static file members will be hidden unless \n# the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES\n\nEXTRACT_ALL            = YES\n\n# If the EXTRACT_PRIVATE tag is set to YES all private members of a class \n# will be included in the documentation.\n\nEXTRACT_PRIVATE        = NO \n\n# If the EXTRACT_STATIC tag is set to YES all static members of a file \n# will be included in the documentation.\n\nEXTRACT_STATIC         = YES\n\n# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) \n# defined locally in source files will be included in the documentation. \n# If set to NO only classes defined in header files are included.\n\nEXTRACT_LOCAL_CLASSES  = YES\n\n# This flag is only useful for Objective-C code. 
When set to YES local \n# methods, which are defined in the implementation section but not in \n# the interface are included in the documentation. \n# If set to NO (the default) only methods in the interface are included.\n\nEXTRACT_LOCAL_METHODS  = NO\n\n# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all \n# undocumented members of documented classes, files or namespaces. \n# If set to NO (the default) these members will be included in the \n# various overviews, but no documentation section is generated. \n# This option has no effect if EXTRACT_ALL is enabled.\n\nHIDE_UNDOC_MEMBERS     = NO\n\n# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all \n# undocumented classes that are normally visible in the class hierarchy. \n# If set to NO (the default) these classes will be included in the various \n# overviews. This option has no effect if EXTRACT_ALL is enabled.\n\nHIDE_UNDOC_CLASSES     = NO\n\n# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all \n# friend (class|struct|union) declarations. \n# If set to NO (the default) these declarations will be included in the \n# documentation.\n\nHIDE_FRIEND_COMPOUNDS  = NO\n\n# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any \n# documentation blocks found inside the body of a function. \n# If set to NO (the default) these blocks will be appended to the \n# function's detailed documentation block.\n\nHIDE_IN_BODY_DOCS      = NO\n\n# The INTERNAL_DOCS tag determines if documentation \n# that is typed after a \\internal command is included. If the tag is set \n# to NO (the default) then the documentation will be excluded. \n# Set it to YES to include the internal documentation.\n\nINTERNAL_DOCS          = NO\n\n# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate \n# file names in lower-case letters. If set to YES upper-case letters are also \n# allowed. 
This is useful if you have classes or files whose names only differ \n# in case and if your file system supports case sensitive file names. Windows \n# and Mac users are advised to set this option to NO.\n\nCASE_SENSE_NAMES       = YES\n\n# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen \n# will show members with their full class and namespace scopes in the \n# documentation. If set to YES the scope will be hidden.\n\nHIDE_SCOPE_NAMES       = NO\n\n# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen \n# will put a list of the files that are included by a file in the documentation \n# of that file.\n\nSHOW_INCLUDE_FILES     = YES\n\n# If the INLINE_INFO tag is set to YES (the default) then a tag [inline] \n# is inserted in the documentation for inline members.\n\nINLINE_INFO            = YES\n\n# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen \n# will sort the (detailed) documentation of file and class members \n# alphabetically by member name. If set to NO the members will appear in \n# declaration order.\n\nSORT_MEMBER_DOCS       = YES\n\n# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the \n# brief documentation of file, namespace and class members alphabetically \n# by member name. If set to NO (the default) the members will appear in \n# declaration order.\n\nSORT_BRIEF_DOCS        = NO\n\n# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be \n# sorted by fully-qualified names, including namespaces. If set to \n# NO (the default), the class list will be sorted only by class name, \n# not including the namespace part. \n# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.\n# Note: This option applies only to the class list, not to the \n# alphabetical list.\n\nSORT_BY_SCOPE_NAME     = NO\n\n# The GENERATE_TODOLIST tag can be used to enable (YES) or \n# disable (NO) the todo list. 
This list is created by putting \\todo \n# commands in the documentation.\n\nGENERATE_TODOLIST      = YES\n\n# The GENERATE_TESTLIST tag can be used to enable (YES) or \n# disable (NO) the test list. This list is created by putting \\test \n# commands in the documentation.\n\nGENERATE_TESTLIST      = YES\n\n# The GENERATE_BUGLIST tag can be used to enable (YES) or \n# disable (NO) the bug list. This list is created by putting \\bug \n# commands in the documentation.\n\nGENERATE_BUGLIST       = YES\n\n# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or \n# disable (NO) the deprecated list. This list is created by putting \n# \\deprecated commands in the documentation.\n\nGENERATE_DEPRECATEDLIST= YES\n\n# The ENABLED_SECTIONS tag can be used to enable conditional \n# documentation sections, marked by \\if sectionname ... \\endif.\n\nENABLED_SECTIONS       = \n\n# The MAX_INITIALIZER_LINES tag determines the maximum number of lines \n# the initial value of a variable or define consists of for it to appear in \n# the documentation. If the initializer consists of more lines than specified \n# here it will be hidden. Use a value of 0 to hide initializers completely. \n# The appearance of the initializer of individual variables and defines in the \n# documentation can be controlled using \\showinitializer or \\hideinitializer \n# command in the documentation regardless of this setting.\n\nMAX_INITIALIZER_LINES  = 30\n\n# Set the SHOW_USED_FILES tag to NO to disable the list of files generated \n# at the bottom of the documentation of classes and structs. If set to YES the \n# list will mention the files that were used to generate the documentation.\n\nSHOW_USED_FILES        = YES\n\n# If the sources in your project are distributed over multiple directories \n# then setting the SHOW_DIRECTORIES tag to YES will show the directory hierarchy \n# in the documentation. 
The default is NO.\n\nSHOW_DIRECTORIES       = YES\n\n# The FILE_VERSION_FILTER tag can be used to specify a program or script that \n# doxygen should invoke to get the current version for each file (typically from the \n# version control system). Doxygen will invoke the program by executing (via \n# popen()) the command <command> <input-file>, where <command> is the value of \n# the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file \n# provided by doxygen. Whatever the program writes to standard output \n# is used as the file version. See the manual for examples.\n\nFILE_VERSION_FILTER    = \n\n#---------------------------------------------------------------------------\n# configuration options related to warning and progress messages\n#---------------------------------------------------------------------------\n\n# The QUIET tag can be used to turn on/off the messages that are generated \n# by doxygen. Possible values are YES and NO. If left blank NO is used.\n\nQUIET                  = NO\n\n# The WARNINGS tag can be used to turn on/off the warning messages that are \n# generated by doxygen. Possible values are YES and NO. If left blank \n# NO is used.\n\nWARNINGS               = YES\n\n# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings \n# for undocumented members. If EXTRACT_ALL is set to YES then this flag will \n# automatically be disabled.\n\nWARN_IF_UNDOCUMENTED   = YES\n\n# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for \n# potential errors in the documentation, such as not documenting some \n# parameters in a documented function, or documenting parameters that \n# don't exist or using markup commands wrongly.\n\nWARN_IF_DOC_ERROR      = YES\n\n# This WARN_NO_PARAMDOC option can be enabled to get warnings for \n# functions that are documented, but have no documentation for their parameters \n# or return value. 
If set to NO (the default) doxygen will only warn about \n# wrong or incomplete parameter documentation, but not about the absence of \n# documentation.\n\nWARN_NO_PARAMDOC       = NO\n\n# The WARN_FORMAT tag determines the format of the warning messages that \n# doxygen can produce. The string should contain the $file, $line, and $text \n# tags, which will be replaced by the file and line number from which the \n# warning originated and the warning text. Optionally the format may contain \n# $version, which will be replaced by the version of the file (if it could \n# be obtained via FILE_VERSION_FILTER)\n\nWARN_FORMAT            = \"$file:$line: $text\"\n\n# The WARN_LOGFILE tag can be used to specify a file to which warning \n# and error messages should be written. If left blank the output is written \n# to stderr.\n\nWARN_LOGFILE           = \n\n#---------------------------------------------------------------------------\n# configuration options related to the input files\n#---------------------------------------------------------------------------\n\n# The INPUT tag can be used to specify the files and/or directories that contain \n# documented source files. You may enter file names like \"myfile.cpp\" or \n# directories like \"/usr/src/myproject\". Separate the files or directories \n# with spaces.\n\nINPUT                  = @top_srcdir@/src/include\n\n# If the value of the INPUT tag contains directories, you can use the \n# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp \n# and *.h) to filter out the source-files in the directories. If left \n# blank the following patterns are tested: \n# *.c *.cc *.cxx *.cpp *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx \n# *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.py\n\nFILE_PATTERNS          = *.h\n\n# The RECURSIVE tag can be used to specify whether or not subdirectories \n# should be searched for input files as well. Possible values are YES and NO. 
\n# If left blank NO is used.\n\nRECURSIVE              = NO\n\n# The EXCLUDE tag can be used to specify files and/or directories that should \n# be excluded from the INPUT source files. This way you can easily exclude a \n# subdirectory from a directory tree whose root is specified with the INPUT tag.\n\nEXCLUDE                = \n\n# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or \n# directories that are symbolic links (a Unix filesystem feature) are excluded \n# from the input.\n\nEXCLUDE_SYMLINKS       = NO\n\n# If the value of the INPUT tag contains directories, you can use the \n# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude \n# certain files from those directories. Note that the wildcards are matched \n# against the file with absolute path, so to exclude all test directories \n# for example use the pattern */test/*\n\nEXCLUDE_PATTERNS       = config.h\n\n# The EXAMPLE_PATH tag can be used to specify one or more files or \n# directories that contain example code fragments that are included (see \n# the \\include command).\n\nEXAMPLE_PATH           = \n\n# If the value of the EXAMPLE_PATH tag contains directories, you can use the \n# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp \n# and *.h) to filter out the source-files in the directories. If left \n# blank all files are included.\n\nEXAMPLE_PATTERNS       = \n\n# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be \n# searched for input files to be used with the \\include or \\dontinclude \n# commands irrespective of the value of the RECURSIVE tag. \n# Possible values are YES and NO. 
If left blank NO is used.\n\nEXAMPLE_RECURSIVE      = NO\n\n# The IMAGE_PATH tag can be used to specify one or more files or \n# directories that contain image that are included in the documentation (see \n# the \\image command).\n\nIMAGE_PATH             = \n\n# The INPUT_FILTER tag can be used to specify a program that doxygen should \n# invoke to filter for each input file. Doxygen will invoke the filter program \n# by executing (via popen()) the command <filter> <input-file>, where <filter> \n# is the value of the INPUT_FILTER tag, and <input-file> is the name of an \n# input file. Doxygen will then use the output that the filter program writes \n# to standard output.  If FILTER_PATTERNS is specified, this tag will be \n# ignored.\n\nINPUT_FILTER           = \n\n# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern \n# basis.  Doxygen will compare the file name with each pattern and apply the \n# filter if there is a match.  The filters are a list of the form: \n# pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further \n# info on how filters are used. If FILTER_PATTERNS is empty, INPUT_FILTER \n# is applied to all files.\n\nFILTER_PATTERNS        = \n\n# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using \n# INPUT_FILTER) will be used to filter the input files when producing source \n# files to browse (i.e. when SOURCE_BROWSER is set to YES).\n\nFILTER_SOURCE_FILES    = NO\n\n#---------------------------------------------------------------------------\n# configuration options related to source browsing\n#---------------------------------------------------------------------------\n\n# If the SOURCE_BROWSER tag is set to YES then a list of source files will \n# be generated. Documented entities will be cross-referenced with these sources. 
\n# Note: To get rid of all source code in the generated output, make sure also \n# VERBATIM_HEADERS is set to NO.\n\nSOURCE_BROWSER         = NO\n\n# Setting the INLINE_SOURCES tag to YES will include the body \n# of functions and classes directly in the documentation.\n\nINLINE_SOURCES         = NO\n\n# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct \n# doxygen to hide any special comment blocks from generated source code \n# fragments. Normal C and C++ comments will always remain visible.\n\nSTRIP_CODE_COMMENTS    = YES\n\n# If the REFERENCED_BY_RELATION tag is set to YES (the default) \n# then for each documented function all documented \n# functions referencing it will be listed.\n\nREFERENCED_BY_RELATION = YES\n\n# If the REFERENCES_RELATION tag is set to YES (the default) \n# then for each documented function all documented entities \n# called/used by that function will be listed.\n\nREFERENCES_RELATION    = YES\n\n# If the REFERENCES_LINK_SOURCE tag is set to YES (the default)\n# and SOURCE_BROWSER tag is set to YES, then the hyperlinks from\n# functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will\n# link to the source code.  Otherwise they will link to the documentation.\n\nREFERENCES_LINK_SOURCE = YES\n\n# If the USE_HTAGS tag is set to YES then the references to source code \n# will point to the HTML generated by the htags(1) tool instead of doxygen \n# built-in source browser. The htags tool is part of GNU's global source \n# tagging system (see http://www.gnu.org/software/global/global.html). You \n# will need version 4.8.6 or higher.\n\nUSE_HTAGS              = NO\n\n# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen \n# will generate a verbatim copy of the header file for each class for \n# which an include is specified. 
Set to NO to disable this.\n\nVERBATIM_HEADERS       = YES\n\n#---------------------------------------------------------------------------\n# configuration options related to the alphabetical class index\n#---------------------------------------------------------------------------\n\n# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index \n# of all compounds will be generated. Enable this if the project \n# contains a lot of classes, structs, unions or interfaces.\n\nALPHABETICAL_INDEX     = NO\n\n# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then \n# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns \n# in which this list will be split (can be a number in the range [1..20])\n\nCOLS_IN_ALPHA_INDEX    = 5\n\n# In case all classes in a project start with a common prefix, all \n# classes will be put under the same header in the alphabetical index. \n# The IGNORE_PREFIX tag can be used to specify one or more prefixes that \n# should be ignored while generating the index headers.\n\nIGNORE_PREFIX          = \n\n#---------------------------------------------------------------------------\n# configuration options related to the HTML output\n#---------------------------------------------------------------------------\n\n# If the GENERATE_HTML tag is set to YES (the default) Doxygen will \n# generate HTML output.\n\nGENERATE_HTML          = YES\n\n# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. \n# If a relative path is entered the value of OUTPUT_DIRECTORY will be \n# put in front of it. If left blank `html' will be used as the default path.\n\nHTML_OUTPUT            = html\n\n# The HTML_FILE_EXTENSION tag can be used to specify the file extension for \n# each generated HTML page (for example: .htm,.php,.asp). 
If it is left blank \n# doxygen will generate files with .html extension.\n\nHTML_FILE_EXTENSION    = .html\n\n# The HTML_HEADER tag can be used to specify a personal HTML header for \n# each generated HTML page. If it is left blank doxygen will generate a \n# standard header.\n\nHTML_HEADER            = \n\n# The HTML_FOOTER tag can be used to specify a personal HTML footer for \n# each generated HTML page. If it is left blank doxygen will generate a \n# standard footer.\n\nHTML_FOOTER            = \n\n# The HTML_STYLESHEET tag can be used to specify a user-defined cascading \n# style sheet that is used by each HTML page. It can be used to \n# fine-tune the look of the HTML output. If the tag is left blank doxygen \n# will generate a default style sheet. Note that doxygen will try to copy \n# the style sheet file to the HTML output directory, so don't put your own \n# stylesheet in the HTML output directory as well, or it will be erased!\n\nHTML_STYLESHEET        = \n\n# If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes, \n# files or namespaces will be aligned in HTML using tables. If set to \n# NO a bullet list will be used.\n\nHTML_ALIGN_MEMBERS     = YES\n\n# If the GENERATE_HTMLHELP tag is set to YES, additional index files \n# will be generated that can be used as input for tools like the \n# Microsoft HTML help workshop to generate a compressed HTML help file (.chm) \n# of the generated HTML documentation.\n\nGENERATE_HTMLHELP      = NO\n\n# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can \n# be used to specify the file name of the resulting .chm file. You \n# can add a path in front of the file if the result should not be \n# written to the html output directory.\n\nCHM_FILE               = \n\n# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can \n# be used to specify the location (absolute path including file name) of \n# the HTML help compiler (hhc.exe). 
If non-empty doxygen will try to run \n# the HTML help compiler on the generated index.hhp.\n\nHHC_LOCATION           = \n\n# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag \n# controls if a separate .chi index file is generated (YES) or that \n# it should be included in the master .chm file (NO).\n\nGENERATE_CHI           = NO\n\n# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag \n# controls whether a binary table of contents is generated (YES) or a \n# normal table of contents (NO) in the .chm file.\n\nBINARY_TOC             = NO\n\n# The TOC_EXPAND flag can be set to YES to add extra items for group members \n# to the contents of the HTML help documentation and to the tree view.\n\nTOC_EXPAND             = NO\n\n# The DISABLE_INDEX tag can be used to turn on/off the condensed index at \n# top of each HTML page. The value NO (the default) enables the index and \n# the value YES disables it.\n\nDISABLE_INDEX          = NO\n\n# This tag can be used to set the number of enum values (range [1..20]) \n# that doxygen will group on one line in the generated HTML documentation.\n\nENUM_VALUES_PER_LINE   = 4\n\n# If the GENERATE_TREEVIEW tag is set to YES, a side panel will be\n# generated containing a tree-like index structure (just like the one that \n# is generated for HTML Help). For this to work a browser that supports \n# JavaScript, DHTML, CSS and frames is required (for instance Mozilla 1.0+, \n# Netscape 6.0+, Internet explorer 5.0+, or Konqueror). 
Windows users are \n# probably better off using the HTML help feature.\n\nGENERATE_TREEVIEW      = NO\n\n# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be \n# used to set the initial width (in pixels) of the frame in which the tree \n# is shown.\n\nTREEVIEW_WIDTH         = 250\n\n#---------------------------------------------------------------------------\n# configuration options related to the LaTeX output\n#---------------------------------------------------------------------------\n\n# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will \n# generate Latex output.\n\nGENERATE_LATEX         = NO\n\n# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. \n# If a relative path is entered the value of OUTPUT_DIRECTORY will be \n# put in front of it. If left blank `latex' will be used as the default path.\n\nLATEX_OUTPUT           = latex\n\n# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be \n# invoked. If left blank `latex' will be used as the default command name.\n\nLATEX_CMD_NAME         = latex\n\n# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to \n# generate index for LaTeX. If left blank `makeindex' will be used as the \n# default command name.\n\nMAKEINDEX_CMD_NAME     = makeindex\n\n# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact \n# LaTeX documents. This may be useful for small projects and may help to \n# save some trees in general.\n\nCOMPACT_LATEX          = NO\n\n# The PAPER_TYPE tag can be used to set the paper type that is used \n# by the printer. Possible values are: a4, a4wide, letter, legal and \n# executive. 
If left blank a4wide will be used.\n\nPAPER_TYPE             = a4wide\n\n# The EXTRA_PACKAGES tag can be used to specify one or more names of LaTeX \n# packages that should be included in the LaTeX output.\n\nEXTRA_PACKAGES         = \n\n# The LATEX_HEADER tag can be used to specify a personal LaTeX header for \n# the generated latex document. The header should contain everything until \n# the first chapter. If it is left blank doxygen will generate a \n# standard header. Notice: only use this tag if you know what you are doing!\n\nLATEX_HEADER           = \n\n# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated \n# is prepared for conversion to pdf (using ps2pdf). The pdf file will \n# contain links (just like the HTML output) instead of page references \n# This makes the output suitable for online browsing using a pdf viewer.\n\nPDF_HYPERLINKS         = NO\n\n# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of \n# plain latex in the generated Makefile. Set this option to YES to get a \n# higher quality PDF documentation.\n\nUSE_PDFLATEX           = NO\n\n# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\\\batchmode. \n# command to the generated LaTeX files. This will instruct LaTeX to keep \n# running if errors occur, instead of asking the user for help. \n# This option is also used when generating formulas in HTML.\n\nLATEX_BATCHMODE        = NO\n\n# If LATEX_HIDE_INDICES is set to YES then doxygen will not \n# include the index chapters (such as File Index, Compound Index, etc.) 
\n# in the output.\n\nLATEX_HIDE_INDICES     = NO\n\n#---------------------------------------------------------------------------\n# configuration options related to the RTF output\n#---------------------------------------------------------------------------\n\n# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output \n# The RTF output is optimized for Word 97 and may not look very pretty with \n# other RTF readers or editors.\n\nGENERATE_RTF           = NO\n\n# The RTF_OUTPUT tag is used to specify where the RTF docs will be put. \n# If a relative path is entered the value of OUTPUT_DIRECTORY will be \n# put in front of it. If left blank `rtf' will be used as the default path.\n\nRTF_OUTPUT             = rtf\n\n# If the COMPACT_RTF tag is set to YES Doxygen generates more compact \n# RTF documents. This may be useful for small projects and may help to \n# save some trees in general.\n\nCOMPACT_RTF            = NO\n\n# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated \n# will contain hyperlink fields. The RTF file will \n# contain links (just like the HTML output) instead of page references. \n# This makes the output suitable for online browsing using WORD or other \n# programs which support those fields. \n# Note: wordpad (write) and others do not support links.\n\nRTF_HYPERLINKS         = NO\n\n# Load stylesheet definitions from file. Syntax is similar to doxygen's \n# config file, i.e. a series of assignments. You only have to provide \n# replacements, missing definitions are set to their default value.\n\nRTF_STYLESHEET_FILE    = \n\n# Set optional variables used in the generation of an rtf document. 
\n# Syntax is similar to doxygen's config file.\n\nRTF_EXTENSIONS_FILE    = \n\n#---------------------------------------------------------------------------\n# configuration options related to the man page output\n#---------------------------------------------------------------------------\n\n# If the GENERATE_MAN tag is set to YES (the default) Doxygen will \n# generate man pages\n\nGENERATE_MAN           = NO\n\n# The MAN_OUTPUT tag is used to specify where the man pages will be put. \n# If a relative path is entered the value of OUTPUT_DIRECTORY will be \n# put in front of it. If left blank `man' will be used as the default path.\n\nMAN_OUTPUT             = man\n\n# The MAN_EXTENSION tag determines the extension that is added to \n# the generated man pages (default is the subroutine's section .3)\n\nMAN_EXTENSION          = .3\n\n# If the MAN_LINKS tag is set to YES and Doxygen generates man output, \n# then it will generate one additional man file for each entity \n# documented in the real man page(s). These additional files \n# only source the real man page, but without them the man command \n# would be unable to find the correct page. The default is NO.\n\nMAN_LINKS              = NO\n\n#---------------------------------------------------------------------------\n# configuration options related to the XML output\n#---------------------------------------------------------------------------\n\n# If the GENERATE_XML tag is set to YES Doxygen will \n# generate an XML file that captures the structure of \n# the code including all documentation.\n\nGENERATE_XML           = NO\n\n# The XML_OUTPUT tag is used to specify where the XML pages will be put. \n# If a relative path is entered the value of OUTPUT_DIRECTORY will be \n# put in front of it. 
If left blank `xml' will be used as the default path.\n\nXML_OUTPUT             = xml\n\n# The XML_SCHEMA tag can be used to specify an XML schema, \n# which can be used by a validating XML parser to check the \n# syntax of the XML files.\n\nXML_SCHEMA             = \n\n# The XML_DTD tag can be used to specify an XML DTD, \n# which can be used by a validating XML parser to check the \n# syntax of the XML files.\n\nXML_DTD                = \n\n# If the XML_PROGRAMLISTING tag is set to YES Doxygen will \n# dump the program listings (including syntax highlighting \n# and cross-referencing information) to the XML output. Note that \n# enabling this will significantly increase the size of the XML output.\n\nXML_PROGRAMLISTING     = YES\n\n#---------------------------------------------------------------------------\n# configuration options for the AutoGen Definitions output\n#---------------------------------------------------------------------------\n\n# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will \n# generate an AutoGen Definitions (see autogen.sf.net) file \n# that captures the structure of the code including all \n# documentation. Note that this feature is still experimental \n# and incomplete at the moment.\n\nGENERATE_AUTOGEN_DEF   = NO\n\n#---------------------------------------------------------------------------\n# configuration options related to the Perl module output\n#---------------------------------------------------------------------------\n\n# If the GENERATE_PERLMOD tag is set to YES Doxygen will \n# generate a Perl module file that captures the structure of \n# the code including all documentation. 
Note that this \n# feature is still experimental and incomplete at the \n# moment.\n\nGENERATE_PERLMOD       = NO\n\n# If the PERLMOD_LATEX tag is set to YES Doxygen will generate \n# the necessary Makefile rules, Perl scripts and LaTeX code to be able \n# to generate PDF and DVI output from the Perl module output.\n\nPERLMOD_LATEX          = NO\n\n# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be \n# nicely formatted so it can be parsed by a human reader.  This is useful \n# if you want to understand what is going on.  On the other hand, if this \n# tag is set to NO the size of the Perl module output will be much smaller \n# and Perl will parse it just the same.\n\nPERLMOD_PRETTY         = YES\n\n# The names of the make variables in the generated doxyrules.make file \n# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. \n# This is useful so different doxyrules.make files included by the same \n# Makefile don't overwrite each other's variables.\n\nPERLMOD_MAKEVAR_PREFIX = \n\n#---------------------------------------------------------------------------\n# Configuration options related to the preprocessor   \n#---------------------------------------------------------------------------\n\n# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will \n# evaluate all C-preprocessor directives found in the sources and include \n# files.\n\nENABLE_PREPROCESSING   = NO \n\n# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro \n# names in the source code. If set to NO (the default) only conditional \n# compilation will be performed. 
Macro expansion can be done in a controlled \n# way by setting EXPAND_ONLY_PREDEF to YES.\n\nMACRO_EXPANSION        = NO\n\n# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES \n# then the macro expansion is limited to the macros specified with the \n# PREDEFINED and EXPAND_AS_DEFINED tags.\n\nEXPAND_ONLY_PREDEF     = NO\n\n# If the SEARCH_INCLUDES tag is set to YES (the default) the includes files \n# in the INCLUDE_PATH (see below) will be search if a #include is found.\n\nSEARCH_INCLUDES        = NO \n\n# The INCLUDE_PATH tag can be used to specify one or more directories that \n# contain include files that are not input files but should be processed by \n# the preprocessor.\n\nINCLUDE_PATH           = \n\n# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard \n# patterns (like *.h and *.hpp) to filter out the header-files in the \n# directories. If left blank, the patterns specified with FILE_PATTERNS will \n# be used.\n\nINCLUDE_FILE_PATTERNS  = \n\n# The PREDEFINED tag can be used to specify one or more macro names that \n# are defined before the preprocessor is started (similar to the -D option of \n# gcc). The argument of the tag is a list of macros of the form: name \n# or name=definition (no spaces). If the definition and the = are \n# omitted =1 is assumed. To prevent a macro definition from being \n# undefined via #undef or recursively expanded use the := operator \n# instead of the = operator.\n\nPREDEFINED             = \n\n# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then \n# this tag can be used to specify a list of macro names that should be expanded. \n# The macro definition that is found in the sources will be used. 
\n# Use the PREDEFINED tag if you want to use a different macro definition.\n\nEXPAND_AS_DEFINED      = \n\n# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then \n# doxygen's preprocessor will remove all function-like macros that are alone \n# on a line, have an all uppercase name, and do not end with a semicolon. Such \n# function macros are typically used for boiler-plate code, and will confuse \n# the parser if not removed.\n\nSKIP_FUNCTION_MACROS   = YES\n\n#---------------------------------------------------------------------------\n# Configuration::additions related to external references   \n#---------------------------------------------------------------------------\n\n# The TAGFILES option can be used to specify one or more tagfiles. \n# Optionally an initial location of the external documentation \n# can be added for each tagfile. The format of a tag file without \n# this location is as follows: \n#   TAGFILES = file1 file2 ... \n# Adding location for the tag files is done as follows: \n#   TAGFILES = file1=loc1 \"file2 = loc2\" ... \n# where \"loc1\" and \"loc2\" can be relative or absolute paths or \n# URLs. If a location is present for each tag, the installdox tool \n# does not have to be run to correct the links.\n# Note that each tag file must have a unique name\n# (where the name does NOT include the path)\n# If a tag file is not located in the directory in which doxygen \n# is run, you must also specify the path to the tagfile here.\n\nTAGFILES               = \n\n# When a file name is specified after GENERATE_TAGFILE, doxygen will create \n# a tag file that is based on the input files it reads.\n\nGENERATE_TAGFILE       = \n\n# If the ALLEXTERNALS tag is set to YES all external classes will be listed \n# in the class index. If set to NO only the inherited external classes \n# will be listed.\n\nALLEXTERNALS           = NO\n\n# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed \n# in the modules index. 
If set to NO, only the current project's groups will \n# be listed.\n\nEXTERNAL_GROUPS        = YES\n\n# The PERL_PATH should be the absolute path and name of the perl script \n# interpreter (i.e. the result of `which perl').\n\nPERL_PATH              = /usr/bin/perl\n\n#---------------------------------------------------------------------------\n# Configuration options related to the dot tool   \n#---------------------------------------------------------------------------\n\n# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will \n# generate a inheritance diagram (in HTML, RTF and LaTeX) for classes with base \n# or super classes. Setting the tag to NO turns the diagrams off. Note that \n# this option is superseded by the HAVE_DOT option below. This is only a \n# fallback. It is recommended to install and use dot, since it yields more \n# powerful graphs.\n\nCLASS_DIAGRAMS         = YES\n\n# If set to YES, the inheritance and collaboration graphs will hide \n# inheritance and usage relations if the target is undocumented \n# or is not a class.\n\nHIDE_UNDOC_RELATIONS   = YES\n\n# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is \n# available from the path. This tool is part of Graphviz, a graph visualization \n# toolkit from AT&T and Lucent Bell Labs. The other options in this section \n# have no effect if this option is set to NO (the default)\n\nHAVE_DOT               = NO\n\n# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen \n# will generate a graph for each documented class showing the direct and \n# indirect inheritance relations. 
Setting this tag to YES will force the \n# the CLASS_DIAGRAMS tag to NO.\n\nCLASS_GRAPH            = YES\n\n# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen \n# will generate a graph for each documented class showing the direct and \n# indirect implementation dependencies (inheritance, containment, and \n# class references variables) of the class with other documented classes.\n\nCOLLABORATION_GRAPH    = YES\n\n# If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen \n# will generate a graph for groups, showing the direct groups dependencies\n\nGROUP_GRAPHS           = YES\n\n# If the UML_LOOK tag is set to YES doxygen will generate inheritance and \n# collaboration diagrams in a style similar to the OMG's Unified Modeling \n# Language.\n\nUML_LOOK               = NO\n\n# If set to YES, the inheritance and collaboration graphs will show the \n# relations between templates and their instances.\n\nTEMPLATE_RELATIONS     = NO\n\n# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT \n# tags are set to YES then doxygen will generate a graph for each documented \n# file showing the direct and indirect include dependencies of the file with \n# other documented files.\n\nINCLUDE_GRAPH          = YES\n\n# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and \n# HAVE_DOT tags are set to YES then doxygen will generate a graph for each \n# documented header file showing the documented files that directly or \n# indirectly include this file.\n\nINCLUDED_BY_GRAPH      = YES\n\n# If the CALL_GRAPH and HAVE_DOT tags are set to YES then doxygen will \n# generate a call dependency graph for every global function or class method. \n# Note that enabling this option will significantly increase the time of a run. 
\n# So in most cases it will be better to enable call graphs for selected \n# functions only using the \\callgraph command.\n\nCALL_GRAPH             = NO\n\n# If the CALLER_GRAPH and HAVE_DOT tags are set to YES then doxygen will \n# generate a caller dependency graph for every global function or class method. \n# Note that enabling this option will significantly increase the time of a run. \n# So in most cases it will be better to enable caller graphs for selected \n# functions only using the \\callergraph command.\n\nCALLER_GRAPH           = NO\n\n# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen \n# will show a graphical hierarchy of all classes instead of a textual one.\n\nGRAPHICAL_HIERARCHY    = YES\n\n# If the DIRECTORY_GRAPH, SHOW_DIRECTORIES and HAVE_DOT tags are set to YES \n# then doxygen will show the dependencies a directory has on other directories \n# in a graphical way. The dependency relations are determined by the #include\n# relations between the files in the directories.\n\nDIRECTORY_GRAPH        = YES\n\n# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images \n# generated by dot. Possible values are png, jpg, or gif\n# If left blank png will be used.\n\nDOT_IMAGE_FORMAT       = png\n\n# The tag DOT_PATH can be used to specify the path where the dot tool can be \n# found. If left blank, it is assumed the dot tool can be found in the path.\n\nDOT_PATH               = \n\n# The DOTFILE_DIRS tag can be used to specify one or more directories that \n# contain dot files that are included in the documentation (see the \n# \\dotfile command).\n\nDOTFILE_DIRS           = \n\n# The MAX_DOT_GRAPH_WIDTH tag can be used to set the maximum allowed width \n# (in pixels) of the graphs generated by dot. If a graph becomes larger than \n# this value, doxygen will try to truncate the graph, so that it fits within \n# the specified constraint. 
Beware that most browsers cannot cope with very \n# large images.\n\nMAX_DOT_GRAPH_WIDTH    = 1024\n\n# The MAX_DOT_GRAPH_HEIGHT tag can be used to set the maximum allowed height \n# (in pixels) of the graphs generated by dot. If a graph becomes larger than \n# this value, doxygen will try to truncate the graph, so that it fits within \n# the specified constraint. Beware that most browsers cannot cope with very \n# large images.\n\nMAX_DOT_GRAPH_HEIGHT   = 1024\n\n# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the \n# graphs generated by dot. A depth value of 3 means that only nodes reachable \n# from the root by following a path via at most 3 edges will be shown. Nodes \n# that lay further from the root node will be omitted. Note that setting this \n# option to 1 or 2 may greatly reduce the computation time needed for large \n# code bases. Also note that a graph may be further truncated if the graph's \n# image dimensions are not sufficient to fit the graph (see MAX_DOT_GRAPH_WIDTH \n# and MAX_DOT_GRAPH_HEIGHT). If 0 is used for the depth value (the default), \n# the graph is not depth-constrained.\n\nMAX_DOT_GRAPH_DEPTH    = 0\n\n# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent \n# background. This is disabled by default, which results in a white background. \n# Warning: Depending on the platform used, enabling this option may lead to \n# badly anti-aliased labels on the edges of a graph (i.e. they become hard to \n# read).\n\nDOT_TRANSPARENT        = NO\n\n# Set the DOT_MULTI_TARGETS tag to YES to allow dot to generate multiple output \n# files in one run (i.e. multiple -o and -T options on the command line). 
This \n# makes dot run faster, but since only newer versions of dot (>1.8.10) \n# support this, this feature is disabled by default.\n\nDOT_MULTI_TARGETS      = NO\n\n# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will \n# generate a legend page explaining the meaning of the various boxes and \n# arrows in the dot generated graphs.\n\nGENERATE_LEGEND        = YES\n\n# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will \n# remove the intermediate dot files that are used to generate \n# the various graphs.\n\nDOT_CLEANUP            = YES\n\n#---------------------------------------------------------------------------\n# Configuration::additions related to the search engine   \n#---------------------------------------------------------------------------\n\n# The SEARCHENGINE tag specifies whether or not a search engine should be \n# used. If set to NO the values of all tags below this one will be ignored.\n\nSEARCHENGINE           = NO\n"
  },
  {
    "path": "doc/templates/Makefile.am",
    "content": "\nconfigdir = @CONFDIR@/robinhood.d\ntemplatesdir = $(configdir)/templates\nincludesdir = $(configdir)/includes\n\ndist_templates_DATA = basic.conf example_alerts.conf example_checksum.conf \\\n\t\t      example_cleanup.conf example_lhsm.conf example_modeguard.conf \\\n\t\t      example_rmdir.conf example_shook.conf\n\ndist_includes_DATA = includes/alerts.inc includes/backup.inc includes/check.inc \\\n\t\t     includes/lhsm.inc includes/modeguard.inc includes/rmdir.inc \\\n\t\t     includes/rmdir_old.inc includes/shook.inc includes/tmpfs.inc\n"
  },
  {
    "path": "doc/templates/basic.conf",
    "content": "General {\n    fs_path = \"/path/to/fs\";\n    # filesystem type, as displayed by 'mount' (e.g. ext4, xfs, lustre, ...)\n    fs_type = xfs;\n}\n\nLog {\n    log_file = \"/var/log/robinhood.log\";\n    report_file = \"/var/log/robinhood_actions.log\";\n    alert_file = \"/var/log/robinhood_alerts.log\";\n}\n\nListManager {\n    MySQL {\n        server = localhost;\n        db = rbh_test;\n        user = robinhood;\n        password_file = /etc/robinhood.d/.dbpassword;\n    }\n}\n\n# Lustre 2.x only\nChangeLog {\n    MDT {\n        mdt_name = \"MDT0000\";\n        reader_id = \"cl1\";\n    }\n}\n\n# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n"
  },
  {
    "path": "doc/templates/example_alerts.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\nGeneral {\n\tfs_path = /mnt/lustre;\n\tfs_type = lustre;\n}\n\n#### policy definitions ####\n\n# include template for alerts\n%include \"includes/alerts.inc\"\n\n#### fileclass definitions ####\n\nFileClass small_files {\n    definition { type == file and size > 0 and size <= 16MB }\n    # report = yes (default)\n}\nFileClass std_files {\n    definition { type == file and size > 16MB and size <= 1GB }\n}\nFileClass big_files {\n    definition { type == file and size > 1GB }\n}\n\nFileClass largedir {\n    definition { type == directory and dircount > 10000 }\n}\n\nFileClass f1 {\n    definition { type == file and name == \"file.1\" }\n}\n\nFileClass f2 {\n    definition { type == file and name == \"file.2\" }\n}\n\n### Alerts specification \nalert_rules {\n    # don't check entries more frequently than daily\n    ignore { last_check < 1d }\n    # don't check entries while they are modified\n    ignore { last_mod < 1h }\n\n    rule raise_alert {\n        ## List all fileclasses that would raise alerts HERE:\n        target_fileclass = f1;\n        target_fileclass = f2;\n        target_fileclass = largedir;\n\n        # customize alert title:\n        action_params { title = \"entry matches '{fileclass}' ({rule})\"; }\n\n        # apply to all matching fileclasses in the policy scope\n        condition = true;\n    }\n\n    # clear alert status\n    rule default {\n        action = none;\n        action_params { alert = clear; }\n        # apply to all entries that don't match 'raise_alert'\n        condition = true;\n    }\n}\n\n# trigger alert check hourly\nalert_trigger {\n    trigger_on = periodic;\n    check_interval = 1h;\n}\n\n########### end of policy rules ############\n\n# ChangeLog Reader configuration\n# Parameters for processing MDT changelogs :\nChangeLog {\n    # 1 MDT block for each MDT :\n    MDT {\n        # name of the first 
MDT\n        mdt_name  = \"MDT0000\" ;\n\n        # id of the persistent changelog reader\n        # as returned by \"lctl changelog_register\" command\n        reader_id = \"cl1\" ;\n    }\n    polling_interval = 1s;\n}\n\nLog {\n    # Log verbosity level\n    # Possible values are: CRIT, MAJOR, EVENT, VERB, DEBUG, FULL\n    debug_level = EVENT;\n\n    # Log file\n    log_file = \"/var/log/robinhood/lustre.log\";\n\n    # File for reporting purge events\n    report_file = \"/var/log/robinhood/lustre_actions.log\";\n\n    # set alert_file, alert_mail or both depending on the alert method you wish\n    alert_file = \"/var/log/robinhood/lustre_alerts.log\";\n    alert_show_attrs = yes;\n}\n\nListManager {\n\tMySQL {\n\t\tserver = \"localhost\";\n\t\tdb = \"robinhood_lustre\";\n        user = \"robinhood\";\n\t\t# password or password_file are mandatory\n\t\tpassword = \"robinhood\";\n        engine = innodb;\n\t}\n}\n"
  },
  {
    "path": "doc/templates/example_checksum.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\nGeneral {\n\tfs_path = /mnt/lustre;\n\tfs_type = lustre;\n}\n\n#### policy definitions ####\n\n# include template for checksuming\n%include \"includes/check.inc\"\n\n########### checksum rules ############\n\nfileclass never_checked {\n    # never checked or no successful check\n    definition { checksum.last_success == 0 }\n    # don't display this fileclass in --classinfo reports.\n    report = no;\n}\n\nchecksum_parameters {\n    # max number of checksum computed in parallel\n    nb_threads = 4;\n\n    # limit checksuming throughput\n    schedulers = common.rate_limit;\n    rate_limit {\n        # max count per period\n        #max_count = 1000;\n        # max size per period: 1GB/s\n        max_size = 10GB;\n        # period, in milliseconds: 10s\n        period_ms = 10000;\n    }\n\n}\n\nchecksum_rules {\n    ignore { last_check < 7d }\n    ignore { last_mod < 1d }\n\n    rule initial_check {\n        target_fileclass = never_checked;\n        condition { last_mod > 1d }\n    }\n\n    rule default {\n       condition { last_mod > 1d and last_check > 7d }\n    }\n}\n\n# start checksum hourly\nchecksum_trigger {\n    trigger_on = periodic;\n    check_interval = 1h;\n}\n\n########### end of policy rules ############\n\n# ChangeLog Reader configuration\n# Parameters for processing MDT changelogs :\nChangeLog {\n    # 1 MDT block for each MDT :\n    MDT {\n        # name of the first MDT\n        mdt_name  = \"MDT0000\" ;\n\n        # id of the persistent changelog reader\n        # as returned by \"lctl changelog_register\" command\n        reader_id = \"cl1\" ;\n    }\n    polling_interval = 1s;\n}\n\nLog {\n    # Log verbosity level\n    # Possible values are: CRIT, MAJOR, EVENT, VERB, DEBUG, FULL\n    debug_level = EVENT;\n\n    # Log file\n    log_file = \"/var/log/robinhood/lustre.log\";\n\n    # File for reporting purge events\n    
report_file = \"/var/log/robinhood/lustre_actions.log\";\n\n    # set alert_file, alert_mail or both depending on the alert method you wish\n    alert_file = \"/var/log/robinhood/lustre_alerts.log\";\n    alert_show_attrs = yes;\n}\n\nListManager {\n\tMySQL {\n\t\tserver = \"localhost\";\n\t\tdb = \"robinhood_lustre\";\n        user = \"robinhood\";\n\t\t# password or password_file are mandatory\n\t\tpassword = \"robinhood\";\n        engine = innodb;\n\t}\n}\n"
  },
  {
    "path": "doc/templates/example_cleanup.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\nGeneral {\n\tfs_path = /mnt/lustre;\n\tfs_type = lustre;\n}\n\n#### policy definitions ####\n\n# include template policy definitions for legacy TMPFS flavor\n%include \"includes/tmpfs.inc\"\n\n#### fileclass definitions ####\n\ncleanup_parameters {\n    #action_params {\n    #     # set to true if entries should be removed from DB by\n    #     # changelog or Garbage collection\n    #     invalidate_dbentry = false; # default behaviour\n    #}\n}\n\nFileClass even_files {\n    definition { type == file and name == \"*[02468]\" }\n    # only for policy matching, not to display in reports\n    report = no;\n}\n\nFileClass odd_files {\n    definition { type == file and name == \"*[13579]\" }\n    report = no;\n}\n\n# fileclasses to display in reports (can still be used in policies)\nFileClass empty_files {\n    definition { type == file and size == 0 }\n    # report = yes (default)\n}\nFileClass small_files {\n    definition { type == file and size > 0 and size <= 16MB }\n    # report = yes (default)\n}\nFileClass std_files {\n    definition { type == file and size > 16MB and size <= 1GB }\n}\nFileClass big_files {\n    definition { type == file and size > 1GB }\n}\n\nFileClass largedir {\n    definition { type == directory and dircount > 10000 }\n}\n\nFileClass f1 {\n    definition { type == file and name == \"file.1\" }\n}\n\nFileClass f2 {\n    definition { type == file and name == \"file.2\" }\n}\n\n\n#### Deleting old unused files #######\n\ncleanup_rules {\n\n    ignore { last_mod < 1d }\n    ignore_fileclass = empty_files;\n\n    rule clean_f {\n        target_fileclass = f1;\n        target_fileclass = f2;\n\n        condition { last_access > 100d }\n    }\n\n\n    # rule for other entries\n    rule default {\n        condition { last_access > 30d }\n    }\n}\n\n# clean when inode count > 100M\ncleanup_trigger {\n    trigger_on = 
global_usage;\n    high_threshold_cnt = 100M;\n    low_threshold_cnt  = 100M;\n    check_interval     = 5min;\n}\n\n########### end of policy rules ############\n\n\n# ChangeLog Reader configuration\n# Parameters for processing MDT changelogs :\nChangeLog {\n    # 1 MDT block for each MDT :\n    MDT {\n        # name of the first MDT\n        mdt_name  = \"MDT0000\" ;\n\n        # id of the persistent changelog reader\n        # as returned by \"lctl changelog_register\" command\n        reader_id = \"cl1\" ;\n    }\n    polling_interval = 1s;\n}\n\nLog {\n    # Log verbosity level\n    # Possible values are: CRIT, MAJOR, EVENT, VERB, DEBUG, FULL\n    debug_level = EVENT;\n\n    # Log file\n    log_file = \"/var/log/robinhood/lustre.log\";\n\n    # File for reporting purge events\n    report_file = \"/var/log/robinhood/lustre_actions.log\";\n\n    # set alert_file, alert_mail or both depending on the alert method you wish\n    alert_file = \"/var/log/robinhood/lustre_alerts.log\";\n    alert_show_attrs = yes;\n}\n\nListManager {\n\tMySQL {\n\t\tserver = \"localhost\";\n\t\tdb = \"robinhood_lustre\";\n        user = \"robinhood\";\n\t\t# password or password_file are mandatory\n\t\tpassword = \"robinhood\";\n        engine = innodb;\n\t}\n}\n"
  },
  {
    "path": "doc/templates/example_lhsm.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\nGeneral {\n\tfs_path = /mnt/lustre;\n\tfs_type = lustre;\n}\n\n#### policy definitions ####\n\n# include template policy definitions for Lustre/HSM\n%include \"includes/lhsm.inc\"\n\n#### fileclass definitions ####\n\nFileClass even_files {\n    definition { type == file and name == \"*[02468]\" }\n    # only for policy matching, not to display in reports\n    report = no;\n}\n\nFileClass odd_files {\n    definition { type == file and name == \"*[13579]\" }\n    lhsm_archive_action_params { archive_id = 2; }\n    report = no;\n}\n\n# fileclasses to display in reports (can still be used in policies)\nFileClass empty_files {\n    definition { type == file and size == 0 }\n    # report = yes (default)\n}\nFileClass small_files {\n    definition { type == file and size > 0 and size <= 16MB }\n    # report = yes (default)\n}\nFileClass std_files {\n    definition { type == file and size > 16MB and size <= 1GB }\n}\nFileClass big_files {\n    definition { type == file and size > 1GB }\n}\n\nFileClass largedir {\n    definition { type == directory and dircount > 10000 }\n}\n\nFileClass f1 {\n    definition { type == file and name == \"file.1\" }\n}\n\nFileClass f2 {\n    definition { type == file and name == \"file.2\" }\n}\n\n#### Common Lustre/HSM parameters ####\n\nlhsm_config {\n    # used for 'undelete': command to change the fid of an entry in archive\n    rebind_cmd = \"/usr/sbin/lhsmtool_posix --hsm_root=/tmp/backend --archive {archive_id} --rebind {oldfid} {newfid} {fsroot}\";\n}\n\n#### Lustre/HSM archive configuration ####\n\n\nlhsm_archive_parameters {\n    nb_threads = 1;\n\n    # limit archive rate to avoid flooding the MDT coordinator\n    schedulers = common.rate_limit;\n    rate_limit {\n        # max count per period\n        max_count = 1000;\n        # max size per period: 1GB/s\n        #max_size = 10GB;\n        # period, in 
milliseconds: 10s\n        period_ms = 10000;\n    }\n\n    # suspend policy run if action error rate > 50% (after 100 errors)\n    suspend_error_pct = 50%;\n    suspend_error_min= 100;\n\n    # overrides policy default action\n    action = cmd(\"lfs hsm_archive --archive {archive_id} /mnt/lustre/.lustre/fid/{fid}\");\n\n    # default action parameters\n    action_params {\n        archive_id = 1;\n    }\n}\n\nlhsm_archive_rules {\n    ignore_fileclass = empty_files;\n\n    rule archive_small {\n        target_fileclass = small_files;\n        condition { last_mod >= 30min }\n\n        # overrides policy action\n        action = cmd(\"lfs hsm_archive {fullpath}\");\n        action_params { archive_id = 2; }\n    }\n\n    rule archive_std {\n        target_fileclass = std_files;\n        target_fileclass = big_files;\n        action_params { archive_id = 1; }\n        condition { last_mod >= 30min }\n    }\n\n    # fallback rule\n    rule default {\n        action_params { archive_id = 3; }\n        condition { last_mod >= 30min }\n    }\n}\n\n# run every 5 min\nlhsm_archive_trigger {\n    trigger_on = periodic;\n    check_interval = 5min;\n}\n\n#### Lustre/HSM release configuration ####\n\nlhsm_release_rules {\n    ignore_fileclass = empty_files;\n\n    # keep small files on disk as long as possible\n    rule release_small {\n        target_fileclass = small_files;\n        condition { last_access > 1y }\n    }\n\n    rule release_std {\n        target_fileclass = std_files;\n        target_fileclass = big_files;\n        condition { last_access > 1d }\n    }\n\n    # fallback rule\n    rule default {\n        condition { last_access > 6h }\n    }\n}\n\n# run 'lhsm_release' on full OSTs\nlhsm_release_trigger {\n    trigger_on = ost_usage;\n    high_threshold_pct = 85%;\n    low_threshold_pct  = 80%;\n    check_interval     = 5min;\n}\n\nlhsm_release_parameters {\n    nb_threads = 4;\n## purge 1000 files max at once\n#    max_action_count = 1000;\n#    
max_action_volume = 1TB;\n\n    # suspend policy run if action error rate > 50% (after 100 errors)\n    suspend_error_pct = 50%;\n    suspend_error_min= 100;\n}\n\n#### Lustre/HSM remove configuration ####\nlhsm_remove_rules\n{\n    # cleanup backend files after 30d\n    rule default {\n        condition { rm_time >= 30d }\n    }\n}\n\n# run daily\nlhsm_remove_trigger\n{\n    trigger_on = periodic;\n    check_interval = 1d;\n}\n\n########### end of policy rules ############\n\n# ChangeLog Reader configuration\n# Parameters for processing MDT changelogs :\nChangeLog {\n    # 1 MDT block for each MDT :\n    MDT {\n        # name of the first MDT\n        mdt_name  = \"MDT0000\" ;\n\n        # id of the persistent changelog reader\n        # as returned by \"lctl changelog_register\" command\n        reader_id = \"cl1\" ;\n    }\n    polling_interval = 1s;\n}\n\nLog {\n    # Log verbosity level\n    # Possible values are: CRIT, MAJOR, EVENT, VERB, DEBUG, FULL\n    debug_level = EVENT;\n\n    # Log file\n    log_file = \"/var/log/robinhood/lustre.log\";\n\n    # File for reporting purge events\n    report_file = \"/var/log/robinhood/lustre_actions.log\";\n\n    # set alert_file, alert_mail or both depending on the alert method you wish\n    alert_file = \"/var/log/robinhood/lustre_alerts.log\";\n    alert_show_attrs = yes;\n}\n\nListManager {\n\tMySQL {\n\t\tserver = \"localhost\";\n\t\tdb = \"robinhood_lustre\";\n        user = \"robinhood\";\n\t\t# password or password_file are mandatory\n\t\tpassword = \"robinhood\";\n        engine = innodb;\n\t}\n}\n"
  },
  {
    "path": "doc/templates/example_modeguard.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\nGeneral {\n\tfs_path = /mnt/lustre;\n\tfs_type = lustre;\n}\n\n#### policy definitions ####\n\n# include template for modeguard\n%include \"includes/modeguard.inc\"\n\n# Make modeguard enforce setgid bit on directories.\n# Directory setgid is inherited from parent but a user or some copy\n# tool might remove it. This is just an example, you can easily\n# modify the set/clear masks below using the octal notation.\n#\nmodeguard_config {\n        set_mask = \"2000\";\n#        clear_mask = \"0002\";\n}\n\n#### fileclass definitions ####\n#\n# fileclasses to display in reports (can still be used in policies)\nFileClass empty_files {\n    definition { type == file and size == 0 }\n    # report = yes (default)\n}\nFileClass small_files {\n    definition { type == file and size > 0 and size <= 16MB }\n    # report = yes (default)\n}\nFileClass std_files {\n    definition { type == file and size > 16MB and size <= 1GB }\n}\nFileClass big_files {\n    definition { type == file and size > 1GB }\n}\n\nFileClass f1 {\n    definition { type == file and name == \"file.1\" }\n}\n\nFileClass f2 {\n    definition { type == file and name == \"file.2\" }\n}\n\n############# modeguard rules ############\n\nmodeguard_rules {\n    ignore_fileclass = f1;\n    \n    rule default {\n        condition { modeguard.status != ok }\n    }\n}\n\n# Run often, using Lustre changelogs is recommended.\nmodeguard_trigger {\n    trigger_on = scheduled;\n    check_interval = 1h;\n}\n\n########### end of policy rules ############\n\n# ChangeLog Reader configuration\n# Parameters for processing MDT changelogs :\nChangeLog {\n    # 1 MDT block for each MDT :\n    MDT {\n        # name of the first MDT\n        mdt_name  = \"MDT0000\" ;\n\n        # id of the persistent changelog reader\n        # as returned by \"lctl changelog_register\" command\n        reader_id = \"cl1\" ;\n    }\n  
  polling_interval = 1s;\n}\n\nLog {\n    # Log verbosity level\n    # Possible values are: CRIT, MAJOR, EVENT, VERB, DEBUG, FULL\n    debug_level = EVENT;\n\n    # Log file\n    log_file = \"/var/log/robinhood/lustre.log\";\n\n    # File for reporting purge events\n    report_file = \"/var/log/robinhood/lustre_actions.log\";\n\n    # set alert_file, alert_mail or both depending on the alert method you wish\n    alert_file = \"/var/log/robinhood/lustre_alerts.log\";\n    alert_show_attrs = yes;\n}\n\nListManager {\n\tMySQL {\n\t\tserver = \"localhost\";\n\t\tdb = \"robinhood_lustre\";\n        user = \"robinhood\";\n\t\t# password or password_file are mandatory\n\t\tpassword = \"robinhood\";\n        engine = innodb;\n\t}\n}\n"
  },
  {
    "path": "doc/templates/example_rmdir.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\nGeneral {\n\tfs_path = /mnt/lustre;\n\tfs_type = lustre;\n}\n\n#### policy definitions ####\n\n# include template policy definitions for removing directories\n%include \"includes/rmdir.inc\"\n\n#### fileclasses definition ####\n\nfileclass empty_dir {\n    definition { type == directory and dircount == 0 }\n}\n\nfileclass batch_dir {\n    definition { type == directory\n                 and path == \"/mnt/lustre/jobs/*/batch.*\" }\n}\n\nfileclass tmp_dirs {\n    definition { type == directory\n                 and name == \"tmp.*\" }\n}\n\nfileclass log_dirs {\n    definition { type == directory\n                 and path == \"/mnt/lustre/system/logs/node*\" }\n}\n  \n\n############# rmdir rules ############\n\nrmdir_parameters {\n    lru_sort_attr = none;\n}\n\nrmdir_trigger {\n    trigger_on = periodic;\n    check_interval = 1h;\n}\n\nrmdir_rules {\n    # preserve root directories\n    ignore { depth < 4 }\n\n    # remove empty directories after 30d\n    rule rmdir_empty {\n        target_fileclass = empty_dir;\n        action = common.rmdir;\n\n        condition { last_mod > 30d }\n    }\n\n    # remove some directories recursively after 1d\n    rule rmdir_recurse1d {\n        target_fileclass = batch_dir;\n        target_fileclass = tmp_dirs;\n        action = cmd(\"rm -rf {fullpath}\");\n\n        condition { last_mod > 1d }\n    }\n\n    # remove some other directories after 30d\n    rule rmdir_recurse30d {\n        target_fileclass = log_dirs;\n        action = cmd(\"rm -rf {fullpath}\");\n\n        condition { last_mod > 30d }\n    }\n}\n\n########### end of policy rules ############\n\n\n# ChangeLog Reader configuration\n# Parameters for processing MDT changelogs :\nChangeLog {\n    # 1 MDT block for each MDT :\n    MDT {\n        # name of the first MDT\n        mdt_name  = \"MDT0000\" ;\n\n        # id of the persistent changelog 
reader\n        # as returned by \"lctl changelog_register\" command\n        reader_id = \"cl1\" ;\n    }\n    polling_interval = 1s;\n}\n\nLog {\n    # Log verbosity level\n    # Possible values are: CRIT, MAJOR, EVENT, VERB, DEBUG, FULL\n    debug_level = EVENT;\n\n    # Log file\n    log_file = \"/var/log/robinhood/lustre.log\";\n\n    # File for reporting purge events\n    report_file = \"/var/log/robinhood/lustre_actions.log\";\n\n    # set alert_file, alert_mail or both depending on the alert method you wish\n    alert_file = \"/var/log/robinhood/lustre_alerts.log\";\n    alert_show_attrs = yes;\n}\n\nListManager {\n\tMySQL {\n\t\tserver = \"localhost\";\n\t\tdb = \"robinhood_lustre\";\n        user = \"robinhood\";\n\t\t# password or password_file are mandatory\n\t\tpassword = \"robinhood\";\n        engine = innodb;\n\t}\n}\n"
  },
  {
    "path": "doc/templates/example_shook.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\n%include \"includes/shook.inc\"\n\n######## simple migration policy ##########\nshook_archive_parameters {\n\taction = cmd(\"/usr/sbin/rbhext_tool ARCHIVE /mnt/lustre/.lustre/fid/{fid} {targetpath}\");\n}\n\nshook_archive_rules\n{\n    policy default\n    {\n        # Archive 'dirty' files that have not been modified\n        # for more than 6 hours, or backup them daily\n        # if they are continuously appended.\n        condition { last_mod > 6h }\n    }\n}\n\nshook_release_rules\n{\n    policy default\n    {\n        # Archive 'dirty' files that have not been modified\n        # for more than 6 hours, or backup them daily\n        # if they are continuously appended.\n        condition { last_mod > 6h }\n    }\n}\n\n\n# purge based on OST_levels\nshook_release_trigger\n{\n    trigger_on         = OST_usage ;\n    high_threshold_pct = 92% ;\n    low_threshold_pct  = 90% ;\n    check_interval = 5min;\n}\n\n\n##### basic HSM remove policy ######\n\nshook_remove_rules\n{\n    # cleanup backend files after 30d\n    rule default {\n    \tcondition { rm_time >= 30d }\n    }\n}\n\n# run daily\nshook_remove_trigger\n{\n    trigger_on = periodic;\n    check_interval = 1d;\n}\n\n##### general Filesystem info ####\nGeneral\n{\n\tfs_path = /mnt/lustre;\n}\n\n#### Backend configuration ####\nshook_config\n{\n    root     = \"/tmp/backend\";\n    check_mounted = false;\n    mnt_type = xfs;\n\n    # set newly created file to the right status\n    recovery_action = shook.recover;\n}\n\n# ChangeLog Reader configuration\n# Parameters for processing MDT changelogs :\nChangeLog\n{\n    # 1 MDT block for each MDT :\n    MDT\n    {\n        # name of the first MDT\n        mdt_name  = \"MDT0000\" ;\n\n        # id of the persistent changelog reader\n        # as returned by \"lctl changelog_register\" command\n        reader_id = \"cl1\" ;\n    }\n    dump_file = 
\"/tmp/changelogs.log\";\n    queue_max_size   = 1;\n    queue_max_age    = 1;\n}\n\nLog\n{\n    # Log verbosity level\n    # Possible values are: CRIT, MAJOR, EVENT, VERB, DEBUG, FULL\n    debug_level = EVENT;\n\n    # Log file\n    log_file = \"/var/log/robinhood/rbh.log\";\n\n    # File for reporting migration events\n    report_file = \"/var/log/robinhood/rbh_actions.log\";\n\n    # set alert_file, alert_mail or both depending on the alert method you wish\n    alert_file = \"/var/log/robinhood/rbh_alerts.log\";\n}\n\nListManager\n{\n\tMySQL\n\t{\n\t\tserver = \"localhost\";\n\t\tdb = \"robinhood_shook\";\n\t\tuser = \"robinhood\";\n\t\t# password or password_file are mandatory\n\t\tpassword = \"robinhood\";\n        #password_file = \"/etc/robinhood.d/backup/.dbpassword\" ;\n\t}\n}\n"
  },
  {
    "path": "doc/templates/includes/alerts.inc",
    "content": "# new implementation of alerts as policies in robinhood v3\n\ndefine_policy alert {\n    status_manager = alerter;\n    scope = all;\n    default_lru_sort_attr = last_check; # oldest check first, 0==unchecked\n    default_action = alerter.alert;\n}\n\nalert_parameters {\n    action_params { alert = raise; }\n    report_actions = no;\n    recheck_ignored_entries = no;\n}\n\nalert_trigger {\n    trigger_on = scheduled;\n    check_interval = 6h;\n}\n"
  },
  {
    "path": "doc/templates/includes/backup.inc",
    "content": "# used to be rbh 2.5 \"migration\" policy for backup mode\ndefine_policy backup_archive {\n    scope { (type == file or type == symlink) and (status == new or status == modified) }\n    status_manager = backup(archive);\n    status_current = archiving;\n    default_action = common.copy;\n    default_lru_sort_attr = last_mod;\n}\n\n# used to be rbh 2.5 \"hsm_rm\" policy for backup mode\ndefine_policy backup_remove {\n    scope { type == file or type == symlink }\n    status_manager = backup(removed);\n    # this action is executed with a fake 'path' attribute which\n    # is actually the path in backend\n    default_action = common.unlink;\n    default_lru_sort_attr = rm_time;\n}\n"
  },
  {
    "path": "doc/templates/includes/check.inc",
    "content": "define_policy checksum {\n    status_manager = checker;\n    scope { type == file }\n    default_lru_sort_attr = last_check; # oldest check first, 0==unchecked\n    # 'output' stands for previous value\n    default_action = cmd(\"/usr/sbin/rbh_cksum.sh '{output}' '{path}'\");\n}\n"
  },
  {
    "path": "doc/templates/includes/lhsm.inc",
    "content": "#####################################################\n#   Policy definitions for Lustre/HSM\n#   Do not modify without the agreement\n#   of your robinhood support!\n#####################################################\n\n# used to be rbh 2.5 \"migration\" policy for Lustre/HSM\ndefine_policy lhsm_archive {\n    scope { type == file\n            and no_archive != 1\n            and (status == new or status == modified) }\n    status_manager = lhsm(archive);\n    status_current = archiving;\n    default_action = lhsm.archive;\n    default_lru_sort_attr = last_mod;\n}\n\n# used to be rbh 2.5 \"purge\" policy for Lustre/HSM\ndefine_policy lhsm_release {\n    scope { type == file\n            and no_release != 1\n            and status == synchro }\n    status_manager = lhsm(release);\n    default_action = lhsm.release;\n    default_lru_sort_attr = last_access;\n}\n\n# used to be rbh 2.5 \"hsm_rm\" policy for Lustre/HSM\ndefine_policy lhsm_remove {\n    scope { type == file }\n    status_manager = lhsm(removed);\n    default_action = lhsm.hsm_remove;\n    default_lru_sort_attr = rm_time;\n}\n\n# vim:expandtab:shiftwidth=4:tabstop=4:\n"
  },
  {
    "path": "doc/templates/includes/modeguard.inc",
    "content": "# Robinhood Policy definition for modeguard\n\ndefine_policy modeguard {\n    status_manager = modeguard;\n    scope { type == directory }\n    default_action = modeguard.enforce_mode;\n    default_lru_sort_attr = last_mod;\n}\n"
  },
  {
    "path": "doc/templates/includes/rmdir.inc",
    "content": "# This rmdir policy applies to all directories (empty or non-empty)\n# This allow applying policies to both empty and non empty directories\n# in a single policy run.\n# - Explicitely add conditions \"dircount == 0\" in targeted fileclass\n# to apply action to empty directories only.\n# - Explicitely set action = cmd(\"rm -fr {fullpath}\") in the policy rules\n# to recursively remove non-empty directories.\n\ndefine_policy rmdir {\n    scope {type == directory}\n    status_manager = none;\n    default_action = common.rmdir;\n    default_lru_sort_attr = last_mod;\n}\n"
  },
  {
    "path": "doc/templates/includes/rmdir_old.inc",
    "content": "# definition of 2 rmdir policies:\n# 1 for empty directories\n# 1 for non-empty directories (recursive removal)\n\ndefine_policy rmdir_empty {\n    scope {type == directory and dircount == 0}\n    status_manager = none;\n    default_action = common.rmdir;\n    default_lru_sort_attr = last_mod;\n}\n\ndefine_policy rmdir_recurse {\n    scope {type == directory}\n    status_manager = none;\n    default_action = cmd(\"rm -rf {fullpath}\");\n    default_lru_sort_attr = last_mod;\n}\n"
  },
  {
    "path": "doc/templates/includes/shook.inc",
    "content": "define_policy shook_archive {\n    scope { (type == file or type == symlink)\n            and (status == new or status == modified) }\n    status_manager = shook(archive);\n    status_current = archiving;\n    default_action = common.copy;\n    default_lru_sort_attr = last_mod;\n}\n\ndefine_policy shook_release {\n    scope { type == file and status == synchro }\n    status_manager = shook(release);\n    status_current = release_pending;\n    default_action = shook.release;\n    default_lru_sort_attr = last_access;\n}\n\ndefine_policy shook_remove {\n    scope { type == file }\n    status_manager = shook(removed);\n    # this action is executed with a fake 'path' attribute which\n    # is actually the path in backend\n    default_action = common.unlink;\n    default_lru_sort_attr = rm_time;\n}\n\n# vim:expandtab:shiftwidth=4:tabstop=4:\n"
  },
  {
    "path": "doc/templates/includes/tmpfs.inc",
    "content": "# used to be rbh 2.5 \"purge\" policy in TMPFS mode\ndefine_policy cleanup {\n    scope { type != directory }\n    status_manager = none;\n    default_action = common.unlink;\n    default_lru_sort_attr = last_access;\n}\n"
  },
  {
    "path": "man/Makefile.am",
    "content": "dist_man_MANS=robinhood.1 rbh-report.1 rbh-find.1 rbh-du.1 rbh-diff.1\n\nif LUSTRE_HSM\ndist_man_MANS+=lhsmtool_cmd.1\nendif\n\n# Manually generate man pages from each executable --help\nmanpages:\n\texport VERSION=${VERSION}; ../scripts/make_mans.sh\n"
  },
  {
    "path": "man/lhsmtool_cmd.1",
    "content": ".\\\" Man page generated from reStructuredText.\n.\n.TH LHSMTOOL_CMD 1 \"2017-12-13\" \"0.1\" \"\"\n.SH NAME\nlhsmtool_cmd \\- Turn any data copy command into a Lustre/HSM copytool\n.\n.nr rst2man-indent-level 0\n.\n.de1 rstReportMargin\n\\\\$1 \\\\n[an-margin]\nlevel \\\\n[rst2man-indent-level]\nlevel margin: \\\\n[rst2man-indent\\\\n[rst2man-indent-level]]\n-\n\\\\n[rst2man-indent0]\n\\\\n[rst2man-indent1]\n\\\\n[rst2man-indent2]\n..\n.de1 INDENT\n.\\\" .rstReportMargin pre:\n. RS \\\\$1\n. nr rst2man-indent\\\\n[rst2man-indent-level] \\\\n[an-margin]\n. nr rst2man-indent-level +1\n.\\\" .rstReportMargin post:\n..\n.de UNINDENT\n. RE\n.\\\" indent \\\\n[an-margin]\n.\\\" old: \\\\n[rst2man-indent\\\\n[rst2man-indent-level]]\n.nr rst2man-indent-level -1\n.\\\" new: \\\\n[rst2man-indent\\\\n[rst2man-indent-level]]\n.in \\\\n[rst2man-indent\\\\n[rst2man-indent-level]]u\n..\n.SH SYNOPSIS\n.INDENT 0.0\n.INDENT 3.5\nlhsmtool_cmd [options] <lustre mount point>\n.UNINDENT\n.UNINDENT\n.SH DESCRIPTION\n.sp\nlhsmtool_cmd turns any command that can copy data from / to a file (or a file\ndescriptor) into a fully\\-fledged Lustre/HSM copytool.\n.sp\nThe tool receives action items from the Lustre/HSM coordinator, opens the file\nand notify Lustre that actions are starting, then spawns subcommands for the\nactual data transfer, with lustre file descriptor and FID as parameters.\n.sp\nThe commands to execute are specified in a configuration file, with well\\-known\npatterns for the variable sections of the commands.\n.SH CONFIGURATION FILE\n.sp\nBy default, the lhsmtool_cmd configuration file lives in /etc/lhsmtool_cmd.conf.\nIt follows the syntax of .ini files, with a single section, named \\fI[commands]\\fP\nwhich contains entries under the form \\fIkey = value\\fP\\&.\n.sp\nThe keys are the actions that trigger the attached commands. 
They can be:\n.INDENT 0.0\n.IP \\(bu 2\narchive\n.IP \\(bu 2\nrestore\n.IP \\(bu 2\nremove\n.IP \\(bu 2\ncancel\n.UNINDENT\n.sp\nThe values are the commands to execute. They can be anything. The following\nparameter templates are available: \\fI{fid}\\fP and \\fI{fd}\\fP\\&.  The first template\n\\fI{fid}\\fP is replaced by the lustre FID of the file that is being manipulated.\nCommands can typically use it to derive a name for the corresponding object in\nthe archive.  The second template \\fI{fd}\\fP is an integer representing a file\ndescriptor to the file being manipulated in Lustre. Subcommands can directly use\nthis file descriptor to read (in the archive case) or write (for a restore) the\nfile in Lustre.\n.sp\nFor commands that do only operate on paths, one may want to use\n\\fI/proc/self/{fd}\\fP as demonstrated below with the \\fBdd\\fP command.\n.SH EXAMPLES\n.sp\nThe following example illustrates a possible use of \\fIdd(1)\\fP to use a directory\nas an archive, in which files are simply named after their FID in Lustre.\n.INDENT 0.0\n.INDENT 3.5\n.sp\n.nf\n.ft C\n[commands]\narchive = dd if=/proc/self/fd/{fd} of=/tmp/arch/{fid}\nrestore = dd if=/tmp/arch/{fid} of=/proc/self/fd/{fd}\nremove = rm /tmp/arch/{fid}\n.ft P\n.fi\n.UNINDENT\n.UNINDENT\n.SH OPTIONS\n.INDENT 0.0\n.TP\n.BI \\-A\\fP,\\fB  \\-\\-archive\\fB= <n>\nArchive number to serve. 
This option can be repeated\n.TP\n.B \\-\\-abort\\-on\\-error\nExit on first major error\n.TP\n.BI \\-c\\fP,\\fB  \\-\\-config\\fB= <path>\nSpecify a non\\-default configuration file\n.TP\n.B \\-\\-daemon\nDaemonize process: run in background\n.TP\n.B \\-\\-dry\\-run\nDo not actually run but log what would have been done\n.TP\n.BI \\-f\\fP,\\fB  \\-\\-event\\-fifo\\fB= <path>\nWrite event JSON descriptions at \\fI<path>\\fP\n.TP\n.BI \\-F\\fP,\\fB  \\-\\-fanout\\fB= <n>\nMax number of commands that can be spawned in parallel\n.TP\n.B \\-q\\fP,\\fB  \\-\\-quiet\nReduce verbosity\n.TP\n.B \\-v\\fP,\\fB  \\-\\-verbose\nIncrease verbosity\n.TP\n.B \\-\\-help\\fP,\\fB  \\-h\nShow the help message and exit\n.UNINDENT\n.SH SEE ALSO\n.INDENT 0.0\n.IP \\(bu 2\n\\fBman lfs\\fP\n.IP \\(bu 2\n\\fBman robinhood\\fP\n.UNINDENT\n.SH BUGS\n.sp\nNote that not all operations are fully supported. Typically, CANCEL is not yet\nimplemented, although it could easily be added using a fid/pid mapping and by\ndelivering signals to the subcommands.\n.SH AUTHOR\nhenri.doreau@cea.fr\n.SH COPYRIGHT\nGPLv2\n.\\\" Generated by docutils manpage writer.\n.\n"
  },
  {
    "path": "man/lhsmtool_cmd.rst",
    "content": "==============\n lhsmtool_cmd\n==============\n\n------------------------------------------------------\nTurn any data copy command into a Lustre/HSM copytool\n------------------------------------------------------\n\n:Author: henri.doreau@cea.fr\n:Date: 2017-12-13\n:Copyright: GPLv2\n:Version: 0.1\n:Manual section: 1\n\nSYNOPSIS\n========\n\n    lhsmtool_cmd [options] <lustre mount point>\n\nDESCRIPTION\n===========\n\nlhsmtool_cmd turns any command that can copy data from / to a file (or a file\ndescriptor) into a fully-fledged Lustre/HSM copytool.\n\nThe tool receives action items from the Lustre/HSM coordinator, opens the file\nand notify Lustre that actions are starting, then spawns subcommands for the\nactual data transfer, with lustre file descriptor and FID as parameters.\n\nThe commands to execute are specified in a configuration file, with well-known\npatterns for the variable sections of the commands.\n\nCONFIGURATION FILE\n==================\n\nBy default, the lhsmtool_cmd configuration file lives in /etc/lhsmtool_cmd.conf.\nIt follows the syntax of .ini files, with a single section, named `[commands]`\nwhich contains entries under the form `key = value`.\n\nThe keys are the actions that trigger the attached commands. They can be:\n\n- archive\n- restore\n- remove\n- cancel\n\nThe values are the commands to execute. They can be anything. The following\nparameter templates are available: `{fid}` and `{fd}`.  The first template\n`{fid}` is replaced by the lustre FID of the file that is being manipulated.\nCommands can typically use it to derive a name for the corresponding object in\nthe archive.  The second template `{fd}` is an integer representing a file\ndescriptor to the file being manipulated in Lustre. 
Subcommands can directly use\nthis file descriptor to read (in the archive case) or write (for a restore) the\nfile in Lustre.\n\nFor commands that do only operate on paths, one may want to use\n`/proc/self/{fd}` as demonstrated below with the ``dd`` command.\n\nEXAMPLES\n========\n\nThe following example illustrates a possible use of `dd(1)` to use a directory\nas an archive, in which files are simply named after their FID in Lustre.\n\n::\n\n    [commands]\n    archive = dd if=/proc/self/fd/{fd} of=/tmp/arch/{fid}\n    restore = dd if=/tmp/arch/{fid} of=/proc/self/fd/{fd}\n    remove = rm /tmp/arch/{fid}\n\nOPTIONS\n=======\n\n-A, --archive=<n>         Archive number to serve. This option can be repeated\n--abort-on-error          Exit on first major error\n-c, --config=<path>       Specify a non-default configuration file\n--daemon                  Daemonize process: run in background\n--dry-run                 Do not actually run but log what would have been done\n-f, --event-fifo=<path>   Write event JSON descriptions at `<path>`\n-F, --fanout=<n>          Max number of commands that can be spawned in parallel\n-q, --quiet               Reduce verbosity\n-v, --verbose             Increase verbosity\n--help, -h                Show the help message and exit\n\nSEE ALSO\n========\n* ``man lfs``\n* ``man robinhood``\n\nBUGS\n====\nNote that not all operations are fully supported. Typically, CANCEL is not yet\nimplemented, although it could easily be added using a fid/pid mapping and by\ndelivering signals to the subcommands.\n"
  },
  {
    "path": "man/rbh-diff.1",
    "content": ".\\\" Text automatically generated by txt2man\n.TH rbh-diff 1 \"07 July 2016\" \"\" \"Robinhood 3.0\"\n.SH NAME\n\\fBrbh-diff \\fP- list differences between robinhood database and the filesystem\n.SH SYNOPSIS\n.nf\n.fam C\n  \\fBrbh-diff\\fP [\\fIoptions\\fP]\n\n.fam T\n.fi\n.fam T\n.fi\n.SH OPTIONS\n\n.TP\n.B\n\\fB-s\\fP dir, \\fB--scan\\fP=dir\nOnly scan the specified subdir.\n.TP\n.B\n\\fB-d\\fP attrset, \\fB--diff\\fP=attrset :\nDisplay changes for the given set of attributes.\nattrset is a list of \\fIoptions\\fP in: path,posix,stripe,all,status,notimes,noatime.\n.TP\n.B\n\\fB-a\\fP {\\fIfs\\fP|\\fIdb\\fP}, \\fB--apply\\fP[={\\fIfs\\fP|\\fIdb\\fP}]\n\\fIdb\\fP (default): apply changes to the database using the filesystem as the reference.\n\\fIfs\\fP: revert changes in the filesystem using the database as the reference.\n.TP\n.B\n\\fB--dry-run\\fP\nIf \\fB--apply\\fP=\\fIfs\\fP, display operations on filesystem without performing them.\n.TP\n.B\n\\fB-b\\fP, \\fB--from-backend\\fP\nWhen applying changes to the filesystem (\\fB--apply\\fP=\\fIfs\\fP), recover objects from the backend storage\n(otherwise, recover orphaned objects on OSTs).\n.SH CONFIG FILE OPTIONS\n\n.TP\n.B\n\\fB-f\\fP file, \\fB--config-file\\fP=\\fIconfigfile\\fP\nPath to configuration file (or short name).\n.SH MISCELLANEOUS OPTIONS\n\n.TP\n.B\n\\fB-l\\fP level, \\fB--log-level\\fP=\\fIloglevel\\fP\nForce the log verbosity level (overrides configuration value).\nAllowed values: CRIT, MAJOR, EVENT, VERB, DEBUG, FULL.\n.TP\n.B\n\\fB-h\\fP, \\fB--help\\fP\nDisplay a short help about command line \\fIoptions\\fP.\n.TP\n.B\n\\fB-V\\fP, \\fB--version\\fP\nDisplay version info\n.SH SEE ALSO\n\\fBrobinhood\\fP(1), \\fBrbh-report\\fP(1), \\fBrbh-find\\fP(1), \\fBrbh-du\\fP(1)\n"
  },
  {
    "path": "man/rbh-du.1",
    "content": ".\\\" Text automatically generated by txt2man\n.TH rbh-du 1 \"07 July 2016\" \"\" \"Robinhood 3.0\"\n.SH NAME\n\\fBrbh-du \\fP- du clone that query robinhood DB\n.SH SYNOPSIS\n.nf\n.fam C\n  \\fBrbh-du\\fP [\\fIoptions\\fP] [path|fid]\n\n.fam T\n.fi\n.fam T\n.fi\n.SH FILTERS\n\n\\fB-u\\fP \\fIuser\\fP\n.PP\n\\fB-g\\fP \\fIgroup\\fP\n.TP\n.B\n\\fB-t\\fP \\fItype\\fP\n\\(cqf' (file), 'd' (dir), 'l' (symlink), 'b' (block), 'c' (char), 'p' (named pipe/FIFO), 's' (socket)\n.PP\n\\fB-S\\fP <status_name>:<status_value>\n.SH OUTPUT OPTIONS\n\n.TP\n.B\n\\fB-s\\fP, \\fB--sum\\fP\ndisplay total instead of stats per argument\n.TP\n.B\n\\fB-c\\fP, \\fB--count\\fP\ndisplay entry count instead of disk usage\n.TP\n.B\n\\fB-b\\fP, \\fB--bytes\\fP\ndisplay size instead of disk usage (display in bytes)\n.TP\n.B\n\\fB-k\\fP, \\fB--kilo\\fP\ndisplay disk usage in blocks of 1K (default)\n.TP\n.B\n\\fB-m\\fP, \\fB--mega\\fP\ndisplay disk usage in blocks of 1M\n.TP\n.B\n\\fB-H\\fP, \\fB--human-readable\\fP\ndisplay in human readable format (e.g 512K 123.7M)\n.TP\n.B\n\\fB-d\\fP, \\fB--details\\fP\nshow detailed stats: \\fItype\\fP, count, size, disk usage\n(display in bytes by default)\n.SH PROGRAM OPTIONS\n\n\\fB-f\\fP \\fIconfig_file\\fP\n.PP\n\\fB-l\\fP \\fIlog_level\\fP\n.TP\n.B\n\\fB-h\\fP, \\fB--help\\fP\nDisplay a short help about command line \\fIoptions\\fP.\n.TP\n.B\n\\fB-V\\fP, \\fB--version\\fP\nDisplay version info\n.SH SEE ALSO\n\\fBrobinhood\\fP(1), \\fBrbh-report\\fP(1), \\fBrbh-find\\fP(1), \\fBrbh-diff\\fP(1)\n"
  },
  {
    "path": "man/rbh-find.1",
    "content": ".\\\" Text automatically generated by txt2man\n.TH rbh-find 1 \"07 July 2016\" \"\" \"Robinhood 3.0\"\n.SH NAME\n\\fBrbh-find \\fP- find clone that query robinhood DB\n.SH SYNOPSIS\n.nf\n.fam C\n  \\fBrbh-find\\fP [\\fIoptions\\fP] [path|fid]\\.\\.\\.\n\n.fam T\n.fi\n.fam T\n.fi\n.SH FILTERS\n\n-user \\fIuser\\fP\n.PP\n-group \\fIgroup\\fP\n.PP\n\\fB-nouser\\fP\n.PP\n\\fB-nogroup\\fP\n.TP\n.B\n-type \\fItype\\fP\n\\(cqf' (file), 'd' (dir), 'l' (symlink), 'b' (block), 'c' (char), 'p' (named pipe/FIFO), 's' (socket)\n.TP\n.B\n\\fB-size\\fP \\fIsize_crit\\fP\n[-|+]<val>[K|M|G|T]\n.PP\n\\fB-name\\fP \\fIfilename\\fP\n.TP\n.B\n\\fB-crtime\\fP \\fItime_crit\\fP\n[-|+]<val>[s|m|h|d|y] (s: sec, m: min, h: hour, d:day, y:year. default unit is days)\n.TP\n.B\n\\fB-ctime\\fP \\fItime_crit\\fP\n[-|+]<val>[s|m|h|d|y] (s: sec, m: min, h: hour, d:day, y:year. default unit is days)\n.TP\n.B\n\\fB-mtime\\fP \\fItime_crit\\fP\n[-|+]<val>[s|m|h|d|y] (s: sec, m: min, h: hour, d:day, y:year. default unit is days)\n.TP\n.B\n\\fB-mmin\\fP \\fIminute_crit\\fP\nsame as '\\fB-mtime\\fP Nm'\n.TP\n.B\n\\fB-msec\\fP \\fIsecond_crit\\fP\nsame as '\\fB-mtime\\fP Ns'\n.TP\n.B\n\\fB-atime\\fP \\fItime_crit\\fP\n[-|+]<val>[s|m|h|d|y] (s: sec, m: min, h: hour, d:day, y:year. 
default unit is days)\n.TP\n.B\n\\fB-amin\\fP \\fIminute_crit\\fP\nsame as '\\fB-atime\\fP Nm'\n.PP\n\\fB-links\\fP \\fIcount\\fP\n.PP\n\\fB-ost\\fP \\fIost_index\\fP\n.PP\n\\fB-pool\\fP \\fIost_pool\\fP\n.PP\n\\fB-status\\fP \\fIstatus_name:status_value\\fP\n.PP\n\\fB-class\\fP \\fIclass\\fP\n.TP\n.B\n\\fB-not\\fP, -!\nNegate next argument\n.SH OUTPUT OPTIONS\n\n.TP\n.B\n\\fB-ls\\fP\nDisplay attributes\n.TP\n.B\n\\fB-lsost\\fP\nDisplay OST information\n.TP\n.B\n\\fB-lsclass\\fP\nDisplay fileclass information\n.TP\n.B\n\\fB-lsstatus\\fP[=policy]\nDisplay \\fIstatus\\fP information (optionally: only for the given policy).\n.TP\n.B\n\\fB-print\\fP\nDisplay the fullpath of matching entries (this is the default, unless \\fB-ls\\fP, \\fB-lsost\\fP or \\fB-exec\\fP are used).\n.TP\n.B\n\\fB-printf\\fP\nFormat string to display the matching entries.\nThe supported escapes and directives are a subset of those of `find`,\nwith some Robinhood additions prefixed with %R:\n.RS\n.TP\n.B\n%%\nEscapes %\n.TP\n.B\n%A\nRobinhood’s \"last access time\", which is a compound of the file's atime and mtime, unless the global configuration last_access_only_atime is set, in which case it is exactly the atime of the file. An \\fBstrftime\\fP(1) directive must be added. For example: %Ap %AT. This option can also take an strftime format option between brackets. 
For instance: %A{%A, %B %dth, %Y %F}.\n.TP\n.B\n%b\nNumber of blocks\n.TP\n.B\n%C\nRobinhood’s \"last MD change\" which is the ctime of the file.\n.TP\n.B\n%d\nDepth\n.TP\n.B\n%f\nFile name, without its path\n.TP\n.B\n%g\nGroup name\n.TP\n.B\n%M\nFile mode as a string, similar to the output of `ls`\n.TP\n.B\n%m\nFile mode in octal\n.TP\n.B\n%n\nNumber of hard links\n.TP\n.B\n%p\nFull file name\n.TP\n.B\n%s\nFile size\n.TP\n.B\n%T\nRobinhood’s \"modification time\", which is the file's mtime.\n.TP\n.B\n%u\nFile owner\n.TP\n.B\n%Y\nFull file \\fItype\\fP (file, dir, fifo, \\.\\.\\.)\n.TP\n.B\n%y\nFile \\fItype\\fP as one letter (f, d, p, \\.\\.\\.)\n.TP\n.B\n%RC\nRobinhood’s \"creation time\", which is the oldest ctime seen for that file. It is always lesser or equal to the current ctime of the file. When Lustre changelogs are used, \"creation time\" is really the creation time. An \\fBstrftime\\fP(1) directive must be added. For example: %RCc. This option can also take an strftime format option between curly brackets. For instance: %RC{%A, %B %dth, %Y %F}.\n.TP\n.B\n%Rc\nFile class\n.TP\n.B\n%Rf\nLustre FID\n.TP\n.B\n%Rm\nStatus manager module attribute, with the name specified between curly bracket. The name is the \\fIstatus\\fP manager module name, followed by a dot, followed by the attribute name. For example: %Rm{lhsm.archive_id}.\n.TP\n.B\n%Ro\nLustre OSTS\n.TP\n.B\n%Rp\nLustre parent FID\n.TP\n.B\n\\\\\\\\\nEscapes \\\\\n.TP\n.B\n\\\\n\nNewline\n.TP\n.B\n\\\\t\nTab\n.TP\n.B\n\\\\NNN\nByte with octal value NNN (1 to 3 digits)\n.TP\n.B\n\\\\xHH\nByte with hexadecimal value HH (1 to 2 digits)\n.RE\n.TP\n.B\n\\fB-escaped\\fP\nWhen \\fB-printf\\fP is used, escape unprintable characters.\n.TP\n.B\n\\fB-print0\\fP\nPrint file name followed by a null character. Same as \\fB -printf \"%p\\\\0\"\\fP\n.SH ACTIONS\n\n.TP\n.B\n\\fB-exec\\fP \"cmd\"\nExecute the given command for each matching entry. 
Unlike classical 'find',\ncmd must be a single (quoted) shell param, not necessarily terminated with ';'.\n\\(cq{}' is replaced by the entry path. Example: \\fB-exec\\fP 'md5sum {}'\n.SH BEHAVIOR\n\n.TP\n.B\n\\fB-nobulk\\fP\nWhen running \\fBrbh-find\\fP on the filesystem root, \\fBrbh-find\\fP automatically switches\nto bulk DB request instead of browsing the namespace from the DB.\nThis speeds up the query, but this may result in an arbitrary output ordering,\nand a single path may be displayed in case of multiple hardlinks.\nUse \\fB-nobulk\\fP to disable this optimization.\n.SH PROGRAM OPTIONS\n\n\\fB-f\\fP \\fIconfig_file\\fP\n.TP\n.B\n\\fB-d\\fP \\fIlog_level\\fP\nCRIT, MAJOR, EVENT, VERB, DEBUG, FULL\n.TP\n.B\n\\fB-h\\fP, \\fB--help\\fP\nDisplay a short help about command line \\fIoptions\\fP.\n.TP\n.B\n\\fB-V\\fP, \\fB--version\\fP\nDisplay version info\n.SH SEE ALSO\n\\fBrobinhood\\fP(1), \\fBrbh-report\\fP(1), \\fBrbh-du\\fP(1), \\fBrbh-diff\\fP(1)\n"
  },
  {
    "path": "man/rbh-report.1",
    "content": ".\\\" Text automatically generated by txt2man\n.TH rbh-report 1 \"07 July 2016\" \"\" \"Robinhood 3.0\"\n.SH NAME\n\\fBrbh-report \\fP- querying command for robinhood policy engine\n.SH SYNOPSIS\n.nf\n.fam C\n  \\fBrbh-report\\fP [\\fIoptions\\fP]\n\n.fam T\n.fi\n.fam T\n.fi\n.SH AVAILABLE STATS\n\n.TP\n.B\n\\fB--activity\\fP, \\fB-a\\fP\nDisplay stats about daemon activity.\n.TP\n.B\n\\fB--fs-info\\fP, \\fB-i\\fP\nDisplay statistics about filesystem contents.\n.TP\n.B\n\\fB--class-info\\fP[=\\fIclass_expr\\fP]\nDisplay Fileclasses summary. Use optional parameter \\fIclass_expr\\fP\nfor retrieving stats about matching fileclasses.\n.TP\n.B\n\\fB--status-info\\fP \\fIstatus_name\\fP[:\\fIstatus_value\\fP]\nDisplay status summary for the given policy or status name.\nOptionally filter on \\fIstatus_value\\fP.\n.TP\n.B\n\\fB--entry-info\\fP \\fIpath\\fP|\\fIid\\fP, \\fB-e\\fP \\fIpath\\fP|\\fIid\\fP\nDisplay all information about the given entry.\n.TP\n.B\n\\fB--user-info\\fP[=\\fIusername\\fP], \\fB-u\\fP \\fIusername\\fP\nDisplay user statistics. Use optional parameter \\fIusername\\fP for retrieving stats about a single user.\n.TP\n.B\n\\fB--group-info\\fP[=\\fIgroupname\\fP], \\fB-g\\fP \\fIgroupname\\fP\nDisplay group statistics. Use optional parameter \\fIgroupname\\fP for retrieving stats about a single group.\n.TP\n.B\n\\fB--project-info\\fP[=\\fIprojid\\fP]\nDisplay project summary. Use optional parameter \\fIprojid\\fP\nfor retrieving stats about a single project id.\n.TP\n.B\n\\fB--top-dirs\\fP[=\\fIcnt\\fP], \\fB-d\\fP \\fIcnt\\fP\nDisplay largest directories. Optional argument indicates the number of directories to be returned (default: 20).\n.TP\n.B\n\\fB--top-size\\fP[=\\fIcnt\\fP], \\fB-s\\fP \\fIcnt\\fP\nDisplay largest files. Optional argument indicates the number of files to be returned (default: 20).\n.TP\n.B\n\\fB--top-users\\fP[=\\fIcnt\\fP], \\fB-U\\fP \\fIcnt\\fP\nDisplay top disk space consumers. 
Optional argument indicates the number of users to be returned (default: 20).\n.TP\n.B\n\\fB--oldest-files\\fP[=\\fIcnt\\fP], \\fB-o\\fP \\fIcnt\\fP\nDisplay oldest files in the filesystem (ordered by access time).\nOptional argument indicates the number of entries to be displayed (default: 20).\nTip: use '\\fB--reverse\\fP' option to display newest files.\n.TP\n.B\n\\fB--oldest-empty-dirs\\fP[=\\fIcnt\\fP], \\fB-O\\fP \\fIcnt\\fP\nDisplay oldest empty directories in the filesystem (ordered by modification time).\nOptional argument indicates the number of dirs to be returned (default: 20).\n.TP\n.B\n\\fB--deferred-rm\\fP, \\fB-R\\fP\nDisplay files to be removed from HSM.\n.TP\n.B\n\\fB--dump\\fP, \\fB-D\\fP\nDump all filesystem entries.\n.TP\n.B\n\\fB--dump-user\\fP \\fIusername\\fP\nDump all entries for the given user.\n.TP\n.B\n\\fB--dump-group\\fP \\fIgroupname\\fP\nDump all entries for the given group.\n.TP\n.B\n\\fB--dump-ost\\fP \\fIost_index\\fP|\\fIost_set\\fP\nDump all entries on the given OST or set of OSTs (e.g. 3,5-8).\n.TP\n.B\n\\fB--dump-status\\fP \\fIstatus_name\\fP:\\fIstatus_value\\fP\nDump all entries with the given status (e.g. 
lhsm_status:released).\n.SH MAINTENANCE SCHEDULING\n\n.TP\n.B\n\\fB--next-maintenance\\fP[=date_time]\nSet/display time of the next maintenance.\nExpected date_time format is yyyymmddHHMM[SS].\n.TP\n.B\n\\fB--cancel-maintenance\\fP\nCancel the next scheduled maintenance.\n.SH FILTER OPTIONS\nThe following filters can be specified for reports:\n.TP\n.B\n\\fB-P\\fP \\fIpath\\fP, \\fB--filter-path\\fP \\fIpath\\fP\nDisplay the report only for objects in the given \\fIpath\\fP.\n.TP\n.B\n\\fB-C\\fP \\fIclass_expr\\fP, \\fB--filter-class\\fP \\fIclass_expr\\fP\nOnly report entries in the matching fileclasses.\n.TP\n.B\n\\fB-p\\fP \\fIproject_id\\fP, \\fB--filter-project\\fP \\fIproject_id\\fP\nOnly report entries with the given project id (Lustre only).\n.TP\n.B\n\\fB--count-min\\fP \\fIcnt\\fP\nDisplay only topuser/userinfo with at least \\fIcnt\\fP entries\n.SH ACCOUNTING REPORT OPTIONS\n\n.TP\n.B\n\\fB--size-profile\\fP, \\fB--szprof\\fP\nDisplay size profile statistics\n.TP\n.B\n\\fB--by-count\\fP\nSort by count\n.TP\n.B\n\\fB--by-avgsize\\fP\nSort by average file size\n.TP\n.B\n\\fB--by-size-ratio\\fP \\fIrange\\fP, \\fB--by-szratio\\fP \\fIrange\\fP\nSort on the ratio of files in the given size-range\n\\fIrange\\fP: <val><sep><val>- or <val><sep><val-1> or <val><sep>inf\n<val>: 0, 1, 32, 1K 32K, 1M, 32M, 1G, 32G, 1T\n<sep>: ~ or ..\ne.g: 1G..inf, 1..1K-, 0..31M\n.TP\n.B\n\\fB--reverse\\fP\nReverse sort order\n.TP\n.B\n\\fB-S\\fP, \\fB--split-user-groups\\fP\nDisplay the report by user AND group\n.TP\n.B\n\\fB-J\\fP, \\fB--split-user-projects\\fP\nSplit users report per project id (Lustre only)\n.TP\n\n.B\n\\fB-F\\fP, \\fB--force-no-acct\\fP\nGenerate the report without using accounting table (slower)\n.SH CONFIG FILE OPTIONS\n\n.TP\n.B\n\\fB-f\\fP \\fIcfg_file\\fP, \\fB--config-file\\fP=\\fIcfg_file\\fP\nPath to configuration file (or short name).\n.SH OUTPUT FORMAT OPTIONS\n\n.TP\n.B\n\\fB-c\\fP , \\fB--csv\\fP\nOutput stats in a csv-like format for 
parsing\n.TP\n.B\n\\fB-q\\fP , \\fB--no-header\\fP\nDon't display column headers/footers\n.SH MISCELLANEOUS OPTIONS\n\n.TP\n.B\n\\fB-l\\fP \\fIloglevel\\fP, \\fB--log-level\\fP=\\fIloglevel\\fP\nForce the log verbosity level (overides configuration value).\nAllowed values: CRIT, MAJOR, EVENT, VERB, DEBUG, FULL.\n.TP\n.B\n\\fB-h\\fP, \\fB--help\\fP\nDisplay a short help about command line \\fIoptions\\fP.\n.TP\n.B\n\\fB-V\\fP, \\fB--version\\fP\nDisplay version info\n.SH SEE ALSO\n\\fBrobinhood\\fP(1), \\fBrbh-find\\fP(1), \\fBrbh-du\\fP(1), \\fBrbh-diff\\fP(1)\n"
  },
  {
    "path": "man/robinhood.1",
    "content": ".\\\" Text automatically generated by txt2man\n.TH robinhood 1 \"07 July 2016\" \"\" \"Robinhood 3.0\"\n.SH NAME\n\\fBrobinhood \\fP- policy engine and statistics tool for large file systems\n.SH SYNOPSIS\n.nf\n.fam C\n  \\fBrobinhood\\fP [\\fIoptions\\fP]\n\n.fam T\n.fi\n.fam T\n.fi\n.SH ACTIONS\n\n.TP\n.B\n\\fB-S\\fP, \\fB--scan\\fP[=\\fIdir\\fP]\nScan the filesystem namespace. If \\fIdir\\fP is specified, only scan the specified subdir.\n.TP\n.B\n\\fB-r\\fP, \\fB--read-log\\fP[=\\fImdt_idx\\fP]\nRead events from MDT ChangeLog.\nIf \\fImdt_idx\\fP is specified, only read ChangeLogs for the given MDT.\nElse, start 1 changelog reader thread per MDT (with DNE).\n.TP\n.B\n\\fB--run\\fP[=all]\nRun all polices (based on triggers).\n.TP\n.B\n\\fB--run\\fP=\\fIpolicy1\\fP(\\fIargs\\fP),\\fIpolicy2\\fP(\\fIargs\\fP)\\.\\.\\.\nRun the given policies with the specified arguments. \nSee \"Policy run \\fIoptions\\fP\" for details about \\fIargs\\fP.\n.TP\n.B\n\\fB-C\\fP \\fIpolicy1\\fP,\\fIpolicy2\\fP\\.\\.\\., \\fB--check-thresholds\\fP[=\\fIpolicy1\\fP,\\fIpolicy2\\fP\\.\\.\\.]\nOnly check trigger thresholds without applying policy actions.\nIf no policy is specified (or 'all'), check all triggers.\n.SH POLICY RUN OPTIONS\n\n\\fIargs\\fP\nComma-separated list of <param>=<value>.\n.PP\n.nf\n.fam C\n           e.g. 
--run=cleanup(target=user:foo,max-count=1000)\n\n.fam T\n.fi\n.RS\nThe following parameters are allowed:\n.PP\ntarget=\\fItgt\\fP\n.PP\n.nf\n.fam C\n           Targeted subset of entries for the policy run.\n\n           tgt can be one of:\n\n.nf\n.fam C\n               all (all entries), user:username, group:grpname, file:path,\n\n               class:fileclass, ost:ost_idx, pool:poolname, projid:projid.\n\n.fam T\n.fi\nmax-count=\\fInbr\\fP\n.PP\n.nf\n.fam C\n           Max number of actions to execute for a policy run.\n\n.fam T\n.fi\nmax-vol=\\fIsize\\fP\n.PP\n.nf\n.fam C\n           Max volume of entries impacted by a policy run.\n\n.fam T\n.fi\ntarget-usage=\\fIpct\\fP\n.PP\n.nf\n.fam C\n           Targeted filesystem or OST usage for a policy run, in percent.\n\n\n\n.fam T\n.fi\n.RE\n.TP\n.B\n\\fB-t\\fP \\fItgt\\fP, \\fB--target\\fP=\\fItgt\\fP\nSpecify the default target for policy runs (see target syntax above).\n.TP\n.B\n\\fB--target-usage\\fP=\\fIpct\\fP\nSpecifies the default target disk usage (in \\fIpct\\fP) for 'all', 'ost' or 'pool' targets.\n.TP\n.B\n\\fB-I\\fP, \\fB--ignore-conditions\\fP\nApply policy to all entries in policy scope, without checking policy rule conditions.\n.TP\n.B\n\\fB-F\\fP, \\fB--force\\fP\nForce applying policies even if no full scan has never been done (partial DB contents).\n.TP\n.B\n\\fB--no-limit\\fP\nDon't limit the maximum number/volume of policy actions per pass.\n.TP\n.B\n\\fB--dry-run\\fP\nOnly report policy actions that would be performed without really doing them.\nNote: Robinhood DB is impacted as if the reported actions were really done.\n.TP\n.B\n\\fB--force-all\\fP\nForce applying a policy to all eligible entries, without considering\npolicy limits and rule conditions.\nThis is equivalent to: \\fB--once\\fP \\fB--no-limit\\fP \\fB--ignore-conditions\\fP \\fB--force\\fP\n.SH SCANNING OPTIONS\n\n.TP\n.B\n\\fB--no-gc\\fP\nGarbage collection of entries in DB is a long operation when terminating\na scan. 
This skips this operation if you don't care about removed\nentries (or don't expect entries to be removed).\nThis is also recommended for partial scanning (see \\fB-scan\\fP=\\fIdir\\fP option).\n.SH OUTPUT OPTIONS\n\n.TP\n.B\n\\fB--diff\\fP=\\fIattrset\\fP\nWhen scanning or reading changelogs, display changes for the given set of attributes (to stdout).\n\\fIattrset\\fP is a list of values in: path,posix,stripe,all,status,notimes,noatime.\n.SH BEHAVIOR OPTIONS\n\n.TP\n.B\n\\fB-O\\fP, \\fB--once\\fP\nPerform only one pass of the specified action and exit.\n.TP\n.B\n\\fB-d\\fP, \\fB--detach\\fP\nDaemonize the process (detach from parent process).\n.TP\n.B\n\\fB--alter-db\\fP\nAllow database schema modifications (backup your DB before using this).\n.SH CONFIG FILE OPTIONS\n\n.TP\n.B\n\\fB-f\\fP \\fIcfg_file\\fP, \\fB--config-file\\fP=\\fIcfg_file\\fP\nPath to configuration file (or short name).\n.TP\n.B\n\\fB-T\\fP \\fIoutput_file\\fP, \\fB--template\\fP[=\\fIoutput_file\\fP]\nWrite a configuration file template to the specified file.\n.TP\n.B\n\\fB-D\\fP, \\fB--defaults\\fP\nDisplay default configuration values.\n.TP\n.B\n\\fB--test-syntax\\fP\nCheck configuration file and exit.\n.SH LOG OPTIONS\n\n.TP\n.B\n\\fB-L\\fP \\fIlogfile\\fP, \\fB--log-file\\fP=\\fIlogfile\\fP\nForce the path to the log file (overrides configuration value).\nSpecial values \"stdout\" and \"stderr\" can be used.\n.TP\n.B\n\\fB-l\\fP \\fIloglevel\\fP, \\fB--log-level\\fP=\\fIloglevel\\fP\nForce the log verbosity level (overrides configuration value).\nAllowed values: CRIT, MAJOR, EVENT, VERB, DEBUG, FULL.\n.SH MISCELLANEOUS OPTIONS\n\n.TP\n.B\n\\fB-h\\fP, \\fB--help\\fP\nDisplay a short help about command line \\fIoptions\\fP.\n.TP\n.B\n\\fB-V\\fP, \\fB--version\\fP\nDisplay version info\n.TP\n.B\n\\fB-p\\fP \\fIpidfile\\fP, \\fB--pid-file\\fP=\\fIpidfile\\fP\nPid file (used for service management).\n.SH SEE ALSO\n\\fBrbh-report\\fP(1), \\fBrbh-find\\fP(1), \\fBrbh-du\\fP(1), 
\\fBrbh-diff\\fP(1)\n"
  },
  {
    "path": "robinhood.spec.in",
    "content": "#default ON\n%bcond_without mysql\n%bcond_without common_rpms\n%bcond_without jemalloc\n\n#default OFF\n%bcond_with lustre\n\n%if %{with lustre}\n\t%global lswitch --enable-lustre\n\t# default ON\n\t%bcond_without lhsm\n\t%bcond_without backup\n%else\n\t%global lswitch --disable-lustre\n\t# default OFF\n\t%bcond_with lhsm\n\t%bcond_with backup\n%endif\n\n# default OFF\n%bcond_with shook\n%bcond_with recovtools\n\n%if ( 0%{?fedora} >= 18 || 0%{?rhel} >= 7 || 0%{?suse_version} >= 1210 )\n%global with_systemd 1\n%else\n%global with_systemd 0\n%endif\n\n%if %{with lustre}\n# default value for lustre package and version, if not defined in rpmbuild options\n%{!?lpackage: %global lpackage lustre-client}\n%if %{undefined lversion}\n%{warn:WARNING: No target lustre version specified. You should --define \"lversion x.y\"\n         to prevent incompatibility issues.\n}\n%endif\n%endif\n\n%if %{with jemalloc}\n\t%global jemalloc_switch\t--enable-jemalloc\n%else\n\t%global jemalloc_switch\t--disable-jemalloc\n%endif\n\n# target install dir for web gui\n%define installdir_www  /var/www\n\n###### end of macro definitions #####\n\nName: @PACKAGE@\nVersion: @VERSION@\n\nVendor: CEA, HPC department <http://www-hpc.cea.fr>\nPrefix: %{_prefix}\n\n%if %{with lustre}\n%if %{defined lversion}\n%define config_dependant .lustre%{lversion}\n%else\n%define config_dependant .lustre\n%endif\n%endif\n\nRelease: @RELEASE@%{?config_dependant}%{?dist}\n\nSummary: Robinhood - Policy engine and reporting tool for large filesystems\nLicense: CeCILL-C\nUrl: http://robinhood.sourceforge.net\nSource0: @PACKAGE@-%{version}.tar.gz\nBuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n)\nBuildRequires: glib2-devel >= 2.16\nBuildRequires: libattr-devel\n%if %{with_systemd}\n%if %{defined suse_version}\nBuildRequires: systemd-rpm-macros\n%else\nBuildRequires: systemd\n%systemd_requires # post/preun/postun\n%endif\n%endif\n%if %{with lustre}\n%if %{defined 
lversion}\nBuildRequires: %{lpackage} >= %{lversion}\n%else\nBuildRequires: %{lpackage}\n%endif\n%endif\n%if %{with mysql}\nBuildRequires: /usr/include/mysql/mysql.h\n%endif\n%if %{with jemalloc}\nBuildRequires: jemalloc\nBuildRequires: jemalloc-devel\n%endif\n\n%description\nRobinhood is a tool for monitoring and applying policies to file system entries.\nIt is designed to process all its tasks in parallel, so it is particularly adapted\nfor managing large file systems with millions of entries and petabytes of data.\n\nWith support for: %{?with_lustre:Lustre} %{?with_backup:Backup} %{?with_shook:shook}\n\n%{?configure_flags:Generated using options: }%{?configure_flags}\n\n%if %{with lustre}\n# Package robinhood-lustre includes robinhood for Lustre filesystem\n# which is not compatible with robinhood-posix.\n%package lustre\n\nSummary: Robinhood Policy Engine for Lustre filesystems\nGroup: Applications/System\n%if %{defined lversion}\nRequires: %{lpackage} >= %{lversion}\n%else\nRequires: %{lpackage}\n%endif\nConflicts: robinhood-posix\nProvides: robinhood = %{version}-%{release}\nObsoletes: robinhood-tmpfs < 3.0, robinhood-tmpfs-lustre < 3.0\nObsoletes: robinhood-backup < 3.0, robinhood-lhsm < 3.0\nRequires: /usr/bin/mailx\n%if %{with jemalloc}\nRequires: jemalloc\n%endif\n\n%description lustre\nPolicy engine for Lustre filesystems.\n\n%{?configure_flags:Generated using options: }%{?configure_flags}\n\n%else\n\n# Package robinhood-posix includes robinhood for other POSIX filesystems\n# It is not compatible with robinhood-lustre.\n%package posix\n\nSummary: Robinhood Policy engine for POSIX filesystems\nGroup: Applications/System\nConflicts: robinhood-lustre\nProvides: robinhood = %{version}-%{release}\nObsoletes: robinhood-tmpfs < 3.0, robinhood-tmpfs-posix < 3.0\nRequires: /usr/bin/mailx\n%if %{with jemalloc}\nRequires: jemalloc\n%endif\n\n%description posix\nPolicy engine for POSIX filesystems.\n\n%{?configure_flags:Generated using options: 
}%{?configure_flags}\n\n%endif\n\n%if %{with common_rpms}\n%package adm\nSummary: admin/config helper for Robinhood PolicyEngine\nGroup: Applications/System\n\n%description adm\nThis RPM provides an admin/config helper for Robinhood PolicyEngine (command rbh-config).\n\n\n%package webgui\nSummary: Web interface to vizualize filesystems stats\nGroup: Applications/System\nRequires: php, php-pdo\n%if 0%{?rhel} <= 7\nRequires: php-mysql\n%else\nRequires: php-mysqlnd\n%endif\n\n%description webgui\nWeb interface to vizualize filesystems stats.\nThis uses robinhood database to display misc. user and group stats.\n\n\n%if %{with recovtools}\n%package recov-tools\nSummary: Tools for MDS recovery.\nGroup: Applications/System\n\n%description recov-tools\nTools for MDS recovery.\n%endif\n\n%package tests\nSummary: Test suite for Robinhood\nGroup: Applications/System\nRequires: robinhood robinhood-adm bc strace\n# mariadb or mysql\nRequires: /usr/bin/mysql\n\n%description tests\nLustre and Posix tests for Robinhood.\n\n%endif\n\n%if %{with lhsm} || %{with shook}\n%package tools\nSummary: Annex tools for robinhood.\nGroup: Applications/System\n\n%description tools\nAnnex tools for robinhood.\n%endif\n\n%if %{with shook}\n%package mod-shook\nSummary: Shook module for robinhood\nGroup: Applications/System\nRequires: robinhood-lustre = %{version}\nRequires: shook-server\nBuildRequires: shook-devel\nBuildRequires: shook-server\n\n%description mod-shook\nShook module for robinhood\n%endif\n\n%prep\n%setup -q -n @PACKAGE@-%{version}\n\n%build\n./configure %{lswitch} %{jemalloc_switch} %{?configure_flags} \\\n\t--mandir=%{_mandir} --libdir=%{_libdir}\nmake %{?_smp_mflags}\n\n%install\nrm -rf $RPM_BUILD_ROOT\nmkdir -p $RPM_BUILD_ROOT\nmake install DESTDIR=$RPM_BUILD_ROOT\n\n\nmkdir -p $RPM_BUILD_ROOT/%{_sysconfdir}/sysconfig\ninstall -m 644 scripts/sysconfig_robinhood $RPM_BUILD_ROOT/%{_sysconfdir}/sysconfig/robinhood\n\n%if %{with common_rpms}\nmkdir -p 
$RPM_BUILD_ROOT/%{installdir_www}/robinhood\ncp -r web_gui/gui_v3/* $RPM_BUILD_ROOT/%{installdir_www}/robinhood/.\ncp    web_gui/gui_v3/api/.htaccess $RPM_BUILD_ROOT/%{installdir_www}/robinhood/api/.\nmkdir -p $RPM_BUILD_ROOT/%{_sysconfdir}/httpd/conf.d/\ninstall -m 644 web_gui/robinhood.conf $RPM_BUILD_ROOT/%{_sysconfdir}/httpd/conf.d/.\n%endif\n\nrm -f $RPM_BUILD_ROOT/%{_libdir}/robinhood/librbh_mod_*.a\nrm -f $RPM_BUILD_ROOT/%{_libdir}/robinhood/librbh_mod_*.la\n\n# Add an unmutable copy of the templates for the tests\n%if %{with common_rpms}\nmkdir -p $RPM_BUILD_ROOT/%{_datadir}/robinhood/doc\ncp -a doc/templates $RPM_BUILD_ROOT/%{_datadir}/robinhood/doc\n%endif\n\n%if %{with_systemd}\nmkdir -p  $RPM_BUILD_ROOT/%{_unitdir}\ninstall -m 444 scripts/robinhood.service $RPM_BUILD_ROOT/%{_unitdir}/robinhood.service\ninstall -m 444 scripts/robinhood@.service $RPM_BUILD_ROOT/%{_unitdir}/robinhood@.service\n\n%if %{with lustre}\n%post lustre\n%else\n%post posix\n%endif\n/sbin/ldconfig\n%if %{defined suse_version}\n%service_add_post robinhood.service robinhood@.service\n%else\n%systemd_post robinhood.service\n%systemd_post robinhood@.service\n%endif\n\n%if %{with lustre}\n%preun lustre\n%else\n%preun posix\n%endif\n%if %{defined suse_version}\n%service_del_preun robinhood.service robinhood@.service\n%else\n%systemd_preun robinhood.service\n%systemd_preun robinhood@.service\n%endif\n\n\n%else # with_systemd\nmkdir -p $RPM_BUILD_ROOT/%{_initrddir}\n\n%if %{defined suse_version}\ninstall -m 755 scripts/robinhood.init.sles $RPM_BUILD_ROOT/%{_initrddir}/robinhood\n%else\ninstall -m 755 scripts/robinhood.init $RPM_BUILD_ROOT/%{_initrddir}/robinhood\n%endif\n\n%if %{with lustre}\n%post lustre\n%else\n%post posix\n%endif\n/sbin/ldconfig\nif [ -x %{_initrddir}/robinhood ]; then\n  if %{_initrddir}/robinhood status | grep running | grep -v \"not running\"  >/dev/null 2>&1; then\n    %{_initrddir}/robinhood stop\n    WASRUNNING=1\n  fi\n  [ -x /sbin/chkconfig ] && /sbin/chkconfig 
--del robinhood\n  [ -x /sbin/chkconfig ] && /sbin/chkconfig --add robinhood\n  if test x$WASRUNNING = x1; then\n    %{_initrddir}/robinhood start\n  fi\nfi\n\n%if %{with lustre}\n%preun lustre\n%else\n%preun posix\n%endif\n\nif [ \"$1\" = 0 ]; then\n  if [ -x %{_initrddir}/robinhood ]; then\n     [ -x /sbin/chkconfig ] && /sbin/chkconfig --del robinhood\n    if %{_initrddir}/robinhood status | grep running | grep -v \"not running\" >/dev/null 2>&1; then\n      %{_initrddir}/robinhood stop\n    fi\n  fi\nfi\n%endif # with_systemd\n\n%if %{with lustre}\n%postun lustre\n%else\n%postun posix\n%endif\n/sbin/ldconfig\n\n%if %{with_systemd}\n%if %{defined suse_version}\n%service_del_postun robinhood.service robinhood@.service\n%endif\n%endif\n\n%if %{with_systemd}\n%if %{defined suse_version}\n%if %{with lustre}\n%pre lustre\n%else\n%pre posix\n%endif\n%service_add_pre robinhood.service robinhood@.service\n%endif\n%endif\n\n%clean\nrm -rf $RPM_BUILD_ROOT\n\n%if %{with common_rpms}\n\n%files adm\n%{_sbindir}/rbh-config\n\n%if %{with recovtools}\n%files recov-tools\n%{_sbindir}/*lovea\n%{_sbindir}/gen_lov_objid\n%{_sbindir}/ost_fids_remap\n%endif\n\n%files webgui\n\n# set apache permissions\n%defattr(640, root, apache, 750)\n%{installdir_www}/robinhood\n%config(noreplace) %{_sysconfdir}/httpd/conf.d/robinhood.conf\n\n\n%files tests\n%defattr(-,root,root,-)\n%dir %{_datadir}/robinhood/\n%{_datadir}/robinhood/tests/\n%{_datadir}/robinhood/doc/\n\n%endif\n\n%if %{with shook}\n%files mod-shook\n%{_libdir}/robinhood/librbh_mod_shook*.so*\n%{_sysconfdir}/robinhood.d/includes/shook.inc\n%{_sysconfdir}/robinhood.d/templates/example_shook.conf\n%endif\n\n# robinhood RPM name for lustre is robinhood-lustre\n%if %{with lustre}\n%files lustre\n%else\n# robinhood RPM name for posix is robinhood-posix\n%files posix\n%endif\n\n%defattr(-,root,root,-)\n%doc README.md\n%doc COPYING\n%doc ChangeLog\n\n%if %{with 
backup}\n%{_sbindir}/rbhext_*\n%endif\n\n%{_sbindir}/robinhood\n%{_sbindir}/rbh-report\n%{_sbindir}/rbh-diff\n%{_sbindir}/rbh-undelete\n%{_sbindir}/rbh-rebind\n%{_sbindir}/rbh_cksum.sh\n%{_bindir}/rbh-du\n%{_bindir}/rbh-find\n\n%if %{with shook}\n%exclude %{_libdir}/robinhood/librbh_mod_shook*.so*\n%endif\n%{_libdir}/robinhood/librbh_mod_*.so*\n\n# All man pages but the lhsmtool_cmd one\n%{_mandir}/man1/r*\n\n%config(noreplace) %{_sysconfdir}/sysconfig/robinhood\n\n%dir %{_sysconfdir}/robinhood.d\n%dir %{_sysconfdir}/robinhood.d/includes\n%dir %{_sysconfdir}/robinhood.d/templates\n\n%{_sysconfdir}/ld.so.conf.d/robinhood.conf\n\n%exclude %{_sysconfdir}/robinhood.d/includes/shook.inc\n%exclude %{_sysconfdir}/robinhood.d/templates/example_shook.conf\n%config %{_sysconfdir}/robinhood.d/includes/*.inc\n%config %{_sysconfdir}/robinhood.d/templates/*.conf\n\n%if %{with_systemd}\n%{_unitdir}/robinhood.service\n%{_unitdir}/robinhood@.service\n%else\n%{_initrddir}/robinhood\n%endif\n\n%if %{with lhsm} || %{with shook}\n%files tools\n%if %{with lhsm}\n%{_sbindir}/lhsmtool_cmd\n%{_mandir}/man1/lhsmtool*\n%endif\n%endif\n\n%changelog\n\n* Mon Nov 24 2025 Sebastien Gougeaud <sebastien.gougeaud@cea.fr> 3.2.0-3\n- Fix report issue when using `-S` with group report\n- Make also install configuration and template files\n\n* Wed Nov 20 2024 Thomas Leibovici <thomas.leibovici@cea.fr> 3.2.0-2\n- Fix packaging error for WebUI on EL7\n\n* Wed Nov 20 2024 Thomas Leibovici <thomas.leibovici@cea.fr> 3.2.0-1\n- Robinhood 3.2.0\n\n* Mon Sep 05 2022 Thomas Leibovici <thomas.leibovici@cea.fr> 3.1.8-1\n- Robinhood 3.1.8\n\n* Tue Mar 09 2021 Thomas Leibovici <thomas.leibovici@cea.fr> 3.1.7-1\n- Robinhood 3.1.7\n\n* Fri Apr 03 2020 Thomas Leibovici <thomas.leibovici@cea.fr> 3.1.6-1\n- Robinhood 3.1.6\n\n* Wed Mar 20 2019 Thomas Leibovici <thomas.leibovici@cea.fr> 3.1.5-1\n- Robinhood 3.1.5\n\n* Fri Sep 21 2018 Thomas Leibovici <thomas.leibovici@cea.fr> 3.1.4-1\n- Robinhood 3.1.4\n\n* Tue Sep 26 
2017 Thomas Leibovici <thomas.leibovici@cea.fr> 3.1-1\n- Robinhood 3.1\n\n* Tue Sep 12 2017 Thomas Leibovici <thomas.leibovici@cea.fr> 3.1-0.beta1\n- Robinhood 3.1 beta\n\n* Fri Sep 16 2016 Thomas Leibovici <thomas.leibovici@cea.fr> 3.0-1\n- Final Robinhood 3.0 release\n\n* Thu Jul 07 2016 Thomas Leibovici <thomas.leibovici@cea.fr> 3.0-0.rc1\n- Robinhood v3 rc1\n\n* Fri Mar 25 2016 Thomas Leibovici <thomas.leibovici@cea.fr> 3.0-0.alpha2\n- Robinhood v3 alpha2\n\n* Wed Dec 16 2015 Thomas Leibovici <thomas.leibovici@cea.fr> 3.0-0.alpha1\n- Robinhood v3 alpha1\n"
  },
  {
    "path": "scripts/Makefile.am",
    "content": "\nEXTRA_DIST= robinhood.init\t\t\t\\\n\trobinhood.init.in\t\t\t\\\n\trobinhood.init.sles         \t\t\\\n\trobinhood.init.sles.in      \t\t\\\n\trobinhood.service\t\t\t\\\n\trobinhood@.service\t\t\t\\\n\tsysconfig_robinhood\t\t\t\\\n\tsysconfig_robinhood.in\t\t\t\\\n\ttype_gen.pl\t\t\t\t\\\n\tindent.sh \t\t\t\t\\\n\tdemo/disk_usage.sh\t\t\t\\\n\tdemo/migr_purge.sh\t\t\t\\\n\tdemo/rh.migr_purge.conf\t\t\t\\\n\tdemo/write_data.sh  \t\t\t\\\n    cmd2man.sh make_mans.sh fix_man_options.sh  \\\n    ld.so.robinhood.conf.in rbh_cksum.sh.in\n\n\nldconfdir=@CONFDIR@/ld.so.conf.d\nldconf_DATA=robinhood.conf\n\nrobinhood.conf: ld.so.robinhood.conf\n\tmv ld.so.robinhood.conf robinhood.conf\n\ndist_sbin_SCRIPTS=rbh_cksum.sh\n\nif COMMON_RPMS\ndist_sbin_SCRIPTS+=rbh-config\nendif\n\nif HSM_LITE\ndist_sbin_SCRIPTS+=rbhext_tool rbhext_tool_svr rbhext_tool_clnt\nendif\n\nall-local: rbh_cksum.sh\n\tchmod 755 rbh_cksum.sh\n"
  },
  {
    "path": "scripts/bestiaire.sh",
    "content": "#!/bin/sh\n#\n# create a filesystem with various object types etc...\n#\n# usage: bestiaire.sh <root>\n#\n\nTESTDIR=$1\n\nif [ ! -d \"$TESTDIR\" ]; then\n    echo \"usage: $0 <root>\"\n    exit 1\nfi\n\nDIRNAME=\"directory\"\nFILENAME=\"file\"\nDEPTH=256\nFILEPERDIR=10\n\n#create files in root\nfor i in $(seq 1 $FILEPERDIR); do\n    dd if=/dev/zero of=\"$TESTDIR/$FILENAME.$i\" bs=1k count=$i || exit 1\ndone\n\n# create directories (until DEPTH)\n# create files at each level\nd=0\ncurr=\"$TESTDIR\"\nwhile (( $d < $DEPTH )); do\n    curr=\"$curr/$DIRNAME.$d\"\n    mkdir -p $curr || exit 1\n    for i in $(seq 1 $FILEPERDIR); do\n        dd if=/dev/zero of=\"$curr/$FILENAME.$d.$i\" bs=1k count=$i || exit 1\n    done\n    ((d=$d+1))\ndone\n\n# create various types\nln -s blablabla $TESTDIR/symlink || exit 1\nln -s $TESTDIR/$DIRNAME.0/file.0.1 $TESTDIR/$DIRNAME.0/symlink.0 || exit 1\n\nln $TESTDIR/$FILENAME.$i $TESTDIR/link.$i || exit 1\n"
  },
  {
    "path": "scripts/cfg_25to30.sh",
    "content": "#!/bin/bash\n#\n# helper to convert robinhood 2.5 to robinhood 3.0  config file.\n#\n\nfunction error\n{\n    echo \"$*\"\n    exit 1\n}\n\n\nif [ -z $1 ]; then\n    echo \"usage: $0 <cfg_file>\"\n    exit 1\nfi\ncfg=$1\n\ntmp=$cfg.new\ncp \"$cfg\" \"$tmp\" || error \"failed to create temporary copy of $cfg\"\n\nold_blocks=(\"db_update_policy\" \"user_acct\")\nnew_blocks=(\"db_update_params\" \"accounting\")\n\ni=0\nwhile [ -n \"${old_blocks[$i]}\" ]; do\n    sed -i -e \"s/\\s*${old_blocks[$i]}\\s*/${new_blocks[$i]}/i\" $tmp\n    ((i++))\ndone\n\necho \"new config file created: $tmp\"\n\n### change runtime_interval to trigger\n### change hsm_remove_policy parameters\n### backup_new_files is in policy scope\n\n"
  },
  {
    "path": "scripts/check_commit.sh",
    "content": "#!/bin/bash\n\ndir=$(dirname $(readlink -m $0))\n\nopt=\"\"\n\nfunction check_file\n{\n\texec git diff --cached --format=email $1 |\n\t\t$dir/checkpatch.pl --patch --no-tree -q -\n}\n\nif [ \"$1\" ]; then\n    cmd=\"cat $1\"\n    opt=\"--cached\"\nelse\n    cmd=\"git status\"\nfi\nfor f in $($cmd | grep -E 'added:|modified:|new file:' | cut -d ':' -f 2 | tr -d \" \"); do\n    check_file $f\ndone\n\nexit 0\n"
  },
  {
    "path": "scripts/check_rpm_reloc.sh",
    "content": "#!/bin/bash\n\n# without option:\n# expected:\n# rpm to install config to /etc/robinhood.d\n# rpm to install binaries to /usr/[s]bin\n# rpm relocated to /usr\n# rbh to search config in /etc/robinhood.d\n# init script to get config in /etc/robinhood.d\n\nfunction error\n{\n    echo \"ERROR: $@\"\n    exit 1\n}\n\nfunction get_cpu\n{\n    echo $(( $(cat /proc/cpuinfo  | grep -P \"processor\\s*:\" | cut -d ':' -f 2 | tail -1) + 1 ))\n}\n\nfunction check_build_dir\n{\n    local dir=$1\n    local rbhcfg=$2\n\n    # use 'strings' to read the path from the binary\n    for cfg in $(strings $dir/src/common/RobinhoodMisc.o  | grep robinhood.d); do\n        echo $cfg | egrep -E \"^$rbhcfg\"  > /dev/null || error \"wrong config location: $cfg not in $rbhcfg\"\n        echo \"$cfg OK\"\n    done\n\n    confdir=$(egrep -E \"^RH_CONF_DIR\" $dir/scripts/robinhood.init | awk '{print $1}' | cut -d '=' -f 2)\n    [ \"$confdir\" == \"$rbhcfg/tmpfs\" ] || error \"unexpected config dir in robinhood.init: $confdir\"\n    echo \"$confdir OK\"\n}\n\n\nfunction check_locations\n{\n    config_options=\"$1\"\n    expected_confdir=\"$2\"\n    expected_rbhconfdir=\"$2/robinhood.d\"\n    expected_binprefix=\"$3\"\n\n    echo \"./configure $config_options\"\n    ./configure $config_options | grep \"Using config dir\" || error \"configure error\"\n\n    echo \"building RPM...\"\n    make rpm 2>&1 | grep -E \"Using config dir|$expected_rbhconfdir\" || error \"make rpm error\"\n    \n    # check RPM content:\n    rpm=$(ls -tr ./rpms/RPMS/x86_64/robinhood-tmpfs* | tail -1)\n    [ -z $rpm ] && error \"no matching RPM found\"\n\n    echo \"checking RPM: $rpm\"\n\n    reloc=$(rpm -qpi $rpm | grep Relocation | awk '{print $(NF)}')\n    [ \"$reloc\" == \"$expected_binprefix\" ] || error \"wrong RPM relocation: $reloc\"\n    echo \"Relocation $reloc OK\"\n\n    sbin=$(dirname $(rpm -qpl $rpm | grep \"rbh-report\"))\n    [ \"$sbin\" == \"$expected_binprefix/sbin\" ] || error \"wrong sbin 
location: $sbin\"\n    echo \"sbindir $sbin OK\"\n\n    for cfg in $(rpm -qpl $rpm | grep \"robinhood.d/\"); do\n        # does it start with expected_rbhconfdir?\n        echo $cfg | egrep -E \"^$expected_rbhconfdir/\"  > /dev/null || error \"wrong config location: $cfg not in $expected_rbhconfdir\"\n        echo \"$cfg OK\"\n    done\n\n    last_build=$(ls -tr rpms/BUILD | tail -1)\n    [ -z $last_build ] && error \"no BUILD found\"\n\n    echo \"checking executables in rpms/BUILD/$last_build...\"\n    check_build_dir \"rpms/BUILD/$last_build\" \"$expected_rbhconfdir\"\n\n    echo \"building source tree...\"\n    make -j $(get_cpu) >/dev/null 2>&1 || error \"build error\"\n\n    echo \"checking executables in source tree...\"\n    check_build_dir \".\" \"$expected_rbhconfdir\"\n\n    echo \"make install...\"\n    export DESTDIR=/tmp/install.$$\n    for f in $(make install | grep '/bin/install' | awk '{print $(NF)}' | tr -d \"'\"); do\n        echo $f | egrep -E \"^$DESTDIR$expected_binprefix\" || error \"$f not in \\$DESTDIR$expected_binprefix\"\n    done\n    rm -rf $DESTDIR\n    unset DESTDIR\n\n    echo\n    return 0\n}\n\ncheck_locations \"\" \"/etc\" \"/usr\"\ncheck_locations \"--prefix=/opt/rbh\" \"/opt/rbh/etc\" \"/opt/rbh\"\ncheck_locations \"--prefix=/opt/rbh --sysconfdir=/opt/rbh/cfg\" \"/opt/rbh/cfg\" \"/opt/rbh\"\n\n\n\n\n\n\n\n\n\n\n#./configure  && make rpm | grep config && rpm -qpl /cea/home/gpocre/leibovi/robinhood.git/rpms/RPMS/x86_64/robinhood-tmpfs-2.4.3-2.lustre2.1.el6.x86_64.rpm && rpm -qpi /cea/home/gpocre/leibovi/robinhood.git/rpms/RPMS/x86_64/robinhood-tmpfs-2.4.3-2.lustre2.1.el6.x86_64.rpm\n"
  },
  {
    "path": "scripts/checkpatch.pl",
    "content": "#!/usr/bin/perl -w\n# (c) 2001, Dave Jones. (the file handling bit)\n# (c) 2005, Joel Schopp <jschopp@austin.ibm.com> (the ugly bit)\n# (c) 2007,2008, Andy Whitcroft <apw@uk.ibm.com> (new conditions, test suite)\n# (c) 2008-2010 Andy Whitcroft <apw@canonical.com>\n# Licensed under the terms of the GNU GPL License version 2\n\n# Based on linux/scripts/checkpatch.pl from 2.6.38 but uses Lustre\n# specific deprecated symbol/functions/include lists rather than\n# Documentation/feature-removal-schedule.txt.\n\n# TL 2014/12/18: no tabs\n\nuse strict;\n\nmy $P = $0;\n$P =~ s@.*/@@g;\n\nmy $V = '0.32';\n\nuse Getopt::Long qw(:config no_auto_abbrev);\n\nmy $quiet = 0;\nmy $tree = 0;\nmy $chk_signoff = 0;\nmy $chk_patch = 1;\nmy $tst_only;\nmy $emacs = 0;\nmy $terse = 0;\nmy $file = 0;\nmy $check = 0;\nmy $summary = 1;\nmy $mailback = 0;\nmy $summary_file = 0;\nmy $show_types = 0;\nmy $root;\nmy %debug;\nmy %ignore_type = ();\nmy @ignore = ();\nmy $help = 0;\nmy $configuration_file = \".checkpatch.conf\";\n\nsub help {\n\tmy ($exitcode) = @_;\n\n\tprint << \"EOM\";\nUsage: $P [OPTION]... [FILE]...\nVersion: $V\n\nOptions:\n  -q, --quiet                quiet\n  --no-tree                  run without a kernel tree\n  --no-signoff               do not check for 'Signed-off-by' line\n  --patch                    treat FILE as patchfile (default)\n  --emacs                    emacs compile window format\n  --terse                    one line per report\n  -f, --file                 treat FILE as regular source file\n  --subjective, --strict     enable more subjective tests\n  --ignore TYPE(,TYPE2...)   
ignore various comma separated message types\n  --show-types               show the message \"types\" in the output\n  --root=PATH                PATH to the kernel tree root\n  --no-summary               suppress the per-file summary\n  --mailback                 only produce a report in case of warnings/errors\n  --summary-file             include the filename in summary\n  --debug KEY=[0|1]          turn on/off debugging of KEY, where KEY is one of\n                             'values', 'possible', 'type', and 'attr' (default\n                             is all off)\n  --test-only=WORD           report only warnings/errors containing WORD\n                             literally\n  -h, --help, --version      display this help and exit\n\nWhen FILE is - read standard input.\nEOM\n\n\texit($exitcode);\n}\n\nmy $conf = which_conf($configuration_file);\nif (-f $conf) {\n\tmy @conf_args;\n\topen(my $conffile, '<', \"$conf\")\n\t    or warn \"$P: Can't find a readable $configuration_file file $!\\n\";\n\n\twhile (<$conffile>) {\n\t\tmy $line = $_;\n\n\t\t$line =~ s/\\s*\\n?$//g;\n\t\t$line =~ s/^\\s*//g;\n\t\t$line =~ s/\\s+/ /g;\n\n\t\tnext if ($line =~ m/^\\s*#/);\n\t\tnext if ($line =~ m/^\\s*$/);\n\n\t\tmy @words = split(\" \", $line);\n\t\tforeach my $word (@words) {\n\t\t\tlast if ($word =~ m/^#/);\n\t\t\tpush (@conf_args, $word);\n\t\t}\n\t}\n\tclose($conffile);\n\tunshift(@ARGV, @conf_args) if @conf_args;\n}\n\nGetOptions(\n\t'q|quiet+'\t=> \\$quiet,\n\t'tree!'\t\t=> \\$tree,\n\t'signoff!'\t=> \\$chk_signoff,\n\t'patch!'\t=> \\$chk_patch,\n\t'emacs!'\t=> \\$emacs,\n\t'terse!'\t=> \\$terse,\n\t'f|file!'\t=> \\$file,\n\t'subjective!'\t=> \\$check,\n\t'strict!'\t=> \\$check,\n\t'ignore=s'\t=> \\@ignore,\n\t'show-types!'\t=> \\$show_types,\n\t'root=s'\t=> \\$root,\n\t'summary!'\t=> \\$summary,\n\t'mailback!'\t=> \\$mailback,\n\t'summary-file!'\t=> \\$summary_file,\n\n\t'debug=s'\t=> \\%debug,\n\t'test-only=s'\t=> \\$tst_only,\n\t'h|help'\t=> 
\\$help,\n\t'version'\t=> \\$help\n) or help(1);\n\nhelp(0) if ($help);\n\nmy $exit = 0;\n\nif ($#ARGV < 0) {\n\tprint \"$P: no input files\\n\";\n\texit(1);\n}\n\n@ignore = split(/,/, join(',',@ignore));\n\n# __packed if for kernel land\npush @ignore, 'PREFER_PACKED';\n# allow typedefs\npush @ignore, 'NEW_TYPEDEFS';\n\nforeach my $word (@ignore) {\n\t$word =~ s/\\s*\\n?$//g;\n\t$word =~ s/^\\s*//g;\n\t$word =~ s/\\s+/ /g;\n\t$word =~ tr/[a-z]/[A-Z]/;\n\n\tnext if ($word =~ m/^\\s*#/);\n\tnext if ($word =~ m/^\\s*$/);\n\n\t$ignore_type{$word}++;\n}\n\nmy $dbg_values = 0;\nmy $dbg_possible = 0;\nmy $dbg_type = 0;\nmy $dbg_attr = 0;\nfor my $key (keys %debug) {\n\t## no critic\n\teval \"\\${dbg_$key} = '$debug{$key}';\";\n\tdie \"$@\" if ($@);\n}\n\nmy $rpt_cleaners = 0;\n\nif ($terse) {\n\t$emacs = 1;\n\t$quiet++;\n}\n\nif ($tree) {\n\tif (defined $root) {\n\t\tif (!top_of_kernel_tree($root)) {\n\t\t\tdie \"$P: $root: --root does not point at a valid tree\\n\";\n\t\t}\n\t} else {\n\t\tif (top_of_kernel_tree('.')) {\n\t\t\t$root = '.';\n\t\t} elsif ($0 =~ m@(.*)/scripts/[^/]*$@ &&\n\t\t\t\t\t\ttop_of_kernel_tree($1)) {\n\t\t\t$root = $1;\n\t\t}\n\t}\n\n\tif (!defined $root) {\n\t\tprint \"Must be run from the top-level dir. 
of a kernel tree\\n\";\n\t\texit(2);\n\t}\n}\n\nmy $emitted_corrupt = 0;\n\nour $Ident\t= qr{\n\t\t\t[A-Za-z_][A-Za-z\\d_]*\n\t\t\t(?:\\s*\\#\\#\\s*[A-Za-z_][A-Za-z\\d_]*)*\n\t\t}x;\nour $Storage\t= qr{extern|static|asmlinkage};\nour $Sparse\t= qr{\n\t\t\t__user|\n\t\t\t__kernel|\n\t\t\t__force|\n\t\t\t__iomem|\n\t\t\t__must_check|\n\t\t\t__init_refok|\n\t\t\t__kprobes|\n\t\t\t__ref|\n\t\t\t__rcu\n\t\t}x;\n\n# Notes to $Attribute:\n# We need \\b after 'init' otherwise 'initconst' will cause a false positive in a check\nour $Attribute\t= qr{\n\t\t\tconst|\n\t\t\t__percpu|\n\t\t\t__nocast|\n\t\t\t__safe|\n\t\t\t__bitwise__|\n\t\t\t__packed__|\n\t\t\t__packed2__|\n\t\t\t__naked|\n\t\t\t__maybe_unused|\n\t\t\t__always_unused|\n\t\t\t__noreturn|\n\t\t\t__used|\n\t\t\t__cold|\n\t\t\t__noclone|\n\t\t\t__deprecated|\n\t\t\t__read_mostly|\n\t\t\t__kprobes|\n\t\t\t__(?:mem|cpu|dev|)(?:initdata|initconst|init\\b)|\n\t\t\t____cacheline_aligned|\n\t\t\t____cacheline_aligned_in_smp|\n\t\t\t____cacheline_internodealigned_in_smp|\n\t\t\t__weak\n\t\t  }x;\nour $Modifier;\nour $Inline\t= qr{inline|__always_inline|noinline};\nour $Member\t= qr{->$Ident|\\.$Ident|\\[[^]]*\\]};\nour $Lval\t= qr{$Ident(?:$Member)*};\n\nour $Constant\t= qr{(?i:(?:[0-9]+|0x[0-9a-f]+)[ul]*)};\nour $Assignment\t= qr{(?:\\*\\=|/=|%=|\\+=|-=|<<=|>>=|&=|\\^=|\\|=|=)};\nour $Compare    = qr{<=|>=|==|!=|<|>};\nour $Operators\t= qr{\n\t\t\t<=|>=|==|!=|\n\t\t\t=>|->|<<|>>|<|>|!|~|\n\t\t\t&&|\\|\\||,|\\^|\\+\\+|--|&|\\||\\+|-|\\*|\\/|%\n\t\t  }x;\n\nour $NonptrType;\nour $Type;\nour $Declare;\n\nour $NON_ASCII_UTF8\t= qr{\n\t[\\xC2-\\xDF][\\x80-\\xBF]               # non-overlong 2-byte\n\t|  \\xE0[\\xA0-\\xBF][\\x80-\\xBF]        # excluding overlongs\n\t| [\\xE1-\\xEC\\xEE\\xEF][\\x80-\\xBF]{2}  # straight 3-byte\n\t|  \\xED[\\x80-\\x9F][\\x80-\\xBF]        # excluding surrogates\n\t|  \\xF0[\\x90-\\xBF][\\x80-\\xBF]{2}     # planes 1-3\n\t| [\\xF1-\\xF3][\\x80-\\xBF]{3}          # planes 4-15\n\t|  
\\xF4[\\x80-\\x8F][\\x80-\\xBF]{2}     # plane 16\n}x;\n\nour $UTF8\t= qr{\n\t[\\x09\\x0A\\x0D\\x20-\\x7E]              # ASCII\n\t| $NON_ASCII_UTF8\n}x;\n\nour $typeTypedefs = qr{(?x:\n\t(?:__)?(?:u|s|be|le)(?:8|16|32|64)|\n\tatomic_t\n)};\n\nour $logFunctions = qr{(?x:\n\tprintk(?:_ratelimited|_once|)|\n\t[a-z0-9]+_(?:printk|emerg|alert|crit|err|warning|warn|notice|info|debug|dbg|vdbg|devel|cont|WARN)(?:_ratelimited|_once|)|\n\tWARN(?:_RATELIMIT|_ONCE|)|\n\tpanic|\n\tMODULE_[A-Z_]+\n)};\n\nour $signature_tags = qr{(?xi:\n\tSigned-off-by:|\n\tAcked-by:|\n\tTested-by:|\n\tReviewed-by:|\n\tReported-by:|\n\tTo:|\n\tCc:\n)};\n\nour @typeList = (\n\tqr{void},\n\tqr{(?:unsigned\\s+)?char},\n\tqr{(?:unsigned\\s+)?short},\n\tqr{(?:unsigned\\s+)?int},\n\tqr{(?:unsigned\\s+)?long},\n\tqr{(?:unsigned\\s+)?long\\s+int},\n\tqr{(?:unsigned\\s+)?long\\s+long},\n\tqr{(?:unsigned\\s+)?long\\s+long\\s+int},\n\tqr{unsigned},\n\tqr{float},\n\tqr{double},\n\tqr{bool},\n\tqr{struct\\s+$Ident},\n\tqr{union\\s+$Ident},\n\tqr{enum\\s+$Ident},\n\tqr{${Ident}_t},\n\tqr{${Ident}_handler},\n\tqr{${Ident}_handler_fn},\n    qr{G[A-Z][a-z]+}, # match glib types such as (GString, GHash, ...)\n    qr{g[a-z]+}, # match glib types such as (gchar, gpointer, ...)\n);\nour @modifierList = (\n\tqr{fastcall},\n);\n\nour $allowed_asm_includes = qr{(?x:\n\tirq|\n\tmemory\n)};\n# memory.h: ARM has a custom one\n\nsub build_types {\n\tmy $mods = \"(?x:  \\n\" . join(\"|\\n  \", @modifierList) . \"\\n)\";\n\tmy $all = \"(?x:  \\n\" . join(\"|\\n  \", @typeList) . 
\"\\n)\";\n\t$Modifier\t= qr{(?:$Attribute|$Sparse|$mods)};\n\t$NonptrType\t= qr{\n\t\t\t(?:$Modifier\\s+|const\\s+)*\n\t\t\t(?:\n\t\t\t\t(?:typeof|__typeof__)\\s*\\([^\\)]*\\)|\n\t\t\t\t(?:$typeTypedefs\\b)|\n\t\t\t\t(?:${all}\\b)\n\t\t\t)\n\t\t\t(?:\\s+$Modifier|\\s+const)*\n\t\t  }x;\n\t$Type\t= qr{\n\t\t\t$NonptrType\n\t\t\t(?:[\\s\\*]+\\s*const|[\\s\\*]+|(?:\\s*\\[\\s*\\])+)?\n\t\t\t(?:\\s+$Inline|\\s+$Modifier)*\n\t\t  }x;\n\t$Declare\t= qr{(?:$Storage\\s+)?$Type};\n}\nbuild_types();\n\nour $match_balanced_parentheses = qr/(\\((?:[^\\(\\)]+|(-1))*\\))/;\n\nour $Typecast\t= qr{\\s*(\\(\\s*$NonptrType\\s*\\)){0,1}\\s*};\nour $LvalOrFunc\t= qr{($Lval)\\s*($match_balanced_parentheses{0,1})\\s*};\nour $FuncArg = qr{$Typecast{0,1}($LvalOrFunc|$Constant)};\n\nsub deparenthesize {\n\tmy ($string) = @_;\n\treturn \"\" if (!defined($string));\n\t$string =~ s@^\\s*\\(\\s*@@g;\n\t$string =~ s@\\s*\\)\\s*$@@g;\n\t$string =~ s@\\s+@ @g;\n\treturn $string;\n}\n\n$chk_signoff = 0 if ($file);\n\nmy %dep_includes = (\n\t'asm/types.h',\t\t\t'linux/types.h',\n\t'asm/uaccess.h',\t\t'linux/uaccess.h',\n);\n\nmy %dep_functions = 
(\n\t'CFS_ATOMIC_INIT',\t\t'ATOMIC_INIT',\n\t'cfs_atomic_add',\t\t'atomic_add',\n\t'cfs_atomic_add_return',\t'atomic_add_return',\n\t'cfs_atomic_add_unless',\t'atomic_add_unless',\n\t'cfs_atomic_cmpxchg',\t\t'atomic_cmpxchg',\n\t'cfs_atomic_dec',\t\t'atomic_dec',\n\t'cfs_atomic_dec_and_lock',\t'atomic_dec_and_lock',\n\t'cfs_atomic_dec_and_test',\t'atomic_dec_and_test',\n\t'cfs_atomic_dec_return',\t'atomic_dec_return',\n\t'cfs_atomic_inc',\t\t'atomic_inc',\n\t'cfs_atomic_inc_and_test',\t'atomic_inc_and_test',\n\t'cfs_atomic_inc_not_zero',\t'atomic_inc_not_zero',\n\t'cfs_atomic_inc_return',\t'atomic_inc_return',\n\t'cfs_atomic_read',\t\t'atomic_read',\n\t'cfs_atomic_set',\t\t'atomic_set',\n\t'cfs_atomic_sub',\t\t'atomic_sub',\n\t'cfs_atomic_sub_and_test',\t'atomic_sub_and_test',\n\t'cfs_atomic_sub_return',\t'atomic_sub_return',\n\t'cfs_atomic_t',\t\t\t'atomic_t',\n\n\t'CFS_HLIST_HEAD',\t\t'HLIST_HEAD',\n\t'CFS_HLIST_HEAD_INIT',\t\t'HLIST_HEAD_INIT',\n\t'CFS_INIT_HLIST_HEAD',\t\t'INIT_HLIST_HEAD',\n\t'CFS_INIT_HLIST_NODE',\t\t'INIT_HLIST_NODE',\n\t'cfs_hlist_add_after',\t\t'hlist_add_after',\n\t'cfs_hlist_add_before',\t\t'hlist_add_before',\n\t'cfs_hlist_add_head',\t\t'hlist_add_head',\n\t'cfs_hlist_del',\t\t'hlist_del',\n\t'cfs_hlist_del_init',\t\t'hlist_del_init',\n\t'cfs_hlist_empty',\t\t'hlist_empty',\n\t'cfs_hlist_entry',\t\t'hlist_entry',\n\t'cfs_hlist_for_each',\t\t'hlist_for_each',\n\t'cfs_hlist_for_each_entry',\t'hlist_for_each_entry',\n\t'cfs_hlist_for_each_entry_continue',\n\t\t\t\t\t'hlist_for_each_entry_continue',\n\t'cfs_hlist_for_each_entry_from',\n\t\t\t\t\t'hlist_for_each_entry_from',\n\t'cfs_hlist_for_each_entry_safe',\n\t\t\t\t\t'hlist_for_each_entry_safe',\n\t'cfs_hlist_for_each_safe',\t'hlist_for_each_safe',\n\t'cfs_hlist_head_t',\t\t'struct hlist_head',\n\t'cfs_hlist_node_t',\t\t'struct hlist_node',\n\t'cfs_hlist_unhashed',\t\t'hlist_unhashed',\n\n\t'cfs_inode_t',\t\t\t'struct 
inode',\n\n\t'CFS_INIT_LIST_HEAD',\t\t'INIT_LIST_HEAD',\n\t'CFS_LIST_HEAD',\t\t'LIST_HEAD',\n\t'CFS_LIST_HEAD_INIT',\t\t'LIST_HEAD_INIT',\n\t'cfs_list_add',\t\t\t'list_add',\n\t'cfs_list_add_tail',\t\t'list_add_tail',\n\t'cfs_list_del',\t\t\t'list_del',\n\t'cfs_list_del_init',\t\t'list_del_init',\n\t'cfs_list_empty',\t\t'list_empty',\n\t'cfs_list_empty_careful',\t'list_empty_careful',\n\t'cfs_list_entry',\t\t'list_entry',\n\t'cfs_list_for_each',\t\t'list_for_each',\n\t'cfs_list_for_each_entry',\t'list_for_each_entry',\n\t'cfs_list_for_each_entry_continue',\n\t\t\t\t\t'list_for_each_entry_continue',\n\t'cfs_list_for_each_entry_reverse',\n\t\t\t\t\t'list_for_each_entry_reverse',\n\t'cfs_list_for_each_entry_safe',\n\t\t\t\t\t'list_for_each_entry_safe',\n\t'cfs_list_for_each_entry_safe_from',\n\t\t\t\t\t'list_for_each_entry_safe_from',\n\t'cfs_list_for_each_entry_safe_reverse',\n\t\t\t\t\t'list_for_each_entry_safe_reverse',\n\t'cfs_list_for_each_entry_safe_typed',\n\t\t\t\t\t'list_for_each_entry_safe_typed',\n\t'cfs_list_for_each_entry_typed',\n\t\t\t\t\t'list_for_each_entry_typed',\n\t'cfs_list_for_each_prev',\t'list_for_each_prev',\n\t'cfs_list_for_each_safe',\t'list_for_each_safe',\n\t'cfs_list_move',\t\t'list_move',\n\t'cfs_list_move_tail',\t\t'list_move_tail',\n\t'cfs_list_splice',\t\t'list_splice',\n\t'cfs_list_splice_init',\t\t'list_splice_init',\n\t'cfs_list_splice_tail',\t\t'list_splice_tail',\n\t'cfs_list_t',\t\t\t'struct list_head',\n\n\t'CFS_PAGE_MASK',\t\t'PAGE_CACHE_MASK or PAGE_MASK',\n\t'CFS_PAGE_SIZE',\t\t'PAGE_CACHE_SIZE or PAGE_SIZE',\n\n\t'cfs_proc_dir_entry_t',\t\t'struct proc_dir_entry',\n\n\t'cfs_rcu_head_t',\t\t'struct rcu_head',\n\n\t'alloca',\t\t\t'malloc',\n\t'mktemp',\t\t\t'mkstemp',\n\t'sprintf',\t\t\t'snprintf',\n\t'strcpy',\t\t\t'strncpy',\n\t'strcat',\t\t\t'strncat',\n\t'tempnam',\t\t\t'mkstemp',\n);\n\nmy @rawlines = ();\nmy @lines = ();\nmy $vname;\nfor my $filename (@ARGV) {\n\tmy $FILE;\n\tif ($file) {\n\t\topen($FILE, '-|', \"diff 
-u /dev/null $filename\") ||\n\t\t\tdie \"$P: $filename: diff failed - $!\\n\";\n\t} elsif ($filename eq '-') {\n\t\topen($FILE, '<&STDIN');\n\t} else {\n\t\topen($FILE, '<', \"$filename\") ||\n\t\t\tdie \"$P: $filename: open failed - $!\\n\";\n\t}\n\tif ($filename eq '-') {\n\t\t$vname = 'Your patch';\n\t} else {\n\t\t$vname = $filename;\n\t}\n\twhile (<$FILE>) {\n\t\tchomp;\n\t\tpush(@rawlines, $_);\n\t}\n\tclose($FILE);\n\tif (!process($filename)) {\n\t\t$exit = 1;\n\t}\n\t@rawlines = ();\n\t@lines = ();\n}\n\nexit($exit);\n\nsub top_of_kernel_tree {\n\tmy ($root) = @_;\n\n\tmy @tree_check = (\n\t\t\"COPYING\", \"CREDITS\", \"Kbuild\", \"MAINTAINERS\", \"Makefile\",\n\t\t\"README\", \"Documentation\", \"arch\", \"include\", \"drivers\",\n\t\t\"fs\", \"init\", \"ipc\", \"kernel\", \"lib\", \"scripts\",\n\t);\n\n\tforeach my $check (@tree_check) {\n\t\tif (! -e $root . '/' . $check) {\n\t\t\treturn 0;\n\t\t}\n\t}\n\treturn 1;\n    }\n\nsub parse_email {\n\tmy ($formatted_email) = @_;\n\n\tmy $name = \"\";\n\tmy $address = \"\";\n\tmy $comment = \"\";\n\n\tif ($formatted_email =~ /^(.*)<(\\S+\\@\\S+)>(.*)$/) {\n\t\t$name = $1;\n\t\t$address = $2;\n\t\t$comment = $3 if defined $3;\n\t} elsif ($formatted_email =~ /^\\s*<(\\S+\\@\\S+)>(.*)$/) {\n\t\t$address = $1;\n\t\t$comment = $2 if defined $2;\n\t} elsif ($formatted_email =~ /(\\S+\\@\\S+)(.*)$/) {\n\t\t$address = $1;\n\t\t$comment = $2 if defined $2;\n\t\t$formatted_email =~ s/$address.*$//;\n\t\t$name = $formatted_email;\n\t\t$name =~ s/^\\s+|\\s+$//g;\n\t\t$name =~ s/^\\\"|\\\"$//g;\n\t\t# If there's a name left after stripping spaces and\n\t\t# leading quotes, and the address doesn't have both\n\t\t# leading and trailing angle brackets, the address\n\t\t# is invalid. 
ie:\n\t\t#   \"joe smith joe@smith.com\" bad\n\t\t#   \"joe smith <joe@smith.com\" bad\n\t\tif ($name ne \"\" && $address !~ /^<[^>]+>$/) {\n\t\t\t$name = \"\";\n\t\t\t$address = \"\";\n\t\t\t$comment = \"\";\n\t\t}\n\t}\n\n\t$name =~ s/^\\s+|\\s+$//g;\n\t$name =~ s/^\\\"|\\\"$//g;\n\t$address =~ s/^\\s+|\\s+$//g;\n\t$address =~ s/^\\<|\\>$//g;\n\n\tif ($name =~ /[^\\w \\-]/i) { ##has \"must quote\" chars\n\t\t$name =~ s/(?<!\\\\)\"/\\\\\"/g; ##escape quotes\n\t\t$name = \"\\\"$name\\\"\";\n\t}\n\n\treturn ($name, $address, $comment);\n}\n\nsub format_email {\n\tmy ($name, $address) = @_;\n\n\tmy $formatted_email;\n\n\t$name =~ s/^\\s+|\\s+$//g;\n\t$name =~ s/^\\\"|\\\"$//g;\n\t$address =~ s/^\\s+|\\s+$//g;\n\n\tif ($name =~ /[^\\w \\-]/i) { ##has \"must quote\" chars\n\t\t$name =~ s/(?<!\\\\)\"/\\\\\"/g; ##escape quotes\n\t\t$name = \"\\\"$name\\\"\";\n\t}\n\n\tif (\"$name\" eq \"\") {\n\t\t$formatted_email = \"$address\";\n\t} else {\n\t\t$formatted_email = \"$name <$address>\";\n\t}\n\n\treturn $formatted_email;\n}\n\nsub which_conf {\n\tmy ($conf) = @_;\n\n\tforeach my $path (split(/:/, \".:$ENV{HOME}:.scripts\")) {\n\t\tif (-e \"$path/$conf\") {\n\t\t\treturn \"$path/$conf\";\n\t\t}\n\t}\n\n\treturn \"\";\n}\n\nsub expand_tabs {\n\tmy ($str) = @_;\n\n\tmy $res = '';\n\tmy $n = 0;\n\tfor my $c (split(//, $str)) {\n\t\tif ($c eq \"\\t\") {\n\t\t\t$res .= ' ';\n\t\t\t$n++;\n\t\t\tfor (; ($n % 8) != 0; $n++) {\n\t\t\t\t$res .= ' ';\n\t\t\t}\n\t\t\tnext;\n\t\t}\n\t\t$res .= $c;\n\t\t$n++;\n\t}\n\n\treturn $res;\n}\nsub copy_spacing {\n\t(my $res = shift) =~ tr/\\t/ /c;\n\treturn $res;\n}\n\nsub line_stats {\n\tmy ($line) = @_;\n\n\t# Drop the diff line leader and expand tabs\n\t$line =~ s/^.//;\n\t$line = expand_tabs($line);\n\n\t# Pick the indent from the front of the line.\n\tmy ($white) = ($line =~ /^(\\s*)/);\n\n\treturn (length($line), length($white));\n}\n\nmy $sanitise_quote = '';\n\nsub sanitise_line_reset {\n\tmy ($in_comment) = @_;\n\n\tif ($in_comment) 
{\n\t\t$sanitise_quote = '*/';\n\t} else {\n\t\t$sanitise_quote = '';\n\t}\n}\nsub sanitise_line {\n\tmy ($line) = @_;\n\n\tmy $res = '';\n\tmy $l = '';\n\n\tmy $qlen = 0;\n\tmy $off = 0;\n\tmy $c;\n\n\t# Always copy over the diff marker.\n\t$res = substr($line, 0, 1);\n\n\tfor ($off = 1; $off < length($line); $off++) {\n\t\t$c = substr($line, $off, 1);\n\n\t\t# Comments we are wacking completly including the begin\n\t\t# and end, all to $;.\n\t\tif ($sanitise_quote eq '' && substr($line, $off, 2) eq '/*') {\n\t\t\t$sanitise_quote = '*/';\n\n\t\t\tsubstr($res, $off, 2, \"$;$;\");\n\t\t\t$off++;\n\t\t\tnext;\n\t\t}\n\t\tif ($sanitise_quote eq '*/' && substr($line, $off, 2) eq '*/') {\n\t\t\t$sanitise_quote = '';\n\t\t\tsubstr($res, $off, 2, \"$;$;\");\n\t\t\t$off++;\n\t\t\tnext;\n\t\t}\n\t\tif ($sanitise_quote eq '' && substr($line, $off, 2) eq '//') {\n\t\t\t$sanitise_quote = '//';\n\n\t\t\tsubstr($res, $off, 2, $sanitise_quote);\n\t\t\t$off++;\n\t\t\tnext;\n\t\t}\n\n\t\t# A \\ in a string means ignore the next character.\n\t\tif (($sanitise_quote eq \"'\" || $sanitise_quote eq '\"') &&\n\t\t    $c eq \"\\\\\") {\n\t\t\tsubstr($res, $off, 2, 'XX');\n\t\t\t$off++;\n\t\t\tnext;\n\t\t}\n\t\t# Regular quotes.\n\t\tif ($c eq \"'\" || $c eq '\"') {\n\t\t\tif ($sanitise_quote eq '') {\n\t\t\t\t$sanitise_quote = $c;\n\n\t\t\t\tsubstr($res, $off, 1, $c);\n\t\t\t\tnext;\n\t\t\t} elsif ($sanitise_quote eq $c) {\n\t\t\t\t$sanitise_quote = '';\n\t\t\t}\n\t\t}\n\n\t\t#print \"c<$c> SQ<$sanitise_quote>\\n\";\n\t\tif ($off != 0 && $sanitise_quote eq '*/' && $c ne \"\\t\") {\n\t\t\tsubstr($res, $off, 1, $;);\n\t\t} elsif ($off != 0 && $sanitise_quote eq '//' && $c ne \"\\t\") {\n\t\t\tsubstr($res, $off, 1, $;);\n\t\t} elsif ($off != 0 && $sanitise_quote && $c ne \"\\t\") {\n\t\t\tsubstr($res, $off, 1, 'X');\n\t\t} else {\n\t\t\tsubstr($res, $off, 1, $c);\n\t\t}\n\t}\n\n\tif ($sanitise_quote eq '//') {\n\t\t$sanitise_quote = '';\n\t}\n\n\t# The pathname on a #include may be 
surrounded by '<' and '>'.\n\tif ($res =~ /^.\\s*\\#\\s*include\\s+\\<(.*)\\>/) {\n\t\tmy $clean = 'X' x length($1);\n\t\t$res =~ s@\\<.*\\>@<$clean>@;\n\n\t# The whole of a #error is a string.\n\t} elsif ($res =~ /^.\\s*\\#\\s*(?:error|warning)\\s+(.*)\\b/) {\n\t\tmy $clean = 'X' x length($1);\n\t\t$res =~ s@(\\#\\s*(?:error|warning)\\s+).*@$1$clean@;\n\t}\n\n\treturn $res;\n}\n\nsub ctx_statement_block {\n\tmy ($linenr, $remain, $off) = @_;\n\tmy $line = $linenr - 1;\n\tmy $blk = '';\n\tmy $soff = $off;\n\tmy $coff = $off - 1;\n\tmy $coff_set = 0;\n\n\tmy $loff = 0;\n\n\tmy $type = '';\n\tmy $level = 0;\n\tmy @stack = ();\n\tmy $p;\n\tmy $c;\n\tmy $len = 0;\n\n\tmy $remainder;\n\twhile (1) {\n\t\t@stack = (['', 0]) if ($#stack == -1);\n\n\t\t#warn \"CSB: blk<$blk> remain<$remain>\\n\";\n\t\t# If we are about to drop off the end, pull in more\n\t\t# context.\n\t\tif ($off >= $len) {\n\t\t\tfor (; $remain > 0; $line++) {\n\t\t\t\tlast if (!defined $lines[$line]);\n\t\t\t\tnext if ($lines[$line] =~ /^-/);\n\t\t\t\t$remain--;\n\t\t\t\t$loff = $len;\n\t\t\t\t$blk .= $lines[$line] . 
\"\\n\";\n\t\t\t\t$len = length($blk);\n\t\t\t\t$line++;\n\t\t\t\tlast;\n\t\t\t}\n\t\t\t# Bail if there is no further context.\n\t\t\t#warn \"CSB: blk<$blk> off<$off> len<$len>\\n\";\n\t\t\tif ($off >= $len) {\n\t\t\t\tlast;\n\t\t\t}\n\t\t\tif ($level == 0 && substr($blk, $off) =~ /^.\\s*#\\s*define/) {\n\t\t\t\t$level++;\n\t\t\t\t$type = '#';\n\t\t\t}\n\t\t}\n\t\t$p = $c;\n\t\t$c = substr($blk, $off, 1);\n\t\t$remainder = substr($blk, $off);\n\n\t\t#warn \"CSB: c<$c> type<$type> level<$level> remainder<$remainder> coff_set<$coff_set>\\n\";\n\n\t\t# Handle nested #if/#else.\n\t\tif ($remainder =~ /^#\\s*(?:ifndef|ifdef|if)\\s/) {\n\t\t\tpush(@stack, [ $type, $level ]);\n\t\t} elsif ($remainder =~ /^#\\s*(?:else|elif)\\b/) {\n\t\t\t($type, $level) = @{$stack[$#stack - 1]};\n\t\t} elsif ($remainder =~ /^#\\s*endif\\b/) {\n\t\t\t($type, $level) = @{pop(@stack)};\n\t\t}\n\n\t\t# Statement ends at the ';' or a close '}' at the\n\t\t# outermost level.\n\t\tif ($level == 0 && $c eq ';') {\n\t\t\tlast;\n\t\t}\n\n\t\t# An else is really a conditional as long as its not else if\n\t\tif ($level == 0 && $coff_set == 0 &&\n\t\t\t\t(!defined($p) || $p =~ /(?:\\s|\\}|\\+)/) &&\n\t\t\t\t$remainder =~ /^(else)(?:\\s|{)/ &&\n\t\t\t\t$remainder !~ /^else\\s+if\\b/) {\n\t\t\t$coff = $off + length($1) - 1;\n\t\t\t$coff_set = 1;\n\t\t\t#warn \"CSB: mark coff<$coff> soff<$soff> 1<$1>\\n\";\n\t\t\t#warn \"[\" . substr($blk, $soff, $coff - $soff + 1) . \"]\\n\";\n\t\t}\n\n\t\tif (($type eq '' || $type eq '(') && $c eq '(') {\n\t\t\t$level++;\n\t\t\t$type = '(';\n\t\t}\n\t\tif ($type eq '(' && $c eq ')') {\n\t\t\t$level--;\n\t\t\t$type = ($level != 0)? 
'(' : '';\n\n\t\t\tif ($level == 0 && $coff < $soff) {\n\t\t\t\t$coff = $off;\n\t\t\t\t$coff_set = 1;\n\t\t\t\t#warn \"CSB: mark coff<$coff>\\n\";\n\t\t\t}\n\t\t}\n\t\tif (($type eq '' || $type eq '{') && $c eq '{') {\n\t\t\t$level++;\n\t\t\t$type = '{';\n\t\t}\n\t\tif ($type eq '{' && $c eq '}') {\n\t\t\t$level--;\n\t\t\t$type = ($level != 0)? '{' : '';\n\n\t\t\tif ($level == 0) {\n\t\t\t\tif (substr($blk, $off + 1, 1) eq ';') {\n\t\t\t\t\t$off++;\n\t\t\t\t}\n\t\t\t\tlast;\n\t\t\t}\n\t\t}\n\t\t# Preprocessor commands end at the newline unless escaped.\n\t\tif ($type eq '#' && $c eq \"\\n\" && $p ne \"\\\\\") {\n\t\t\t$level--;\n\t\t\t$type = '';\n\t\t\t$off++;\n\t\t\tlast;\n\t\t}\n\t\t$off++;\n\t}\n\t# We are truly at the end, so shuffle to the next line.\n\tif ($off == $len) {\n\t\t$loff = $len + 1;\n\t\t$line++;\n\t\t$remain--;\n\t}\n\n\tmy $statement = substr($blk, $soff, $off - $soff + 1);\n\tmy $condition = substr($blk, $soff, $coff - $soff + 1);\n\n\t#warn \"STATEMENT<$statement>\\n\";\n\t#warn \"CONDITION<$condition>\\n\";\n\n\t#print \"coff<$coff> soff<$off> loff<$loff>\\n\";\n\n\treturn ($statement, $condition,\n\t\t\t$line, $remain + 1, $off - $loff + 1, $level);\n}\n\nsub statement_lines {\n\tmy ($stmt) = @_;\n\n\t# Strip the diff line prefixes and rip blank lines at start and end.\n\t$stmt =~ s/(^|\\n)./$1/g;\n\t$stmt =~ s/^\\s*//;\n\t$stmt =~ s/\\s*$//;\n\n\tmy @stmt_lines = ($stmt =~ /\\n/g);\n\n\treturn $#stmt_lines + 2;\n}\n\nsub statement_rawlines {\n\tmy ($stmt) = @_;\n\n\tmy @stmt_lines = ($stmt =~ /\\n/g);\n\n\treturn $#stmt_lines + 2;\n}\n\nsub statement_block_size {\n\tmy ($stmt) = @_;\n\n\t$stmt =~ s/(^|\\n)./$1/g;\n\t$stmt =~ s/^\\s*{//;\n\t$stmt =~ s/}\\s*$//;\n\t$stmt =~ s/^\\s*//;\n\t$stmt =~ s/\\s*$//;\n\n\tmy @stmt_lines = ($stmt =~ /\\n/g);\n\tmy @stmt_statements = ($stmt =~ /;/g);\n\n\tmy $stmt_lines = $#stmt_lines + 2;\n\tmy $stmt_statements = $#stmt_statements + 1;\n\n\tif ($stmt_lines > $stmt_statements) {\n\t\treturn 
$stmt_lines;\n\t} else {\n\t\treturn $stmt_statements;\n\t}\n}\n\nsub ctx_statement_full {\n\tmy ($linenr, $remain, $off) = @_;\n\tmy ($statement, $condition, $level);\n\n\tmy (@chunks);\n\n\t# Grab the first conditional/block pair.\n\t($statement, $condition, $linenr, $remain, $off, $level) =\n\t\t\t\tctx_statement_block($linenr, $remain, $off);\n\t#print \"F: c<$condition> s<$statement> remain<$remain>\\n\";\n\tpush(@chunks, [ $condition, $statement ]);\n\tif (!($remain > 0 && $condition =~ /^\\s*(?:\\n[+-])?\\s*(?:if|else|do)\\b/s)) {\n\t\treturn ($level, $linenr, @chunks);\n\t}\n\n\t# Pull in the following conditional/block pairs and see if they\n\t# could continue the statement.\n\tfor (;;) {\n\t\t($statement, $condition, $linenr, $remain, $off, $level) =\n\t\t\t\tctx_statement_block($linenr, $remain, $off);\n\t\t#print \"C: c<$condition> s<$statement> remain<$remain>\\n\";\n\t\tlast if (!($remain > 0 && $condition =~ /^(?:\\s*\\n[+-])*\\s*(?:else|do)\\b/s));\n\t\t#print \"C: push\\n\";\n\t\tpush(@chunks, [ $condition, $statement ]);\n\t}\n\n\treturn ($level, $linenr, @chunks);\n}\n\nsub ctx_block_get {\n\tmy ($linenr, $remain, $outer, $open, $close, $off) = @_;\n\tmy $line;\n\tmy $start = $linenr - 1;\n\tmy $blk = '';\n\tmy @o;\n\tmy @c;\n\tmy @res = ();\n\n\tmy $level = 0;\n\tmy @stack = ($level);\n\tfor ($line = $start; $remain > 0; $line++) {\n\t\tnext if ($rawlines[$line] =~ /^-/);\n\t\t$remain--;\n\n\t\t$blk .= $rawlines[$line];\n\n\t\t# Handle nested #if/#else.\n\t\tif ($lines[$line] =~ /^.\\s*#\\s*(?:ifndef|ifdef|if)\\s/) {\n\t\t\tpush(@stack, $level);\n\t\t} elsif ($lines[$line] =~ /^.\\s*#\\s*(?:else|elif)\\b/) {\n\t\t\t$level = $stack[$#stack - 1];\n\t\t} elsif ($lines[$line] =~ /^.\\s*#\\s*endif\\b/) {\n\t\t\t$level = pop(@stack);\n\t\t}\n\n\t\tforeach my $c (split(//, $lines[$line])) {\n\t\t\t##print \"C<$c>L<$level><$open$close>O<$off>\\n\";\n\t\t\tif ($off > 0) {\n\t\t\t\t$off--;\n\t\t\t\tnext;\n\t\t\t}\n\n\t\t\tif ($c eq $close && $level > 0) 
{\n\t\t\t\t$level--;\n\t\t\t\tlast if ($level == 0);\n\t\t\t} elsif ($c eq $open) {\n\t\t\t\t$level++;\n\t\t\t}\n\t\t}\n\n\t\tif (!$outer || $level <= 1) {\n\t\t\tpush(@res, $rawlines[$line]);\n\t\t}\n\n\t\tlast if ($level == 0);\n\t}\n\n\treturn ($level, @res);\n}\nsub ctx_block_outer {\n\tmy ($linenr, $remain) = @_;\n\n\tmy ($level, @r) = ctx_block_get($linenr, $remain, 1, '{', '}', 0);\n\treturn @r;\n}\nsub ctx_block {\n\tmy ($linenr, $remain) = @_;\n\n\tmy ($level, @r) = ctx_block_get($linenr, $remain, 0, '{', '}', 0);\n\treturn @r;\n}\nsub ctx_statement {\n\tmy ($linenr, $remain, $off) = @_;\n\n\tmy ($level, @r) = ctx_block_get($linenr, $remain, 0, '(', ')', $off);\n\treturn @r;\n}\nsub ctx_block_level {\n\tmy ($linenr, $remain) = @_;\n\n\treturn ctx_block_get($linenr, $remain, 0, '{', '}', 0);\n}\nsub ctx_statement_level {\n\tmy ($linenr, $remain, $off) = @_;\n\n\treturn ctx_block_get($linenr, $remain, 0, '(', ')', $off);\n}\n\nsub ctx_locate_comment {\n\tmy ($first_line, $end_line) = @_;\n\n\t# Catch a comment on the end of the line itself.\n\tmy ($current_comment) = ($rawlines[$end_line - 1] =~ m@.*(/\\*.*\\*/)\\s*(?:\\\\\\s*)?$@);\n\treturn $current_comment if (defined $current_comment);\n\n\t# Look through the context and try and figure out if there is a\n\t# comment.\n\tmy $in_comment = 0;\n\t$current_comment = '';\n\tfor (my $linenr = $first_line; $linenr < $end_line; $linenr++) {\n\t\tmy $line = $rawlines[$linenr - 1];\n\t\t#warn \"           $line\\n\";\n\t\tif ($linenr == $first_line and $line =~ m@^.\\s*\\*@) {\n\t\t\t$in_comment = 1;\n\t\t}\n\t\tif ($line =~ m@/\\*@) {\n\t\t\t$in_comment = 1;\n\t\t}\n\t\tif (!$in_comment && $current_comment ne '') {\n\t\t\t$current_comment = '';\n\t\t}\n\t\t$current_comment .= $line . 
\"\\n\" if ($in_comment);\n\t\tif ($line =~ m@\\*/@) {\n\t\t\t$in_comment = 0;\n\t\t}\n\t}\n\n\tchomp($current_comment);\n\treturn($current_comment);\n}\nsub ctx_has_comment {\n\tmy ($first_line, $end_line) = @_;\n\tmy $cmt = ctx_locate_comment($first_line, $end_line);\n\n\t##print \"LINE: $rawlines[$end_line - 1 ]\\n\";\n\t##print \"CMMT: $cmt\\n\";\n\n\treturn ($cmt ne '');\n}\n\nsub raw_line {\n\tmy ($linenr, $cnt) = @_;\n\n\tmy $offset = $linenr - 1;\n\t$cnt++;\n\n\tmy $line;\n\twhile ($cnt) {\n\t\t$line = $rawlines[$offset++];\n\t\tnext if (defined($line) && $line =~ /^-/);\n\t\t$cnt--;\n\t}\n\n\treturn $line;\n}\n\nsub cat_vet {\n\tmy ($vet) = @_;\n\tmy ($res, $coded);\n\n\t$res = '';\n\twhile ($vet =~ /([^[:cntrl:]]*)([[:cntrl:]]|$)/g) {\n\t\t$res .= $1;\n\t\tif ($2 ne '') {\n\t\t\t$coded = sprintf(\"^%c\", unpack('C', $2) + 64);\n\t\t\t$res .= $coded;\n\t\t}\n\t}\n\t$res =~ s/$/\\$/;\n\n\treturn $res;\n}\n\nmy $av_preprocessor = 0;\nmy $av_pending;\nmy @av_paren_type;\nmy $av_pend_colon;\n\nsub annotate_reset {\n\t$av_preprocessor = 0;\n\t$av_pending = '_';\n\t@av_paren_type = ('E');\n\t$av_pend_colon = 'O';\n}\n\nsub annotate_values {\n\tmy ($stream, $type) = @_;\n\n\tmy $res;\n\tmy $var = '_' x length($stream);\n\tmy $cur = $stream;\n\n\tprint \"$stream\\n\" if ($dbg_values > 1);\n\n\twhile (length($cur)) {\n\t\t@av_paren_type = ('E') if ($#av_paren_type < 0);\n\t\tprint \" <\" . 
join('', @av_paren_type) .\n\t\t\t\t\"> <$type> <$av_pending>\" if ($dbg_values > 1);\n\t\tif ($cur =~ /^(\\s+)/o) {\n\t\t\tprint \"WS($1)\\n\" if ($dbg_values > 1);\n\t\t\tif ($1 =~ /\\n/ && $av_preprocessor) {\n\t\t\t\t$type = pop(@av_paren_type);\n\t\t\t\t$av_preprocessor = 0;\n\t\t\t}\n\n\t\t} elsif ($cur =~ /^(\\(\\s*$Type\\s*)\\)/ && $av_pending eq '_') {\n\t\t\tprint \"CAST($1)\\n\" if ($dbg_values > 1);\n\t\t\tpush(@av_paren_type, $type);\n\t\t\t$type = 'c';\n\n\t\t} elsif ($cur =~ /^($Type)\\s*(?:$Ident|,|\\)|\\(|\\s*$)/) {\n\t\t\tprint \"DECLARE($1)\\n\" if ($dbg_values > 1);\n\t\t\t$type = 'T';\n\n\t\t} elsif ($cur =~ /^($Modifier)\\s*/) {\n\t\t\tprint \"MODIFIER($1)\\n\" if ($dbg_values > 1);\n\t\t\t$type = 'T';\n\n\t\t} elsif ($cur =~ /^(\\#\\s*define\\s*$Ident)(\\(?)/o) {\n\t\t\tprint \"DEFINE($1,$2)\\n\" if ($dbg_values > 1);\n\t\t\t$av_preprocessor = 1;\n\t\t\tpush(@av_paren_type, $type);\n\t\t\tif ($2 ne '') {\n\t\t\t\t$av_pending = 'N';\n\t\t\t}\n\t\t\t$type = 'E';\n\n\t\t} elsif ($cur =~ /^(\\#\\s*(?:undef\\s*$Ident|include\\b))/o) {\n\t\t\tprint \"UNDEF($1)\\n\" if ($dbg_values > 1);\n\t\t\t$av_preprocessor = 1;\n\t\t\tpush(@av_paren_type, $type);\n\n\t\t} elsif ($cur =~ /^(\\#\\s*(?:ifdef|ifndef|if))/o) {\n\t\t\tprint \"PRE_START($1)\\n\" if ($dbg_values > 1);\n\t\t\t$av_preprocessor = 1;\n\n\t\t\tpush(@av_paren_type, $type);\n\t\t\tpush(@av_paren_type, $type);\n\t\t\t$type = 'E';\n\n\t\t} elsif ($cur =~ /^(\\#\\s*(?:else|elif))/o) {\n\t\t\tprint \"PRE_RESTART($1)\\n\" if ($dbg_values > 1);\n\t\t\t$av_preprocessor = 1;\n\n\t\t\tpush(@av_paren_type, $av_paren_type[$#av_paren_type]);\n\n\t\t\t$type = 'E';\n\n\t\t} elsif ($cur =~ /^(\\#\\s*(?:endif))/o) {\n\t\t\tprint \"PRE_END($1)\\n\" if ($dbg_values > 1);\n\n\t\t\t$av_preprocessor = 1;\n\n\t\t\t# Assume all arms of the conditional end as this\n\t\t\t# one does, and continue as if the #endif was not here.\n\t\t\tpop(@av_paren_type);\n\t\t\tpush(@av_paren_type, $type);\n\t\t\t$type = 
'E';\n\n\t\t} elsif ($cur =~ /^(\\\\\\n)/o) {\n\t\t\tprint \"PRECONT($1)\\n\" if ($dbg_values > 1);\n\n\t\t} elsif ($cur =~ /^(__attribute__)\\s*\\(?/o) {\n\t\t\tprint \"ATTR($1)\\n\" if ($dbg_values > 1);\n\t\t\t$av_pending = $type;\n\t\t\t$type = 'N';\n\n\t\t} elsif ($cur =~ /^(sizeof)\\s*(\\()?/o) {\n\t\t\tprint \"SIZEOF($1)\\n\" if ($dbg_values > 1);\n\t\t\tif (defined $2) {\n\t\t\t\t$av_pending = 'V';\n\t\t\t}\n\t\t\t$type = 'N';\n\n\t\t} elsif ($cur =~ /^(if|while|for)\\b/o) {\n\t\t\tprint \"COND($1)\\n\" if ($dbg_values > 1);\n\t\t\t$av_pending = 'E';\n\t\t\t$type = 'N';\n\n\t\t} elsif ($cur =~/^(case)/o) {\n\t\t\tprint \"CASE($1)\\n\" if ($dbg_values > 1);\n\t\t\t$av_pend_colon = 'C';\n\t\t\t$type = 'N';\n\n\t\t} elsif ($cur =~/^(return|else|goto|typeof|__typeof__)\\b/o) {\n\t\t\tprint \"KEYWORD($1)\\n\" if ($dbg_values > 1);\n\t\t\t$type = 'N';\n\n\t\t} elsif ($cur =~ /^(\\()/o) {\n\t\t\tprint \"PAREN('$1')\\n\" if ($dbg_values > 1);\n\t\t\tpush(@av_paren_type, $av_pending);\n\t\t\t$av_pending = '_';\n\t\t\t$type = 'N';\n\n\t\t} elsif ($cur =~ /^(\\))/o) {\n\t\t\tmy $new_type = pop(@av_paren_type);\n\t\t\tif ($new_type ne '_') {\n\t\t\t\t$type = $new_type;\n\t\t\t\tprint \"PAREN('$1') -> $type\\n\"\n\t\t\t\t\t\t\tif ($dbg_values > 1);\n\t\t\t} else {\n\t\t\t\tprint \"PAREN('$1')\\n\" if ($dbg_values > 1);\n\t\t\t}\n\n\t\t} elsif ($cur =~ /^($Ident)\\s*\\(/o) {\n\t\t\tprint \"FUNC($1)\\n\" if ($dbg_values > 1);\n\t\t\t$type = 'V';\n\t\t\t$av_pending = 'V';\n\n\t\t} elsif ($cur =~ /^($Ident\\s*):(?:\\s*\\d+\\s*(,|=|;))?/) {\n\t\t\tif (defined $2 && $type eq 'C' || $type eq 'T') {\n\t\t\t\t$av_pend_colon = 'B';\n\t\t\t} elsif ($type eq 'E') {\n\t\t\t\t$av_pend_colon = 'L';\n\t\t\t}\n\t\t\tprint \"IDENT_COLON($1,$type>$av_pend_colon)\\n\" if ($dbg_values > 1);\n\t\t\t$type = 'V';\n\n\t\t} elsif ($cur =~ /^($Ident|$Constant)/o) {\n\t\t\tprint \"IDENT($1)\\n\" if ($dbg_values > 1);\n\t\t\t$type = 'V';\n\n\t\t} elsif ($cur =~ /^($Assignment)/o) {\n\t\t\tprint 
\"ASSIGN($1)\\n\" if ($dbg_values > 1);\n\t\t\t$type = 'N';\n\n\t\t} elsif ($cur =~/^(;|{|})/) {\n\t\t\tprint \"END($1)\\n\" if ($dbg_values > 1);\n\t\t\t$type = 'E';\n\t\t\t$av_pend_colon = 'O';\n\n\t\t} elsif ($cur =~/^(,)/) {\n\t\t\tprint \"COMMA($1)\\n\" if ($dbg_values > 1);\n\t\t\t$type = 'C';\n\n\t\t} elsif ($cur =~ /^(\\?)/o) {\n\t\t\tprint \"QUESTION($1)\\n\" if ($dbg_values > 1);\n\t\t\t$type = 'N';\n\n\t\t} elsif ($cur =~ /^(:)/o) {\n\t\t\tprint \"COLON($1,$av_pend_colon)\\n\" if ($dbg_values > 1);\n\n\t\t\tsubstr($var, length($res), 1, $av_pend_colon);\n\t\t\tif ($av_pend_colon eq 'C' || $av_pend_colon eq 'L') {\n\t\t\t\t$type = 'E';\n\t\t\t} else {\n\t\t\t\t$type = 'N';\n\t\t\t}\n\t\t\t$av_pend_colon = 'O';\n\n\t\t} elsif ($cur =~ /^(\\[)/o) {\n\t\t\tprint \"CLOSE($1)\\n\" if ($dbg_values > 1);\n\t\t\t$type = 'N';\n\n\t\t} elsif ($cur =~ /^(-(?![->])|\\+(?!\\+)|\\*|\\&\\&|\\&)/o) {\n\t\t\tmy $variant;\n\n\t\t\tprint \"OPV($1)\\n\" if ($dbg_values > 1);\n\t\t\tif ($type eq 'V') {\n\t\t\t\t$variant = 'B';\n\t\t\t} else {\n\t\t\t\t$variant = 'U';\n\t\t\t}\n\n\t\t\tsubstr($var, length($res), 1, $variant);\n\t\t\t$type = 'N';\n\n\t\t} elsif ($cur =~ /^($Operators)/o) {\n\t\t\tprint \"OP($1)\\n\" if ($dbg_values > 1);\n\t\t\tif ($1 ne '++' && $1 ne '--') {\n\t\t\t\t$type = 'N';\n\t\t\t}\n\n\t\t} elsif ($cur =~ /(^.)/o) {\n\t\t\tprint \"C($1)\\n\" if ($dbg_values > 1);\n\t\t}\n\t\tif (defined $1) {\n\t\t\t$cur = substr($cur, length($1));\n\t\t\t$res .= $type x length($1);\n\t\t}\n\t}\n\n\treturn ($res, $var);\n}\n\nsub possible {\n\tmy ($possible, $line) = @_;\n\tmy $notPermitted = qr{(?:\n\t\t^(?:\n\t\t\t$Modifier|\n\t\t\t$Storage|\n\t\t\t$Type|\n\t\t\tDEFINE_\\S+\n\t\t)$|\n\t\t^(?:\n\t\t\tgoto|\n\t\t\treturn|\n\t\t\tcase|\n\t\t\telse|\n\t\t\tasm|__asm__|\n\t\t\tdo|\n\t\t\t\\#|\n\t\t\t\\#\\#|\n\t\t)(?:\\s|$)|\n\t\t^(?:typedef|struct|enum)\\b\n\t    )}x;\n\twarn \"CHECK<$possible> ($line)\\n\" if ($dbg_possible > 2);\n\tif ($possible !~ $notPermitted) 
{\n\t\t# Check for modifiers.\n\t\t$possible =~ s/\\s*$Storage\\s*//g;\n\t\t$possible =~ s/\\s*$Sparse\\s*//g;\n\t\tif ($possible =~ /^\\s*$/) {\n\n\t\t} elsif ($possible =~ /\\s/) {\n\t\t\t$possible =~ s/\\s*$Type\\s*//g;\n\t\t\tfor my $modifier (split(' ', $possible)) {\n\t\t\t\tif ($modifier !~ $notPermitted) {\n\t\t\t\t\twarn \"MODIFIER: $modifier ($possible) ($line)\\n\" if ($dbg_possible);\n\t\t\t\t\tpush(@modifierList, $modifier);\n\t\t\t\t}\n\t\t\t}\n\n\t\t} else {\n\t\t\twarn \"POSSIBLE: $possible ($line)\\n\" if ($dbg_possible);\n\t\t\tpush(@typeList, $possible);\n\t\t}\n\t\tbuild_types();\n\t} else {\n\t\twarn \"NOTPOSS: $possible ($line)\\n\" if ($dbg_possible > 1);\n\t}\n}\n\nmy $prefix = '';\n\nsub show_type {\n       return !defined $ignore_type{$_[0]};\n}\n\nsub report {\n\tif (!show_type($_[1]) ||\n\t    (defined $tst_only && $_[2] !~ /\\Q$tst_only\\E/)) {\n\t\treturn 0;\n\t}\n\tmy $line;\n\tif ($show_types) {\n\t\t$line = \"$prefix$_[0]:$_[1]: $_[2]\\n\";\n\t} else {\n\t\t$line = \"$prefix$_[0]: $_[2]\\n\";\n\t}\n\t$line = (split('\\n', $line))[0] . \"\\n\" if ($terse);\n\n\tpush(our @report, $line);\n\n\treturn 1;\n}\nsub report_dump {\n\tour @report;\n}\n\nsub ERROR {\n\tif (report(\"ERROR\", $_[0], $_[1])) {\n\t\tour $clean = 0;\n\t\tour $cnt_error++;\n\t}\n}\nsub WARN {\n\tif (report(\"WARNING\", $_[0], $_[1])) {\n\t\tour $clean = 0;\n\t\tour $cnt_warn++;\n\t}\n}\nsub CHK {\n\tif ($check && report(\"CHECK\", $_[0], $_[1])) {\n\t\tour $clean = 0;\n\t\tour $cnt_chk++;\n\t}\n}\n\nsub check_absolute_file {\n\tmy ($absolute, $herecurr) = @_;\n\tmy $file = $absolute;\n\n\t##print \"absolute<$absolute>\\n\";\n\n\t# See if any suffix of this path is a path within the tree.\n\twhile ($file =~ s@^[^/]*/@@) {\n\t\tif (-f \"$root/$file\") {\n\t\t\t##print \"file<$file>\\n\";\n\t\t\tlast;\n\t\t}\n\t}\n\tif (! 
-f _)  {\n\t\treturn 0;\n\t}\n\n\t# It is, so see if the prefix is acceptable.\n\tmy $prefix = $absolute;\n\tsubstr($prefix, -length($file)) = '';\n\n\t##print \"prefix<$prefix>\\n\";\n\tif ($prefix ne \".../\") {\n\t\tWARN(\"USE_RELATIVE_PATH\",\n\t\t     \"use relative pathname instead of absolute in changelog text\\n\" . $herecurr);\n\t}\n}\n\nsub process {\n\tmy $filename = shift;\n\n\tmy $linenr=0;\n\tmy $prevline=\"\";\n\tmy $prevrawline=\"\";\n\tmy $stashline=\"\";\n\tmy $stashrawline=\"\";\n\n\tmy $length;\n\tmy $indent;\n\tmy $previndent=0;\n\tmy $stashindent=0;\n\n\tour $clean = 1;\n\tmy $signoff = 0;\n\tmy $is_patch = 0;\n\n\tmy $in_header_lines = 1;\n\tmy $in_commit_log = 0;\t\t#Scanning lines before patch\n\n\tour @report = ();\n\tour $cnt_lines = 0;\n\tour $cnt_error = 0;\n\tour $cnt_warn = 0;\n\tour $cnt_chk = 0;\n\n\t# Trace the real file/line as we go.\n\tmy $realfile = '';\n\tmy $realline = 0;\n\tmy $realcnt = 0;\n\tmy $here = '';\n\tmy $in_comment = 0;\n\tmy $comment_edge = 0;\n\tmy $first_line = 0;\n\tmy $p1_prefix = '';\n\n\tmy $prev_values = 'E';\n\n\t# suppression flags\n\tmy %suppress_ifbraces;\n\tmy %suppress_whiletrailers;\n\tmy %suppress_export;\n\tmy $suppress_statement = 0;\n\n\t# Pre-scan the patch sanitizing the lines.\n\t# Pre-scan the patch looking for any __setup documentation.\n\t#\n\tmy @setup_docs = ();\n\tmy $setup_docs = 0;\n\n\tsanitise_line_reset();\n\tmy $line;\n\tforeach my $rawline (@rawlines) {\n\t\t$linenr++;\n\t\t$line = $rawline;\n\n\t\tif ($rawline=~/^\\+\\+\\+\\s+(\\S+)/) {\n\t\t\t$setup_docs = 0;\n\t\t\tif ($1 =~ m@Documentation/kernel-parameters.txt$@) {\n\t\t\t\t$setup_docs = 1;\n\t\t\t}\n\t\t\t#next;\n\t\t}\n\t\tif ($rawline=~/^\\@\\@ -\\d+(?:,\\d+)? \\+(\\d+)(,(\\d+))? \\@\\@/) {\n\t\t\t$realline=$1-1;\n\t\t\tif (defined $2) {\n\t\t\t\t$realcnt=$3+1;\n\t\t\t} else {\n\t\t\t\t$realcnt=1+1;\n\t\t\t}\n\t\t\t$in_comment = 0;\n\n\t\t\t# Guestimate if this is a continuing comment.  
Run\n\t\t\t# the context looking for a comment \"edge\".  If this\n\t\t\t# edge is a close comment then we must be in a comment\n\t\t\t# at context start.\n\t\t\tmy $edge;\n\t\t\tmy $cnt = $realcnt;\n\t\t\tfor (my $ln = $linenr + 1; $cnt > 0; $ln++) {\n\t\t\t\tnext if (defined $rawlines[$ln - 1] &&\n\t\t\t\t\t $rawlines[$ln - 1] =~ /^-/);\n\t\t\t\t$cnt--;\n\t\t\t\t#print \"RAW<$rawlines[$ln - 1]>\\n\";\n\t\t\t\tlast if (!defined $rawlines[$ln - 1]);\n\t\t\t\tif ($rawlines[$ln - 1] =~ m@(/\\*|\\*/)@ &&\n\t\t\t\t    $rawlines[$ln - 1] !~ m@\"[^\"]*(?:/\\*|\\*/)[^\"]*\"@) {\n\t\t\t\t\t($edge) = $1;\n\t\t\t\t\tlast;\n\t\t\t\t}\n\t\t\t}\n\t\t\tif (defined $edge && $edge eq '*/') {\n\t\t\t\t$in_comment = 1;\n\t\t\t}\n\n\t\t\t# Guestimate if this is a continuing comment.  If this\n\t\t\t# is the start of a diff block and this line starts\n\t\t\t# ' *' then it is very likely a comment.\n\t\t\tif (!defined $edge &&\n\t\t\t    $rawlines[$linenr] =~ m@^.\\s*(?:\\*\\*+| \\*)(?:\\s|$)@)\n\t\t\t{\n\t\t\t\t$in_comment = 1;\n\t\t\t}\n\n\t\t\t##print \"COMMENT:$in_comment edge<$edge> $rawline\\n\";\n\t\t\tsanitise_line_reset($in_comment);\n\n\t\t} elsif ($realcnt && $rawline =~ /^(?:\\+| |$)/) {\n\t\t\t# Standardise the strings and chars within the input to\n\t\t\t# simplify matching -- only bother with positive lines.\n\t\t\t$line = sanitise_line($rawline);\n\t\t}\n\t\tpush(@lines, $line);\n\n\t\tif ($realcnt > 1) {\n\t\t\t$realcnt-- if ($line =~ /^(?:\\+| |$)/);\n\t\t} else {\n\t\t\t$realcnt = 0;\n\t\t}\n\n\t\t#print \"==>$rawline\\n\";\n\t\t#print \"-->$line\\n\";\n\n\t\tif ($setup_docs && $line =~ /^\\+/) {\n\t\t\tpush(@setup_docs, $line);\n\t\t}\n\t}\n\n\t$prefix = '';\n\n\t$realcnt = 0;\n\t$linenr = 0;\n\tforeach my $line (@lines) {\n\t\t$linenr++;\n\n\t\tmy $rawline = $rawlines[$linenr - 1];\n\n#extract the line range in the file after the patch is applied\n\t\tif ($line=~/^\\@\\@ -\\d+(?:,\\d+)? \\+(\\d+)(,(\\d+))? 
\\@\\@/) {\n\t\t\t$is_patch = 1;\n\t\t\t$first_line = $linenr + 1;\n\t\t\t$realline=$1-1;\n\t\t\tif (defined $2) {\n\t\t\t\t$realcnt=$3+1;\n\t\t\t} else {\n\t\t\t\t$realcnt=1+1;\n\t\t\t}\n\t\t\tannotate_reset();\n\t\t\t$prev_values = 'E';\n\n\t\t\t%suppress_ifbraces = ();\n\t\t\t%suppress_whiletrailers = ();\n\t\t\t%suppress_export = ();\n\t\t\t$suppress_statement = 0;\n\t\t\tnext;\n\n# track the line number as we move through the hunk, note that\n# new versions of GNU diff omit the leading space on completely\n# blank context lines so we need to count that too.\n\t\t} elsif ($line =~ /^( |\\+|$)/) {\n\t\t\t$realline++;\n\t\t\t$realcnt-- if ($realcnt != 0);\n\n\t\t\t# Measure the line length and indent.\n\t\t\t($length, $indent) = line_stats($rawline);\n\n\t\t\t# Track the previous line.\n\t\t\t($prevline, $stashline) = ($stashline, $line);\n\t\t\t($previndent, $stashindent) = ($stashindent, $indent);\n\t\t\t($prevrawline, $stashrawline) = ($stashrawline, $rawline);\n\n\t\t\t#warn \"line<$line>\\n\";\n\n\t\t} elsif ($realcnt == 1) {\n\t\t\t$realcnt--;\n\t\t}\n\n\t\tmy $hunk_line = ($realcnt != 0);\n\n#make up the handle for any error we report on this line\n\t\t$prefix = \"$filename:$realline: \" if ($emacs && $file);\n\t\t$prefix = \"$filename:$linenr: \" if ($emacs && !$file);\n\n\t\t$here = \"#$linenr: \" if (!$file);\n\t\t$here = \"#$realline: \" if ($file);\n\n\t\t# extract the filename as it passes\n\t\tif ($line =~ /^diff --git.*?(\\S+)$/) {\n\t\t\t$realfile = $1;\n\t\t\t$realfile =~ s@^([^/]*)/@@;\n\t\t\t$in_commit_log = 0;\n\t\t} elsif ($line =~ /^\\+\\+\\+\\s+(\\S+)/) {\n\t\t\t$realfile = $1;\n\t\t\t$realfile =~ s@^([^/]*)/@@;\n\t\t\t$in_commit_log = 0;\n\n\t\t\t$p1_prefix = $1;\n\t\t\tif (!$file && $tree && $p1_prefix ne '' &&\n\t\t\t    -e \"$root/$p1_prefix\") {\n\t\t\t\tWARN(\"PATCH_PREFIX\",\n\t\t\t\t     \"patch prefix '$p1_prefix' exists, appears to be a -p0 patch\\n\");\n\t\t\t}\n\n\t\t\tif ($realfile =~ m@^include/asm/@) 
{\n\t\t\t\tERROR(\"MODIFIED_INCLUDE_ASM\",\n\t\t\t\t      \"do not modify files in include/asm, change architecture specific files in include/asm-<architecture>\\n\" . \"$here$rawline\\n\");\n\t\t\t}\n\t\t\tnext;\n\t\t}\n\n\t\t$here .= \"FILE: $realfile:$realline:\" if ($realcnt != 0);\n\n\t\tmy $hereline = \"$here\\n$rawline\\n\";\n\t\tmy $herecurr = \"$here\\n$rawline\\n\";\n\t\tmy $hereprev = \"$here\\n$prevrawline\\n$rawline\\n\";\n\n\t\t$cnt_lines++ if ($realcnt != 0);\n\n# Check for incorrect file permissions\n\t\tif ($line =~ /^new (file )?mode.*[7531]\\d{0,2}$/) {\n\t\t\tmy $permhere = $here . \"FILE: $realfile\\n\";\n\t\t\tif ($realfile =~ /(Makefile|Kconfig|\\.c|\\.h|\\.S|\\.tmpl)$/) {\n\t\t\t\tERROR(\"EXECUTE_PERMISSIONS\",\n\t\t\t\t      \"do not set execute permissions for source files\\n\" . $permhere);\n\t\t\t}\n\t\t}\n\n# Check the patch for a signoff:\n\t\tif ($line =~ /^\\s*signed-off-by:/i) {\n\t\t\t$signoff++;\n\t\t\t$in_commit_log = 0;\n\t\t}\n\n# Check signature styles\n\t\tif (!$in_header_lines &&\n\t\t    $line =~ /^(\\s*)($signature_tags)(\\s*)(.*)/) {\n\t\t\tmy $space_before = $1;\n\t\t\tmy $sign_off = $2;\n\t\t\tmy $space_after = $3;\n\t\t\tmy $email = $4;\n\t\t\tmy $ucfirst_sign_off = ucfirst(lc($sign_off));\n\n\t\t\tif (defined $space_before && $space_before ne \"\") {\n\t\t\t\tWARN(\"BAD_SIGN_OFF\",\n\t\t\t\t     \"Do not use whitespace before $ucfirst_sign_off\\n\" . $herecurr);\n\t\t\t}\n\t\t\tif ($sign_off =~ /-by:$/i && $sign_off ne $ucfirst_sign_off) {\n\t\t\t\tWARN(\"BAD_SIGN_OFF\",\n\t\t\t\t     \"'$ucfirst_sign_off' is the preferred signature form\\n\" . $herecurr);\n\t\t\t}\n\t\t\tif (!defined $space_after || $space_after ne \" \") {\n\t\t\t\tWARN(\"BAD_SIGN_OFF\",\n\t\t\t\t     \"Use a single space after $ucfirst_sign_off\\n\" . 
$herecurr);\n\t\t\t}\n\n\t\t\tmy ($email_name, $email_address, $comment) = parse_email($email);\n\t\t\tmy $suggested_email = format_email(($email_name, $email_address));\n\t\t\tif ($suggested_email eq \"\") {\n\t\t\t\tERROR(\"BAD_SIGN_OFF\",\n\t\t\t\t      \"Unrecognized email address: '$email'\\n\" . $herecurr);\n\t\t\t} else {\n\t\t\t\tmy $dequoted = $suggested_email;\n\t\t\t\t$dequoted =~ s/^\"//;\n\t\t\t\t$dequoted =~ s/\" </ </;\n\t\t\t\t# Don't force email to have quotes\n\t\t\t\t# Allow just an angle bracketed address\n\t\t\t\tif (\"$dequoted$comment\" ne $email &&\n\t\t\t\t    \"<$email_address>$comment\" ne $email &&\n\t\t\t\t    \"$suggested_email$comment\" ne $email) {\n\t\t\t\t\tWARN(\"BAD_SIGN_OFF\",\n\t\t\t\t\t     \"email address '$email' might be better as '$suggested_email$comment'\\n\" . $herecurr);\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n# Check for wrappage within a valid hunk of the file\n\t\tif ($realcnt != 0 && $line !~ m{^(?:\\+|-| |\\\\ No newline|$)}) {\n\t\t\tERROR(\"CORRUPTED_PATCH\",\n\t\t\t      \"patch seems to be corrupt (line wrapped?)\\n\" .\n\t\t\t\t$herecurr) if (!$emitted_corrupt++);\n\t\t}\n\n# Check for absolute kernel paths.\n\t\tif ($tree) {\n\t\t\twhile ($line =~ m{(?:^|\\s)(/\\S*)}g) {\n\t\t\t\tmy $file = $1;\n\n\t\t\t\tif ($file =~ m{^(.*?)(?::\\d+)+:?$} &&\n\t\t\t\t    check_absolute_file($1, $herecurr)) {\n\t\t\t\t\t#\n\t\t\t\t} else {\n\t\t\t\t\tcheck_absolute_file($file, $herecurr);\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n# UTF-8 regex found at http://www.w3.org/International/questions/qa-forms-utf-8.en.php\n\t\tif (($realfile =~ /^$/ || $line =~ /^\\+/) &&\n\t\t    $rawline !~ m/^$UTF8*$/) {\n\t\t\tmy ($utf8_prefix) = ($rawline =~ /^($UTF8*)/);\n\n\t\t\tmy $blank = copy_spacing($rawline);\n\t\t\tmy $ptr = substr($blank, 0, length($utf8_prefix)) . \"^\";\n\t\t\tmy $hereptr = \"$hereline$ptr\\n\";\n\n\t\t\tCHK(\"INVALID_UTF8\",\n\t\t\t    \"Invalid UTF-8, patch and commit message should be encoded in UTF-8\\n\" . 
$hereptr);\n\t\t}\n\n# Check if it's the start of a commit log\n# (not a header line and we haven't seen the patch filename)\n\t\tif ($in_header_lines && $realfile =~ /^$/ &&\n\t\t    $rawline !~ /^(commit\\b|from\\b|[\\w-]+:).+$/i) {\n\t\t\t$in_header_lines = 0;\n\t\t\t$in_commit_log = 1;\n\t\t}\n\n# Still not yet in a patch, check for any UTF-8\n\t\tif ($in_commit_log && $realfile =~ /^$/ &&\n\t\t    $rawline =~ /$NON_ASCII_UTF8/) {\n\t\t\tCHK(\"UTF8_BEFORE_PATCH\",\n\t\t\t    \"8-bit UTF-8 used in possible commit log\\n\" . $herecurr);\n\t\t}\n\n# ignore non-hunk lines and lines being removed\n\t\tnext if (!$hunk_line || $line =~ /^-/);\n\n#trailing whitespace\n\t\tif ($line =~ /^\\+.*\\015/) {\n\t\t\tmy $herevet = \"$here\\n\" . cat_vet($rawline) . \"\\n\";\n\t\t\tERROR(\"DOS_LINE_ENDINGS\",\n\t\t\t      \"DOS line endings\\n\" . $herevet);\n\n\t\t} elsif ($rawline =~ /^\\+.*\\S\\s+$/ || $rawline =~ /^\\+\\s+$/) {\n\t\t\tmy $herevet = \"$here\\n\" . cat_vet($rawline) . \"\\n\";\n\t\t\tERROR(\"TRAILING_WHITESPACE\",\n\t\t\t      \"trailing whitespace\\n\" . 
$herevet);\n\t\t\t$rpt_cleaners = 1;\n\t\t}\n\n# check for Kconfig help text having a real description\n# Only applies when adding the entry originally, after that we do not have\n# sufficient context to determine whether it is indeed long enough.\n\t\tif ($realfile =~ /Kconfig/ &&\n\t\t    $line =~ /.\\s*config\\s+/) {\n\t\t\tmy $length = 0;\n\t\t\tmy $cnt = $realcnt;\n\t\t\tmy $ln = $linenr + 1;\n\t\t\tmy $f;\n\t\t\tmy $is_start = 0;\n\t\t\tmy $is_end = 0;\n\t\t\tfor (; $cnt > 0 && defined $lines[$ln - 1]; $ln++) {\n\t\t\t\t$f = $lines[$ln - 1];\n\t\t\t\t$cnt-- if ($lines[$ln - 1] !~ /^-/);\n\t\t\t\t$is_end = $lines[$ln - 1] =~ /^\\+/;\n\n\t\t\t\tnext if ($f =~ /^-/);\n\n\t\t\t\tif ($lines[$ln - 1] =~ /.\\s*(?:bool|tristate)\\s*\\\"/) {\n\t\t\t\t\t$is_start = 1;\n\t\t\t\t} elsif ($lines[$ln - 1] =~ /.\\s*(?:---)?help(?:---)?$/) {\n\t\t\t\t\t$length = -1;\n\t\t\t\t}\n\n\t\t\t\t$f =~ s/^.//;\n\t\t\t\t$f =~ s/#.*//;\n\t\t\t\t$f =~ s/^\\s+//;\n\t\t\t\tnext if ($f =~ /^$/);\n\t\t\t\tif ($f =~ /^\\s*config\\s/) {\n\t\t\t\t\t$is_end = 1;\n\t\t\t\t\tlast;\n\t\t\t\t}\n\t\t\t\t$length++;\n\t\t\t}\n\t\t\tWARN(\"CONFIG_DESCRIPTION\",\n\t\t\t     \"please write a paragraph that describes the config symbol fully\\n\" . $herecurr) if ($is_start && $is_end && $length < 4);\n\t\t\t#print \"is_start<$is_start> is_end<$is_end> length<$length>\\n\";\n\t\t}\n\n\t\tif (($realfile =~ /Makefile.*/ || $realfile =~ /Kbuild.*/) &&\n\t\t    ($line =~ /\\+(EXTRA_[A-Z]+FLAGS).*/)) {\n\t\t\tmy $flag = $1;\n\t\t\tmy $replacement = {\n\t\t\t\t'EXTRA_AFLAGS' =>   'asflags-y',\n\t\t\t\t'EXTRA_CFLAGS' =>   'ccflags-y',\n\t\t\t\t'EXTRA_CPPFLAGS' => 'cppflags-y',\n\t\t\t\t'EXTRA_LDFLAGS' =>  'ldflags-y',\n\t\t\t};\n\n\t\t\tWARN(\"DEPRECATED_VARIABLE\",\n\t\t\t     \"Use of $flag is deprecated, please use \\`$replacement->{$flag} instead.\\n\" . 
$herecurr) if ($replacement->{$flag});\n\t\t}\n\n# check we are in a valid source file if not then ignore this hunk\n\t\tnext if ($realfile !~ /\\.(h|c|s|S|pl|sh)$/);\n\n#80 column limit\n\t\tif ($line =~ /^\\+/ && $prevrawline !~ /\\/\\*\\*/ &&\n\t\t    $rawline !~ /^.\\s*\\*\\s*\\@$Ident\\s/ &&\n\t\t    !($line =~ /^\\+\\s*$logFunctions\\s*\\(\\s*(?:(KERN_\\S+\\s*|[^\"]*))?\"[X\\t]*\"\\s*(?:|,|\\)\\s*;)\\s*$/ ||\n\t\t    $line =~ /^\\+\\s*\"[^\"]*\"\\s*(?:\\s*|,|\\)\\s*;)\\s*$/) &&\n\t\t    $length > 80)\n\t\t{\n\t\t\tWARN(\"LONG_LINE\",\n\t\t\t     \"line over 80 characters\\n\" . $herecurr);\n\t\t}\n\n# check for spaces before a quoted newline\n\t\tif ($rawline =~ /^.*\\\".*\\s\\\\n/) {\n\t\t\tWARN(\"QUOTED_WHITESPACE_BEFORE_NEWLINE\",\n\t\t\t     \"unnecessary whitespace before a quoted newline\\n\" . $herecurr);\n\t\t}\n\n# check for adding lines without a newline.\n\t\tif ($line =~ /^\\+/ && defined $lines[$linenr] && $lines[$linenr] =~ /^\\\\ No newline at end of file/) {\n\t\t\tWARN(\"MISSING_EOF_NEWLINE\",\n\t\t\t     \"adding a line without newline at end of file\\n\" . $herecurr);\n\t\t}\n\n# Blackfin: use hi/lo macros\n\t\tif ($realfile =~ m@arch/blackfin/.*\\.S$@) {\n\t\t\tif ($line =~ /\\.[lL][[:space:]]*=.*&[[:space:]]*0x[fF][fF][fF][fF]/) {\n\t\t\t\tmy $herevet = \"$here\\n\" . cat_vet($line) . \"\\n\";\n\t\t\t\tERROR(\"LO_MACRO\",\n\t\t\t\t      \"use the LO() macro, not (... & 0xFFFF)\\n\" . $herevet);\n\t\t\t}\n\t\t\tif ($line =~ /\\.[hH][[:space:]]*=.*>>[[:space:]]*16/) {\n\t\t\t\tmy $herevet = \"$here\\n\" . cat_vet($line) . \"\\n\";\n\t\t\t\tERROR(\"HI_MACRO\",\n\t\t\t\t      \"use the HI() macro, not (... >> 16)\\n\" . $herevet);\n\t\t\t}\n\t\t}\n\n# check we are in a valid source file C or perl if not then ignore this hunk\n\t\tnext if ($realfile !~ /\\.(h|c|pl)$/);\n\n\n        if ($rawline =~ /^\\+/ && $rawline =~ /\\t/) {\n\t\t\tmy $herevet = \"$here\\n\" . cat_vet($rawline) . 
\"\\n\";\n\t\t\tERROR(\"TABS\",\n\t\t\t      \"please, don't use tabs\\n\" . $herevet);\n\t\t\t$rpt_cleaners = 1;\n        }\n\n# check we are in a valid C source file if not then ignore this hunk\n\t\tnext if ($realfile !~ /\\.(h|c)$/);\n\n# check for RCS/CVS revision markers\n\t\tif ($rawline =~ /^\\+.*\\$(Revision|Log|Id)(?:\\$|)/) {\n\t\t\tWARN(\"CVS_KEYWORD\",\n\t\t\t     \"CVS style keyword markers, these will _not_ be updated\\n\". $herecurr);\n\t\t}\n\n# Blackfin: don't use __builtin_bfin_[cs]sync\n\t\tif ($line =~ /__builtin_bfin_csync/) {\n\t\t\tmy $herevet = \"$here\\n\" . cat_vet($line) . \"\\n\";\n\t\t\tERROR(\"CSYNC\",\n\t\t\t      \"use the CSYNC() macro in asm/blackfin.h\\n\" . $herevet);\n\t\t}\n\t\tif ($line =~ /__builtin_bfin_ssync/) {\n\t\t\tmy $herevet = \"$here\\n\" . cat_vet($line) . \"\\n\";\n\t\t\tERROR(\"SSYNC\",\n\t\t\t      \"use the SSYNC() macro in asm/blackfin.h\\n\" . $herevet);\n\t\t}\n\n# Check for potential 'bare' types\n\t\tmy ($stat, $cond, $line_nr_next, $remain_next, $off_next,\n\t\t    $realline_next);\n#print \"LINE<$line>\\n\";\n\t\tif ($linenr >= $suppress_statement &&\n\t\t    $realcnt && $line =~ /.\\s*\\S/) {\n\t\t\t($stat, $cond, $line_nr_next, $remain_next, $off_next) =\n\t\t\t\tctx_statement_block($linenr, $realcnt, 0);\n\t\t\t$stat =~ s/\\n./\\n /g;\n\t\t\t$cond =~ s/\\n./\\n /g;\n\n#print \"linenr<$linenr> <$stat>\\n\";\n\t\t\t# If this statement has no statement boundaries within\n\t\t\t# it there is no point in retrying a statement scan\n\t\t\t# until we hit end of it.\n\t\t\tmy $frag = $stat; $frag =~ s/;+\\s*$//;\n\t\t\tif ($frag !~ /(?:{|;)/) {\n#print \"skip<$line_nr_next>\\n\";\n\t\t\t\t$suppress_statement = $line_nr_next;\n\t\t\t}\n\n\t\t\t# Find the real next line.\n\t\t\t$realline_next = $line_nr_next;\n\t\t\tif (defined $realline_next &&\n\t\t\t    (!defined $lines[$realline_next - 1] ||\n\t\t\t     substr($lines[$realline_next - 1], $off_next) =~ /^\\s*$/)) 
{\n\t\t\t\t$realline_next++;\n\t\t\t}\n\n\t\t\tmy $s = $stat;\n\t\t\t$s =~ s/{.*$//s;\n\n\t\t\t# Ignore goto labels.\n\t\t\tif ($s =~ /$Ident:\\*$/s) {\n\n\t\t\t# Ignore functions being called\n\t\t\t} elsif ($s =~ /^.\\s*$Ident\\s*\\(/s) {\n\n\t\t\t} elsif ($s =~ /^.\\s*else\\b/s) {\n\n\t\t\t# declarations always start with types\n\t\t\t} elsif ($prev_values eq 'E' && $s =~ /^.\\s*(?:$Storage\\s+)?(?:$Inline\\s+)?(?:const\\s+)?((?:\\s*$Ident)+?)\\b(?:\\s+$Sparse)?\\s*\\**\\s*(?:$Ident|\\(\\*[^\\)]*\\))(?:\\s*$Modifier)?\\s*(?:;|=|,|\\()/s) {\n\t\t\t\tmy $type = $1;\n\t\t\t\t$type =~ s/\\s+/ /g;\n\t\t\t\tpossible($type, \"A:\" . $s);\n\n\t\t\t# definitions in global scope can only start with types\n\t\t\t} elsif ($s =~ /^.(?:$Storage\\s+)?(?:$Inline\\s+)?(?:const\\s+)?($Ident)\\b\\s*(?!:)/s) {\n\t\t\t\tpossible($1, \"B:\" . $s);\n\t\t\t}\n\n\t\t\t# any (foo ... *) is a pointer cast, and foo is a type\n\t\t\twhile ($s =~ /\\(($Ident)(?:\\s+$Sparse)*[\\s\\*]+\\s*\\)/sg) {\n\t\t\t\tpossible($1, \"C:\" . $s);\n\t\t\t}\n\n\t\t\t# Check for any sort of function declaration.\n\t\t\t# int foo(something bar, other baz);\n\t\t\t# void (*store_gdt)(x86_descr_ptr *);\n\t\t\tif ($prev_values eq 'E' && $s =~ /^(.(?:typedef\\s*)?(?:(?:$Storage|$Inline)\\s*)*\\s*$Type\\s*(?:\\b$Ident|\\(\\*\\s*$Ident\\))\\s*)\\(/s) {\n\t\t\t\tmy ($name_len) = length($1);\n\n\t\t\t\tmy $ctx = $s;\n\t\t\t\tsubstr($ctx, 0, $name_len + 1, '');\n\t\t\t\t$ctx =~ s/\\)[^\\)]*$//;\n\n\t\t\t\tfor my $arg (split(/\\s*,\\s*/, $ctx)) {\n\t\t\t\t\tif ($arg =~ /^(?:const\\s+)?($Ident)(?:\\s+$Sparse)*\\s*\\**\\s*(:?\\b$Ident)?$/s || $arg =~ /^($Ident)$/s) {\n\n\t\t\t\t\t\tpossible($1, \"D:\" . 
$s);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t}\n\n#\n# Checks which may be anchored in the context.\n#\n\n# Check for switch () and associated case and default\n# statements should be at the same indent.\n\t\tif ($line=~/\\bswitch\\s*\\(.*\\)/) {\n\t\t\tmy $err = '';\n\t\t\tmy $sep = '';\n\t\t\tmy @ctx = ctx_block_outer($linenr, $realcnt);\n\t\t\tshift(@ctx);\n\t\t\tfor my $ctx (@ctx) {\n\t\t\t\tmy ($clen, $cindent) = line_stats($ctx);\n\t\t\t\tif ($ctx =~ /^\\+\\s*(case\\s+|default:)/ &&\n\t\t\t\t\t\t\t$indent != $cindent) {\n\t\t\t\t\t$err .= \"$sep$ctx\\n\";\n\t\t\t\t\t$sep = '';\n\t\t\t\t} else {\n\t\t\t\t\t$sep = \"[...]\\n\";\n\t\t\t\t}\n\t\t\t}\n\t\t\tif ($err ne '') {\n\t\t\t\tERROR(\"SWITCH_CASE_INDENT_LEVEL\",\n\t\t\t\t      \"switch and case should be at the same indent\\n$hereline$err\");\n\t\t\t}\n\t\t}\n\n# if/while/etc brace do not go on next line, unless defining a do while loop,\n# or if that brace on the next line is for something else\n\t\tif ($line =~ /(.*)\\b((?:if|while|for|switch)\\s*\\(|do\\b|else\\b)/ && $line !~ /^.\\s*\\#/) {\n\t\t\tmy $pre_ctx = \"$1$2\";\n\n\t\t\tmy ($level, @ctx) = ctx_statement_level($linenr, $realcnt, 0);\n\n\t\t\tif ($line =~ /^\\+\\t{6,}/) {\n\t\t\t\tWARN(\"DEEP_INDENTATION\",\n\t\t\t\t     \"Too many leading tabs - consider code refactoring\\n\" . 
$herecurr);\n\t\t\t}\n\n\t\t\tmy $ctx_cnt = $realcnt - $#ctx - 1;\n\t\t\tmy $ctx = join(\"\\n\", @ctx);\n\n\t\t\tmy $ctx_ln = $linenr;\n\t\t\tmy $ctx_skip = $realcnt;\n\n\t\t\twhile ($ctx_skip > $ctx_cnt || ($ctx_skip == $ctx_cnt &&\n\t\t\t\t\tdefined $lines[$ctx_ln - 1] &&\n\t\t\t\t\t$lines[$ctx_ln - 1] =~ /^-/)) {\n\t\t\t\t##print \"SKIP<$ctx_skip> CNT<$ctx_cnt>\\n\";\n\t\t\t\t$ctx_skip-- if (!defined $lines[$ctx_ln - 1] || $lines[$ctx_ln - 1] !~ /^-/);\n\t\t\t\t$ctx_ln++;\n\t\t\t}\n\n\t\t\t#print \"realcnt<$realcnt> ctx_cnt<$ctx_cnt>\\n\";\n\t\t\t#print \"pre<$pre_ctx>\\nline<$line>\\nctx<$ctx>\\nnext<$lines[$ctx_ln - 1]>\\n\";\n\n\t\t\tif ($ctx !~ /{\\s*/ && defined($lines[$ctx_ln -1]) && $lines[$ctx_ln - 1] =~ /^\\+\\s*{/) {\n\t\t\t\tERROR(\"OPEN_BRACE\",\n\t\t\t\t      \"that open brace { should be on the previous line\\n\" .\n\t\t\t\t\t\"$here\\n$ctx\\n$rawlines[$ctx_ln - 1]\\n\");\n\t\t\t}\n\t\t\tif ($level == 0 && $pre_ctx !~ /}\\s*while\\s*\\($/ &&\n\t\t\t    $ctx =~ /\\)\\s*\\;\\s*$/ &&\n\t\t\t    defined $lines[$ctx_ln - 1])\n\t\t\t{\n\t\t\t\tmy ($nlength, $nindent) = line_stats($lines[$ctx_ln - 1]);\n\t\t\t\tif ($nindent > $indent) {\n\t\t\t\t\tWARN(\"TRAILING_SEMICOLON\",\n\t\t\t\t\t     \"trailing semicolon indicates no statements, indent implies otherwise\\n\" .\n\t\t\t\t\t\t\"$here\\n$ctx\\n$rawlines[$ctx_ln - 1]\\n\");\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n# Check relative indent for conditionals and blocks.\n\t\tif ($line =~ /\\b(?:(?:if|while|for)\\s*\\(|do\\b)/ && $line !~ /^.\\s*#/ && $line !~ /\\}\\s*while\\s*/) {\n\t\t\t($stat, $cond, $line_nr_next, $remain_next, $off_next) =\n\t\t\t\tctx_statement_block($linenr, $realcnt, 0)\n\t\t\t\t\tif (!defined $stat);\n\t\t\tmy ($s, $c) = ($stat, $cond);\n\n\t\t\tsubstr($s, 0, length($c), '');\n\n\t\t\t# Make sure we remove the line prefixes as we have\n\t\t\t# none on the first line, and are going to readd them\n\t\t\t# where necessary.\n\t\t\t$s =~ s/\\n./\\n/gs;\n\n\t\t\t# Find out how long the conditional 
actually is.\n\t\t\tmy @newlines = ($c =~ /\\n/gs);\n\t\t\tmy $cond_lines = 1 + $#newlines;\n\n\t\t\t# We want to check the first line inside the block\n\t\t\t# starting at the end of the conditional, so remove:\n\t\t\t#  1) any blank line termination\n\t\t\t#  2) any opening brace { on end of the line\n\t\t\t#  3) any do (...) {\n\t\t\tmy $continuation = 0;\n\t\t\tmy $check = 0;\n\t\t\t$s =~ s/^.*\\bdo\\b//;\n\t\t\t$s =~ s/^\\s*{//;\n\t\t\tif ($s =~ s/^\\s*\\\\//) {\n\t\t\t\t$continuation = 1;\n\t\t\t}\n\t\t\tif ($s =~ s/^\\s*?\\n//) {\n\t\t\t\t$check = 1;\n\t\t\t\t$cond_lines++;\n\t\t\t}\n\n\t\t\t# Also ignore a loop construct at the end of a\n\t\t\t# preprocessor statement.\n\t\t\tif (($prevline =~ /^.\\s*#\\s*define\\s/ ||\n\t\t\t    $prevline =~ /\\\\\\s*$/) && $continuation == 0) {\n\t\t\t\t$check = 0;\n\t\t\t}\n\n\t\t\tmy $cond_ptr = -1;\n\t\t\t$continuation = 0;\n\t\t\twhile ($cond_ptr != $cond_lines) {\n\t\t\t\t$cond_ptr = $cond_lines;\n\n\t\t\t\t# If we see an #else/#elif then the code\n\t\t\t\t# is not linear.\n\t\t\t\tif ($s =~ /^\\s*\\#\\s*(?:else|elif)/) {\n\t\t\t\t\t$check = 0;\n\t\t\t\t}\n\n\t\t\t\t# Ignore:\n\t\t\t\t#  1) blank lines, they should be at 0,\n\t\t\t\t#  2) preprocessor lines, and\n\t\t\t\t#  3) labels.\n\t\t\t\tif ($continuation ||\n\t\t\t\t    $s =~ /^\\s*?\\n/ ||\n\t\t\t\t    $s =~ /^\\s*#\\s*?/ ||\n\t\t\t\t    $s =~ /^\\s*$Ident\\s*:/) {\n\t\t\t\t\t$continuation = ($s =~ /^.*?\\\\\\n/) ? 1 : 0;\n\t\t\t\t\tif ($s =~ s/^.*?\\n//) {\n\t\t\t\t\t\t$cond_lines++;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tmy (undef, $sindent) = line_stats(\"+\" . 
$s);\n\t\t\tmy $stat_real = raw_line($linenr, $cond_lines);\n\n\t\t\t# Check if either of these lines are modified, else\n\t\t\t# this is not this patch's fault.\n\t\t\tif (!defined($stat_real) ||\n\t\t\t    $stat !~ /^\\+/ && $stat_real !~ /^\\+/) {\n\t\t\t\t$check = 0;\n\t\t\t}\n\t\t\tif (defined($stat_real) && $cond_lines > 1) {\n\t\t\t\t$stat_real = \"[...]\\n$stat_real\";\n\t\t\t}\n\n\t\t\t#print \"line<$line> prevline<$prevline> indent<$indent> sindent<$sindent> check<$check> continuation<$continuation> s<$s> cond_lines<$cond_lines> stat_real<$stat_real> stat<$stat>\\n\";\n\n\t\t\tif ($check && (($sindent % 4) != 0 ||\n\t\t\t    ($sindent <= $indent && $s ne ''))) {\n\t\t\t\tWARN(\"SUSPECT_CODE_INDENT\",\n\t\t\t\t     \"suspect code indent for conditional statements ($indent, $sindent)\\n\" . $herecurr . \"$stat_real\\n\");\n\t\t\t}\n\t\t}\n\n\t\t# Track the 'values' across context and added lines.\n\t\tmy $opline = $line; $opline =~ s/^./ /;\n\t\tmy ($curr_values, $curr_vars) =\n\t\t\t\tannotate_values($opline . \"\\n\", $prev_values);\n\t\t$curr_values = $prev_values . $curr_values;\n\t\tif ($dbg_values) {\n\t\t\tmy $outline = $opline; $outline =~ s/\\t/ /g;\n\t\t\tprint \"$linenr > .$outline\\n\";\n\t\t\tprint \"$linenr > $curr_values\\n\";\n\t\t\tprint \"$linenr >  $curr_vars\\n\";\n\t\t}\n\t\t$prev_values = substr($curr_values, -1);\n\n#ignore lines not being added\n\t\tif ($line=~/^[^\\+]/) {next;}\n\n# TEST: allow direct testing of the type matcher.\n\t\tif ($dbg_type) {\n\t\t\tif ($line =~ /^.\\s*$Declare\\s*$/) {\n\t\t\t\tERROR(\"TEST_TYPE\",\n\t\t\t\t      \"TEST: is type\\n\" . $herecurr);\n\t\t\t} elsif ($dbg_type > 1 && $line =~ /^.+($Declare)/) {\n\t\t\t\tERROR(\"TEST_NOT_TYPE\",\n\t\t\t\t      \"TEST: is not type ($1 is)\\n\". 
$herecurr);\n\t\t\t}\n\t\t\tnext;\n\t\t}\n# TEST: allow direct testing of the attribute matcher.\n\t\tif ($dbg_attr) {\n\t\t\tif ($line =~ /^.\\s*$Modifier\\s*$/) {\n\t\t\t\tERROR(\"TEST_ATTR\",\n\t\t\t\t      \"TEST: is attr\\n\" . $herecurr);\n\t\t\t} elsif ($dbg_attr > 1 && $line =~ /^.+($Modifier)/) {\n\t\t\t\tERROR(\"TEST_NOT_ATTR\",\n\t\t\t\t      \"TEST: is not attr ($1 is)\\n\". $herecurr);\n\t\t\t}\n\t\t\tnext;\n\t\t}\n\n# check for initialisation to aggregates open brace on the next line\n\t\tif ($line =~ /^.\\s*{/ &&\n\t\t    $prevline =~ /(?:^|[^=])=\\s*$/) {\n\t\t\tERROR(\"OPEN_BRACE\",\n\t\t\t      \"that open brace { should be on the previous line\\n\" . $hereprev);\n\t\t}\n\n#\n# Checks which are anchored on the added line.\n#\n\n# check for malformed paths in #include statements (uses RAW line)\n\t\tif ($rawline =~ m{^.\\s*\\#\\s*include\\s+[<\"](.*)[\">]}) {\n\t\t\tmy $path = $1;\n\t\t\tif ($path =~ m{//}) {\n\t\t\t\tERROR(\"MALFORMED_INCLUDE\",\n\t\t\t\t      \"malformed #include filename\\n\" .\n\t\t\t\t\t$herecurr);\n\t\t\t}\n\t\t}\n\n# no C99 // comments\n\t\tif ($line =~ m{//}) {\n\t\t\tERROR(\"C99_COMMENTS\",\n\t\t\t      \"do not use C99 // comments\\n\" . 
$herecurr);\n\t\t}\n\t\t# Remove C99 comments.\n\t\t$line =~ s@//.*@@;\n\t\t$opline =~ s@//.*@@;\n\n# EXPORT_SYMBOL should immediately follow the thing it is exporting, consider\n# the whole statement.\n#print \"APW <$lines[$realline_next - 1]>\\n\";\n\t\tif (defined $realline_next &&\n\t\t    exists $lines[$realline_next - 1] &&\n\t\t    !defined $suppress_export{$realline_next} &&\n\t\t    ($lines[$realline_next - 1] =~ /EXPORT_SYMBOL.*\\((.*)\\)/ ||\n\t\t     $lines[$realline_next - 1] =~ /EXPORT_UNUSED_SYMBOL.*\\((.*)\\)/)) {\n\t\t\t# Handle definitions which produce identifiers with\n\t\t\t# a prefix:\n\t\t\t#   XXX(foo);\n\t\t\t#   EXPORT_SYMBOL(something_foo);\n\t\t\tmy $name = $1;\n\t\t\tif ($stat =~ /^(?:.\\s*}\\s*\\n)?.([A-Z_]+)\\s*\\(\\s*($Ident)/ &&\n\t\t\t    $name =~ /^${Ident}_$2/) {\n#print \"FOO C name<$name>\\n\";\n\t\t\t\t$suppress_export{$realline_next} = 1;\n\n\t\t\t} elsif ($stat !~ /(?:\n\t\t\t\t\\n.}\\s*$|\n\t\t\t\t^.DEFINE_$Ident\\(\\Q$name\\E\\)|\n\t\t\t\t^.DECLARE_$Ident\\(\\Q$name\\E\\)|\n\t\t\t\t^.LIST_HEAD\\(\\Q$name\\E\\)|\n\t\t\t\t^.(?:$Storage\\s+)?$Type\\s*\\(\\s*\\*\\s*\\Q$name\\E\\s*\\)\\s*\\(|\n\t\t\t\t\\b\\Q$name\\E(?:\\s+$Attribute)*\\s*(?:;|=|\\[|\\()\n\t\t\t    )/x) {\n#print \"FOO A<$lines[$realline_next - 1]> stat<$stat> name<$name>\\n\";\n\t\t\t\t$suppress_export{$realline_next} = 2;\n\t\t\t} else {\n\t\t\t\t$suppress_export{$realline_next} = 1;\n\t\t\t}\n\t\t}\n\t\tif (!defined $suppress_export{$linenr} &&\n\t\t    $prevline =~ /^.\\s*$/ &&\n\t\t    ($line =~ /EXPORT_SYMBOL.*\\((.*)\\)/ ||\n\t\t     $line =~ /EXPORT_UNUSED_SYMBOL.*\\((.*)\\)/)) {\n#print \"FOO B <$lines[$linenr - 1]>\\n\";\n\t\t\t$suppress_export{$linenr} = 2;\n\t\t}\n\t\tif (defined $suppress_export{$linenr} &&\n\t\t    $suppress_export{$linenr} == 2) {\n\t\t\tWARN(\"EXPORT_SYMBOL\",\n\t\t\t     \"EXPORT_SYMBOL(foo); should immediately follow its function/variable\\n\" . 
$herecurr);\n\t\t}\n\n# check for global initialisers.\n\t\tif ($line =~ /^.$Type\\s*$Ident\\s*(?:\\s+$Modifier)*\\s*=\\s*(0|NULL|false)\\s*;/) {\n\t\t\tERROR(\"GLOBAL_INITIALISERS\",\n\t\t\t      \"do not initialise globals to 0 or NULL\\n\" .\n\t\t\t\t$herecurr);\n\t\t}\n# check for static initialisers.\n\t\tif ($line =~ /\\bstatic\\s.*=\\s*(0|NULL|false)\\s*;/) {\n\t\t\tERROR(\"INITIALISED_STATIC\",\n\t\t\t      \"do not initialise statics to 0 or NULL\\n\" .\n\t\t\t\t$herecurr);\n\t\t}\n\n# check for static const char * arrays.\n\t\tif ($line =~ /\\bstatic\\s+const\\s+char\\s*\\*\\s*(\\w+)\\s*\\[\\s*\\]\\s*=\\s*/) {\n\t\t\tWARN(\"STATIC_CONST_CHAR_ARRAY\",\n\t\t\t     \"static const char * array should probably be static const char * const\\n\" .\n\t\t\t\t$herecurr);\n               }\n\n# check for static char foo[] = \"bar\" declarations.\n\t\tif ($line =~ /\\bstatic\\s+char\\s+(\\w+)\\s*\\[\\s*\\]\\s*=\\s*\"/) {\n\t\t\tWARN(\"STATIC_CONST_CHAR_ARRAY\",\n\t\t\t     \"static char array declaration should probably be static const char\\n\" .\n\t\t\t\t$herecurr);\n               }\n\n# check for declarations of struct pci_device_id\n\t\tif ($line =~ /\\bstruct\\s+pci_device_id\\s+\\w+\\s*\\[\\s*\\]\\s*\\=\\s*\\{/) {\n\t\t\tWARN(\"DEFINE_PCI_DEVICE_TABLE\",\n\t\t\t     \"Use DEFINE_PCI_DEVICE_TABLE for struct pci_device_id\\n\" . $herecurr);\n\t\t}\n\n# check for new typedefs, only function parameters and sparse annotations\n# make sense.\n\t\tif ($line =~ /\\btypedef\\s/ &&\n\t\t    $line !~ /\\btypedef\\s+$Type\\s*\\(\\s*\\*?$Ident\\s*\\)\\s*\\(/ &&\n\t\t    $line !~ /\\btypedef\\s+$Type\\s+$Ident\\s*\\(/ &&\n\t\t    $line !~ /\\b$typeTypedefs\\b/ &&\n\t\t    $line !~ /\\b__bitwise(?:__|)\\b/) {\n\t\t\tWARN(\"NEW_TYPEDEFS\",\n\t\t\t     \"do not add new typedefs\\n\" . 
$herecurr);\n\t\t}\n\n# * goes on variable not on type\n\t\t# (char*[ const])\n\t\twhile ($line =~ m{(\\($NonptrType(\\s*(?:$Modifier\\b\\s*|\\*\\s*)+)\\))}g) {\n\t\t\t#print \"AA<$1>\\n\";\n\t\t\tmy ($from, $to) = ($2, $2);\n\n\t\t\t# Should start with a space.\n\t\t\t$to =~ s/^(\\S)/ $1/;\n\t\t\t# Should not end with a space.\n\t\t\t$to =~ s/\\s+$//;\n\t\t\t# '*'s should not have spaces between.\n\t\t\twhile ($to =~ s/\\*\\s+\\*/\\*\\*/) {\n\t\t\t}\n\n\t\t\t#print \"from<$from> to<$to>\\n\";\n\t\t\tif ($from ne $to) {\n\t\t\t\tERROR(\"POINTER_LOCATION\",\n\t\t\t\t      \"\\\"(foo$from)\\\" should be \\\"(foo$to)\\\"\\n\" .  $herecurr);\n\t\t\t}\n\t\t}\n\t\twhile ($line =~ m{(\\b$NonptrType(\\s*(?:$Modifier\\b\\s*|\\*\\s*)+)($Ident))}g) {\n\t\t\t#print \"BB<$1>\\n\";\n\t\t\tmy ($from, $to, $ident) = ($2, $2, $3);\n\n\t\t\t# Should start with a space.\n\t\t\t$to =~ s/^(\\S)/ $1/;\n\t\t\t# Should not end with a space.\n\t\t\t$to =~ s/\\s+$//;\n\t\t\t# '*'s should not have spaces between.\n\t\t\twhile ($to =~ s/\\*\\s+\\*/\\*\\*/) {\n\t\t\t}\n\t\t\t# Modifiers should have spaces.\n\t\t\t$to =~ s/(\\b$Modifier$)/$1 /;\n\n\t\t\t#print \"from<$from> to<$to> ident<$ident>\\n\";\n\t\t\tif ($from ne $to && $ident !~ /^$Modifier$/) {\n\t\t\t\tERROR(\"POINTER_LOCATION\",\n\t\t\t\t      \"\\\"foo${from}bar\\\" should be \\\"foo${to}bar\\\"\\n\" .  $herecurr);\n\t\t\t}\n\t\t}\n\n# # no BUG() or BUG_ON()\n# \t\tif ($line =~ /\\b(BUG|BUG_ON)\\b/) {\n# \t\t\tprint \"Try to use WARN_ON & Recovery code rather than BUG() or BUG_ON()\\n\";\n# \t\t\tprint \"$herecurr\";\n# \t\t\t$clean = 0;\n# \t\t}\n\n\t\tif ($line =~ /\\bLINUX_VERSION_CODE\\b/) {\n\t\t\tWARN(\"LINUX_VERSION_CODE\",\n\t\t\t     \"LINUX_VERSION_CODE should be avoided, code should be for the version to which it is merged\\n\" . 
$herecurr);\n\t\t}\n\n# check for uses of printk_ratelimit\n\t\tif ($line =~ /\\bprintk_ratelimit\\s*\\(/) {\n\t\t\tWARN(\"PRINTK_RATELIMITED\",\n\"Prefer printk_ratelimited or pr_<level>_ratelimited to printk_ratelimit\\n\" . $herecurr);\n\t\t}\n\n# printk should use KERN_* levels.  Note that follow on printk's on the\n# same line do not need a level, so we use the current block context\n# to try and find and validate the current printk.  In summary the current\n# printk includes all preceding printk's which have no newline on the end.\n# we assume the first bad printk is the one to report.\n\t\tif ($line =~ /\\bprintk\\((?!KERN_)\\s*\"/) {\n\t\t\tmy $ok = 0;\n\t\t\tfor (my $ln = $linenr - 1; $ln >= $first_line; $ln--) {\n\t\t\t\t#print \"CHECK<$lines[$ln - 1]\\n\";\n\t\t\t\t# we have a preceding printk if it ends\n\t\t\t\t# with \"\\n\" ignore it, else it is to blame\n\t\t\t\tif ($lines[$ln - 1] =~ m{\\bprintk\\(}) {\n\t\t\t\t\tif ($rawlines[$ln - 1] !~ m{\\\\n\"}) {\n\t\t\t\t\t\t$ok = 1;\n\t\t\t\t\t}\n\t\t\t\t\tlast;\n\t\t\t\t}\n\t\t\t}\n\t\t\tif ($ok == 0) {\n\t\t\t\tWARN(\"PRINTK_WITHOUT_KERN_LEVEL\",\n\t\t\t\t     \"printk() should include KERN_ facility level\\n\" . $herecurr);\n\t\t\t}\n\t\t}\n\n# function brace can't be on same line, except for #defines of do while,\n# or if closed on same line\n\t\tif (($line=~/$Type\\s*$Ident\\(.*\\).*\\s{/) and\n\t\t    !($line=~/\\#\\s*define.*do\\s{/) and !($line=~/}/)) {\n\t\t\tERROR(\"OPEN_BRACE\",\n\t\t\t      \"open brace '{' following function declarations go on the next line\\n\" . $herecurr);\n\t\t}\n\n# open braces for enum, union and struct go on the same line.\n\t\tif ($line =~ /^.\\s*{/ &&\n\t\t    $prevline =~ /^.\\s*(?:typedef\\s+)?(enum|union|struct)(?:\\s+$Ident)?\\s*$/) {\n\t\t\tERROR(\"OPEN_BRACE\",\n\t\t\t      \"open brace '{' following $1 go on the same line\\n\" . 
$hereprev);\n\t\t}\n\n# missing space after union, struct or enum definition\n\t\tif ($line =~ /^.\\s*(?:typedef\\s+)?(enum|union|struct)(?:\\s+$Ident)?(?:\\s+$Ident)?[=\\{]/) {\n\t\t    WARN(\"SPACING\",\n\t\t\t \"missing space after $1 definition\\n\" . $herecurr);\n\t\t}\n\n# check for spacing round square brackets; allowed:\n#  1. with a type on the left -- int [] a;\n#  2. at the beginning of a line for slice initialisers -- [0...10] = 5,\n#  3. inside a curly brace -- = { [0...10] = 5 }\n\t\twhile ($line =~ /(.*?\\s)\\[/g) {\n\t\t\tmy ($where, $prefix) = ($-[1], $1);\n\t\t\tif ($prefix !~ /$Type\\s+$/ &&\n\t\t\t    ($where != 0 || $prefix !~ /^.\\s+$/) &&\n\t\t\t    $prefix !~ /{\\s+$/) {\n\t\t\t\tERROR(\"BRACKET_SPACE\",\n\t\t\t\t      \"space prohibited before open square bracket '['\\n\" . $herecurr);\n\t\t\t}\n\t\t}\n\n# check for spaces between functions and their parentheses.\n\t\twhile ($line =~ /($Ident)\\s+\\(/g) {\n\t\t\tmy $name = $1;\n\t\t\tmy $ctx_before = substr($line, 0, $-[1]);\n\t\t\tmy $ctx = \"$ctx_before$name\";\n\n\t\t\t# Ignore those directives where spaces _are_ permitted.\n\t\t\tif ($name =~ /^(?:\n\t\t\t\tif|for|while|switch|return|case|\n\t\t\t\tvolatile|__volatile__|\n\t\t\t\t__attribute__|format|__extension__|\n\t\t\t\tasm|__asm__)$/x)\n\t\t\t{\n\n\t\t\t# cpp #define statements have non-optional spaces, ie\n\t\t\t# if there is a space between the name and the open\n\t\t\t# parenthesis it is simply not a parameter group.\n\t\t\t} elsif ($ctx_before =~ /^.\\s*\\#\\s*define\\s*$/) {\n\n\t\t\t# cpp #elif statement condition may start with a (\n\t\t\t} elsif ($ctx =~ /^.\\s*\\#\\s*elif\\s*$/) {\n\n\t\t\t# If this whole things ends with a type its most\n\t\t\t# likely a typedef for a function.\n\t\t\t} elsif ($ctx =~ /$Type$/) {\n\n\t\t\t} else {\n\t\t\t\tWARN(\"SPACING\",\n\t\t\t\t     \"space prohibited between function name and open parenthesis '('\\n\" . 
$herecurr);\n\t\t\t}\n\t\t}\n# Check operator spacing.\n\t\tif (!($line=~/\\#\\s*include/)) {\n\t\t\tmy $ops = qr{\n\t\t\t\t<<=|>>=|<=|>=|==|!=|\n\t\t\t\t\\+=|-=|\\*=|\\/=|%=|\\^=|\\|=|&=|\n\t\t\t\t=>|->|<<|>>|<|>|=|!|~|\n\t\t\t\t&&|\\|\\||,|\\^|\\+\\+|--|&|\\||\\+|-|\\*|\\/|%|\n\t\t\t\t\\?|:\n\t\t\t}x;\n\t\t\tmy @elements = split(/($ops|;)/, $opline);\n\t\t\tmy $off = 0;\n\n\t\t\tmy $blank = copy_spacing($opline);\n\n\t\t\tfor (my $n = 0; $n < $#elements; $n += 2) {\n\t\t\t\t$off += length($elements[$n]);\n\n\t\t\t\t# Pick up the preceding and succeeding characters.\n\t\t\t\tmy $ca = substr($opline, 0, $off);\n\t\t\t\tmy $cc = '';\n\t\t\t\tif (length($opline) >= ($off + length($elements[$n + 1]))) {\n\t\t\t\t\t$cc = substr($opline, $off + length($elements[$n + 1]));\n\t\t\t\t}\n\t\t\t\tmy $cb = \"$ca$;$cc\";\n\n\t\t\t\tmy $a = '';\n\t\t\t\t$a = 'V' if ($elements[$n] ne '');\n\t\t\t\t$a = 'W' if ($elements[$n] =~ /\\s$/);\n\t\t\t\t$a = 'C' if ($elements[$n] =~ /$;$/);\n\t\t\t\t$a = 'B' if ($elements[$n] =~ /(\\[|\\()$/);\n\t\t\t\t$a = 'O' if ($elements[$n] eq '');\n\t\t\t\t$a = 'E' if ($ca =~ /^\\s*$/);\n\n\t\t\t\tmy $op = $elements[$n + 1];\n\n\t\t\t\tmy $c = '';\n\t\t\t\tif (defined $elements[$n + 2]) {\n\t\t\t\t\t$c = 'V' if ($elements[$n + 2] ne '');\n\t\t\t\t\t$c = 'W' if ($elements[$n + 2] =~ /^\\s/);\n\t\t\t\t\t$c = 'C' if ($elements[$n + 2] =~ /^$;/);\n\t\t\t\t\t$c = 'B' if ($elements[$n + 2] =~ /^(\\)|\\]|;)/);\n\t\t\t\t\t$c = 'O' if ($elements[$n + 2] eq '');\n\t\t\t\t\t$c = 'E' if ($elements[$n + 2] =~ /^\\s*\\\\$/);\n\t\t\t\t} else {\n\t\t\t\t\t$c = 'E';\n\t\t\t\t}\n\n\t\t\t\tmy $ctx = \"${a}x${c}\";\n\n\t\t\t\tmy $at = \"(ctx:$ctx)\";\n\n\t\t\t\tmy $ptr = substr($blank, 0, $off) . \"^\";\n\t\t\t\tmy $hereptr = \"$hereline$ptr\\n\";\n\n\t\t\t\t# Pull out the value of this operator.\n\t\t\t\tmy $op_type = substr($curr_values, $off + 1, 1);\n\n\t\t\t\t# Get the full operator variant.\n\t\t\t\tmy $opv = $op . 
substr($curr_vars, $off, 1);\n\n\t\t\t\t# Ignore operators passed as parameters.\n\t\t\t\tif ($op_type ne 'V' &&\n\t\t\t\t    $ca =~ /\\s$/ && $cc =~ /^\\s*,/) {\n\n#\t\t\t\t# Ignore comments\n#\t\t\t\t} elsif ($op =~ /^$;+$/) {\n\n\t\t\t\t# ; should have either the end of line or a space or \\ after it\n\t\t\t\t} elsif ($op eq ';') {\n\t\t\t\t\tif ($ctx !~ /.x[WEBC]/ &&\n\t\t\t\t\t    $cc !~ /^\\\\/ && $cc !~ /^;/) {\n\t\t\t\t\t\tERROR(\"SPACING\",\n\t\t\t\t\t\t      \"space required after that '$op' $at\\n\" . $hereptr);\n\t\t\t\t\t}\n\n\t\t\t\t# // is a comment\n\t\t\t\t} elsif ($op eq '//') {\n\n\t\t\t\t# No spaces for:\n\t\t\t\t#   ->\n\t\t\t\t#   :   when part of a bitfield\n\t\t\t\t} elsif ($op eq '->' || $opv eq ':B') {\n\t\t\t\t\tif ($ctx =~ /Wx.|.xW/) {\n\t\t\t\t\t\tERROR(\"SPACING\",\n\t\t\t\t\t\t      \"spaces prohibited around that '$op' $at\\n\" . $hereptr);\n\t\t\t\t\t}\n\n\t\t\t\t# , must have a space on the right.\n\t\t\t\t} elsif ($op eq ',') {\n\t\t\t\t\tif ($ctx !~ /.x[WEC]/ && $cc !~ /^}/) {\n\t\t\t\t\t\tERROR(\"SPACING\",\n\t\t\t\t\t\t      \"space required after that '$op' $at\\n\" . $hereptr);\n\t\t\t\t\t}\n\n\t\t\t\t# '*' as part of a type definition -- reported already.\n\t\t\t\t} elsif ($opv eq '*_') {\n\t\t\t\t\t#warn \"'*' is part of type\\n\";\n\n\t\t\t\t# unary operators should have a space before and\n\t\t\t\t# none after.  May be left adjacent to another\n\t\t\t\t# unary operator, or a cast\n\t\t\t\t} elsif ($op eq '!' || $op eq '~' ||\n\t\t\t\t\t $opv eq '*U' || $opv eq '-U' ||\n\t\t\t\t\t $opv eq '&U' || $opv eq '&&U') {\n\t\t\t\t\tif ($ctx !~ /[WEBC]x./ && $ca !~ /(?:\\)|!|~|\\*|-|\\&|\\||\\+\\+|\\-\\-|\\{)$/) {\n\t\t\t\t\t\tERROR(\"SPACING\",\n\t\t\t\t\t\t      \"space required before that '$op' $at\\n\" . 
$hereptr);\n\t\t\t\t\t}\n\t\t\t\t\tif ($op eq '*' && $cc =~/\\s*$Modifier\\b/) {\n\t\t\t\t\t\t# A unary '*' may be const\n\n\t\t\t\t\t} elsif ($ctx =~ /.xW/) {\n\t\t\t\t\t\tERROR(\"SPACING\",\n\t\t\t\t\t\t      \"space prohibited after that '$op' $at\\n\" . $hereptr);\n\t\t\t\t\t}\n\n\t\t\t\t# unary ++ and unary -- are allowed no space on one side.\n\t\t\t\t} elsif ($op eq '++' or $op eq '--') {\n\t\t\t\t\tif ($ctx !~ /[WEOBC]x[^W]/ && $ctx !~ /[^W]x[WOBEC]/) {\n\t\t\t\t\t\tERROR(\"SPACING\",\n\t\t\t\t\t\t      \"space required one side of that '$op' $at\\n\" . $hereptr);\n\t\t\t\t\t}\n\t\t\t\t\tif ($ctx =~ /Wx[BE]/ ||\n\t\t\t\t\t    ($ctx =~ /Wx./ && $cc =~ /^;/)) {\n\t\t\t\t\t\tERROR(\"SPACING\",\n\t\t\t\t\t\t      \"space prohibited before that '$op' $at\\n\" . $hereptr);\n\t\t\t\t\t}\n\t\t\t\t\tif ($ctx =~ /ExW/) {\n\t\t\t\t\t\tERROR(\"SPACING\",\n\t\t\t\t\t\t      \"space prohibited after that '$op' $at\\n\" . $hereptr);\n\t\t\t\t\t}\n\n\n\t\t\t\t# << and >> may either have or not have spaces both sides\n\t\t\t\t} elsif ($op eq '<<' or $op eq '>>' or\n\t\t\t\t\t $op eq '&' or $op eq '^' or $op eq '|' or\n\t\t\t\t\t $op eq '+' or $op eq '-' or\n\t\t\t\t\t $op eq '*' or $op eq '/' or\n\t\t\t\t\t $op eq '%')\n\t\t\t\t{\n\t\t\t\t\tif ($ctx =~ /Wx[^WCE]|[^WCE]xW/) {\n\t\t\t\t\t\tERROR(\"SPACING\",\n\t\t\t\t\t\t      \"need consistent spacing around '$op' $at\\n\" .\n\t\t\t\t\t\t\t$hereptr);\n\t\t\t\t\t}\n\n\t\t\t\t# A colon needs no spaces before when it is\n\t\t\t\t# terminating a case value or a label.\n\t\t\t\t} elsif ($opv eq ':C' || $opv eq ':L') {\n\t\t\t\t\tif ($ctx =~ /Wx./) {\n\t\t\t\t\t\tERROR(\"SPACING\",\n\t\t\t\t\t\t      \"space prohibited before that '$op' $at\\n\" . 
$hereptr);\n\t\t\t\t\t}\n\n\t\t\t\t# All the others need spaces both sides.\n\t\t\t\t} elsif ($ctx !~ /[EWC]x[CWE]/) {\n\t\t\t\t\tmy $ok = 0;\n\n\t\t\t\t\t# Ignore email addresses <foo@bar>\n\t\t\t\t\tif (($op eq '<' &&\n\t\t\t\t\t     $cc =~ /^\\S+\\@\\S+>/) ||\n\t\t\t\t\t    ($op eq '>' &&\n\t\t\t\t\t     $ca =~ /<\\S+\\@\\S+$/))\n\t\t\t\t\t{\n\t\t\t\t\t    \t$ok = 1;\n\t\t\t\t\t}\n\n\t\t\t\t\t# Ignore ?:\n\t\t\t\t\tif (($opv eq ':O' && $ca =~ /\\?$/) ||\n\t\t\t\t\t    ($op eq '?' && $cc =~ /^:/)) {\n\t\t\t\t\t    \t$ok = 1;\n\t\t\t\t\t}\n\n\t\t\t\t\tif ($ok == 0) {\n\t\t\t\t\t\tERROR(\"SPACING\",\n\t\t\t\t\t\t      \"spaces required around that '$op' $at\\n\" . $hereptr);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t$off += length($elements[$n + 1]);\n\t\t\t}\n\t\t}\n\n# check for multiple assignments\n\t\tif ($line =~ /^.\\s*$Lval\\s*=\\s*$Lval\\s*=(?!=)/) {\n\t\t\tCHK(\"MULTIPLE_ASSIGNMENTS\",\n\t\t\t    \"multiple assignments should be avoided\\n\" . $herecurr);\n\t\t}\n\n## # check for multiple declarations, allowing for a function declaration\n## # continuation.\n## \t\tif ($line =~ /^.\\s*$Type\\s+$Ident(?:\\s*=[^,{]*)?\\s*,\\s*$Ident.*/ &&\n## \t\t    $line !~ /^.\\s*$Type\\s+$Ident(?:\\s*=[^,{]*)?\\s*,\\s*$Type\\s*$Ident.*/) {\n##\n## \t\t\t# Remove any bracketed sections to ensure we do not\n## \t\t\t# falsly report the parameters of functions.\n## \t\t\tmy $ln = $line;\n## \t\t\twhile ($ln =~ s/\\([^\\(\\)]*\\)//g) {\n## \t\t\t}\n## \t\t\tif ($ln =~ /,/) {\n## \t\t\t\tWARN(\"MULTIPLE_DECLARATION\",\n##\t\t\t\t     \"declaring multiple variables together should be avoided\\n\" . $herecurr);\n## \t\t\t}\n## \t\t}\n\n#need space before brace following if, while, etc\n\t\tif (($line =~ /\\(.*\\){/ && $line !~ /\\($Type\\){/) ||\n\t\t    $line =~ /do{/) {\n\t\t\tERROR(\"SPACING\",\n\t\t\t      \"space required before the open brace '{'\\n\" . 
$herecurr);\n\t\t}\n\n# closing brace should have a space following it when it has anything\n# on the line\n\t\tif ($line =~ /}(?!(?:,|;|\\)))\\S/) {\n\t\t\tERROR(\"SPACING\",\n\t\t\t      \"space required after that close brace '}'\\n\" . $herecurr);\n\t\t}\n\n# check spacing on square brackets\n\t\tif ($line =~ /\\[\\s/ && $line !~ /\\[\\s*$/) {\n\t\t\tERROR(\"SPACING\",\n\t\t\t      \"space prohibited after that open square bracket '['\\n\" . $herecurr);\n\t\t}\n\t\tif ($line =~ /\\s\\]/) {\n\t\t\tERROR(\"SPACING\",\n\t\t\t      \"space prohibited before that close square bracket ']'\\n\" . $herecurr);\n\t\t}\n\n# check spacing on parentheses\n\t\tif ($line =~ /\\(\\s/ && $line !~ /\\(\\s*(?:\\\\)?$/ &&\n\t\t    $line !~ /for\\s*\\(\\s+;/) {\n\t\t\tERROR(\"SPACING\",\n\t\t\t      \"space prohibited after that open parenthesis '('\\n\" . $herecurr);\n\t\t}\n\t\tif ($line =~ /(\\s+)\\)/ && $line !~ /^.\\s*\\)/ &&\n\t\t    $line !~ /for\\s*\\(.*;\\s+\\)/ &&\n\t\t    $line !~ /:\\s+\\)/) {\n\t\t\tERROR(\"SPACING\",\n\t\t\t      \"space prohibited before that close parenthesis ')'\\n\" . $herecurr);\n\t\t}\n\n#goto labels aren't indented, allow a single space however\n\t\tif ($line=~/^.\\s+[A-Za-z\\d_]+:(?![0-9]+)/ and\n\t\t   !($line=~/^. [A-Za-z\\d_]+:/) and !($line=~/^.\\s+default:/)) {\n\t\t\tWARN(\"INDENTED_LABEL\",\n\t\t\t     \"labels should not be indented\\n\" . 
$herecurr);\n\t\t}\n\n# Return is not a function.\n\t\tif (defined($stat) && $stat =~ /^.\\s*return(\\s*)(\\(.*);/s) {\n\t\t\tmy $spacing = $1;\n\t\t\tmy $value = $2;\n\n\t\t\t# Flatten any parentheses\n\t\t\t$value =~ s/\\(/ \\(/g;\n\t\t\t$value =~ s/\\)/\\) /g;\n\t\t\twhile ($value =~ s/\\[[^\\[\\]]*\\]/1/ ||\n\t\t\t       $value !~ /(?:$Ident|-?$Constant)\\s*\n\t\t\t\t\t     $Compare\\s*\n\t\t\t\t\t     (?:$Ident|-?$Constant)/x &&\n\t\t\t       $value =~ s/\\([^\\(\\)]*\\)/1/) {\n\t\t\t}\n#print \"value<$value>\\n\";\n\t\t\tif ($value =~ /^\\s*(?:$Ident|-?$Constant)\\s*$/) {\n\t\t\t\tERROR(\"RETURN_PARENTHESES\",\n\t\t\t\t      \"return is not a function, parentheses are not required\\n\" . $herecurr);\n\n\t\t\t} elsif ($spacing !~ /\\s+/) {\n\t\t\t\tERROR(\"SPACING\",\n\t\t\t\t      \"space required before the open parenthesis '('\\n\" . $herecurr);\n\t\t\t}\n\t\t}\n# Return of what appears to be an errno should normally be -'ve\n\t\tif ($line =~ /^.\\s*return\\s*(E[A-Z]*)\\s*;/) {\n\t\t\tmy $name = $1;\n\t\t\tif ($name ne 'EOF' && $name ne 'ERROR') {\n\t\t\t\tWARN(\"USE_NEGATIVE_ERRNO\",\n\t\t\t\t     \"return of an errno should typically be -ve (return -$1)\\n\" . $herecurr);\n\t\t\t}\n\t\t}\n\n# Need a space before open parenthesis after if, while etc\n\t\tif ($line=~/\\b(if|while|for|switch)\\(/) {\n\t\t\tERROR(\"SPACING\", \"space required before the open parenthesis '('\\n\" . 
$herecurr);\n\t\t}\n\n# Check for illegal assignment in if conditional -- and check for trailing\n# statements after the conditional.\n\t\tif ($line =~ /do\\s*(?!{)/) {\n\t\t\t($stat, $cond, $line_nr_next, $remain_next, $off_next) =\n\t\t\t\tctx_statement_block($linenr, $realcnt, 0)\n\t\t\t\t\tif (!defined $stat);\n\t\t\tmy ($stat_next) = ctx_statement_block($line_nr_next,\n\t\t\t\t\t\t$remain_next, $off_next);\n\t\t\t$stat_next =~ s/\\n./\\n /g;\n\t\t\t##print \"stat<$stat> stat_next<$stat_next>\\n\";\n\n\t\t\tif ($stat_next =~ /^\\s*while\\b/) {\n\t\t\t\t# If the statement carries leading newlines,\n\t\t\t\t# then count those as offsets.\n\t\t\t\tmy ($whitespace) =\n\t\t\t\t\t($stat_next =~ /^((?:\\s*\\n[+-])*\\s*)/s);\n\t\t\t\tmy $offset =\n\t\t\t\t\tstatement_rawlines($whitespace) - 1;\n\n\t\t\t\t$suppress_whiletrailers{$line_nr_next +\n\t\t\t\t\t\t\t\t$offset} = 1;\n\t\t\t}\n\t\t}\n\t\tif (!defined $suppress_whiletrailers{$linenr} &&\n\t\t    $line =~ /\\b(?:if|while|for)\\s*\\(/ && $line !~ /^.\\s*#/) {\n\t\t\tmy ($s, $c) = ($stat, $cond);\n\n\t\t\tif ($c =~ /\\bif\\s*\\(.*[^<>!=]=[^=].*/s) {\n\t\t\t\tERROR(\"ASSIGN_IN_IF\",\n\t\t\t\t      \"do not use assignment in if condition\\n\" . $herecurr);\n\t\t\t}\n\n\t\t\t# Find out what is on the end of the line after the\n\t\t\t# conditional.\n\t\t\tsubstr($s, 0, length($c), '');\n\t\t\t$s =~ s/\\n.*//g;\n\t\t\t$s =~ s/$;//g; \t# Remove any comments\n\t\t\tif (length($c) && $s !~ /^\\s*{?\\s*\\\\*\\s*$/ &&\n\t\t\t    $c !~ /}\\s*while\\s*/)\n\t\t\t{\n\t\t\t\t# Find out how long the conditional actually is.\n\t\t\t\tmy @newlines = ($c =~ /\\n/gs);\n\t\t\t\tmy $cond_lines = 1 + $#newlines;\n\t\t\t\tmy $stat_real = '';\n\n\t\t\t\t$stat_real = raw_line($linenr, $cond_lines)\n\t\t\t\t\t\t\t. 
\"\\n\" if ($cond_lines);\n\t\t\t\tif (defined($stat_real) && $cond_lines > 1) {\n\t\t\t\t\t$stat_real = \"[...]\\n$stat_real\";\n\t\t\t\t}\n\n\t\t\t\tERROR(\"TRAILING_STATEMENTS\",\n\t\t\t\t      \"trailing statements should be on next line\\n\" . $herecurr . $stat_real);\n\t\t\t}\n\t\t}\n\n# Check for bitwise tests written as boolean\n\t\tif ($line =~ /\n\t\t\t(?:\n\t\t\t\t(?:\\[|\\(|\\&\\&|\\|\\|)\n\t\t\t\t\\s*0[xX][0-9]+\\s*\n\t\t\t\t(?:\\&\\&|\\|\\|)\n\t\t\t|\n\t\t\t\t(?:\\&\\&|\\|\\|)\n\t\t\t\t\\s*0[xX][0-9]+\\s*\n\t\t\t\t(?:\\&\\&|\\|\\||\\)|\\])\n\t\t\t)/x)\n\t\t{\n\t\t\tWARN(\"HEXADECIMAL_BOOLEAN_TEST\",\n\t\t\t     \"boolean test with hexadecimal, perhaps just 1 \\& or \\|?\\n\" . $herecurr);\n\t\t}\n\n# if and else should not have general statements after it\n\t\tif ($line =~ /^.\\s*(?:}\\s*)?else\\b(.*)/) {\n\t\t\tmy $s = $1;\n\t\t\t$s =~ s/$;//g; \t# Remove any comments\n\t\t\tif ($s !~ /^\\s*(?:\\sif|(?:{|)\\s*\\\\?\\s*$)/) {\n\t\t\t\tERROR(\"TRAILING_STATEMENTS\",\n\t\t\t\t      \"trailing statements should be on next line\\n\" . $herecurr);\n\t\t\t}\n\t\t}\n# if should not continue a brace\n\t\tif ($line =~ /}\\s*if\\b/) {\n\t\t\tERROR(\"TRAILING_STATEMENTS\",\n\t\t\t      \"trailing statements should be on next line\\n\" .\n\t\t\t\t$herecurr);\n\t\t}\n# case and default should not have general statements after them\n\t\tif ($line =~ /^.\\s*(?:case\\s*.*|default\\s*):/g &&\n\t\t    $line !~ /\\G(?:\n\t\t\t(?:\\s*$;*)(?:\\s*{)?(?:\\s*$;*)(?:\\s*\\\\)?\\s*$|\n\t\t\t\\s*return\\s+\n\t\t    )/xg)\n\t\t{\n\t\t\tERROR(\"TRAILING_STATEMENTS\",\n\t\t\t      \"trailing statements should be on next line\\n\" . $herecurr);\n\t\t}\n\n\t\t# Check for }<nl>else {, these must be at the same\n\t\t# indent level to be relevant to each other.\n\t\tif ($prevline=~/}\\s*$/ and $line=~/^.\\s*else\\s*/ and\n\t\t\t\t\t\t$previndent == $indent) {\n\t\t\tERROR(\"ELSE_AFTER_BRACE\",\n\t\t\t      \"else should follow close brace '}'\\n\" . 
$hereprev);\n\t\t}\n\n\t\tif ($prevline=~/}\\s*$/ and $line=~/^.\\s*while\\s*/ and\n\t\t\t\t\t\t$previndent == $indent) {\n\t\t\tmy ($s, $c) = ctx_statement_block($linenr, $realcnt, 0);\n\n\t\t\t# Find out what is on the end of the line after the\n\t\t\t# conditional.\n\t\t\tsubstr($s, 0, length($c), '');\n\t\t\t$s =~ s/\\n.*//g;\n\n\t\t\tif ($s =~ /^\\s*;/) {\n\t\t\t\tERROR(\"WHILE_AFTER_BRACE\",\n\t\t\t\t      \"while should follow close brace '}'\\n\" . $hereprev);\n\t\t\t}\n\t\t}\n\n#studly caps, commented out until figure out how to distinguish between use of existing and adding new\n#\t\tif (($line=~/[\\w_][a-z\\d]+[A-Z]/) and !($line=~/print/)) {\n#\t\t    print \"No studly caps, use _\\n\";\n#\t\t    print \"$herecurr\";\n#\t\t    $clean = 0;\n#\t\t}\n\n#no spaces allowed after \\ in define\n\t\tif ($line=~/\\#\\s*define.*\\\\\\s$/) {\n\t\t\tWARN(\"WHITESPACE_AFTER_LINE_CONTINUATION\",\n\t\t\t     \"Whitepspace after \\\\ makes next lines useless\\n\" . $herecurr);\n\t\t}\n\n#warn if <asm/foo.h> is #included and <linux/foo.h> is available (uses RAW line)\n\t\tif ($tree && $rawline =~ m{^.\\s*\\#\\s*include\\s*\\<asm\\/(.*)\\.h\\>}) {\n\t\t\tmy $file = \"$1.h\";\n\t\t\tmy $checkfile = \"include/linux/$file\";\n\t\t\tif (-f \"$root/$checkfile\" &&\n\t\t\t    $realfile ne $checkfile &&\n\t\t\t    $1 !~ /$allowed_asm_includes/)\n\t\t\t{\n\t\t\t\tif ($realfile =~ m{^arch/}) {\n\t\t\t\t\tCHK(\"ARCH_INCLUDE_LINUX\",\n\t\t\t\t\t    \"Consider using #include <linux/$file> instead of <asm/$file>\\n\" . $herecurr);\n\t\t\t\t} else {\n\t\t\t\t\tWARN(\"INCLUDE_LINUX\",\n\t\t\t\t\t     \"Use #include <linux/$file> instead of <asm/$file>\\n\" . 
$herecurr);\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n# multi-statement macros should be enclosed in a do while loop, grab the\n# first statement and ensure its the whole macro if its not enclosed\n# in a known good container\n\t\tif ($realfile !~ m@/vmlinux.lds.h$@ &&\n\t\t    $line =~ /^.\\s*\\#\\s*define\\s*$Ident(\\()?/) {\n\t\t\tmy $ln = $linenr;\n\t\t\tmy $cnt = $realcnt;\n\t\t\tmy ($off, $dstat, $dcond, $rest);\n\t\t\tmy $ctx = '';\n\t\t\t($dstat, $dcond, $ln, $cnt, $off) =\n\t\t\t\tctx_statement_block($linenr, $realcnt, 0);\n\t\t\t$ctx = $dstat;\n\t\t\t#print \"dstat<$dstat> dcond<$dcond> cnt<$cnt> off<$off>\\n\";\n\t\t\t#print \"LINE<$lines[$ln-1]> len<\" . length($lines[$ln-1]) . \"\\n\";\n\n\t\t\t$dstat =~ s/^.\\s*\\#\\s*define\\s+$Ident(?:\\([^\\)]*\\))?\\s*//;\n\t\t\t$dstat =~ s/$;//g;\n\t\t\t$dstat =~ s/\\\\\\n.//g;\n\t\t\t$dstat =~ s/^\\s*//s;\n\t\t\t$dstat =~ s/\\s*$//s;\n\n\t\t\t# Flatten any parentheses and braces\n\t\t\twhile ($dstat =~ s/\\([^\\(\\)]*\\)/1/ ||\n\t\t\t       $dstat =~ s/\\{[^\\{\\}]*\\}/1/ ||\n\t\t\t       $dstat =~ s/\\[[^\\[\\]]*\\]/1/)\n\t\t\t{\n\t\t\t}\n\n\t\t\tmy $exceptions = qr{\n\t\t\t\t$Declare|\n\t\t\t\tmodule_param_named|\n\t\t\t\tMODULE_PARAM_DESC|\n\t\t\t\tDECLARE_PER_CPU|\n\t\t\t\tDEFINE_PER_CPU|\n\t\t\t\t__typeof__\\(|\n\t\t\t\tunion|\n\t\t\t\tstruct|\n\t\t\t\t\\.$Ident\\s*=\\s*|\n\t\t\t\t^\\\"|\\\"$\n\t\t\t}x;\n\t\t\t#print \"REST<$rest> dstat<$dstat> ctx<$ctx>\\n\";\n\t\t\tif ($dstat ne '' &&\n\t\t\t    $dstat !~ /^(?:$Ident|-?$Constant),$/ &&\t\t\t# 10, // foo(),\n\t\t\t    $dstat !~ /^(?:$Ident|-?$Constant);$/ &&\t\t\t# foo();\n\t\t\t    $dstat !~ /^(?:$Ident|-?$Constant)$/ &&\t\t\t# 10 // foo()\n\t\t\t    $dstat !~ /$exceptions/ &&\n\t\t\t    $dstat !~ /^\\.$Ident\\s*=/ &&\t\t\t\t# .foo =\n\t\t\t    $dstat !~ /^do\\s*$Constant\\s*while\\s*$Constant;?$/ &&\t# do {...} while (...); // do {...} while (...)\n\t\t\t    $dstat !~ /^for\\s*$Constant$/ &&\t\t\t\t# for (...)\n\t\t\t    $dstat !~ 
/^for\\s*$Constant\\s+(?:$Ident|-?$Constant)$/ &&\t# for (...) bar()\n\t\t\t    $dstat !~ /^do\\s*{/ &&\t\t\t\t\t# do {...\n\t\t\t    $dstat !~ /^\\({/)\t\t\t\t\t\t# ({...\n\t\t\t{\n\t\t\t\t$ctx =~ s/\\n*$//;\n\t\t\t\tmy $herectx = $here . \"\\n\";\n\t\t\t\tmy $cnt = statement_rawlines($ctx);\n\n\t\t\t\tfor (my $n = 0; $n < $cnt; $n++) {\n\t\t\t\t\t$herectx .= raw_line($linenr, $n) . \"\\n\";\n\t\t\t\t}\n\n\t\t\t\tif ($dstat =~ /;/) {\n\t\t\t\t\tERROR(\"MULTISTATEMENT_MACRO_USE_DO_WHILE\",\n\t\t\t\t\t      \"Macros with multiple statements should be enclosed in a do - while loop\\n\" . \"$herectx\");\n\t\t\t\t} else {\n\t\t\t\t\tERROR(\"COMPLEX_MACRO\",\n\t\t\t\t\t      \"Macros with complex values should be enclosed in parenthesis\\n\" . \"$herectx\");\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n# make sure symbols are always wrapped with VMLINUX_SYMBOL() ...\n# all assignments may have only one of the following with an assignment:\n#\t.\n#\tALIGN(...)\n#\tVMLINUX_SYMBOL(...)\n\t\tif ($realfile eq 'vmlinux.lds.h' && $line =~ /(?:(?:^|\\s)$Ident\\s*=|=\\s*$Ident(?:\\s|$))/) {\n\t\t\tWARN(\"MISSING_VMLINUX_SYMBOL\",\n\t\t\t     \"vmlinux.lds.h needs VMLINUX_SYMBOL() around C-visible symbols\\n\" . $herecurr);\n\t\t}\n\n# check for redundant bracing round if etc\n\t\tif ($line =~ /(^.*)\\bif\\b/ && $1 !~ /else\\s*$/) {\n\t\t\tmy ($level, $endln, @chunks) =\n\t\t\t\tctx_statement_full($linenr, $realcnt, 1);\n\t\t\t#print \"chunks<$#chunks> linenr<$linenr> endln<$endln> level<$level>\\n\";\n\t\t\t#print \"APW: <<$chunks[1][0]>><<$chunks[1][1]>>\\n\";\n\t\t\tif ($#chunks > 0 && $level == 0) {\n\t\t\t\tmy $allowed = 0;\n\t\t\t\tmy $seen = 0;\n\t\t\t\tmy $herectx = $here . 
\"\\n\";\n\t\t\t\tmy $ln = $linenr - 1;\n\t\t\t\tfor my $chunk (@chunks) {\n\t\t\t\t\tmy ($cond, $block) = @{$chunk};\n\n\t\t\t\t\t# If the condition carries leading newlines, then count those as offsets.\n\t\t\t\t\tmy ($whitespace) = ($cond =~ /^((?:\\s*\\n[+-])*\\s*)/s);\n\t\t\t\t\tmy $offset = statement_rawlines($whitespace) - 1;\n\n\t\t\t\t\t#print \"COND<$cond> whitespace<$whitespace> offset<$offset>\\n\";\n\n\t\t\t\t\t# We have looked at and allowed this specific line.\n\t\t\t\t\t$suppress_ifbraces{$ln + $offset} = 1;\n\n\t\t\t\t\t$herectx .= \"$rawlines[$ln + $offset]\\n[...]\\n\";\n\t\t\t\t\t$ln += statement_rawlines($block) - 1;\n\n\t\t\t\t\tsubstr($block, 0, length($cond), '');\n\n\t\t\t\t\t$seen++ if ($block =~ /^\\s*{/);\n\n\t\t\t\t\t#print \"cond<$cond> block<$block> allowed<$allowed>\\n\";\n\t\t\t\t\tif (statement_lines($cond) > 1) {\n\t\t\t\t\t\t#print \"APW: ALLOWED: cond<$cond>\\n\";\n\t\t\t\t\t\t$allowed = 1;\n\t\t\t\t\t}\n\t\t\t\t\tif ($block =~/\\b(?:if|for|while)\\b/) {\n\t\t\t\t\t\t#print \"APW: ALLOWED: block<$block>\\n\";\n\t\t\t\t\t\t$allowed = 1;\n\t\t\t\t\t}\n\t\t\t\t\tif (statement_block_size($block) > 1) {\n\t\t\t\t\t\t#print \"APW: ALLOWED: lines block<$block>\\n\";\n\t\t\t\t\t\t$allowed = 1;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif ($seen && !$allowed) {\n\t\t\t\t\tWARN(\"BRACES\",\n\t\t\t\t\t     \"braces {} are not necessary for any arm of this statement\\n\" . 
$herectx);\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif (!defined $suppress_ifbraces{$linenr - 1} &&\n\t\t\t\t\t$line =~ /\\b(if|while|for|else)\\b/) {\n\t\t\tmy $allowed = 0;\n\n\t\t\t# Check the pre-context.\n\t\t\tif (substr($line, 0, $-[0]) =~ /(\\}\\s*)$/) {\n\t\t\t\t#print \"APW: ALLOWED: pre<$1>\\n\";\n\t\t\t\t$allowed = 1;\n\t\t\t}\n\n\t\t\tmy ($level, $endln, @chunks) =\n\t\t\t\tctx_statement_full($linenr, $realcnt, $-[0]);\n\n\t\t\t# Check the condition.\n\t\t\tmy ($cond, $block) = @{$chunks[0]};\n\t\t\t#print \"CHECKING<$linenr> cond<$cond> block<$block>\\n\";\n\t\t\tif (defined $cond) {\n\t\t\t\tsubstr($block, 0, length($cond), '');\n\t\t\t}\n\t\t\tif (statement_lines($cond) > 1) {\n\t\t\t\t#print \"APW: ALLOWED: cond<$cond>\\n\";\n\t\t\t\t$allowed = 1;\n\t\t\t}\n\t\t\tif ($block =~/\\b(?:if|for|while)\\b/) {\n\t\t\t\t#print \"APW: ALLOWED: block<$block>\\n\";\n\t\t\t\t$allowed = 1;\n\t\t\t}\n\t\t\tif (statement_block_size($block) > 1) {\n\t\t\t\t#print \"APW: ALLOWED: lines block<$block>\\n\";\n\t\t\t\t$allowed = 1;\n\t\t\t}\n\t\t\t# Check the post-context.\n\t\t\tif (defined $chunks[1]) {\n\t\t\t\tmy ($cond, $block) = @{$chunks[1]};\n\t\t\t\tif (defined $cond) {\n\t\t\t\t\tsubstr($block, 0, length($cond), '');\n\t\t\t\t}\n\t\t\t\tif ($block =~ /^\\s*\\{/) {\n\t\t\t\t\t#print \"APW: ALLOWED: chunk-1 block<$block>\\n\";\n\t\t\t\t\t$allowed = 1;\n\t\t\t\t}\n\t\t\t}\n\t\t\tif ($level == 0 && $block =~ /^\\s*\\{/ && !$allowed) {\n\t\t\t\tmy $herectx = $here . \"\\n\";\n\t\t\t\tmy $cnt = statement_rawlines($block);\n\n\t\t\t\tfor (my $n = 0; $n < $cnt; $n++) {\n\t\t\t\t\t$herectx .= raw_line($linenr, $n) . \"\\n\";\n\t\t\t\t}\n\n\t\t\t\tWARN(\"BRACES\",\n\t\t\t\t     \"braces {} are not necessary for single statement blocks\\n\" . 
$herectx);\n\t\t\t}\n\t\t}\n\n# don't include deprecated include files (uses RAW line)\n\t\tfor my $inc (keys %dep_includes) {\n\t\t\tif ($rawline =~ m@^.\\s*\\#\\s*include\\s*\\<$inc>@) {\n\t\t\t\tERROR(\"DEPRECATED_INCLUDE\",\n\t\t\t\t      \"Don't use <$inc>, include \" .\n\t\t\t\t      \"$dep_includes{$inc} instead\\n\" .\n\t\t\t\t      $herecurr);\n\t\t\t}\n\t\t}\n\n# don't use deprecated functions\n\t\tfor my $func (keys %dep_functions) {\n\t\t\tif ($line =~ /\\b$func\\b/) {\n\t\t\t\tERROR(\"DEPRECATED_FUNCTION\",\n\t\t\t\t      \"$func is deprecated, \" .\n\t\t\t\t      \"use $dep_functions{$func} instead\\n\" .\n\t\t\t\t      $herecurr);\n\t\t\t}\n\t\t}\n\n# no volatiles please\n\t\tmy $asm_volatile = qr{\\b(__asm__|asm)\\s+(__volatile__|volatile)\\b};\n\t\tif ($line =~ /\\bvolatile\\b/ && $line !~ /$asm_volatile/) {\n\t\t\tWARN(\"VOLATILE\",\n\t\t\t     \"Use of volatile is usually wrong: see Documentation/volatile-considered-harmful.txt\\n\" . $herecurr);\n\t\t}\n\n# warn about #if 0\n\t\tif ($line =~ /^.\\s*\\#\\s*if\\s+0\\b/) {\n\t\t\tCHK(\"REDUNDANT_CODE\",\n\t\t\t    \"if this code is redundant consider removing it\\n\" .\n\t\t\t\t$herecurr);\n\t\t}\n\n# check for needless kfree() checks\n\t\tif ($prevline =~ /\\bif\\s*\\(([^\\)]*)\\)/) {\n\t\t\tmy $expr = $1;\n\t\t\tif ($line =~ /\\bkfree\\(\\Q$expr\\E\\);/) {\n\t\t\t\tWARN(\"NEEDLESS_KFREE\",\n\t\t\t\t     \"kfree(NULL) is safe this check is probably not required\\n\" . $hereprev);\n\t\t\t}\n\t\t}\n# check for needless usb_free_urb() checks\n\t\tif ($prevline =~ /\\bif\\s*\\(([^\\)]*)\\)/) {\n\t\t\tmy $expr = $1;\n\t\t\tif ($line =~ /\\busb_free_urb\\(\\Q$expr\\E\\);/) {\n\t\t\t\tWARN(\"NEEDLESS_USB_FREE_URB\",\n\t\t\t\t     \"usb_free_urb(NULL) is safe this check is probably not required\\n\" . $hereprev);\n\t\t\t}\n\t\t}\n\n# prefer usleep_range over udelay\n\t\tif ($line =~ /\\budelay\\s*\\(\\s*(\\w+)\\s*\\)/) {\n\t\t\t# ignore udelay's < 10, however\n\t\t\tif (! 
(($1 =~ /(\\d+)/) && ($1 < 10)) ) {\n\t\t\t\tCHK(\"USLEEP_RANGE\",\n\t\t\t\t    \"usleep_range is preferred over udelay; see Documentation/timers/timers-howto.txt\\n\" . $line);\n\t\t\t}\n\t\t}\n\n# warn about unexpectedly long msleep's\n\t\tif ($line =~ /\\bmsleep\\s*\\((\\d+)\\);/) {\n\t\t\tif ($1 < 20) {\n\t\t\t\tWARN(\"MSLEEP\",\n\t\t\t\t     \"msleep < 20ms can sleep for up to 20ms; see Documentation/timers/timers-howto.txt\\n\" . $line);\n\t\t\t}\n\t\t}\n\n# warn about #ifdefs in C files\n#\t\tif ($line =~ /^.\\s*\\#\\s*if(|n)def/ && ($realfile =~ /\\.c$/)) {\n#\t\t\tprint \"#ifdef in C files should be avoided\\n\";\n#\t\t\tprint \"$herecurr\";\n#\t\t\t$clean = 0;\n#\t\t}\n\n# warn about spacing in #ifdefs\n\t\tif ($line =~ /^.\\s*\\#\\s*(ifdef|ifndef|elif)\\s\\s+/) {\n\t\t\tERROR(\"SPACING\",\n\t\t\t      \"exactly one space required after that #$1\\n\" . $herecurr);\n\t\t}\n\n# check for spinlock_t definitions without a comment.\n\t\tif ($line =~ /^.\\s*(struct\\s+mutex|spinlock_t)\\s+\\S+;/ ||\n\t\t    $line =~ /^.\\s*(DEFINE_MUTEX)\\s*\\(/) {\n\t\t\tmy $which = $1;\n\t\t\tif (!ctx_has_comment($first_line, $linenr)) {\n\t\t\t\tCHK(\"UNCOMMENTED_DEFINITION\",\n\t\t\t\t    \"$1 definition without comment\\n\" . $herecurr);\n\t\t\t}\n\t\t}\n# check for memory barriers without a comment.\n\t\tif ($line =~ /\\b(mb|rmb|wmb|read_barrier_depends|smp_mb|smp_rmb|smp_wmb|smp_read_barrier_depends)\\(/) {\n\t\t\tif (!ctx_has_comment($first_line, $linenr)) {\n\t\t\t\tCHK(\"MEMORY_BARRIER\",\n\t\t\t\t    \"memory barrier without comment\\n\" . $herecurr);\n\t\t\t}\n\t\t}\n# check of hardware specific defines\n\t\tif ($line =~ m@^.\\s*\\#\\s*if.*\\b(__i386__|__powerpc64__|__sun__|__s390x__)\\b@ && $realfile !~ m@include/asm-@) {\n\t\t\tCHK(\"ARCH_DEFINES\",\n\t\t\t    \"architecture specific defines should be avoided\\n\" .  
$herecurr);\n\t\t}\n\n# Check that the storage class is at the beginning of a declaration\n\t\tif ($line =~ /\\b$Storage\\b/ && $line !~ /^.\\s*$Storage\\b/) {\n\t\t\tWARN(\"STORAGE_CLASS\",\n\t\t\t     \"storage class should be at the beginning of the declaration\\n\" . $herecurr)\n\t\t}\n\n# check the location of the inline attribute, that it is between\n# storage class and type.\n\t\tif ($line =~ /\\b$Type\\s+$Inline\\b/ ||\n\t\t    $line =~ /\\b$Inline\\s+$Storage\\b/) {\n\t\t\tERROR(\"INLINE_LOCATION\",\n\t\t\t      \"inline keyword should sit between storage class and type\\n\" . $herecurr);\n\t\t}\n\n# Check for __inline__ and __inline, prefer inline\n\t\tif ($line =~ /\\b(__inline__|__inline)\\b/) {\n\t\t\tWARN(\"INLINE\",\n\t\t\t     \"plain inline is preferred over $1\\n\" . $herecurr);\n\t\t}\n\n# Check for __attribute__ packed, prefer __packed\n\t\tif ($line =~ /\\b__attribute__\\s*\\(\\s*\\(.*\\bpacked\\b/) {\n\t\t\tWARN(\"PREFER_PACKED\",\n\t\t\t     \"__packed is preferred over __attribute__((packed))\\n\" . $herecurr);\n\t\t}\n\n# Check for __attribute__ aligned, prefer __aligned\n\t\tif ($line =~ /\\b__attribute__\\s*\\(\\s*\\(.*aligned/) {\n\t\t\tWARN(\"PREFER_ALIGNED\",\n\t\t\t     \"__aligned(size) is preferred over __attribute__((aligned(size)))\\n\" . $herecurr);\n\t\t}\n\n# check for sizeof(&)\n\t\tif ($line =~ /\\bsizeof\\s*\\(\\s*\\&/) {\n\t\t\tWARN(\"SIZEOF_ADDRESS\",\n\t\t\t     \"sizeof(& should be avoided\\n\" . $herecurr);\n\t\t}\n\n# check for line continuations in quoted strings with odd counts of \"\n\t\tif ($rawline =~ /\\\\$/ && $rawline =~ tr/\"/\"/ % 2) {\n\t\t\tWARN(\"LINE_CONTINUATIONS\",\n\t\t\t     \"Avoid line continuations in quoted strings\\n\" . 
$herecurr);\n\t\t}\n\n# Check for misused memsets\n\t\tif (defined $stat &&\n\t\t    $stat =~ /^\\+(?:.*?)\\bmemset\\s*\\(\\s*$FuncArg\\s*,\\s*$FuncArg\\s*\\,\\s*$FuncArg\\s*\\)/s) {\n\n\t\t\tmy $ms_addr = $2;\n\t\t\tmy $ms_val = $8;\n\t\t\tmy $ms_size = $14;\n\n\t\t\tif ($ms_size =~ /^(0x|)0$/i) {\n\t\t\t\tERROR(\"MEMSET\",\n\t\t\t\t      \"memset to 0's uses 0 as the 2nd argument, not the 3rd\\n\" . \"$here\\n$stat\\n\");\n\t\t\t} elsif ($ms_size =~ /^(0x|)1$/i) {\n\t\t\t\tWARN(\"MEMSET\",\n\t\t\t\t     \"single byte memset is suspicious. Swapped 2nd/3rd argument?\\n\" . \"$here\\n$stat\\n\");\n\t\t\t}\n\t\t}\n\n# typecasts on min/max could be min_t/max_t\n\t\tif (defined $stat &&\n\t\t    $stat =~ /^\\+(?:.*?)\\b(min|max)\\s*\\(\\s*$FuncArg\\s*,\\s*$FuncArg\\s*\\)/) {\n\t\t\tif (defined $2 || defined $8) {\n\t\t\t\tmy $call = $1;\n\t\t\t\tmy $cast1 = deparenthesize($2);\n\t\t\t\tmy $arg1 = $3;\n\t\t\t\tmy $cast2 = deparenthesize($8);\n\t\t\t\tmy $arg2 = $9;\n\t\t\t\tmy $cast;\n\n\t\t\t\tif ($cast1 ne \"\" && $cast2 ne \"\") {\n\t\t\t\t\t$cast = \"$cast1 or $cast2\";\n\t\t\t\t} elsif ($cast1 ne \"\") {\n\t\t\t\t\t$cast = $cast1;\n\t\t\t\t} else {\n\t\t\t\t\t$cast = $cast2;\n\t\t\t\t}\n\t\t\t\tWARN(\"MINMAX\",\n\t\t\t\t     \"$call() should probably be ${call}_t($cast, $arg1, $arg2)\\n\" . \"$here\\n$stat\\n\");\n\t\t\t}\n\t\t}\n\n# check for new externs in .c files.\n\t\tif ($realfile =~ /\\.c$/ && defined $stat &&\n\t\t    $stat =~ /^.\\s*(?:extern\\s+)?$Type\\s+($Ident)(\\s*)\\(/s)\n\t\t{\n\t\t\tmy $function_name = $1;\n\t\t\tmy $paren_space = $2;\n\n\t\t\tmy $s = $stat;\n\t\t\tif (defined $cond) {\n\t\t\t\tsubstr($s, 0, length($cond), '');\n\t\t\t}\n\t\t\tif ($s =~ /^\\s*;/ &&\n\t\t\t    $function_name ne 'uninitialized_var')\n\t\t\t{\n\t\t\t\tWARN(\"AVOID_EXTERNS\",\n\t\t\t\t     \"externs should be avoided in .c files\\n\" .  
$herecurr);\n\t\t\t}\n\n\t\t\tif ($paren_space =~ /\\n/) {\n\t\t\t\tWARN(\"FUNCTION_ARGUMENTS\",\n\t\t\t\t     \"arguments for function declarations should follow identifier\\n\" . $herecurr);\n\t\t\t}\n\n\t\t} elsif ($realfile =~ /\\.c$/ && defined $stat &&\n\t\t    $stat =~ /^.\\s*extern\\s+/)\n\t\t{\n\t\t\tWARN(\"AVOID_EXTERNS\",\n\t\t\t     \"externs should be avoided in .c files\\n\" .  $herecurr);\n\t\t}\n\n# checks for new __setup's\n\t\tif ($rawline =~ /\\b__setup\\(\"([^\"]*)\"/) {\n\t\t\tmy $name = $1;\n\n\t\t\tif (!grep(/$name/, @setup_docs)) {\n\t\t\t\tCHK(\"UNDOCUMENTED_SETUP\",\n\t\t\t\t    \"__setup appears un-documented -- check Documentation/kernel-parameters.txt\\n\" . $herecurr);\n\t\t\t}\n\t\t}\n\n# check for pointless casting of kmalloc return\n\t\tif ($line =~ /\\*\\s*\\)\\s*[kv][czm]alloc(_node){0,1}\\b/) {\n\t\t\tWARN(\"UNNECESSARY_CASTS\",\n\t\t\t     \"unnecessary cast may hide bugs, see http://c-faq.com/malloc/mallocnocast.html\\n\" . $herecurr);\n\t\t}\n\n# check for multiple semicolons\n\t\tif ($line =~ /;\\s*;\\s*$/) {\n\t\t    WARN(\"ONE_SEMICOLON\",\n\t\t\t \"Statements terminations use 1 semicolon\\n\" . $herecurr);\n\t\t}\n\n# check for gcc specific __FUNCTION__\n\t\tif ($line =~ /__FUNCTION__/) {\n\t\t\tWARN(\"USE_FUNC\",\n\t\t\t     \"__func__ should be used instead of gcc specific __FUNCTION__\\n\"  . $herecurr);\n\t\t}\n\n# check for semaphores initialized locked\n\t\tif ($line =~ /^.\\s*sema_init.+,\\W?0\\W?\\)/) {\n\t\t\tWARN(\"CONSIDER_COMPLETION\",\n\t\t\t     \"consider using a completion\\n\" . $herecurr);\n\n\t\t}\n# recommend kstrto* over simple_strto* and strict_strto*\n\t\tif ($line =~ /\\b((simple|strict)_(strto(l|ll|ul|ull)))\\s*\\(/) {\n\t\t\tWARN(\"CONSIDER_KSTRTO\",\n\t\t\t     \"$1 is obsolete, use k$3 instead\\n\" . 
$herecurr);\n\t\t}\n# check for __initcall(), use device_initcall() explicitly please\n\t\tif ($line =~ /^.\\s*__initcall\\s*\\(/) {\n\t\t\tWARN(\"USE_DEVICE_INITCALL\",\n\t\t\t     \"please use device_initcall() instead of __initcall()\\n\" . $herecurr);\n\t\t}\n# check for various ops structs, ensure they are const.\n\t\tmy $struct_ops = qr{acpi_dock_ops|\n\t\t\t\taddress_space_operations|\n\t\t\t\tbacklight_ops|\n\t\t\t\tblock_device_operations|\n\t\t\t\tdentry_operations|\n\t\t\t\tdev_pm_ops|\n\t\t\t\tdma_map_ops|\n\t\t\t\textent_io_ops|\n\t\t\t\tfile_lock_operations|\n\t\t\t\tfile_operations|\n\t\t\t\thv_ops|\n\t\t\t\tide_dma_ops|\n\t\t\t\tintel_dvo_dev_ops|\n\t\t\t\titem_operations|\n\t\t\t\tiwl_ops|\n\t\t\t\tkgdb_arch|\n\t\t\t\tkgdb_io|\n\t\t\t\tkset_uevent_ops|\n\t\t\t\tlock_manager_operations|\n\t\t\t\tmicrocode_ops|\n\t\t\t\tmtrr_ops|\n\t\t\t\tneigh_ops|\n\t\t\t\tnlmsvc_binding|\n\t\t\t\tpci_raw_ops|\n\t\t\t\tpipe_buf_operations|\n\t\t\t\tplatform_hibernation_ops|\n\t\t\t\tplatform_suspend_ops|\n\t\t\t\tproto_ops|\n\t\t\t\trpc_pipe_ops|\n\t\t\t\tseq_operations|\n\t\t\t\tsnd_ac97_build_ops|\n\t\t\t\tsoc_pcmcia_socket_ops|\n\t\t\t\tstacktrace_ops|\n\t\t\t\tsysfs_ops|\n\t\t\t\ttty_operations|\n\t\t\t\tusb_mon_operations|\n\t\t\t\twd_ops}x;\n\t\tif ($line !~ /\\bconst\\b/ &&\n\t\t    $line =~ /\\bstruct\\s+($struct_ops)\\b/) {\n\t\t\tWARN(\"CONST_STRUCT\",\n\t\t\t     \"struct $1 should normally be const\\n\" .\n\t\t\t\t$herecurr);\n\t\t}\n\n# use of NR_CPUS is usually wrong\n# ignore definitions of NR_CPUS and usage to define arrays as likely right\n\t\tif ($line =~ /\\bNR_CPUS\\b/ &&\n\t\t    $line !~ /^.\\s*\\s*#\\s*if\\b.*\\bNR_CPUS\\b/ &&\n\t\t    $line !~ /^.\\s*\\s*#\\s*define\\b.*\\bNR_CPUS\\b/ &&\n\t\t    $line !~ /^.\\s*$Declare\\s.*\\[[^\\]]*NR_CPUS[^\\]]*\\]/ &&\n\t\t    $line !~ /\\[[^\\]]*\\.\\.\\.[^\\]]*NR_CPUS[^\\]]*\\]/ &&\n\t\t    $line !~ /\\[[^\\]]*NR_CPUS[^\\]]*\\.\\.\\.[^\\]]*\\]/)\n\t\t{\n\t\t\tWARN(\"NR_CPUS\",\n\t\t\t     \"usage of 
NR_CPUS is often wrong - consider using cpu_possible(), num_possible_cpus(), for_each_possible_cpu(), etc\\n\" . $herecurr);\n\t\t}\n\n# check for %L{u,d,i} in strings\n\t\tmy $string;\n\t\twhile ($line =~ /(?:^|\")([X\\t]*)(?:\"|$)/g) {\n\t\t\t$string = substr($rawline, $-[1], $+[1] - $-[1]);\n\t\t\t$string =~ s/%%/__/g;\n\t\t\tif ($string =~ /(?<!%)%L[udi]/) {\n\t\t\t\tWARN(\"PRINTF_L\",\n\t\t\t\t     \"\\%Ld/%Lu are not-standard C, use %lld/%llu\\n\" . $herecurr);\n\t\t\t\tlast;\n\t\t\t}\n\t\t}\n\n# whine mightly about in_atomic\n\t\tif ($line =~ /\\bin_atomic\\s*\\(/) {\n\t\t\tif ($realfile =~ m@^drivers/@) {\n\t\t\t\tERROR(\"IN_ATOMIC\",\n\t\t\t\t      \"do not use in_atomic in drivers\\n\" . $herecurr);\n\t\t\t} elsif ($realfile !~ m@^kernel/@) {\n\t\t\t\tWARN(\"IN_ATOMIC\",\n\t\t\t\t     \"use of in_atomic() is incorrect outside core kernel code\\n\" . $herecurr);\n\t\t\t}\n\t\t}\n\n# check for lockdep_set_novalidate_class\n\t\tif ($line =~ /^.\\s*lockdep_set_novalidate_class\\s*\\(/ ||\n\t\t    $line =~ /__lockdep_no_validate__\\s*\\)/ ) {\n\t\t\tif ($realfile !~ m@^kernel/lockdep@ &&\n\t\t\t    $realfile !~ m@^include/linux/lockdep@ &&\n\t\t\t    $realfile !~ m@^drivers/base/core@) {\n\t\t\t\tERROR(\"LOCKDEP\",\n\t\t\t\t      \"lockdep_no_validate class is reserved for device->mutex.\\n\" . $herecurr);\n\t\t\t}\n\t\t}\n\n\t\tif ($line =~ /debugfs_create_file.*S_IWUGO/ ||\n\t\t    $line =~ /DEVICE_ATTR.*S_IWUGO/ ) {\n\t\t\tWARN(\"EXPORTED_WORLD_WRITABLE\",\n\t\t\t     \"Exporting world writable files is usually an error. Consider more restrictive permissions.\\n\" . 
$herecurr);\n\t\t}\n\t}\n\n\t# If we have no input at all, then there is nothing to report on\n\t# so just keep quiet.\n\tif ($#rawlines == -1) {\n\t\texit(0);\n\t}\n\n\t# In mailback mode only produce a report in the negative, for\n\t# things that appear to be patches.\n\tif ($mailback && ($clean == 1 || !$is_patch)) {\n\t\texit(0);\n\t}\n\n\t# This is not a patch, and we are are in 'no-patch' mode so\n\t# just keep quiet.\n\tif (!$chk_patch && !$is_patch) {\n\t\texit(0);\n\t}\n\n\tif (!$is_patch) {\n\t\tERROR(\"NOT_UNIFIED_DIFF\",\n\t\t      \"Does not appear to be a unified-diff format patch\\n\");\n\t}\n\tif ($is_patch && $chk_signoff && $signoff == 0) {\n\t\tERROR(\"MISSING_SIGN_OFF\",\n\t\t      \"Missing Signed-off-by: line(s)\\n\");\n\t}\n\n\tprint report_dump();\n\tif ($summary && !($clean == 1 && $quiet == 1)) {\n\t\tprint \"$filename \" if ($summary_file);\n\t\tprint \"total: $cnt_error errors, $cnt_warn warnings, \" .\n\t\t\t(($check)? \"$cnt_chk checks, \" : \"\") .\n\t\t\t\"$cnt_lines lines checked\\n\";\n\t\tprint \"\\n\" if ($quiet == 0);\n\t}\n\n\tif ($quiet == 0) {\n\t\t# If there were whitespace errors which cleanpatch can fix\n\t\t# then suggest that.\n\t\tif ($rpt_cleaners) {\n\t\t\tprint \"NOTE: whitespace errors detected, you may wish to use scripts/cleanpatch or\\n\";\n\t\t\tprint \"      scripts/cleanfile\\n\\n\";\n\t\t\t$rpt_cleaners = 0;\n\t\t}\n\t}\n\n\tif ((keys %ignore_type) && ($quiet == 0)) {\n        print \"NOTE: Ignored message types:\";\n        foreach my $ignore (sort keys %ignore_type) {\n        print \" $ignore\";\n        }\n        print \"\\n\";\n        print \"\\n\" if ($quiet == 0);\n\t}\n\n\tif ($clean == 1 && $quiet == 0) {\n\t\tprint \"$vname has no obvious style problems and is ready for submission.\\n\"\n\t}\n\tif ($clean == 0 && $quiet == 0) {\n\t\tprint << \"EOM\";\n$vname has style problems, please review.\n\nIf any of these errors are false positives, please report\nthem to the maintainer, see CHECKPATCH in 
MAINTAINERS.\nEOM\n\t}\n\n\treturn $clean;\n}\n"
  },
  {
    "path": "scripts/cmd2man.sh",
    "content": "#!/bin/bash\n\ncmd=$1\ndescr=$2\nseealso=$3\n\nif [[ -z $2 ]]; then\n    echo \"Missing command description\" >&2\n    exit 1\nfi\n\n# add name and descriptions\necho \"NAME\"\necho $(basename $cmd) \"-\" $descr\n# removes special color characters\n# change \"Section: text\" to \"Section:\\n  text\"\n# change \"Section:\" to \"SECTION\"\n# change \"USAGE\" to \"SYNOPSIS\"\n# Format option description\n$cmd --help |  sed -r \"s/\\x1B\\[([0-9]{1,2}(;[0-9]{1,2})?)?[m|K]//g\" |\n     sed -e \"s#^\\([A-Z][a-z _\\-]*:\\) \\(.*\\)#\\1\\n    \\2#\" |\n     sed -e \"s#^\\([A-Z][a-z _\\-]*\\):#\\U\\1#\" |\n     sed -e \"s#\\[\\(=[a-zA-Z0-9]*\\)(#\\1(#g\" |\n     sed -e \"s#^USAGE\\$#SYNOPSIS#\" |\n\tperl -ne 'if (/^(\\s+-[^\\n]+)$/) { if ($opt == 1) {print \"\\n\\n$1\"} else { print \"\\n$1\" }  $opt=1} elsif ($opt==1) {if ($_ =~ /^\\b*([^\\b]*)/) {print $1} else {print $_} $opt=0;} elsif ($opt == 2) {if ($_ =~ /^\\b*([^\\b]*)/) {print \"\\n$1\"} else {print $_; $opt=0;}} elsif (/^(\\s+args)$/) {print \"\\n$1\"; $opt=2} else { print $_ }'\n\nif [[ -n $seealso ]]; then\n    echo \"SEE ALSO\"\n    echo \"   $seealso\"\nfi\n"
  },
  {
    "path": "scripts/code_format.sh",
    "content": "f=$1\n\nif [ ! -f $f ]; then\n    echo \"$f: file not found\"\n    exit 1\nfi\n\n# remove extra space after parenthesis\nsed -e 's#( #(#g' $f | sed -e 's# )#)#g'\n"
  },
  {
    "path": "scripts/commit-msg",
    "content": "#!/bin/sh\n# From Gerrit Code Review 2.8.5\n#\n# Part of Gerrit Code Review (http://code.google.com/p/gerrit/)\n#\n# Copyright (C) 2009 The Android Open Source Project\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nunset GREP_OPTIONS\n\nCHANGE_ID_AFTER=\"Bug|Issue\"\nMSG=\"$1\"\n\n# Check for, and add if missing, a unique Change-Id\n#\nadd_ChangeId() {\n\tclean_message=`sed -e '\n\t\t/^diff --git .*/{\n\t\t\ts///\n\t\t\tq\n\t\t}\n\t\t/^Signed-off-by:/d\n\t\t/^#/d\n\t' \"$MSG\" | git stripspace`\n\tif test -z \"$clean_message\"\n\tthen\n\t\treturn\n\tfi\n\n\tif test \"false\" = \"`git config --bool --get gerrit.createChangeId`\"\n\tthen\n\t\treturn\n\tfi\n\n\t# Does Change-Id: already exist? 
if so, exit (no change).\n\tif grep -i '^Change-Id:' \"$MSG\" >/dev/null\n\tthen\n\t\treturn\n\tfi\n\n\tid=`_gen_ChangeId`\n\tT=\"$MSG.tmp.$$\"\n\tAWK=awk\n\tif [ -x /usr/xpg4/bin/awk ]; then\n\t\t# Solaris AWK is just too broken\n\t\tAWK=/usr/xpg4/bin/awk\n\tfi\n\n\t# How this works:\n\t# - parse the commit message as (textLine+ blankLine*)*\n\t# - assume textLine+ to be a footer until proven otherwise\n\t# - exception: the first block is not footer (as it is the title)\n\t# - read textLine+ into a variable\n\t# - then count blankLines\n\t# - once the next textLine appears, print textLine+ blankLine* as these\n\t#   aren't footer\n\t# - in END, the last textLine+ block is available for footer parsing\n\t$AWK '\n\tBEGIN {\n\t\t# while we start with the assumption that textLine+\n\t\t# is a footer, the first block is not.\n\t\tisFooter = 0\n\t\tfooterComment = 0\n\t\tblankLines = 0\n\t}\n\n\t# Skip lines starting with \"#\" without any spaces before it.\n\t/^#/ { next }\n\n\t# Skip the line starting with the diff command and everything after it,\n\t# up to the end of the file, assuming it is only patch data.\n\t# If more than one line before the diff was empty, strip all but one.\n\t/^diff --git / {\n\t\tblankLines = 0\n\t\twhile (getline) { }\n\t\tnext\n\t}\n\n\t# Count blank lines outside footer comments\n\t/^$/ && (footerComment == 0) {\n\t\tblankLines++\n\t\tnext\n\t}\n\n\t# Catch footer comment\n\t/^\\[[a-zA-Z0-9-]+:/ && (isFooter == 1) {\n\t\tfooterComment = 1\n\t}\n\n\t/]$/ && (footerComment == 1) {\n\t\tfooterComment = 2\n\t}\n\n\t# We have a non-blank line after blank lines. 
Handle this.\n\t(blankLines > 0) {\n\t\tprint lines\n\t\tfor (i = 0; i < blankLines; i++) {\n\t\t\tprint \"\"\n\t\t}\n\n\t\tlines = \"\"\n\t\tblankLines = 0\n\t\tisFooter = 1\n\t\tfooterComment = 0\n\t}\n\n\t# Detect that the current block is not the footer\n\t(footerComment == 0) && (!/^\\[?[a-zA-Z0-9-]+:/ || /^[a-zA-Z0-9-]+:\\/\\//) {\n\t\tisFooter = 0\n\t}\n\n\t{\n\t\t# We need this information about the current last comment line\n\t\tif (footerComment == 2) {\n\t\t\tfooterComment = 0\n\t\t}\n\t\tif (lines != \"\") {\n\t\t\tlines = lines \"\\n\";\n\t\t}\n\t\tlines = lines $0\n\t}\n\n\t# Footer handling:\n\t# If the last block is considered a footer, splice in the Change-Id at the\n\t# right place.\n\t# Look for the right place to inject Change-Id by considering\n\t# CHANGE_ID_AFTER. Keys listed in it (case insensitive) come first,\n\t# then Change-Id, then everything else (eg. Signed-off-by:).\n\t#\n\t# Otherwise just print the last block, a new line and the Change-Id as a\n\t# block of its own.\n\tEND {\n\t\tunprinted = 1\n\t\tif (isFooter == 0) {\n\t\t\tprint lines \"\\n\"\n\t\t\tlines = \"\"\n\t\t}\n\t\tchangeIdAfter = \"^(\" tolower(\"'\"$CHANGE_ID_AFTER\"'\") \"):\"\n\t\tnumlines = split(lines, footer, \"\\n\")\n\t\tfor (line = 1; line <= numlines; line++) {\n\t\t\tif (unprinted && match(tolower(footer[line]), changeIdAfter) != 1) {\n\t\t\t\tunprinted = 0\n\t\t\t\tprint \"Change-Id: I'\"$id\"'\"\n\t\t\t}\n\t\t\tprint footer[line]\n\t\t}\n\t\tif (unprinted) {\n\t\t\tprint \"Change-Id: I'\"$id\"'\"\n\t\t}\n\t}' \"$MSG\" > \"$T\" && mv \"$T\" \"$MSG\" || rm -f \"$T\"\n}\n_gen_ChangeIdInput() {\n\techo \"tree `git write-tree`\"\n\tif parent=`git rev-parse \"HEAD^0\" 2>/dev/null`\n\tthen\n\t\techo \"parent $parent\"\n\tfi\n\techo \"author `git var GIT_AUTHOR_IDENT`\"\n\techo \"committer `git var GIT_COMMITTER_IDENT`\"\n\techo\n\tprintf '%s' \"$clean_message\"\n}\n_gen_ChangeId() {\n\t_gen_ChangeIdInput |\n\tgit hash-object -t commit 
--stdin\n}\n\n\nadd_ChangeId\n"
  },
  {
    "path": "scripts/demo/disk_usage.sh",
    "content": "#!/bin/sh\n\n# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\nFS1=$1\nFS2=$2\n\nPERIOD=10\n\nfunction df_line\n{\n    time=$1\n\n    # get lustre OST usage\n    DF1=`lfs df /mnt/lustre/ | grep OST | awk '{print $3}'`\n    # get posix backend usage\n    DF2=`df /mnt/backend | grep \"/\" | awk '{print $3}'`\n\n    line=`echo $time $DF1 $DF2 | sed -e \"s/ /;/g\"`\n    echo $line\n}\n\nTIME=0\necho \"time;ost1;ost2;archive\"\nwhile ((1)); do\n    df_line $TIME\n    sleep $PERIOD\n    TIME=$(( $TIME + $PERIOD ))\ndone\n"
  },
  {
    "path": "scripts/demo/migr_purge.sh",
    "content": "#!/bin/sh\n\n# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\n# 1) RH DB init: scan lustre FS\n# 2) start logging disk usage\n# 3) start posix copytool\n# 4) start policy engine for monitoring disk usage and migrating files\n# 5) start script for writing data\n# 6) start policy engine for processing changelogs\n\nROOT=/mnt/lustre\nRH=../../src/robinhood/robinhood\n\n$RH -f rh.migr_purge.conf --scan --once -L events.log -l DEBUG\n\n./disk_usage.sh > usage.csv &\n\nhsm_posix_copytool --path=/mnt/backend  --verbose > ct.log 2> ct.log &\n\n$RH -f rh.migr_purge.conf --migrate --purge -L rh.log -l DEBUG &\n\n./write_data.sh /mnt/lustre &\n\n$RH -f rh.migr_purge.conf --readlog -L events.log -l DEBUG &\n"
  },
  {
    "path": "scripts/demo/mkplot.sh",
    "content": "#!/bin/sh\n\n# convert output data\nsed -e \"s/;/ /g\" usage.csv | grep -v ost | awk '{print $1\" \"$2\" \"$3\" \"$2+$3\" \"$4}' > usage.dat\n\n# run gnuplot\ngnuplot trace_usage.gp\n"
  },
  {
    "path": "scripts/demo/populate_backend.sh",
    "content": "#!/bin/sh\n\n# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\n# this script fills a filesystem continuously\n# while migration and purges are triggered\n# by Policy Engine.\n\nROOT=$1\n\nif [[ -z $ROOT ]]; then\n    echo \"Usage: $0 <path>\";\n    exit 1;\nfi\n\nMAX_DEPTH=4\nSUBDIRS=30 # subdirs at each level\nLEAVES=30 # nbr of files at lower level\nFILE_SZ_MB=10 # file size\n\nfunction mksubtree\n{\n    local DIR=$1\n    local LVL=$2\n    local d\n    local f\n\n    if (( $LVL >= $MAX_DEPTH )); then\n        for f in `seq 1 $LEAVES`; do\n           echo \"Writing file $DIR/file.$f...\"\n           dd if=/dev/zero of=$DIR/file.$f bs=1M count=$FILE_SZ_MB 2> /dev/null\n\t       if (( $? != 0 )); then\n\t\t        echo \"ERROR $!\"\n           fi\n           # migrate file to backend\n           echo \"Archiving file $DIR/file.$f...\"\n           lfs hsm_archive $DIR/file.$f || echo \"ERROR executing lfs hsm_archive $DIR/file.$f\"\n           sleep 2\n        done\n\n        df -h $DIR\n        # release files if they are archived\n        sleep 5\n        echo \"Releasing files in $DIR...\"\n        for f in $DIR/* ; do\n            echo \"release: $f\"\n            lfs hsm_release $f\n        done\n        sleep 2\n        df -h $DIR\n    else\n        for d in `seq 1 $SUBDIRS`; do\n            mkdir -p $DIR/dir.$d\n            mksubtree $DIR/dir.$d $(( $LVL + 1 ))\n        done\n    fi\n}\n\nhsm_posix_copytool --path=/mnt/backend  --verbose > ct.log 2> ct.log &\nmksubtree $ROOT 1\n"
  },
  {
    "path": "scripts/demo/rh.migr_purge.conf",
    "content": "General {\n\tfs_path = \"/mnt/lustre\";\n}\n\ndb_update_policy\n{\n\tmd_update   = on_event_periodic(1sec,1min);\n\tpath_update = on_event_periodic(10sec,6h);\n}\n\n# ChangeLog Reader configuration\nChangeLog {\n    # 1 MDT block for each MDT:\n    MDT {\n        mdt_name  = \"MDT0000\" ;\n        # id returned by \"lctl changelog_register\" command\n        reader_id = \"cl1\" ;\n    }\n    batch_ack_count = 100;\n    force_polling = ON;\n    polling_interval = 1s;\n}\n\nLog {\n    # Log verbosity level\n    #FALSE; Possible values are: CRIT, MAJOR, EVENT, VERB, DEBUG, FULL\n    debug_level = EVENT;\n    stats_interval = 1min ;\n    # Log files\n    log_file = /tmp/rh.log;\n    report_file = /tmp/rh.report.log;\n    alert_file = stderr;\n}\n\nListManager {\n\tMySQL {\n\t\tserver = \"localhost\";\n\t\tdb = \"robinhood_lustre\";\n\t\tuser = \"robinhood\";\n\t\t# password or password_file are mandatory\n\t\tpassword = \"robinhood\";\n\t}\n\n\tSQLite {\n\t        db_file = \"/tmp/robinhood_sqlite_db\" ;\n        \tretry_delay_microsec = 1000 ;\n\t}\n}\n\n######## Policies for this demo ###########\nmigration_policies\n{\n    policy default\n    {\n        # aggressive migr policy (15s after last mod)\n        condition { last_mod > 15s }\n\tarchive_id = 1;\n    }\n}\n\n# aggressive migration: run every minute\nmigration_parameters\n{\n    runtime_interval = 5s;\n    backup_new_files = TRUE;\n    check_copy_status_on_startup = TRUE;\n    check_copy_status_delay = 30min;\n}\n\n######## most basic space release policy ##########\n\npurge_policies\n{\n    # aggressive purge policy (10s after last access)\n    # (only if purge needed)\n    policy default\n    {\n        condition { last_access > 10s }\n    }\n}\n\npurge_parameters {\n    post_purge_df_latency = 10s;\n}\n\n####### Purge trigger ########\n\n# trigger purge on OST if its usage exceeds 85%\n# check every minute\npurge_trigger\n{\n    trigger_on         = OST_usage ;\n    high_threshold_pct = 
20% ;\n    low_threshold_pct  = 18% ;\n    check_interval     = 10s ;\n}\n\n\nhsm_remove_policy\n{\n    # set this parameter to 'TRUE' for disabling HSM object removal\n    no_hsm_remove = FALSE;\n    # delay before impacting object removal in HSM\n    deferred_remove_delay = 1s;\n}\n\n"
  },
  {
    "path": "scripts/demo/trace_usage.gp",
    "content": "set term png\nset out \"usage.png\"\nset xlabel \"time(sec)\"\nset ylabel \"space used(MB)\"\nset key left top\nplot \"usage.dat\" using 1:($2/1024) title 'OST1' with lines, \\\n     \"usage.dat\" using 1:($3/1024) title 'OST2' with lines, \\\n     \"usage.dat\" using 1:($4/1024) title 'Lustre total (OST1+OST2)' with lines, \\\n     \"usage.dat\" using 1:($5/1024) title 'backend' with lines\n\n"
  },
  {
    "path": "scripts/demo/write_data.sh",
    "content": "#!/bin/sh\n\n# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\n# this script fills a filesystem continuously\n# while migration and purges are triggered\n# by Policy Engine.\n\nROOT=$1\n\nif [[ -z $ROOT ]]; then\n    echo \"Usage: $0 <path>\";\n    exit 1;\nfi\n\nMAX_DEPTH=4\nSUBDIRS=30 # subdirs at each level\nLEAVES=30 # nbr of files at lower level\nFILE_SZ_MB=2 # file size\n\nfunction mksubtree\n{\n    local DIR=$1\n    local LVL=$2\n    local d\n    local f\n\n    if (( $LVL >= MAX_DEPTH )); then\n        for f in `seq 1 $LEAVES`; do\n           echo \"Writing file $DIR/file.$f...\"\n           dd if=/dev/zero of=$DIR/file.$f bs=1M count=$FILE_SZ_MB 2> /dev/null\n\t   if (( $? != 0 )); then\n\t\techo \"ERROR $!\"\n           fi\n           sleep 1\n        done\n    else\n        for d in `seq 1 $SUBDIRS`; do\n            mkdir -p $DIR/dir.$d\n            mksubtree $DIR/dir.$d $(( $LVL + 1 ))\n        done\n    fi\n}\n\nwhile (( 1 )); do\n\n     mksubtree $ROOT 1\n\ndone\n"
  },
  {
    "path": "scripts/exafs.sh",
    "content": "#!/bin/bash\n\n# creates 1.7+ billion entries filesystem\n# with a namespace similar to production systems\nNB_CONT=10          # 10    containers\nGROUP_PER_CONT=20   # 200   groups\nUSER_PER_GROUP=50   # 10000 users\nSTUDY_PER_USER=1000 # 10M   studies\nSUBDIR=10           # 100M  directories in studies\nFCOUNT=15           # 1.5G files\nLCOUNT=1            # 100M symlinks\n\nBIN=$(basename `readlink -f $0`)\nROOT=$1\nif [[ ! -d \"$ROOT\" ]]; then\n    echo \"usage: $0 <dir>\"\n    exit 1\nfi\n\n\nfunction mk_cont\n{\n    dir=$1\n    fcount=0\n    lcount=0\n    TIME_START=`date +%s.%N`\n    ((total=$GROUP_PER_CONT*$USER_PER_GROUP*$STUDY_PER_USER*$SUBDIR*($FCOUNT+$LCOUNT)))\n    last=0\n\n    for g in $(seq -w 2 1 $GROUP_PER_CONT); do\n    for u in $(seq -w 2 1 $USER_PER_GROUP); do\n    for s in $(seq -w 4 1 $STUDY_PER_USER); do\n    for d in $(seq -w 2 1 $SUBDIR); do\n\n        curdir=$dir/group$g/user$u/study$s/data$d\n\n        for f in $(seq 1 $FCOUNT); do\n            ((fcount=$fcount+1))\n        done\n        for l in $(seq 1 $LCOUNT); do\n            ((lcount=$lcount+1))\n        done\n        if (( $fcount+$lcount-$last >= 10000 )); then\n            now=`date +%s.%N`\n            sec=`echo $now - $TIME_START | bc -l | xargs printf \"%.2f\"`\n            speed=`echo \"($fcount+$lcount)/($sec)\" | bc -l | xargs printf \"%.2f\"`\n            ((reste=$total-$fcount-$lcount))\n            approx_remain=`echo \"($reste/$speed)\" | bc -l | xargs printf \"%.0f\"`\n            if (( $approx_remain > 86400 )); then\n                t_remain=`echo \"($reste/$speed)/86400\" | bc -l | xargs printf \"%.2f days\"`\n            elif (( $approx_remain > 3600 )); then\n                t_remain=`echo \"($reste/$speed)/3600\" | bc -l | xargs printf \"%.2f hours\"`\n            else\n                t_remain=`echo \"($reste/$speed)\" | bc -l | xargs printf \"%.2f sec\"`\n            fi\n            ((last=$fcount+$lcount))\n            echo \"$dir: created 
$fcount files, $lcount symlinks in $sec sec ($speed entries/sec), remaining ~$t_remain\"\n        fi\n\n    done\n    done\n    done\n    done\n}\n\ntrap \"pkill $BIN\" SIGINT\n\nfor c in $(seq -w 2 1 $NB_CONT); do\n    mk_cont $ROOT/cont$c &\ndone\nwait\n"
  },
  {
    "path": "scripts/fill_fs.sh",
    "content": "#!/bin/sh\n\n# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\n# this script fills a filesystem continuously\n# while migration and purges are triggered\n# by Policy Engine.\n\nROOT=$1\n\nif [[ -z $ROOT ]]; then\n    echo \"Usage: $0 <path>\";\n    exit 1;\nfi\n\nMAX_DEPTH=4\nSUBDIRS=30 # subdirs at each level\nLEAVES=30 # nbr of files at lower level\nFILE_KB_MAX=4 # file size max +1\n\n# give a set of users here\nUSERS=`head -n 4 /etc/passwd | cut -d ':' -f 1 | xargs`\nNB_USERS=`echo $USERS | wc -w`\n\nTOTAL_FILES=0\nTIME_START=`date +%s.%N`\n\nfunction random256\n{\n\tod -N 1 -i /dev/urandom | head -1 | awk '{print $2}'\n}\n\nfunction check_usage\n{\n\tifree=`df -i $ROOT/. | xargs | awk '{print $(NF-2)}'`\n\tkfree=`df -k $ROOT/. | xargs | awk '{print $(NF-2)}'`\n\n\t(( $ifree < 10000 )) && echo \"Free inodes is low ($ifree): stopping\" && return 1\n\t(( $kfree < 100000 )) && echo \"Free space is low ($kfree KB < 100 MB): stopping\" && return 1\n\treturn 0\n}\n\nlast=0\n\nfunction mksubtree\n{\n    local DIR=$1\n    local LVL=$2\n    local d\n    local f\n\n    if (( $LVL >= MAX_DEPTH )); then\n        for f in `seq 1 $LEAVES`; do\n\t   # pseuso-random file size\n\t   sz=$((`random256` % $FILE_KB_MAX))\n       if (( $sz == 0 )); then\n            touch $DIR/file.$f || ( echo \"touch ERROR\" && exit 1 )\n\t   else\n            dd if=/dev/zero of=$DIR/file.$f bs=1k count=$sz 2>/dev/null || ( echo \"dd ERROR\" && exit 1 )\n\t   fi\n\t   if (( $? 
!= 0 )); then\n\t\techo \"ERROR $!\"\n           fi\n\n\t   ((TOTAL_FILES=$TOTAL_FILES+1))\n\n        uindex=$(( `random256` % $NB_USERS ))\n        uindex=$(( $uindex + 1 ))\n        owner=`echo $USERS | cut -d \" \" -f $uindex`\n        chown $owner:$owner $DIR/file.$f\n\n\t    if (( $(($TOTAL_FILES % 1000)) == 0 )); then\n\t\t[[ -n \"$now\" ]] && last=$now\n\t\tnow=`date +%s.%N`\n\t\tsec=`echo $now - $TIME_START | bc -l | xargs printf \"%.2f\"`\n\t\tif [[ $last != 0 ]]; then\n\t\t\tspeed=`echo \"1000/($now-$last)\" | bc -l | xargs printf \"%.2f\"`\n\t\telse\n\t\t\tspeed=`echo \"1000/$sec\" | bc -l | xargs printf \"%.2f\"`\n\t\tfi\n\n\t\techo \"$TOTAL_FILES files created in $sec s ($speed files/sec)\"\n\t    fi\n        done\n    else\n        for d in `seq 1 $SUBDIRS`; do\n\t    check_usage || exit 1\n            mkdir -p $DIR/dir.$d\n            mksubtree $DIR/dir.$d $(( $LVL + 1 ))\n        done\n    fi\n}\n\nwhile (( 1 )); do\n\n     mksubtree $ROOT 1\n\ndone\n"
  },
  {
    "path": "scripts/fix_man_options.sh",
    "content": "#!/bin/bash\nsed -e \"s#\\\\\\fP-\\\\\\fI\\([A-Za-z]*\\)\\\\\\fP#-\\1\\\\\\fP#g\" | sed -e \"s#-\\\\\\fI\\([A-Za-z]*\\)\\\\\\fP#-\\1#g\"\n"
  },
  {
    "path": "scripts/git_prepare_hook",
    "content": "#!/bin/sh\n#\n# An example hook script to prepare the commit log message.\n# Called by git-commit with the name of the file that has the\n# commit message, followed by the description of the commit\n# message's source.  The hook's purpose is to edit the commit\n# message file.  If the hook fails with a non-zero status,\n# the commit is aborted.\n#\n# To enable this hook, make this file executable.\n\n# This hook includes three examples.  The first comments out the\n# \"Conflicts:\" part of a merge commit.\n#\n# The second includes the output of \"git diff --name-status -r\"\n# into the message, just before the \"git status\" output.  It is\n# commented because it doesn't cope with --amend or with squashed\n# commits.\n#\n# The third example adds a Signed-off-by line to the message, that can\n# still be edited.  This is rarely a good idea.\nrootdir=$(git rev-parse --show-toplevel)\n\ncase \"$2\" in\n  merge)\n    sed -i '/^Conflicts:/,/#/!b;s/^/# &/;s/^# #/#/' \"$1\" ;;\n\n  \"\"|template)\n#    perl -i -pe '\n#      print \"\\n\" . `git diff --cached --name-status -r`\n#\t  if /^#/ && $first++ == 0' \"$1\"\n      tmpfile=$(mktemp)\n      cat $rootdir/.commit-template >> $tmpfile\n      $rootdir/scripts/check_commit.sh $1 | sed -e \"s/^/# /\" >> $tmpfile\n      cat $1 >> $tmpfile\n      mv -f $tmpfile $1\n      #show diff:\n      git diff --cached | sed -e \"s/^/# /\" >> $1\n      ;;\n  *)\n    ;;\nesac\n\n# SOB=$(git var GIT_AUTHOR_IDENT | sed -n 's/^\\(.*>\\).*$/Signed-off-by: \\1/p')\n# grep -qs \"^$SOB\" \"$1\" || echo \"$SOB\" >> \"$1\"\n"
  },
  {
    "path": "scripts/indent.sh",
    "content": "f=$1\nscript_dir=$(dirname $(readlink -m $0))\n\n[ -f \"$f\" ] || exit 1\n\nIGNORE_LIST=\"DEPRECATED_FUNCTION,INITIALISED_STATIC,GLOBAL_INITIALISERS,PRINTF_L,ASSIGN_IN_IF,C99_COMMENTS,USE_NEGATIVE_ERRNO,BRACES,NEW_TYPEDEFS\"\n\n# Kernel style:\n#    -nbad -bap -nbc -bbo -hnl -br -brs -c33 -cd33 -ncdb -ce -ci4\n#    -cli0 -d0 -di1 -nfc1 -i8 -ip0 -l80 -lp -npcs -nprs -npsl -sai\n#    -saf -saw -ncs -nsc -sob -nfca -cp33 -ss -ts8 -il1\n\n\nindent $f -o $f.new.c  \\\n-nbad -bap -nbc -bbo -hnl -br -brs -c0 -cd0 -ncdb -ce -ci4 \\\n-cli0 -d0 -di2 -nfc1 -i4 -nut -ip0 -l80 -lp -npcs -nprs -npsl -sai \\\n-saf -saw -ncs -nsc -sob -nfca -cp2 -ss -ts4 -il1 -T time_t -T uint64_t -T size_t -T global_config_t -T sm_info_def_t\n\n$script_dir/checkpatch.pl --ignore $IGNORE_LIST --terse --show-types -f $f.new.c > $f.check.out\n\ngrep $f.new.c $f.check.out | while read l; do\n\terror=$(echo \"$l\" | cut -d ':' -f 4 | tr -d ' ')\n\tline=$(echo \"$l\" | cut -d ':' -f 2)\n\n\tcase \"$error\" in\n\tPOINTER_LOCATION)\n\t\tsed -i \"$line s/\\([a-z_]*\\) \\* \\([a-z_]*\\)/\\1 *\\2/g\" $f.new.c\n\t\t;;\n\tSPACING)\n\t\tsed -i -e \"$line s/( /(/g\" -e \"$line s/ )/)/g\" -e \"$line s/while(/while (/\" $f.new.c\n\t\t;;\n\tRETURN_PARENTHESES)\n\t\tsed -i -e \"$line s/return (\\(.*\\));/return \\1;/\" $f.new.c\n\t\t;;\n\t*)\t\n\t\techo \"ignored error $error, line $line\"\n\t\t;;\n\tesac\n\t\ndone\n\n# mark remaining errors\n$script_dir/checkpatch.pl --ignore $IGNORE_LIST --terse --show-types -f $f.new.c > $f.check.out\nerr=0\ngrep $f.new.c $f.check.out | while read l; do\n\terr=1\n\terror=$(echo \"$l\" | cut -d ':' -f 4 | tr -d ' ')\n\tline=$(echo \"$l\" | cut -d ':' -f 2)\n\n\tcase \"$error\" in\n\tPOINTER_LOCATION|SPACING|LONG_LINE)\n\t\tsed -i \"$line s#^#// FIXME CHECKPATCH: #\" $f.new.c\n\t\t;;\n\t*)\t\n\t\techo \"ignored error $error, line $line\"\n\t\t;;\n\tesac\ndone\n\n[[ \"$err\" != \"0\" ]] && vimdiff $f.new.c $f\n$script_dir/checkpatch.pl --ignore $IGNORE_LIST 
--show-types -f $f.new.c\n"
  },
  {
    "path": "scripts/ld.so.robinhood.conf.in",
    "content": "@LIBDIR@/robinhood/\n"
  },
  {
    "path": "scripts/pre-commit",
    "content": "#!/bin/bash\n\n# Run nonreg script if a file from the webgui is modified\n\nSRC_PATTERN=\"web_gui\"\nif git diff --cached --name-only | grep --quiet \"$SRC_PATTERN\"\nthen\n  cd $GIT_DIR/../web_gui/tests/\n  ./nonreg.bash\n  exit $?\nfi\n"
  },
  {
    "path": "scripts/rbh-config",
    "content": "#!/bin/bash\n# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n#\n# Robinhood configuration helper\n#\n\n# check caller\nwho=`whoami`\n\nif [[ $who != root ]]; then\n    echo\n    echo \"WARNING: this script should be executed by root\" >&2\n    echo \"         else, it could not work correctly.\" >&2\n    echo\nfi\n\n\n# this function creates the database for robinhood\n# it must be launched by root.\nfunction db_check\n{\n    if (( $# != 0 )); then\n        echo \"WARNING: no argument expected for function precheck_db\"\n    fi\n    echo \"Checking system configuration...\"\n\n    if [[ ! -x `which mysqladmin` ]]; then\n        echo \"Command 'mysqladmin' not found.\"\n        echo \"Install 'mysql' and 'mysql-server' packages on your system.\"\n        exit 2\n    fi\n    echo \"mysqladmin command OK.\"\n\n    if [[ ! -x `which mysql_config` ]]; then\n        echo \"Command 'mysql_config' not found.\"\n        echo \"Install 'mysql' package on your system.\"\n        exit 2\n    fi\n    echo \"mysql_config command OK.\"\n\n    version=`mysql_config --version | cut -d . -f 1`\n    if (( $? )); then\n        echo \"Error executing 'mysql_config --version'.\"\n        exit 2\n    fi\n    echo \"MySQL version is $version.\"\n\n    service=mysqld\n    # mysqld is named mysql on some systems\n    if [ -x /etc/init.d/mysql ]; then\n        service=mysql\n    fi\n    mysql --version | grep -q MariaDB && service=mariadb\n    running=0\n    echo \"Checking service $service...\"\n    if [ -x /usr/bin/systemctl ]; then\n        /usr/bin/systemctl --quiet is-active $service && running=1\n    else\n        /sbin/service $service status && running=1\n    fi\n\n    if (( $running == 0 )); then\n        echo \"Service '$service' is not running.\"\n        echo \"It must be started to run this script.\"\n        exit 2\n    else\n        echo \"$service is running\"\n    fi\n    \n    if [[ ! 
-x `which mysql` ]]; then\n        echo \"Command 'mysql' not found.\"\n        echo \"Install 'mysql' or 'mariadb' package on your system.\"\n        exit 2\n    fi\n    echo \"mysql command OK.\"\n}\n\n\nfunction db_config\n{\n    interactive=0\n    if (( $# == 0 )); then\n        interactive=1\n    elif (( $# != 3  && $# != 4 )); then\n        echo \"ERROR: 0, 3 or 4 arguments expected\"\n            echo \"Usage: create_db <db_name> <client_hosts> <user_passwd> [<db_admin_passwd>]\"\n        exit 1\n    fi\n\n    if (( $interactive != 0 )); then\n            echo\n            echo \"Enter a custom identifier for your filesystem. E.g. lustre\"\n\n            while (( 1 )); do\n                read -p \"fsname (max 8 chars): \" fsname\n                if [[ $fsname =~ ^[a-zA-Z][a-zA-Z0-9_]{0,7}$ ]]; then\n                    break\n                else\n                    echo\n                    unmatched=`echo $fsname | sed -e \"s/[a-zA-Z0-9_]//g\"`\n                    echo \"Error: unexpected '\" $unmatched \"'.\"\n                    echo \"Filesystem name must only contain alpha-num chars with no space.\"\n                fi\n            done\n\n            echo\n            echo \"Enter hosts where robinhood commands will run. E.g. localhost\"\n            echo \"You can use '%' as wildcard: \\\"%\\\" for all hosts, \\\"cluster%\\\" for nodes starting with 'cluster'...\"\n\n            read -p \"hosts: \" clienthost\n\n            while (( 1 )); do\n                echo\n                echo \"Choose a password for connecting to the database (user 'robinhood'). 
\"\n                read -p \"password: \" -s pass1\n                echo\n                read -p  \"confirm password: \" -s pass2\n                echo\n\n                if [[ $pass1 = $pass2 ]]; then\n                    break\n                else\n                    echo \"Passwords don't match.\"\n                    echo \"Try again.\"\n                fi\n            done\n\n            echo \"Write this password to /etc/robinhood.d/.dbpassword file\"\n\n\n            DB_NAME=\"robinhood_$fsname\"\n    else\n        DB_NAME=$1\n        clienthost=$2\n        pass1=$3\n        pass2=$3\n    fi\n\n    echo\n    echo \"Configuration summary:\"\n    echo \"- Database name: '$DB_NAME'\"\n    echo \"- Client hosts: '$clienthost'\"\n    echo \"- Database user name: 'robinhood'\"\n\n    if (( $interactive != 0 )); then\n            echo\n            echo -n \"Do you agree? [y/N]\"\n\n            read -n 1 ok\n            echo\n            if [[ $ok != [yY] ]]; then\n                echo \"aborting.\"\n                exit 1\n            fi\n\n            echo\n            echo \"Enter password for root's database account (leave blank if none is set):\"\n            read -p \"root's DB password: \" -s pass_root\n            echo\n    else\n        pass_root=$4\n    fi\n\n    echo\n    echo \"Creating database '$DB_NAME'...\"\n\n    mysqladmin --password=\"$pass_root\" create $DB_NAME\n\n    if (( $? 
)); then\n        echo \"Error creating DB.\"\n        exit 1\n    fi\n    echo \"done\"\n\n    echo\n    echo \"Setting access right for user 'robinhood'@'$clienthost'...\"\n\n    echo \"(notice: user robinhood must have SUPER privilege to create triggers)\"\n    mysql --password=\"$pass_root\" $DB_NAME << EOF\nGRANT USAGE ON $DB_NAME.* TO 'robinhood'@'localhost' IDENTIFIED BY '$pass1' ;\nGRANT USAGE ON $DB_NAME.* TO 'robinhood'@'$clienthost' IDENTIFIED BY '$pass1' ;\nGRANT ALL PRIVILEGES ON $DB_NAME.* TO 'robinhood'@'localhost' IDENTIFIED BY '$pass1' ;\nGRANT ALL PRIVILEGES ON $DB_NAME.* TO 'robinhood'@'$clienthost' IDENTIFIED BY '$pass1' ;\nGRANT SUPER ON *.* TO 'robinhood'@'localhost' IDENTIFIED BY '$pass1' ;\nGRANT SUPER ON *.* TO 'robinhood'@'$clienthost' IDENTIFIED BY '$pass1' ;\nFLUSH PRIVILEGES;\nSHOW GRANTS FOR 'robinhood'@'$clienthost';\nEOF\n\n    # About SUPER privilege on *.* (needed for creating triggers)\n    # Before MySQL 5.1, it must be granted for all databases\n    # (cannot distinguish the database to grant permission on).\n\n    if (( $? )); then\n        echo \"Error setting access rights for 'robinhood'@'$clienthost'\"\n        exit 1\n    fi\n\n    echo\n    echo \"Testing connection to '$DB_NAME'...\"\n    mysql --user=robinhood --password=$pass1 $DB_NAME << EOF\nquit\nEOF\n\n    if (( $? 
)); then\n        echo \"Connection to $DB_NAME@localhost failed\"\n        exit 1\n    fi\n\n    echo\n    echo \"Database successfully created!\"\n}\n\nfunction fsnames\n{\n    lctl list_param mdd.* | sed -e \"s/mdd.//\" -e \"s/-MDT[0-9]*//\" | sort -u |\n        xargs\n}\n\nfunction enable_changelogs\n{\n    interactive=0\n    if (( $# == 0 )); then\n        interactive=1\n    elif (( $# != 1 )); then\n        echo \"ERROR: 0 or 1 argument expected\"\n            echo \"Usage: enable_chglogs [<fsname>]\"\n        exit 1\n    fi\n\n    if (($interactive != 0 )); then\n            # check if we are on mdt\n            echo -n \"Checking available MDTs...    \"\n            fslist=$(fsnames)\n            echo $fslist\n\n            if [[ -z $fslist ]]; then\n                echo \"No MDT found on this machine. Run the command on MDT.\"\n                exit 2\n            fi\n\n            echo \"Select the filesystem you want to activate changelogs for:\"\n            while ((1)); do\n                read -p \"$fslist: \" fsname\n                if [[ -n \"$fsname\" ]]; then\n                    if [[ $fslist = *$fsname* ]]; then\n                        break\n                    else\n                        echo \"$fsname: unknown\"\n                    fi\n                else\n                    cnt=$(echo $fslist | wc -w)\n                    # if count = 1, use the only fsname\n                    if (( $cnt == 1 )); then\n                        fsname=$fslist\n                        break\n                    fi\n                fi\n            done\n    else\n        fsname=$1\n        fslist=$(fsnames)\n        if [[ $fslist = *$fsname* ]]; then\n                echo \"file system '$fsname' exists\"\n        else\n                echo \"$fsname: unknown\"\n                exit 2\n        fi\n    fi\n    # check 
mdt count\n    mdtcnt=$(lctl list_param mdd.* | wc -l)\n\n    echo \"Checking if \\\"cl1\\\" is already registered for $fsname ($mdtcnt MDTs on this host)...\"\n    nb_cl1=$(lctl get_param mdd.$fsname-MDT*.changelog_users | grep cl1 | wc -l)\n\n    if (( $nb_cl1 == $mdtcnt )); then\n        echo \"\\\"cl1\\\" is already registered. Skipping registration.\"\n    elif (( $nb_cl1 < $mdtcnt )); then\n        echo \"\\\"cl1\\\" is not registered on all MDTs on this host ($mdtcnt)...\"\n        for m in $(lctl list_param mdd.$fsname-MDT*); do\n           lctl get_param $m.changelog_users | grep cl1 > /dev/null 2> /dev/null\n           if (( $? != 0 )); then\n               short=$(echo $m | cut -d '.' -f 2)\n               echo \"No client registered on $short yet\"\n               lctl --device $short changelog_register || echo \"FAILED to register to $short\"\n           fi\n        done\n    fi\n\n    echo \"Checking event mask...\"\n\n    if [[ -z \"$PURPOSE\" || $PURPOSE = LUSTRE_HSM ]]; then\n        EVENT_LIST=\"HSM CREAT UNLNK TRUNC SATTR CTIME MTIME CLOSE RENME RNMTO RMDIR HLINK LYOUT\"\n    elif [[ $PURPOSE = SHOOK ]]; then\n        EVENT_LIST=\"CREAT UNLNK TRUNC XATTR SATTR CTIME MTIME CLOSE RENME RNMTO RMDIR HLINK LYOUT\"\n    else\n        EVENT_LIST=\"CREAT UNLNK TRUNC SATTR CTIME MTIME CLOSE RENME RNMTO RMDIR HLINK LYOUT\"\n    fi\n    for event in $EVENT_LIST; do\n        missing=0\n        lctl get_param mdd.$fsname-MDT*.changelog_mask | grep $event >/dev/null || missing=1\n\n        if (( $missing != 0 )); then\n            echo \"event $event not in changelog mask: setting it\"\n            # first try with -P, if supported\n            lctl set_param -P mdd.$fsname-MDT*.changelog_mask \"+$event\" 2> /dev/null ||\n                lctl set_param mdd.$fsname-MDT*.changelog_mask \"+$event\" 2> /dev/null ||\n                echo \"FAILED\"\n        else\n            echo \"event $event OK\"\n        fi\n    done\n}\n\nfunction select_db\n{\n    
pass_root=$1\n    db_list=`mysql -Ns --password=\"$pass_root\" -e \"show databases\" | grep -v mysql | xargs`\n\n    if (( $? )); then\n            echo \"Failed to get database list\"\n            exit 1\n    fi\n    if [[ -z \"$db_list\" ]]; then\n        echo \"No DB found\"\n        exit 2\n    fi\n\n    echo \"Available databases are: $db_list\"\n\n    while ((1)); do\n        read -p \"Select database: \" db\n        if [[ -n \"$db\" ]]; then\n            if [[ $db_list = *$db* ]]; then\n                break\n            else\n                echo \"$db: unknown\"\n            fi\n        fi\n    done\n\n    echo \"Testing connection to '$db'...\"\n    mysql $db --password=\"$pass_root\" << EOF\nquit\nEOF\n\n    if (( $? )); then\n        echo \"Connection to $db@localhost failed\"\n        exit 1\n    else\n        echo \"OK\"\n        export RBH_DB=$db\n    fi\n}\n\nfunction empty_db\n{\n    interactive=0\n    if (( $# == 0 )); then\n        interactive=1\n    elif (( $# != 1 && $# != 2 )); then\n        echo \"ERROR: 0, 1 or 2 arguments expected\"\n            echo \"Usage: empty_db [<db_name> [<db_admin_passwd>]]\"\n        exit 1\n    fi\n\n    if (( $interactive != 0 )); then\n            echo\n            echo \"Enter password for root's database account (leave blank if none is set):\"\n            read -p \"root's DB password: \" -s pass_root\n            echo\n\n            select_db $pass_root || exit 1\n\n            if [ -z $RBH_DB ]; then\n                echo \"ERROR: database not specified\"\n                exit 1\n            fi\n    else\n            RBH_DB=$1\n            pass_root=$2\n    fi\n\n    echo \"Cleaning tables of database '$RBH_DB'...\"\n    mysql -v --password=\"$pass_root\" $RBH_DB << EOF\nBEGIN;\nDROP TABLE IF EXISTS ENTRIES;\nDROP TABLE IF EXISTS NAMES;\nDROP TABLE IF EXISTS STRIPE_INFO;\nDROP TABLE IF EXISTS STRIPE_ITEMS;\nDROP TABLE IF EXISTS VARS;\nDROP TABLE IF EXISTS ANNEX_INFO;\nDROP TABLE IF EXISTS ID_MAPPING;\nDROP 
TABLE IF EXISTS SOFT_RM;\nDROP TABLE IF EXISTS RECOVERY;\nDROP TABLE IF EXISTS ACCT_STAT;\nDROP FUNCTION IF EXISTS one_path;\nDROP FUNCTION IF EXISTS this_path;\nCOMMIT;\nEOF\n    if (( $? )); then\n        echo \"Command failed\"\n        exit 1\n    else\n        echo \"DONE\"\n    fi\n}\n\nfunction reset_acct\n{\n    interactive=0\n    if (( $# == 0 )); then\n        interactive=1\n    elif (( $# != 1 && $# != 2 )); then\n        echo \"ERROR: 0, 1 or 2 arguments expected\"\n            echo \"Usage: reset_acct [<db_name> [<db_admin_passwd>]]\"\n        exit 1\n    fi\n\n    if (( $interactive != 0 )); then\n            echo\n            echo \"Enter password for root's database account (leave blank if none is set):\"\n            read -p \"root's DB password: \" -s pass_root\n            echo\n\n            select_db $pass_root || exit 1\n\n            if [ -z $RBH_DB ]; then\n                echo \"ERROR: database not specified\"\n                exit 1\n            fi\n    else\n            RBH_DB=$1\n            pass_root=$2\n    fi\n\n    echo \"Cleaning ACCT tables in database '$RBH_DB'...\"\n    mysql -v --password=\"$pass_root\" $RBH_DB << EOF\nBEGIN;\nDROP TABLE IF EXISTS ACCT_STAT;\nCOMMIT;\nEOF\n    if (( $? 
)); then\n        echo \"Command failed\"\n        exit 1\n    else\n        echo \"DONE\"\n    fi\n}\n\n\nfunction reset_fileclasses\n{\n    interactive=0\n    if (( $# == 0 )); then\n        interactive=1\n    elif (( $# != 1 && $# != 2 )); then\n        echo \"ERROR: 0, 1 or 2 arguments expected\"\n            echo \"Usage: reset_classes [<db_name> [<db_admin_passwd>]]\"\n        exit 1\n    fi\n\n    if (( $interactive != 0)); then\n            echo\n            echo \"Enter password for root's database account (leave blank if none is set):\"\n            read -p \"root's DB password: \" -s pass_root\n            echo\n\n            select_db $pass_root || exit 1\n\n            if [ -z $RBH_DB ]; then\n                echo \"ERROR: database not specified\"\n                exit 1\n            fi\n    else\n        RBH_DB=$1\n        pass_root=$2\n    fi\n\n    # checking valid fields for this purpose\n    has_arch=0\n    has_rel=0\n\n    echo \"Checking schema...\"\n    mysql --password=\"$pass_root\" $RBH_DB -e \"SELECT arch_cl_update FROM ENTRIES WHERE FALSE\" 2>/dev/null && has_arch=1\n    mysql --password=\"$pass_root\" $RBH_DB -e \"SELECT rel_cl_update FROM ENTRIES WHERE FALSE\" 2>/dev/null && has_rel=1\n\n    expr=\"\"\n    if (( $has_arch )); then\n        expr=\"arch_cl_update=NULL\";\n    fi\n    if (( $has_rel )); then\n        if [ -z $expr ]; then\n            expr=\"rel_cl_update=NULL\"\n        else\n            expr=\"$expr,rel_cl_update=NULL\"\n        fi\n    fi\n\n    if [ -z $expr ]; then\n        echo \"Database $RBH_DB is already empty. No fileclass to reset.\"\n        exit 0\n    fi\n\n    echo \"Resetting fileclasses in '$RBH_DB'...\"\n       mysql -v --password=\"$pass_root\" $RBH_DB -e \"UPDATE ENTRIES SET $expr ;\"\n    if (( $? 
)); then\n        echo \"Database command failed\"\n        exit 1\n    else\n        echo \"DONE\"\n    fi\n}\n\nfunction repair_db\n{\n    interactive=0\n    if (( $# == 0 )); then\n        interactive=1\n    elif (( $# != 1 && $# != 2 )); then\n        echo \"ERROR: 0, 1 or 2 arguments expected\"\n            echo \"Usage: repair_db [<db_name> [<db_admin_passwd>]]\"\n        exit 1\n    fi\n\n    if (( $interactive != 0)); then\n            echo\n            echo \"Enter password for root's database account (leave blank if none is set):\"\n            read -p \"root's DB password: \" -s pass_root\n            echo\n\n            select_db $pass_root || exit 1\n\n            if [ -z $RBH_DB ]; then\n                echo \"ERROR: database not specified\"\n                exit 1\n            fi\n    else\n        RBH_DB=$1\n        pass_root=$2\n    fi\n\n    # check tables\n    mysqlcheck --password=\"$pass_root\" --auto-repair --databases mysql $RBH_DB\n    if (( $? )); then\n        echo \"Check failed\"\n        exit 1\n    else\n        echo \"DONE\"\n    fi\n}\n\nfunction backup_db\n{\n    interactive=0\n    if (( $# == 0 )); then\n        interactive=1\n    elif (( $# != 2 && $# != 3 )); then\n        echo \"ERROR: 2 or 3 arguments expected\"\n            echo \"Usage: backup_db [<db_name> <dest_dir> [<db_admin_passwd>]]\"\n        exit 1\n    fi\n\n    if (( $interactive != 0)); then\n            echo\n            echo \"Enter password for root's database account (leave blank if none is set):\"\n            read -p \"root's DB password: \" -s pass_root\n            echo\n\n            select_db $pass_root || exit 1\n\n            if [ -z $RBH_DB ]; then\n                echo \"ERROR: database not specified\"\n                exit 1\n            fi\n\n        while (( 1 )); do\n                read -p \"destination directory: \" DEST_DIR\n                if [[ -d \"$DEST_DIR\" ]]; then\n                    break\n                else\n                    
echo\n                    echo \"Error: the specified directory does not exist\"\n                fi\n            done\n    else\n        RBH_DB=$1\n        DEST_DIR=$2\n        pass_root=$3\n    fi\n\n    if [[ ! -d \"$DEST_DIR\" ]]; then\n        echo \"$DEST_DIR: directory does not exist\"\n        exit 1\n    fi\n\n    # backup format: <dir>/<dbname>.backup.<date>.<time>.sql\n    timestamp=`date \"+%Y%m%d-%H%M%S\"`\n    bkfile=\"$DEST_DIR/$RBH_DB.backup.$timestamp.sql\"\n\n    # single-transaction avoids wide locking for InnoDB engine.\n    mysqldump --single-transaction --password=\"$pass_root\" $RBH_DB > \"$bkfile\"\n    if (( $? != 0 )); then\n        echo \"Backup failed\"\n        exit 1\n    else\n        sz=`stat --format=\"%s\" \"$bkfile\"`\n        if (( $? != 0 )); then\n            echo \"ERROR: backup file not found\"\n            exit 1\n        fi\n        echo \"Backup successful. file: $bkfile, size: $sz bytes\"\n        echo \"DONE\"\n        exit 0\n    fi\n}\n\nfunction optimize_db\n{\n    interactive=0\n    if (( $# == 0 )); then\n        interactive=1\n    elif (( $# != 1 && $# != 2 )); then\n        echo \"ERROR: 1 or 2 arguments expected\"\n            echo \"Usage: optimize_db [<db_name> [<db_admin_passwd>]]\"\n        exit 1\n    fi\n\n    if (( $interactive != 0)); then\n            echo\n            echo \"Enter password for root's database account (leave blank if none is set):\"\n            read -p \"root's DB password: \" -s pass_root\n            echo\n\n            select_db $pass_root || exit 1\n\n            if [ -z $RBH_DB ]; then\n                echo \"ERROR: database not specified\"\n                exit 1\n            fi\n    else\n        RBH_DB=$1\n        pass_root=$2\n    fi\n\n    mysqlcheck --optimize --password=\"$pass_root\" --database $RBH_DB\n    exit $?\n}\n\n\nfunction test_connect\n{\n    if (( $# != 2 )); then\n        echo \"ERROR: 2 arguments expected\"\n            echo \"Usage: test_db <db_name> 
<robinhood_passwd>\"\n        exit 1\n    fi\n\n    DB_NAME=$1\n    pass=$2\n\n    echo \"Testing connection to '$DB_NAME'...\"\n    mysql --user=robinhood --password=$pass $DB_NAME << EOF\nquit\nEOF\n\n    if (( $? )); then\n        echo \"Connection to $DB_NAME@localhost failed\"\n        exit 1\n    else\n        echo \"Connection OK\"\n    fi\n}\n\n\nif [[ \"$1\" = precheck_db ]]; then\n    shift 1\n    db_check $*\nelif [[ \"$1\" = create_db ]]; then\n    shift 1\n    db_check && db_config $*\nelif [[ \"$1\" = enable_chglogs ]]; then\n    shift 1\n    enable_changelogs $*\nelif [[ \"$1\" = empty_db ]]; then\n    shift 1\n    empty_db $*\nelif [[ \"$1\" = reset_acct ]]; then\n    shift 1\n    reset_acct $*\nelif [[ \"$1\" = reset_classes ]]; then\n    shift 1\n    reset_fileclasses $*\nelif [[ \"$1\" = repair_db ]]; then\n    shift 1\n    repair_db $*\nelif [[ \"$1\" = backup_db ]]; then\n    shift 1\n    backup_db $*\nelif [[ \"$1\" = optimize_db ]]; then\n    shift 1\n    optimize_db $*\nelif [[ \"$1\" = test_db ]]; then\n    shift 1\n    test_connect $*\nelse\n    echo \"Usage: $0 <action> [options]\"\n    echo \"Actions:\"\n    echo \"    precheck_db:\"\n    echo \"        check database packages and service\"\n    echo \"    create_db [<db_name> <client_hosts> <robinhood_passwd> [<db_admin_passwd>]] : \"\n    echo \"        create robinhood db (interactive if no option is specified)\"\n    echo \"    enable_chglogs [<fsname>]:\"\n    echo \"        enable changelogs on MDT (interactive if no option is specified)\"\n    echo \"    empty_db [<db_name> [<db_admin_passwd>]]:\"\n    echo \"        delete robinhood database content (interactive if no option is specified)\"\n    echo \"    reset_acct [<db_name> [<db_admin_passwd>]]:\"\n    echo \"        drop acct info, so robinhood can rebuild it when it restarts.\"\n    echo \"    reset_classes [<db_name> [<db_admin_passwd>]]:\"\n    echo \"        reset fileclasses after a change in config file (interactive if 
no option is specified)\"\n    echo \"    test_db <db_name> <robinhood_passwd>:\"\n    echo \"        test connection to the database\"\n    echo \"    backup_db [<db_name> <dest_dir> [<db_admin_passwd>]]:\"\n    echo \"        backup robinhood database to dest dir\"\n    echo \"    repair_db [<db_name> [<db_admin_passwd>]]:\"\n    echo \"        check tables and fix them after a mysql server crash\"\n    echo \"    optimize_db [<db_name> [<db_admin_passwd>]]:\"\n    echo \"        defragments the database for better performance and using less disk space\"\nfi\n"
  },
  {
    "path": "scripts/rbh_cksum.sh.in",
    "content": "#!/bin/bash\n\n\n[[ -e /etc/sysconfig/rbh_cksum ]] && . /etc/sysconfig/rbh_cksum\n\n# \"defaults\" section\nRBH_CKSUM_CMD=${RBH_CKSUM_CMD:-sha1sum}\nRBH_CKSUM_DV_CMD=${RBH_CKSUM_DV_CMD:-@DV_CMD@}\n# xattr not set if variable set empty, so allow empty value\nRBH_CKSUM_XATTR=${RBH_CKSUM_XATTR-user.sha1sum}\n\nusage() {\n\techo \"usage: $(readlink -m \"$0\") <previous_value> <path>\" >&2\n\texit 1\n}\n\n\n# data version helper. Use lfs iff file is on a lustre filesystem\nget_dv() {\n\tlocal path=\"$1\"\n\n\t$RBH_CKSUM_DV_CMD \"$path\"\n}\n\ncompute_cksum() {\n\tlocal path=\"$1\"\n\tlocal cksum\n\n\t# run checksum program and take first word\n\tcksum=$($RBH_CKSUM_CMD \"$path\")\n\techo \"${cksum%% *}\"\n}\n\n\ngetfattr_output() {\n\tlocal path=\"$1\"\n\n\t[[ -z \"$RBH_CKSUM_XATTR\" ]] && return\n\n\tgetfattr -n \"$RBH_CKSUM_XATTR\" --only-values -- \"$path\" 2>/dev/null || true\n}\n\nsetfattr_output() {\n\tlocal path=\"$1\"\n\tlocal cksum=\"$2\"\n\n\t[[ -z \"$RBH_CKSUM_XATTR\" ]] && return\n\n\tsetfattr -n \"$RBH_CKSUM_XATTR\" -v \"$cksum\" -- \"$path\"\n}\n\n\nrbh_cksum() {\n\tlocal output=\"$1\"\n\tlocal path=\"$2\"\n\tlocal cksum\n\tlocal dv\n\tlocal cksum_old=\"\"\n\tlocal dv_old\n\tlocal xattr_output\n\tlocal update_xattr=1\n\n\n\t[[ -z \"$path\" ]] && usage\n\n\n\tset -e\n\tset -u\n\n\n\t# compute data version, checksum, then data version again to compare\n\tdv_old=$(get_dv \"$path\")\n\tcksum=$(compute_cksum \"$path\")\n\tdv=$(get_dv \"$path\")\n\n\tif [[ \"$dv\" != \"$dv_old\" ]]; then\n\t\techo \"$path: data version changed during checksuming, try again later\" >&2\n\t\tsetfattr_output \"$path\" \"\"\n\t\texit 0\n\tfi;\n\n\tif [[ -z \"$dv\" || -z \"$cksum\" ]]; then\n\t\techo \"$path: empty dv ($dv) or checksum ($cksum), aborting\" >&2\n\t\texit 1\n\tfi\n\n\n\t# if using xattrs, xattrs win over argument\n\txattr_output=$(getfattr_output \"$path\")\n\tif [[ -n \"$xattr_output\" ]]; 
then\n\t\toutput=\"$xattr_output\"\n\t\tupdate_xattr=\"\"\n\tfi\n\n\t# check if dv changed since last check\n\tif [[ -n \"$output\" ]]; then\n\t\t# arbitrary choice: dv can contain :, cksum cannot\n\t\tdv_old=${output%:*}\n\n\t\tif [[ \"$dv\" != \"$dv_old\" ]]; then\n\t\t\techo \"$path: dv changed since output, using new\" >&2\n\t\t\tupdate_xattr=1\n\t\telse\n\t\t\tcksum_old=${output##*:}\n\t\tfi\n\tfi\n\n\t# compare with old cksum if relevant\n\tcase \"$cksum_old\" in\n\t\t\"\")\n\t\t\techo \"$path: new cksum: $dv:$cksum\" >&2\n\t\t\t;;\n\t\t\"$cksum\")\n\t\t\techo \"$path: cksum OK: $dv:$cksum\" >&2\n\t\t\t;;\n\t\t*)\n\t\t\techo \"$path: checksum changed! dv $dv: old $cksum_old, new $cksum\" >&2\n\t\t\texit 1\n\tesac\n\n\tif [[ -n \"$update_xattr\" ]]; then\n\t\tsetfattr_output \"$path\" \"$dv:$cksum\"\n\tfi\n\n\techo \"$dv:$cksum\"\n}\n\n# if we're being sourced, don't parse arguments\n[[ $(caller | cut -d' ' -f1) != \"0\" ]] && return\n\nrbh_cksum \"$@\"\n"
  },
  {
    "path": "scripts/rbhext_tool",
    "content": "#!/bin/sh\n#\n# Simple cp command wrapper for robinhood\n#\n\nVERB=$1\nSRC=$2\nDEST=$3\n\nBIN=`basename $0`\n\nif [[ -z \"$VERB\" || -z \"$SRC\" || -z \"$DEST\" ]]; then\n\techo \"Usage:\"\n\techo \"\t$BIN ARCHIVE <src> <dest>\"\n\techo \"\t$BIN RESTORE <src> <dest>\"\n\texit 22\nfi\n\nif [[ $VERB == \"ARCHIVE\" || $VERB == \"RESTORE\" ]]; then\n\n\tcp -a \"$SRC\" \"$DEST\"\n\texit $?\nfi\n"
  },
  {
    "path": "scripts/rbhext_tool_clnt",
    "content": "#!/bin/bash\n#\n# Client script to call remote file copy operations\n#\n# 20110325 <kilian.cavalotti@cea.fr>\n# 20120628 <diego.moreno@bull.net>\n#\n\n\n##### EDIT THIS SECTION #######\n\n# Choose a random server in a pool\n\n# XXX use nodeset instead of static list\n#SERVERS=$(nodeset -e @<group>)\nSERVERS=(server1 server2)\nSERV_PORT=49999\n# must be < robinhood's copy_timeout\nTIMEOUT=15000\n\nLOG_FILE=\"/var/log/robinhood/rbhext_tool_clnt.log\"\n\n#Should be 'y' if you want to debug\nDEBUG=\"n\"\n\n##### END OF EDIT SECTION #####\n\nusage(){\n    echo \"Usage:\"\n    echo \"  $BIN < ARCHIVE | RESTORE > <src> <dest> [hints]\"\n    exit 22\n}\n\nVERB=$1\nSRC=$2\nDEST=$3\nHINTS=$4\n\nBIN=`basename $0`\nDIR=`dirname \"$DEST\"`\n\nif [[ -z \"$VERB\" || -z \"$SRC\" || -z \"$DEST\" || $# -gt 4 ]]; then\n    usage\nfi\n\nif [[ $VERB == \"ARCHIVE\" || $VERB == \"RESTORE\" ]]; then\n\n    # Choose a random server in a pool\n    RAND_ID=$(($RANDOM % ${#SERVERS[@]}))\n    RAND_SERV=${SERVERS[$RAND_ID]}\n\n    args=\"$VERB $SRC $DEST $HINTS\"\n    net_cmd=\"nc -w $TIMEOUT $RAND_SERV $SERV_PORT\"\n    if [[ $DEBUG == \"y\" ]] ; then\n        echo \"DEBUG $(date +%x' '%T) Starting $args | $net_cmd\" >> $LOG_FILE\n    fi\n    # connect to $RAND_SERV and transmit parameters\n    ret=$(echo $args | $net_cmd)\n    rc=$?\n\n    if (( $rc != 0 )); then\n        if [[ $DEBUG == \"y\" ]] ; then\n                echo \"Error in $args\" >> $LOG_FILE\n        fi\n        echo \"Error copying file $SRC\" >> $LOG_FILE\n\texit $rc\n    fi\n\n    # get return code and exit\n    if [[ $DEBUG == \"y\" ]] ; then\n        echo \"$(date +%x' '%T) $args to $RAND_SERV with rc=$ret\" >> $LOG_FILE\n    fi\n\n    if [[ -z $ret ]] ; then\n        if [[ $DEBUG == \"y\" ]] ; then\n                echo \"Nothing in rc for $args : -1\" >> $LOG_FILE\n        fi\n        exit 1\n    else\n        if [[ $DEBUG == \"y\" ]] ; then\n                echo \"$(date +%x' '%T) $args with rc=$ret\" >> 
$LOG_FILE\n        fi\n        exit $ret\n    fi\n\nelse\n    usage\nfi\n\n"
  },
  {
    "path": "scripts/rbhext_tool_svr",
    "content": "#!/bin/bash\n#\n# Server script to handle remotely initiated file copy operations\n#\n# 20110325 <kilian.cavalotti@cea.fr>\n# 20120629 <diego.moreno@bull.net>\n#\n\n# Get arguments from remote\nread args\nset -- $args\n\n\nVERB=$1\nSRC=$2\nDEST=$3\nHINTS=$4\n\n###############################\n###### EDIT if necessary ######\n\nLOG_FILE=\"/var/log/rbhext_tool.log\"\n\n# must be >= robinhood's copy_timeout value\nTIMEOUT=15000\n\n# General copy command\n# CMD=\"my_cp $HINTS $SRC $DEST\"\nCMD=\"cp -a $SRC $DEST\"\n\n#Should be 'y' if you want to debug\nDEBUG=\"y\"\n\n##### End of EDIT zone #######\n##############################\n\n\nTIMEOUT_CMD=`which timeout &> /dev/null ; echo $?`\n\nDIR=`dirname \"$DEST\"`\n\nif [[ -z \"$VERB\" || -z \"$SRC\" || -z \"$DEST\" || $# -gt 4 ]]; then\n\n   if [[ $DEBUG == \"y\" ]] ; then\n       echo \"$(date +%x' '%T) Error in $VERB $SRC $DEST\" >> $LOG_FILE\n   fi\n    # send an exit code back to the client\n    echo 22\n    exit 22\nfi\n\n\nif [[ $VERB == \"ARCHIVE\" || $VERB == \"RESTORE\" ]]; then\n    [ -d \"$DIR\" ] || mkdir -p \"$DIR\" || exit 1\n    # create file if it doesn't exist\n    [ -e \"$DEST\" ] || touch \"$DEST\" || exit 1\n\n    if [[ $DEBUG == \"y\" ]] ; then\n   \techo \"$(date +%x' '%T) Running $CMD\" >> $LOG_FILE\n    fi\n\n    if [[ $TIMEOUT_CMD -eq 0 ]] ; then\n    \ttimeout $TIMEOUT $CMD >> $LOG_FILE 2>&1\n    else\n    \t$CMD >> $LOG_FILE 2>&1\n    fi\n    # capture $CMD return code and send it back to the client\n    ret=$?\n\n    if [[ $DEBUG == \"y\" ]] ; then\n    \techo \"$(date +%x' '%T) $CMD ret=$ret\" >> $LOG_FILE\n    fi\n\n    echo $ret\n  \texit $ret\nfi\n\n"
  },
  {
    "path": "scripts/rewrite.sh",
    "content": "#!/bin/bash\n\nd=$(dirname $0)\n\nfunction process_lines\n{\n    local l1\n    local l2\n    local l3\n    local l4\n    local ln\n    local fi\n    local num\n    local repl\n\n    while (( 1 )); do\n        read l1 || break\n        read l2 || break\n        read l3 || break\n        read l4\n#        echo \"l1: $l1\"\n#        echo \"l2: $l2\"\n#        echo \"l3: $l3\"\n#        echo \"l4: $l4\"\n\n        #10703: FILE: src/common/lustre_tools.c:413:\n        fi=$(echo \"$l2\" | awk '{print $3}' | cut -d ':' -f 1)\n        num=$(echo \"$l2\" | awk '{print $3}' | cut -d ':' -f 2)\n        ln=$(echo \"$l3\" | sed -e \"s/^+//\")\n#        lbefore=$(sed \"${num}q;d\" $fi)\n        sed -i -e \"${num}s/ \\* / */g\" $fi\n#        lafter=$(sed \"${num}q;d\" $fi)\n        echo \"$fi:$num\" >&2\n#        echo \"AVANT: $lbefore\" >&2\n#        echo \"APRES: $lafter\" >&2\n    done\n}\n\n\n#echo \"summary of previous changes:\" >&2\n#git show $GIT_COMMIT^ src >&2\n\n#echo \"summary of current changes:\" >&2\n#git show $GIT_COMMIT src >&2\n\nfor f in $(git diff-tree --name-only --diff-filter=AMR --root -r --no-commit-id $GIT_COMMIT | grep '\\.[chyl]$'); do\n    git show $GIT_COMMIT $f > /tmp/patch || continue\n    $d/checkpatch.pl --ignore CODE_INDENT,SPACE_BEFORE_TAB,LEADING_SPACE,PRINTF_L /tmp/patch | grep -A 2 \"foo \\* bar\" | process_lines\n    \n    sed -i -e \"s/( [^ ]/(/g\" -e \"s/[^ ] )/)/g\" $f\ndone\nexit 0\n"
  },
  {
    "path": "scripts/robinhood.init.in",
    "content": "#!/bin/bash\n##\n# robinhood.init 1.0 2008/02/07 Ph. Gregoire philippe.gregoire@cea.fr\n##\n# chkconfig:\t- 95 5\n# description:\trobinhood (filesystem purge and audit) service\n# processname:\t/usr/sbin/robinhood\n# config:\t/etc/robinhood.d/<filesystem>.conf\n##\n\n# Source function library.\n. /etc/rc.d/init.d/functions\n\n# variables - may be overwritten by /etc/sysconfig/robinhood\nRH_CONF_DIR=@CONFDIR@/robinhood.d\t# directory containing configuration files\nRH_RUN_DIR=/\t\t\t\t\t# directory where daemon will run\nDAEMON=@SBINDIR@/robinhood\n\n# options for starting the daemon\n# eg. --scan --purge (leave empty for default actions).\nRBH_OPT=\"\"\n\n# Source robinhood configuration.\n[ -f @CONFDIR@/sysconfig/robinhood ] && . @CONFDIR@/sysconfig/robinhood\n\nVARRUN=/var/run\nRETVAL=0\n\n\n#\n# pidfile_name() build the name of the pid file for a given configuration file\n# pidfile is like /var/run/robinhood.<fs> if conf file is /etc/robinhood.d/<purpose>/<fs>.conf\n#\npidfile_name()\n{\n\tlocal conf=$1\n\tlocal pidfile=$VARRUN/rbh.${conf##*/}\n\techo ${pidfile%.conf}\n}\n\n#\n# build_fslist() build the list of configurations files\n# configurations files can be given as\n# - a full pathname :\n#\tthe script expects that file exists.\n# - a filename : <myconfigfile>\n#\tthe script expects to find file /etc/robinhood.d/<purpose>/<myconfigfile>.\n# - a filesystem name <fsname> :\n#\tthe scripts expects to find a file /etc/robinhood.d/<purpose>/<fsname>.conf\n# - an empty list :\n#\tthe script take all files directly under /etc/robinhood.d/<purpose>\n#\nbuild_fslist()\n{\n\tlocal fs\n\tlocal fs_list=\"\"\n\n\tif [[ $# -eq 0 ]]; then\n\t\t# no args, consider all files under the configuration directory\n\t\tfor fs in $(ls -1 $RH_CONF_DIR/*.conf $RH_CONF_DIR/*.cfg 2>&-)\n\t\tdo\n\t\t\t[ -f $fs ] && fs_list=\"$fs_list $fs\"\n\t\tdone\n\t\tif [[ -z $fs_list ]] ; then\n\t\t\techo \"No configuration files under $RH_CONF_DIR\" >&2\n\t\t\treturn 
1\n\t\tfi\n\telse\n\t\t# several args, each may be a full configuration file pathname\n\t\t# or filename of a filesystem name\n\t\tfor fs in $@\n\t\tdo\n\t\t\tif [[ $fs = /* ]] && [[ -f $fs ]] ; then\n\t\t\t\tfs_list=\"$fs_list $fs\"\n\t\t\telif [ -f $RH_CONF_DIR/$fs ] ; then\n\t\t\t\tfs_list=\"$fs_list $RH_CONF_DIR/$fs\"\n\t\t\telif [ -f $RH_CONF_DIR/$fs.conf ] ; then\n\t\t\t\tfs_list=\"$fs_list $RH_CONF_DIR/$fs.conf\"\n\t\t\telse\n\t\t\t\techo \"Unable to find configuration file for $fs\" >&2\n\t\t\tfi\n\t\tdone\n\tfi\n\techo ${fs_list% } # remove leading space\n}\n\n#\n# daemon_exist()\n# check if process corresponding to pidfile exist.\n#\ndaemon_exist()\n{\n\tlocal pidfile=$1\n\tlocal pid\n\n\tif [ -f $pidfile ] ; then\n\t\t# check if there is still some process\n\t\tpid=$(<$pidfile)\n\t\tif [ -z \"${pid//[0-9]/}\" -a -d \"/proc/$pid\" ] ; then\n\t\t\treturn 0\n\t\tfi\n\tfi\n\treturn 1\n}\n\n#\n# check_or_start_fs()\n# checks if another daemon is not already running for the same purpose,\n# then (optionally) launches a new daemon with a specific configuration file and a specific pid file.\n#\ncheck_or_start_fs()\n{\n\tlocal action=$1\n\tlocal conf=$2\n\tlocal pidfile=$(pidfile_name $conf)\n\tif [ -z \"$RBH_OPT\" ]; then\n\t\tlocal cmdline=\"$DAEMON -d -f $conf -p $pidfile\"\n\telse\n\t\tlocal cmdline=\"$DAEMON $RBH_OPT -d -f $conf -p $pidfile\"\n\tfi\n\tlocal label=\"Starting Robinhood for $conf \"\n\n\t# check if there is still some process\n\tif daemon_exist $pidfile ; then\n\t\tif [[ $action = START ]] ; then\n\t\t\t# According to LSB, running \"start\" on a service already running\n\t\t\t# should be successful.\n\t\t\techo \"RobinHood already started for $conf\"\n\t\t\techo_passed\n\t\t\techo\n\t\t\treturn 0\n\t\telse\n\t\t\techo \"Robinhood for $conf is running:\"\n\t\t\tps -fp $(<$pidfile)\n\t\t\treturn 0\n\t\tfi\n\telse\n\t\tif [[ $action = START ]] ; then\n            # prevent from excessive memory usage\n            stack=$(ulimit  -s)\n            
mem=$(free -k | grep 'Mem:' | awk '{print $2}')\n            if [ -n \"$stack\" ] && (( 32*$stack > $mem)); then\n                echo \"WARNING: thread stack size (ulimit -s) is surprisingly high and may not be adapted to a multithreaded application!\" >&2\n            fi\n\t\t\trm -f $pidfile\n\t\t\tcd ${RH_RUN_DIR}\n\t\t\taction \"$label ...\" $cmdline\n\t\t\techo -n \"Checking process status... \"\n\t\t\tsleep 1\n\t\t\tif [ -d /proc/$(<$pidfile) ]; then\n\t\t\t\tsuccess \"$label\"\n                        else\n                               \tfailure \"$label\"\n                        fi\n\t\t\techo\n\t\t\t#echo -n \"$label ...\"\n\t\t\t#if initlog $INITLOG_ARGS -c \"$cmdline\" \"$label\" ; then\n\t\t\t#\tsuccess \"$label\"\n\t\t\t#else\n\t\t\t#\tfailure \"$label\"\n\t\t\t#fi\n\t\t\t#echo\n\t\telse\n\t\t\techo \"Robinhood for $conf is not running\"\n\t\t\treturn 1\n\t\tfi\n\tfi\n\n}\n\n#\n# stop_fs() stops one daemon instance\n#\nstop_fs()\n{\n\tlocal conf=$1\n\tlocal pidfile=$(pidfile_name $conf)\n\tlocal label=\"Stopping Robinhood for $conf\"\n\tlocal rc=0\n\n\t#echo -n \"$label ...\"\n\tif [ -f $pidfile ] ; then\n\t\tlocal pid=$(<$pidfile)\n\t\tif [ -z \"${pid//[0-9]/}\" ] ; then\n\t\t\t# this a pid number\n\t\t\t#[ -d /proc/$pid ] && initlog $INITLOG_ARGS -c \"/bin/kill kill -TERM $pid\" \"$label\"\n\t\t\t[ -d /proc/$pid ] && action \"$label ...\" /bin/kill -TERM $pid\n\n\t\t\t# wait 10 seconds max\n\t\t\tfor i in `seq 1 10`; do\n\t\t\t\tif [ ! 
-d /proc/$pid ]; then\n\t\t\t\t\tbreak;\n\t\t\t\tfi\n\n\t\t\t\tsleep 1\n\t\t\tdone\n\t\t\t#[ -d /proc/$pid ] && initlog $INITLOG_ARGS -c \"/bin/kill -9 $pid\" \"$label\"\n\t\t\t[ -d /proc/$pid ] && action \"Force shutdown of Robinhood for $conf ...\" /bin/kill -9 $pid\n\t\t\tsleep 1\n\t\t\techo -n \"Checking process status ...\"\n\t\t\tif [ -d /proc/$pid ] ; then\n\t\t\t\techo_failure\n\t\t\t\trc=1\n\t\t\telse\n\t\t\t\techo_success\n\t\t\t\trm $pidfile\n\t\t\tfi\n\t\telse\n\t\t\techo \"$pidfile: bad pid file\"\n\t\t\techo_failure\n\t\t\trc=1\n\t\tfi\n\telse\n\t\t# According to LSB, running \"stop\" on a service already stopped\n\t\t# or not running # should be considered successful.\n\t\techo -n \"$label ...\"\n\t\techo \" already stopped\"\n\t\techo_passed\n\tfi\n\techo\n\treturn $rc\n}\n\n#\n# reload_fs\n# reload the configuration for the given filesystem\n#\nreload_fs()\n{\n\tlocal conf=$1\n\tlocal pidfile=$(pidfile_name $conf)\n\tif [ -z \"$RBH_OPT\" ]; then\n\t\tlocal cmdline=\"$DAEMON -d -f $conf -p $pidfile\"\n\telse\n\t\tlocal cmdline=\"$DAEMON $RBH_OPT -d -f $conf -p $pidfile\"\n\tfi\n\n\t# check if there is still some process\n\tif daemon_exist $pidfile ; then\n\t\tlocal pid=$(<$pidfile)\n\n\t\techo \"Robinhood for $conf is running: process pid $pid\"\n\t\t[ -d /proc/$pid ] && action \"Reloading configuration for process $pid...\" /bin/kill -HUP $pid\n\t\treturn 0\n\telse\n\t\techo \"Robinhood for $conf is not running\"\n\tfi\n\n}\n\n#\n# start action\n#\nstart()\n{\n\tlocal fs\n\tfor fs in $(build_fslist $@)\n\tdo\n\t\tcheck_or_start_fs START $fs\n\tdone\n}\n\n#\n# stop action\n#\nstop()\n{\n\tlocal fs\n\tfor fs in $(build_fslist $@)\n\tdo\n\t\tstop_fs $fs\n\tdone\n}\n\n#\n# status action\n#\nstatus()\n{\n\tlocal fs\n\tfor fs in $(build_fslist $@)\n\tdo\n\t\tcheck_or_start_fs CHECK $fs\n\tdone\n}\n\n#\n# reload action\n#\nreload()\n{\n\tlocal fs\n\tfor fs in $(build_fslist $@)\n\tdo\n\t\treload_fs $fs\n\tdone\n}\n\n# See how we were called.\ncase \"$1\" 
in\n  start)\n\tshift\n\tstart $@\n\tRETVAL=$?\n\t;;\n  stop)\n\tshift\n\tstop $@\n\tRETVAL=$?\n\t;;\n  status)\n\tshift\n\tstatus $@\n\tRETVAL=$?\n\t;;\n  reload)\n\tshift\n\treload $@\n\tRETVAL=$?\n\t;;\n  restart)\n\tshift\n\tstop $@\n\tstart $@\n\tRETVAL=$?\n\t;;\n  *)\n\techo \"Usage: $0 {start|stop|status|reload|restart}\"\n\texit 1\nesac\n\nexit $RETVAL\n"
  },
  {
    "path": "scripts/robinhood.init.sles.in",
    "content": "#!/bin/bash\n##\n# robinhood.init 1.0 2008/02/07 Ph. Gregoire philippe.gregoire@cea.fr\n# SLES port \t     2009/09/29 I Hailperin  hailperin@zib.de\n# add reload action  2010/09/29 ThL \t     thomas.leibovici@cea.fr\n##\n# chkconfig:\t- 95 5\n# description:\trobinhood (filesystem purge and audit) service\n# processname:\t/usr/sbin/robinhood\n# config:\t/etc/robinhood.d/<filesystem>.conf\n##\n### BEGIN INIT INFO\n# Provides:          robinhood\n# Required-Start:    mysql\n# Required-Stop:     mysql\n# Default-Start:     3 5\n# Default-Stop:      0 1 2 6\n# Short-Description: robinhood (filesystem purge and audit) service\n# Description:\n### END INIT INFO\n\n\n# Source function library.\n. /etc/rc.status\n\n# Shell functions sourced from /etc/rc.status:\n#      rc_check         check and set local and overall rc status\n#      rc_status        check and set local and overall rc status\n#      rc_status -v     ditto but be verbose in local rc status\n#      rc_status -v -r  ditto and clear the local rc status\n#      rc_failed        set local and overall rc status to failed\n#      rc_reset         clear local rc status (overall remains)\n#      rc_exit          exit appropriate to overall rc status\n\n# First reset status of this service\nrc_reset\n\n# Set some variables, to be compatible with redhat:\n# This all seem confusing? Look in /etc/sysconfig/init,\n# or in /usr/doc/initscripts-*/sysconfig.txt on a redhat system.\nBOOTUP=color\nRES_COL=60\nMOVE_TO_COL=\"echo -en \\\\033[${RES_COL}G\"\nSETCOLOR_SUCCESS=\"echo -en \\\\033[1;32m\"\nSETCOLOR_FAILURE=\"echo -en \\\\033[1;31m\"\nSETCOLOR_WARNING=\"echo -en \\\\033[1;33m\"\nSETCOLOR_NORMAL=\"echo -en \\\\033[0;39m\"\nLOGLEVEL=1\n\n# Define some functions, which are defined in /etc/rc.d/init.d/functions\n# on redhat\n# action echo_failure echo_success echo_passed success failure\n\n# echo_success/failure\n# this could be replaced by rc_status -v. 
rc_status records if the last\n# command was successful or not.\n\n\n# variables - may be overwritten by /etc/sysconfig/robinhood\nRH_CONF_DIR=@CONFDIR@/robinhood.d/\t# directory containing configuration files for this purpose\nRH_RUN_DIR=/\t\t\t\t# directory where daemon will run\nDAEMON=/usr/sbin/robinhood\n\n# options for starting the daemon\n# eg. --scan --purge (leave empty for default actions).\nRBH_OPT=\"\"\n\n# Source robinhood configuration.\n[ -f /etc/sysconfig/robinhood ] && . /etc/sysconfig/robinhood\n\nVARRUN=/var/run\nRETVAL=0\n\n\n#\n# pidfile_name() build the name of the pid file for a given configuration file\n# pidfile is like /var/run/<fs> if conf file is /etc/robinhood.d/<purpose>/<fs>.conf\n#\npidfile_name()\n{\n\tlocal conf=$1\n\tlocal pidfile=$VARRUN/rbh.${conf##*/}\n\techo ${pidfile%.conf}\n}\n\n#\n# build_fslist() build the list of configurations files\n# configurations files can be given as\n# - a full pathname :\n#\tthe script expects that file exists.\n# - a filename : <myconfigfile>\n#\tthe script expects to find file /etc/robinhood.d/<myconfigfile>.\n# - a filesystem name <fsname> :\n#\tthe scripts expects to find a file /etc/robinhood.d/<fsname>.conf\n# - an empty list :\n#\tthe script take all files directly under /etc/robinhood.d/\n#\nbuild_fslist()\n{\n\tlocal fs\n\tlocal fs_list=\"\"\n\n\tif [[ $# -eq 0 ]]; then\n\t\t# no args, consider all files under the configuration directory\n\t\tfor fs in $(ls -1 $RH_CONF_DIR/*.conf $RH_CONF_DIR/*.cfg 2>&-)\n\t\tdo\n\t\t\t[ -f $fs ] && fs_list=\"$fs_list $fs\"\n\t\tdone\n\t\tif [[ -z $fs_list ]] ; then\n\t\t\techo \"No configuration files under $RH_CONF_DIR\" >&2\n\t\t\treturn 1\n\t\tfi\n\telse\n\t\t# several args, each may be a full configuration file pathname\n\t\t# or filename of a filesystem name\n\t\tfor fs in $@\n\t\tdo\n\t\t\tif [[ $fs = /* ]] && [[ -f $fs ]] ; then\n\t\t\t\tfs_list=\"$fs_list $fs\"\n\t\t\telif [ -f $RH_CONF_DIR/$fs ] ; then\n\t\t\t\tfs_list=\"$fs_list 
$RH_CONF_DIR/$fs\"\n\t\t\telif [ -f $RH_CONF_DIR/$fs.conf ] ; then\n\t\t\t\tfs_list=\"$fs_list $RH_CONF_DIR/$fs.conf\"\n\t\t\telse\n\t\t\t\techo \"Unable to find configuration file for $fs\" >&2\n\t\t\tfi\n\t\tdone\n\tfi\n\techo ${fs_list% } # remove leading space\n}\n\n#\n# daemon_exist()\n# check if process corresponding to pidfile exist.\n# it must run with the same args.\n#\ndaemon_exist()\n{\n\tlocal pidfile=$1\n\tlocal cmdline=$2\n\tlocal pid\n\n\tif [ -f $pidfile ] ; then\n\t\t# check if there is still some process\n\t\tpid=$(<$pidfile)\n\t\tif [ -z \"${pid//[0-9]/}\" -a -d \"/proc/$pid\" ] ; then\n\t\t\tlocal cmd=$(tr '\\0' ' ' </proc/$pid/cmdline)\n\t\t\tif [[ ${cmd% } = $cmdline ]] ; then\n\t\t\t\treturn 0\n\t\t\tfi\n\t\tfi\n\tfi\n\treturn 1\n}\n\n#\n# check_or_start_fs()\n# checks if another daemon is not already running for the same purpose,\n# then (optionally) launches a new daemon with a specific configuration file and a specific pid file.\n#\ncheck_or_start_fs()\n{\n\tlocal action=$1\n\tlocal conf=$2\n\tlocal pidfile=$(pidfile_name $conf)\n\tif [ -z \"$RBH_OPT\" ]; then\n\t\tlocal cmdline=\"$DAEMON -d -f $conf -p $pidfile\"\n\telse\n\t\tlocal cmdline=\"$DAEMON $RBH_OPT -d -f $conf -p $pidfile\"\n\tfi\n\tlocal label=\"Starting Robinhood for $conf \"\n\n\t# check if there is still some process\n\tif daemon_exist $pidfile \"$cmdline\" ; then\n\t\tif [[ $action = START ]] ; then\n\t\t\t# According to LSB, running \"start\" on a service already running\n\t\t\t# should be successful.\n\t\t\techo -n \"RobinHood already started for $conf\"\n\t\t\trc_status -v\n\t\t\techo\n\t\t\treturn 0\n\t\telse\n\t\t\techo \"Robinhood for $conf is running:\"\n\t\t\tps -fp $(<$pidfile)\n\t\t\treturn 0\n\t\tfi\n\telse\n\t\tif [[ $action = START ]] ; then\n\t\t\trm -f $pidfile\n\t\t\tcd ${RH_RUN_DIR}\n\t\t\t#action \"$label ...\" $cmdline\n\t\t\techo \"$label ...\"\n\t\t\tstartproc -p $pidfile $cmdline\n      # Remember status and be verbose\n\t\t  rc_status 
-v\n\t\telse\n\t\t\techo \"Robinhood for $conf is not running\"\n\t\t\treturn 1\n\t\tfi\n\tfi\n\n}\n\n#\n# stop_fs() stops one daemon instance\n#\nstop_fs()\n{\n\tlocal conf=$1\n\tlocal pidfile=$(pidfile_name $conf)\n\tlocal label=\"Stopping Robinhood for $conf\"\n\tlocal rc=0\n\n\t#echo -n \"$label ...\"\n\tif [ -f $pidfile ] ; then\n\t\tlocal pid=$(<$pidfile)\n\t\tif [ -z \"${pid//[0-9]/}\" ] ; then\n\t\t\t# this a pid number\n\t\t\t#[ -d /proc/$pid ] && initlog $INITLOG_ARGS -c \"/bin/kill kill -TERM $pid\" \"$label\"\n\t\t\t[ -d /proc/$pid ] && echo -n \"$label ...\" && \\\n\t\t\t\tkillproc -p $pidfile -TERM $DAEMON\n\t\t\t\trc_status -v\n\n\t\t\t# wait 10 seconds max\n\t\t\tfor i in `seq 1 10`; do\n\t\t\t\tif [ ! -d /proc/$pid ]; then\n\t\t\t\t\tbreak;\n\t\t\t\tfi\n\n\t\t\t\tsleep 1\n\t\t\tdone\n\t\t\t#[ -d /proc/$pid ] && initlog $INITLOG_ARGS -c \"/bin/kill -9 $pid\" \"$label\"\n\t\t\tif [ -d /proc/$pid ] ; then\n\t\t\t\techo -n \"Force shutdown of Robinhood for $conf ...\"\n\t\t\t\tkillproc -p $pidfile -KILL $DAEMON\n\t\t\t\trc_status -v\n\t\t\tfi\n\t\telse\n\t\t\techo \"$pidfile: bad pid file\"\n\t\t\tasdfasdfasdf 2>/dev/null # generate a failed exit status\n\t\t\trc_status -v\n\t\t\trc=1\n\t\tfi\n\telse\n\t\t# According to LSB, running \"stop\" on a service already stopped\n\t\t# or not running # should be considered successful.\n\t\techo \"$label ...\"\n\t\techo -n \"already stopped\"\n\t\trc_status -v\n\tfi\n\techo\n\treturn $rc\n}\n\n#\n# reload_fs\n# reload the configuration for the given filesystem\n#\nreload_fs()\n{\n\tlocal conf=$1\n\tlocal pidfile=$(pidfile_name $conf)\n\tif [ -z \"$RBH_OPT\" ]; then\n\t\tlocal cmdline=\"$DAEMON -d -f $conf -p $pidfile\"\n\telse\n\t\tlocal cmdline=\"$DAEMON $RBH_OPT -d -f $conf -p $pidfile\"\n\tfi\n\n\t# check if there is still some process\n\tif daemon_exist $pidfile \"$cmdline\" ; then\n\t\tlocal pid=$(<$pidfile)\n\n\t\techo \"Robinhood for $conf is running: process pid $pid\"\n\t\tif [ -d /proc/$pid ]; then\n\t 
               echo -n \"Reloading configuration for process $pid...\"\n                        killproc -p $pidfile -HUP $DAEMON\n                        rc_status -v\n\t\tfi\n\t\treturn 0\n\telse\n\t\techo \"Robinhood for $conf is not running\"\n\tfi\n\n}\n\n\n#\n# start action\n#\nstart()\n{\n\tlocal fs\n\tfor fs in $(build_fslist $@)\n\tdo\n\t\tcheck_or_start_fs START $fs\n\tdone\n}\n\n#\n# stop action\n#\nstop()\n{\n\tlocal fs\n\tfor fs in $(build_fslist $@)\n\tdo\n\t\tstop_fs $fs\n\tdone\n}\n\n#\n# status action\n#\nstatus()\n{\n\tlocal fs\n\tfor fs in $(build_fslist $@)\n\tdo\n\t\tcheck_or_start_fs CHECK $fs\n\tdone\n}\n\n#\n# reload action\n#\nreload()\n{\n\tlocal fs\n\tfor fs in $(build_fslist $@)\n\tdo\n\t\treload_fs $fs\n\tdone\n}\n\n# See how we were called.\ncase \"$1\" in\n  start)\n\tshift\n\tstart $@\n\tRETVAL=$?\n\t;;\n  stop)\n\tshift\n\tstop $@\n\tRETVAL=$?\n\t;;\n  status)\n\tshift\n\tstatus $@\n\tRETVAL=$?\n\t;;\n  reload)\n\tshift\n\treload $@\n\tRETVAL=$?\n\t;;\n  restart)\n\tshift\n\tstop $@\n\tstart $@\n\tRETVAL=$?\n\t;;\n  *)\n\techo \"Usage: $0 {start|stop|status|reload|restart}\"\n\texit 1\nesac\n\nexit $RETVAL\n"
  },
  {
    "path": "scripts/robinhood.service.in",
    "content": "[Unit]\nDescription=Robinhood server\n#only works if config file is unique\n\n[Service]\nType=simple\nKillMode=mixed\nEnvironmentFile=-@CONFDIR@/sysconfig/robinhood\nLimitNOFILE=8096\nExecStart=@SBINDIR@/robinhood $RBH_OPT\nExecReload=/bin/kill -HUP $MAINPID\n"
  },
  {
    "path": "scripts/robinhood@.service.in",
    "content": "[Unit]\nDescription=Robinhood server for %I\n\n[Service]\nType=simple\nKillMode=mixed\nEnvironmentFile=-@CONFDIR@/sysconfig/robinhood\nEnvironmentFile=-@CONFDIR@/sysconfig/robinhood.%I\nLimitNOFILE=8096\nExecStart=@SBINDIR@/robinhood $RBH_OPT -f @CONFDIR@/robinhood.d/%I.conf\nExecReload=/bin/kill -HUP $MAINPID\n"
  },
  {
    "path": "scripts/sanity-lhsm/repro_time.sh",
    "content": "#/!bin/bash\n\n# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\nfunction error\n{\n    echo \"ERROR: $*\"\n    log_cleanup\n    exit 1\n}\n\nfunction copytool_setup\n{\n    # terminate previous copytool\n    curr_ct_pid=$(pgrep -f hsm_copytool_posix)\n    [ $? -ne 0 ] || kill $curr_ct_pid\n\n    # start a new one\n    hsm_copytool_posix --hsm_root=/tmp --archive 0 &\n    CT_PID=$!\n}\n\nfunction copytool_cleanup\n{\n    kill $CT_PID\n}\n\n\nfunction log_setup\n{\n    # ensures no other changelog reader is registered,\n    # so we only get non-acknowledged logs\n    for id in $(grep cl /proc/fs/lustre/mdd/lustre-MDT0000/changelog_users | \\\n\t\tawk '{print $1}'); do\n        lctl --device lustre-MDT0000 changelog_deregister $id\n    done\n   # changelog setup\n    lctl --device lustre-MDT0000 changelog_register \\\n        || error \"Cannot register changelog user\"\n    CLID=$(tail -n 1 /proc/fs/lustre/mdd/lustre-MDT0000/changelog_users | \\\n\t   awk '{print $1}')\n    echo \"changelog user id is $CLID\"\n    lctl set_param mdd.*.changelog_mask \"CREAT UNLNK TRUNC TIME HSM SATTR\" \\\n        || error \"Error setting changelog mask\"\n    # initial cleanup (to make sure there are no previous records)\n    lfs changelog_clear lustre $CLID 0\n}\n\nfunction log_cleanup\n{\n    # deregister changelog client after clearing its records\n    lfs changelog_clear lustre $CLID 0\n    lctl --device lustre-MDT0000 changelog_deregister $CLID\n}\n\nfunction test1\n{\n    echo \"1) create and write file\"\n    # write file\n    dd if=/dev/zero of=/mnt/lustre/file.1 bs=1M count=10\n\n    echo \"Log after create:\"\n    # read changelogs and clear all\n    lfs changelog lustre\n    echo \"lfs changelog_clear lustre $CLID 0\"\n    lfs changelog_clear lustre $CLID 0\n\n    echo \"2) archive file\"\n    # archive file\n    lfs hsm_archive /mnt/lustre/file.1\n\n    # wait for copy completion (wait for HSM event 
in changelog)\n    while (( 1 )); do\n        hsm_cnt=$(lfs changelog lustre | grep HSM | wc -l)\n        [ $hsm_cnt -eq 1 ] && break\n        sleep 0.1\n    done\n\n    echo \"3) read chglog and get file state\"\n    # for each changelog event, get entry state and clear the record\n    # (reproduces PolicyEngine behavior)\n    for i in `lfs changelog lustre | grep -v MARK | awk '{print $1}'`; do\n        line=$(lfs changelog lustre | egrep \"^$i \")\n        echo \"LOG RECORD: $line\"\n        FID=$(echo $line | awk '{print $6}' | cut -d '=' -f 2)\n        echo \"FID=$FID\"\n        # get state for this entry\n        lfs hsm_state \"/mnt/lustre/.lustre/fid/$FID\" > /dev/null \\\n            || error \"Cannot stat /mnt/lustre/.lustre/fid/$FID\"\n        # acknownledge the record\n        echo \"lfs changelog_clear lustre $CLID $i\"\n        lfs changelog_clear lustre $CLID $i\n    done\n\n    # at this point, the log should be empty...\n    echo \"last cleared log record is $i\"\n    echo \"Current log content:\"\n        lfs changelog lustre\n    echo \"==========================\"\n\n    echo \"4) Release\"\n\n    # now release the file\n    lfs hsm_release /mnt/lustre/file.1\n\n    echo \"Log content after hsm_release:\"\n    lfs changelog lustre\n    echo \"==========================\"\n\n}\n\nrm -rf /mnt/lustre/*\n\n#copytool_setup\nlog_setup\ntest1\nlog_cleanup\n#copytool_cleanup\n\nrm -rf /mnt/lustre/*\n"
  },
  {
    "path": "scripts/sanity-lhsm/rh-hsm.conf.in",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\nGeneral {\n\tfs_path = \"/mnt/lustre\";\n}\n\n# ChangeLog Reader configuration\nChangeLog {\n    # 1 MDT block for each MDT:\n    MDT {\n        mdt_name  = \"MDT0000\" ;\n        # id returned by \"lctl changelog_register\" command\n        reader_id = \"@RH_CLID@\" ;\n    }\n    force_polling = ON;\n    polling_interval = 1s;\n}\n\nLog {\n    # Log verbosity level\n    # Possible values are: CRIT, MAJOR, EVENT, VERB, DEBUG, FULL\n    debug_level = MAJOR;\n\n    stats_interval = 1min ;\n\n    # Log files\n    log_file = stderr;\n    report_file = stderr;\n    alert_file = /dev/null;\n}\n\nListManager\n{\n\tMySQL\n\t{\n\t\tserver = \"localhost\";\n\t\tdb = \"@RH_DB@\";\n\t\tuser = \"robinhood\";\n\t\t# password or password_file are mandatory\n\t\tpassword = \"robinhood\";\n\t}\n\n\t# if we want to test with SQLite DB\n    \tSQLite\n    \t{\n\t        db_file = \"/tmp/rh.sqlite_db\" ;\n        \tretry_delay_microsec = 1000 ;\n    \t}\n\n}\n\n######## Policies for this demo ###########\n\nmigration_policies\n{\n    policy default\n    {\n        # aggressive migr policy (2min after last mod)\n        condition\n        {\n            last_mod > 15s\n        }\n\n\t    archive_id = 1;\n    }\n}\n\n# aggressive migration: run every minute\nmigration_parameters\n{\n    runtime_interval = 15s;\n    backup_new_files = TRUE;\n    \n    check_copy_status_on_startup = TRUE;\n    check_copy_status_delay = 30min;\n\n}\n\n######## most basic space release policy ##########\n\npurge_policies\n{\n    # aggressive purge policy (30s after last access)\n    policy default\n    {\n        condition\n        {\n            last_access > 10s \n        }\n    }\n}\n\npurge_parameters\n{\n  post_purge_df_latency = 10s;\n}\n\n####### Purge trigger ########\n\n# trigger purge on OST if its usage exceeds 85%\n# check every minute\npurge_trigger\n{\n    trigger_on         
= OST_usage ;\n    high_threshold_pct = 50% ;\n    low_threshold_pct  = 45% ;\n    check_interval     = 10s ;\n}\n\n\nhsm_remove_policy\n{\n    # set this parameter to 'TRUE' for disabling HSM object removal\n    no_hsm_remove = FALSE;\n    # delay before impacting object removal in HSM\n    deferred_remove_delay = 1s;\n}\n\n"
  },
  {
    "path": "scripts/sanity-lhsm/sanity.sh",
    "content": "#/!bin/bash\n\n# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\nfunction error\n{\n    echo \"ERROR: $*\"\n    rh_cleanup\n    exit 1\n}\n\nfunction check_cmd\n{\n    cmd=$1\n    which $cmd >/dev/null 2>/dev/null || error \"$cmd command is missing\"\n}\n\nfunction rh_prereq_check\n{\n   check_cmd \"mysqladmin\"\n   check_cmd \"mysql_config\"\n   (/sbin/service mysqld status | grep running >/dev/null 2>/dev/null) \\\n        || (pgrep mysqld >/dev/null) || error \"mysql daemon not running\"\n   check_cmd \"mysql\"\n}\n\nRH_DB=\"robinhood_sanity\"\nRH_CFG=\"rh-hsm.conf\"\nRH=../../src/robinhood/rh-hsm\n\nfunction rh_setup\n{\n   LOGIN=\"robinhood\"\n   PASS=\"robinhood\"\n   rh_prereq_check\n\n   # database setup\n   mysqladmin create $RH_DB || error \"Error creating robinhood DB\"\n   mysql $RH_DB > /dev/null << EOF\nGRANT USAGE ON $RH_DB.* TO '$LOGIN'@'localhost' IDENTIFIED BY '$PASS' ;\nGRANT ALL PRIVILEGES ON $RH_DB.* TO '$LOGIN'@'localhost' IDENTIFIED BY '$PASS' ;\nFLUSH PRIVILEGES;\nEOF\n    [ $? -eq 0 ] || error \"Error setting access rights for $LOGIN on db $RH_DB\"\n\n    mysql --user=$LOGIN --password=$PASS $RH_DB << EOF\nquit\nEOF\n    [ $? 
-eq 0 ] || error \"Error testing connection to database $RH_DB\"\n    echo \"robinhood db setup successful\"\n\n    # changelog setup\n    lctl --device lustre-MDT0000 changelog_register \\\n        || error \"Cannot register changelog user\"\n    RH_CLID=$(tail -n 1 /proc/fs/lustre/mdd/lustre-MDT0000/changelog_users | awk '{print $1}')\n    echo \"changelog user id for robinhood is $RH_CLID\"\n\n    lctl set_param mdd.*.changelog_mask \"CREAT UNLNK TRUNC TIME HSM SATTR\" \\\n        || error \"Error setting changelog mask\"\n\n    # initial cleanup\n    lfs changelog_clear lustre $RH_CLID 0\n\n    # generating config file\n    sed -e \"s/@RH_DB@/$RH_DB/\" $RH_CFG.in | sed -e \"s/@RH_CLID@/$RH_CLID/\" > $RH_CFG \\\n        || error \"Error creating robinhood config file\"\n\n    # initial scan (security)\n    $RH -f $RH_CFG --scan --once || error \"Error performing initial scan\"\n}\n\nfunction rh_cleanup\n{\n    # drop database\n    mysqladmin drop --force $RH_DB\n    # deregister changelog client\n    lfs changelog_clear lustre $RH_CLID 0\n    lctl --device lustre-MDT0000 changelog_deregister $RH_CLID\n}\n\nfunction test1\n{\n    # create new files in lustre\n    for i in $(seq 1 10); do\n       dd if=/dev/zero of=/mnt/lustre/file.$i bs=1M count=10\n    done\n    sleep 1\n    # read changelogs\n    $RH -f $RH_CFG --readlog --once || error \"Error reading events\"\n    # check their status in database\n    nb_new=$($RH-report -f $RH_CFG --dump-status=new -P \"/mnt/lustre\" | tee /dev/tty \\\n         | grep file| wc -l)\n    [ $nb_new -eq 10 ] || error \"10 new files expected, $nb_new found\"\n    # now archive them\n    $RH -f $RH_CFG --sync || error \"Error archiving files\"\n    nb_migr=$($RH-report -f $RH_CFG --dump-status=archiving -P \"/mnt/lustre\" |\\\n              tee /dev/tty | grep file| wc -l)\n    [ $nb_migr -eq 10 ] || error \"10 files copy running expected, $nb_migr found\"\n    # wait for archiving operations to complete (timeout=30s)\n    for 
timeo in $(seq 1 30); do\n    \t# check for HSM records (without clearing the log)\n        hsm_cnt=$(lfs changelog lustre | grep HSM | wc -l)\n        [ $hsm_cnt -ge 10 ] && break\n        sleep 1\n    done\n    hsm_cnt=$(lfs changelog lustre | grep HSM | wc -l)\n    [ $hsm_cnt -ge 10 ] || error \"timeout reached, not enough HSM events\"\n\n    # 'HSM' events should have been raised\n    $RH -f $RH_CFG --readlog --once || error \"Error reading events\"\n    nb_done=$($RH-report -f $RH_CFG --dump-status=sync -P \"/mnt/lustre\" \\\n              | tee /dev/tty | grep file| wc -l)\n    [ $nb_done -eq 10 ] || error \"10 archived files expected, $nb_done found\"\n\n    # now purge archived entries\n    $RH -f $RH_CFG --purge-fs=0 --ignore-policies || error \"Error purging entries\"\n\n    nb_released=$(find /mnt/lustre/ -type f -exec lfs hsm_state {} \\; | grep released | wc -l)\n    [ $nb_released -eq 10 ] || error \"10 released files expected, $nb_released found\"\n\n    sleep 1\n\n    # now test hsm-remove\n    rm -f /mnt/lustre/file.*\n    sleep 1\n\n    $RH -f $RH_CFG --readlog --once || error \"Error reading events\"\n    $RH-report -f $RH_CFG --deferred-rm --csv\n    sleep 2\n    # launch entry removal in HSM\n    $RH -f $RH_CFG --hsm-remove --once || error \"Error removing entries\"\n\n}\n\nrm -rf /mnt/lustre/*\n\nrh_setup\necho \"1- read chglog, archive, purge, remove\"\ntest1\nrh_cleanup\n\nrm -rf /mnt/lustre/*\n"
  },
  {
    "path": "scripts/sysconfig_robinhood.in",
    "content": "\n# directory of configuration files for this service\n#RH_CONF_DIR=/etc/robinhood.d/\n# current directory where the daemon is started\n#RH_RUN_DIR=/\n# path to robinhood command\n#DAEMON=/usr/sbin/robinhood\n\n# options for starting the daemon\n# eg. --scan --purge (leave empty for default actions).\nRBH_OPT=\"@INPUT_OPT@ --run=all\"\n"
  },
  {
    "path": "scripts/type_gen.pl",
    "content": "#!/usr/bin/env perl\n#\n# Process a definition file and generate a header with:\n# - entry_info_t\n# - field_infos array\n# - several defines (attribute index, attribute mask, ...)\n#\n#\n# usage: type_gen.pl <inputfile> <outputfile>\n#\n\nuse strict;\n\nif ( $#ARGV != 1 )\n{\n   print STDERR \"Usage: type_gen.pl <inputfile> <outputfile>\\n\";\n   exit -1;\n}\n\nmy $infile = $ARGV[0];\nmy $outfile = $ARGV[1];\n\nopen( INPUT, \"< $infile\" ) or die \"Could not open $infile\";\nopen( OUTPUT, \"> $outfile\" ) or die \"Could not open $outfile\";\n\n# 0: not yet initialized (only empty lines or comments expected)\n# 1: header section\n# 2: attr definition section\nmy $status=0;\nmy $line;\nmy $lineno=0;\n\nmy $next_index=0;\nmy %attrlist=();\n\nprint OUTPUT \"#ifndef _APP_TYPES_H\\n\";\nprint OUTPUT \"#define _APP_TYPES_H\\n\";\n\nprint OUTPUT \"\\n#include <stddef.h>\\n\";\n\nwhile ( $line = <INPUT> )\n{\n\t$lineno++;\n\n\tif ($line =~ m/^\\s*\\%header\\s*$/ )\n\t{\n\t  # beginning of a new section\n\t  $status = 1;\n\t}\n\telsif ($line =~ m/^\\s*\\%attrdef\\s*$/ )\n\t{\n\t  # beginning of a new section\n\t  $status = 2;\n\t}\n\telsif ( $status == 0 )\n\t{\n\t\tif ( ($line !~ m/^\\s*$/) && ($line !~ m/^\\s*#/) )\n\t\t{\n\t\t\tprint STDERR \"Unexepected line out of \\%header or \\%attrdef section: line $lineno: $line\\n\";\n\t\t\texit -1;\n\t\t}\n\t}\n\telsif ( $status == 1 )\n\t{\n\t\tprint OUTPUT $line;\n\t}\n\telsif ( $status == 2 )\n\t{\n\t\t# is this a comment or emptyline?\n\t\tnext if ( ($line =~ m/^\\s*$/) || ($line =~ m/^\\s*#/) );\n\n\t\tmy ($name,$ctype,$dbtype,$len,$flags,$gen_from,$gen_func);\n\n\t\t# parse the line\n\t\tif ( $line =~ m/^\\s*([^,]+),\\s*([^,]+),\\s*([^,]+),\\s*([^,]+),\\s*([^,]+)$/ )\n\t\t{\n\t\t\t($name,$ctype,$dbtype,$len,$flags,$gen_from,$gen_func) = ($1,$2,$3,$4,$5,\"\",\"NULL\");\n\t\t}\n\t\telsif (  $line =~ m/^\\s*([^,]+),\\s*([^,]+),\\s*([^,]+),\\s*([^,]+),\\s*([^,]+),\\s*([^,]+),\\s*(.*)$/ 
)\n\t\t{\n\t\t\t($name,$ctype,$dbtype,$len,$flags,$gen_from,$gen_func) = ($1,$2,$3,$4,$5,$6,$7);\n\t\t}\n\t\telse\n\t\t{\n\t\t\tprint STDERR \"Bad format for attr definition: line $lineno: $line\\n\";\n                \texit -1;\n\t\t}\n\n\t\t# remove trailing blanks\n\t\t$name =~ s/\\s*$//;\n\t\t$ctype =~ s/\\s*$//;\n\t\t$dbtype =~ s/\\s*$//;\n\t\t$len =~ s/\\s*$//;\n\t\t$flags =~ s/\\s*$//;\n\t\t$gen_from =~ s/\\s*$//;\n\t\t$gen_func =~ s/\\s*$//;\n\n\t\tif ( $gen_from eq \"\" )\n\t\t{\n\t\t\t$gen_from = \"-1\";\n\t\t}\n\t\telse\n\t\t{\n\t\t\t$gen_from = \"ATTR_INDEX_\".$gen_from ;\n\t\t}\n\n\t\t${$attrlist{$next_index}}{name}=$name;\n\t\t${$attrlist{$next_index}}{ctype}=$ctype;\n\t\t${$attrlist{$next_index}}{dbtype}=$dbtype;\n\t\t${$attrlist{$next_index}}{len}=$len;\n\t\t${$attrlist{$next_index}}{flags}=$flags;\n\t\t${$attrlist{$next_index}}{gen_from}=$gen_from;\n\t\t${$attrlist{$next_index}}{gen_func}=$gen_func;\n\n\t\t$next_index ++;\n\n\t}\n}\n\n# printing data structures\nmy $index;\n\nprint OUTPUT \"\\ntypedef struct __entry_info__ \\n{\\n\";\n\nforeach $index (sort {0+$a <=> 0+$b}  keys %attrlist )\n{\n\tprint OUTPUT \"\\t\". ${$attrlist{$index}}{ctype} . \" \";\n\tprint OUTPUT \"\\t\". 
${$attrlist{$index}}{name};\n\n\t# is len a number or a constant?\n\tif (${$attrlist{$index}}{len} !~ m/^[0-9]+/ )\n\t{\n\t\tprint OUTPUT \"[\".${$attrlist{$index}}{len}.\"];\\n\";\n\t}\n\telsif ( ${$attrlist{$index}}{len} > 0 )\n\t{\n\t\tprint OUTPUT \"[\".${$attrlist{$index}}{len}.\"];\\n\";\n\t}\n\telse\n\t{\n\t\tprint  OUTPUT \";\\n\";\n\t}\n}\n    # additional special field: list of status (size depends on policy definitions)\n    print OUTPUT \"\\tchar const **sm_status;\\n\";\n    # additional special fields: policy specific info\n    print OUTPUT \"\\tvoid **sm_info;\\n\";\n\nprint OUTPUT \"} entry_info_t;\\n\\n\";\n\n#print all defines (index and mask)\n\nforeach $index (sort {0+$a <=> 0+$b}  keys %attrlist )\n{\n\tprint OUTPUT \"#define ATTR_INDEX_\".${$attrlist{$index}}{name}.\" \\t$index\\n\";\n}\nprint OUTPUT \"\\n\";\nprint OUTPUT \"#define ATTR_COUNT \".$next_index.\"\\n\";\nprint OUTPUT \"\\n\";\nprint OUTPUT \"#if ATTR_COUNT > 32\\n\";\nprint OUTPUT \"#error \\\"Standard attribute index must fit in a 32bits mask\\\"\\n\";\nprint OUTPUT \"#endif\\n\";\nprint OUTPUT \"\\n\";\nforeach $index (sort {0+$a <=> 0+$b}  keys %attrlist )\n{\n#        my $mask_val = 1<<$index;\n\tprintf OUTPUT \"#define ATTR_MASK_\".${$attrlist{$index}}{name}.\" \\t(1LL << %u)\\n\", $index;\n}\n\nprint OUTPUT \"\\nstatic const field_info_t field_infos[]=\\n{\\n\";\nforeach $index (sort {0+$a <=> 0+$b}  keys %attrlist )\n{\n\tmy $lenprint=\"\";\n\t# is len a number or a constant?\n\tif (${$attrlist{$index}}{len} !~ m/^[0-9]+/ )\n\t{\n\t\t$lenprint = ${$attrlist{$index}}{len}.\"-1\";\n\t}\n\telse\n\t{\n\t\t$lenprint = ${$attrlist{$index}}{len}-1;\n\t}\n\n\tif ( $index != $next_index - 1 )\n\t{\n\t\tprint OUTPUT \"\\t{ \\\"\".${$attrlist{$index}}{name}.\"\\\", \\t\".\n\t\t\t     ${$attrlist{$index}}{dbtype}.\", \\t\".\n\t\t\t     $lenprint.\", \\t\".\n\t\t\t     ${$attrlist{$index}}{flags}.\n\t\t\t     \", offsetof(entry_info_t, \".\n\t\t\t     ${$attrlist{$index}}{name}.\") , 
\".\n\t\t\t     ${$attrlist{$index}}{gen_from}.\", \".\n\t\t\t     ${$attrlist{$index}}{gen_func}.\" }, \\n\";\n\t}\n\telse\n\t{\n\t\tprint OUTPUT \"\\t{ \\\"\".${$attrlist{$index}}{name}.\"\\\", \\t\".\n\t\t\t\t${$attrlist{$index}}{dbtype}.\", \\t\".\n\t\t\t\t$lenprint.\", \\t\".\n\t\t\t\t${$attrlist{$index}}{flags}.\n\t\t\t\t\", offsetof(entry_info_t, \".\n\t\t\t\t${$attrlist{$index}}{name}.\"), \".\n\t\t\t\t${$attrlist{$index}}{gen_from}.\", \".\n\t\t\t\t${$attrlist{$index}}{gen_func}.\" }\\n\";\n\t}\n\n}\n\nprint OUTPUT \"};\\n\";\nprint OUTPUT \"\\n#endif\\n\";\n"
  },
  {
    "path": "src/Makefile.am",
    "content": "SUBDIRS=include common list_mgr entry_processor fs_scan cfg_parsing modules policies\n\nif CHANGELOGS\nSUBDIRS+=chglog_reader\nendif\n\nSUBDIRS += robinhood tools tests\n\nindent:\n\tfor d in $(SUBDIRS); do \t\\\n\t\tmake -C $$d indent ; \t\\\n\tdone\n"
  },
  {
    "path": "src/cfg_parsing/Makefile.am",
    "content": "AM_YFLAGS\t\t    = --defines\nAM_LFLAGS\t\t    = -i\nAM_CFLAGS= -Wall -Werror $(DB_CFLAGS) $(PURPOSE_CFLAGS)\n\nif DEBUG_PARSING\n# not supported with automake 1.12\n#AM_YFLAGS += --debug -v\nAM_CFLAGS += -DYYDEBUG=1\nendif\n\n# yacc relative depedencies\nconf_lex.l: conf_yacc.h\nconf_yacc.h: conf_yacc.c\n\n# cleaning FLEX/BISON generated files\n#CLEANFILES = conf_yacc.h conf_yacc.c conf_lex.c\n\nnoinst_LTLIBRARIES          = libconfigparsing.la librbhcfg.la\n\nlibconfigparsing_la_SOURCES = conf_yacc.y conf_yacc.h conf_lex.l analyze.c config_parsing.c analyze.h \\\n\t\t\t                  rbh_cfg_helpers.c rbh_boolexpr.c conf_yacc.c conf_lex.c\n\nlibrbhcfg_la_SOURCES        = rbh_cfg.c\n\nindent:\n\t$(top_srcdir)/scripts/indent.sh\n"
  },
  {
    "path": "src/cfg_parsing/analyze.c",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2004-2009 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n/**\n * \\file    analyze.c\n * \\author  Thomas Leibovici, CEA/DAM\n * \\date    2008/07/04\n * \\brief   Building the syntax tree.\n */\n\n#include \"config.h\"\n#include \"analyze.h\"\n#include <stdlib.h>\n#include <stdio.h>\n#include <stdbool.h>\n\n#if HAVE_STRING_H\n#   include <string.h>\n#endif\n\nextern int yylineno;\n\n/**\n *  create a list of items\n */\nlist_items *rh_config_CreateItemsList(void)\n{\n    list_items *new = (list_items *)calloc(1, sizeof(list_items));\n\n    (*new) = NULL;\n    return new;\n}\n\n/**\n *  Create a block item with the given content\n */\ngeneric_item *rh_config_CreateBlock(char *blockname, char *blockid,\n                                    list_items *list)\n{\n    generic_item *new = (generic_item *)calloc(1, sizeof(generic_item));\n\n    new->type = TYPE_BLOCK;\n    new->line = yylineno;\n\n    rh_strncpy(new->item.block.block_name, blockname, MAXSTRLEN);\n    if (blockid)\n        rh_strncpy(new->item.block.block_id, blockid, MAXSTRLEN);\n    else\n        new->item.block.block_id[0] = '\\0';\n\n    if (list) {\n        new->item.block.block_content = *list;\n        free(list);\n    } else\n        new->item.block.block_content = NULL;\n\n    new->next = NULL;\n\n    return new;\n\n}\n\n/**\n *  Add an item to a list as first element\n */\nvoid rh_config_AddItem(list_items *list, generic_item *item)\n{\n    if (!item)\n        fprintf(stderr, \"Item expected\\n\");\n\n    if ((*list) == NULL) {\n        (*list) = item;\n    } else {\n        item->next = 
(*list);\n        (*list) = item;\n    }\n}\n\n/**\n *  Create a key <op> value peer (boolean)\n */\ngeneric_item *rh_config_CreateKeyValueExpr(char *varname, operator_t op,\n                                           char *varval)\n{\n\n    generic_item *new = (generic_item *)calloc(1, sizeof(generic_item));\n    new->type = TYPE_BOOL_EXPR;\n    new->line = yylineno;\n    new->next = NULL;\n    new->item.bool_expr.type = BOOL_CONDITION;\n    new->item.bool_expr.oper = BOOL_OP_IDENTITY;\n    rh_strncpy(new->item.bool_expr.expr_u.key_value.varname, varname,\n               MAXSTRLEN);\n    rh_strncpy(new->item.bool_expr.expr_u.key_value.varvalue, varval,\n               MAXSTRLEN);\n    new->item.bool_expr.expr_u.key_value.op_type = op;\n    new->item.bool_expr.expr_u.key_value.arg_list = NULL;\n\n    return new;\n\n}\n\n/**\n *  Create a key=value peer (assignment or condition)\n */\ngeneric_item *rh_config_CreateAffect(char *varname, char *varval)\n{\n\n    generic_item *new = (generic_item *)calloc(1, sizeof(generic_item));\n    new->type = TYPE_AFFECT;\n    new->line = yylineno;\n    new->next = NULL;\n    rh_strncpy(new->item.affect.varname, varname, MAXSTRLEN);\n    rh_strncpy(new->item.affect.varvalue, varval, MAXSTRLEN);\n    new->item.affect.op_type = 0;\n    new->item.affect.arg_list = NULL;\n\n    return new;\n\n}\n\n/**\n * Create a block with a boolean expression\n */\ngeneric_item *rh_config_CreateBoolExpr(char *blockname, char *title,\n                                       generic_item *item)\n{\n    generic_item *new = (generic_item *)calloc(1, sizeof(generic_item));\n\n    new->type = TYPE_BLOCK;\n    new->line = yylineno;\n\n    rh_strncpy(new->item.block.block_name, blockname, MAXSTRLEN);\n\n    if (title)\n        rh_strncpy(new->item.block.block_id, title, MAXSTRLEN);\n    else\n        new->item.block.block_id[0] = '\\0';\n\n    new->item.block.block_content = item;\n\n    new->next = NULL;\n\n    return new;\n}\n\n/**\n * Create unary boolean 
expression from other boolean expression\n */\ngeneric_item *rh_config_CreateBoolExpr_Unary(bool_operator_t op,\n                                             generic_item *item)\n{\n    if (item->type != TYPE_BOOL_EXPR) {\n        // boolean expression expected\n        fprintf(stderr, \"Boolean expression expected\\n\");\n        return NULL;\n    }\n\n    if (op == BOOL_OP_IDENTITY)\n        return item;\n\n    if (op != BOOL_OP_NOT) {\n        /* unary operator expected */\n        fprintf(stderr, \"Unary boolean expression expected\\n\");\n        return NULL;\n    }\n\n    generic_item *new = (generic_item *)calloc(1, sizeof(generic_item));\n\n    new->type = TYPE_BOOL_EXPR;\n    new->line = yylineno;\n    new->next = NULL;\n    new->item.bool_expr.type = BOOL_UNARY;\n    new->item.bool_expr.oper = op;\n\n    /* create a bool expr and free the generic item */\n    new->item.bool_expr.expr_u.members.expr1 =\n        (type_bool_expr *)calloc(1, sizeof(type_bool_expr));\n    *new->item.bool_expr.expr_u.members.expr1 = item->item.bool_expr;\n    free(item);\n\n    new->item.bool_expr.expr_u.members.expr2 = NULL;\n\n    return new;\n}\n\n/**\n * Create binary boolean expression from 2 expressions\n */\ngeneric_item *rh_config_CreateBoolExpr_Binary(bool_operator_t op,\n                                              generic_item *expr1,\n                                              generic_item *expr2)\n{\n    if ((expr1->type != TYPE_BOOL_EXPR) || (expr2->type != TYPE_BOOL_EXPR)) {\n        // boolean expressions expected\n        fprintf(stderr, \"Boolean expression expected\\n\");\n        return NULL;\n    }\n\n    if (op != BOOL_OP_AND && op != BOOL_OP_OR) {\n        /* binary operator expected */\n        fprintf(stderr, \"Binary boolean expression expected\\n\");\n        return NULL;\n    }\n\n    generic_item *new = (generic_item *)calloc(1, sizeof(generic_item));\n\n    new->type = TYPE_BOOL_EXPR;\n    new->line = yylineno;\n    new->next = NULL;\n    
new->item.bool_expr.type = BOOL_BINARY;\n    new->item.bool_expr.oper = op;\n\n    new->item.bool_expr.expr_u.members.expr1 =\n        (type_bool_expr *)calloc(1, sizeof(type_bool_expr));\n    new->item.bool_expr.expr_u.members.expr2 =\n        (type_bool_expr *)calloc(1, sizeof(type_bool_expr));\n    *new->item.bool_expr.expr_u.members.expr1 = expr1->item.bool_expr;\n    *new->item.bool_expr.expr_u.members.expr2 = expr2->item.bool_expr;\n    free(expr1);\n    free(expr2);\n\n    return new;\n\n}\n\ngeneric_item *rh_config_CreateSet(char *blockname, char *label,\n                                  generic_item *set)\n{\n    generic_item *new = (generic_item *)calloc(1, sizeof(generic_item));\n\n    new->type = TYPE_BLOCK;\n    new->line = yylineno;\n\n    rh_strncpy(new->item.block.block_name, blockname, MAXSTRLEN);\n\n    if (label)\n        rh_strncpy(new->item.block.block_id, label, MAXSTRLEN);\n    else\n        new->item.block.block_id[0] = '\\0';\n\n    new->item.block.block_content = set;\n\n    new->next = NULL;\n\n    return new;\n}\n\ngeneric_item *rh_config_CreateSet_Unary(set_operator_t op, generic_item *set)\n{\n    if (set->type != TYPE_SET) {\n        // sets expected\n        fprintf(stderr, \"Set is expected\\n\");\n        return NULL;\n    }\n\n    if (op != SET_OP_NOT) {\n        /* unary operator expected */\n        fprintf(stderr, \"Unary set operator expected (not)\\n\");\n        return NULL;\n    }\n\n    generic_item *new = calloc(1, sizeof(generic_item));\n    if (!new) {\n        fprintf(stderr, \"Not enough memory\\n\");\n        return NULL;\n    }\n\n    new->type = TYPE_SET;\n    new->line = yylineno;\n    new->next = NULL;\n    new->item.set.set_type = SET_NEGATION;\n\n    new->item.set.set_u.op.oper = op;\n\n    new->item.set.set_u.op.set1 = (type_set *)calloc(1, sizeof(type_set));\n    new->item.set.set_u.op.set2 = NULL;\n\n    if (new->item.set.set_u.op.set1 == NULL) {\n        free(new);\n        fprintf(stderr, \"Missing 
memory\\n\");\n        return NULL;\n    }\n\n    *new->item.set.set_u.op.set1 = set->item.set;\n\n    free(set);\n\n    return new;\n}\n\ngeneric_item *rh_config_CreateSet_Binary(set_operator_t op,\n                                         generic_item *set1,\n                                         generic_item *set2)\n{\n    if ((set1->type != TYPE_SET) || (set2->type != TYPE_SET)) {\n        // boolean setessions expected\n        fprintf(stderr, \"Sets are expected\\n\");\n        return NULL;\n    }\n\n    if (op != SET_OP_UNION && op != SET_OP_INTER) {\n        /* binary operator expected */\n        fprintf(stderr, \"Binary set operators expected (union, inter)\\n\");\n        return NULL;\n    }\n\n    generic_item *new = calloc(1, sizeof(generic_item));\n    if (!new) {\n        fprintf(stderr, \"Not enough memory\\n\");\n        return NULL;\n    }\n\n    new->type = TYPE_SET;\n    new->line = yylineno;\n    new->next = NULL;\n    new->item.set.set_type = SET_BINARY;\n\n    new->item.set.set_u.op.oper = op;\n\n    new->item.set.set_u.op.set1 = (type_set *)calloc(1, sizeof(type_set));\n    new->item.set.set_u.op.set2 = (type_set *)calloc(1, sizeof(type_set));\n\n    if ((new->item.set.set_u.op.set1 == NULL) ||\n        (new->item.set.set_u.op.set2 == NULL)) {\n        free(new);\n        fprintf(stderr, \"Missing memory\\n\");\n        return NULL;\n    }\n\n    *new->item.set.set_u.op.set1 = set1->item.set;\n    *new->item.set.set_u.op.set2 = set2->item.set;\n\n    free(set1);\n    free(set2);\n\n    return new;\n}\n\ngeneric_item *rh_config_CreateSet_Singleton(char *set_name)\n{\n    generic_item *new = (generic_item *)calloc(1, sizeof(generic_item));\n\n    new->type = TYPE_SET;\n    new->line = yylineno;\n    new->next = NULL;\n    new->item.set.set_type = SET_SINGLETON;\n    rh_strncpy(new->item.set.set_u.name, set_name, MAXSTRLEN);\n\n    return new;\n}\n\narg_list_t *rh_config_CreateArgList(void)\n{\n    arg_list_t *p_list = (arg_list_t 
*)calloc(1, sizeof(arg_list_t));\n    p_list->nb_args = 0;\n    p_list->args = NULL;\n    return p_list;\n}\n\nvoid rh_config_AddArg(arg_list_t *p_list, char *arg)\n{\n    if (p_list->args != NULL)\n        p_list->args =\n            (char **)realloc(p_list->args,\n                             (p_list->nb_args + 1) * sizeof(char *));\n    else\n        p_list->args = (char **)calloc(1, sizeof(char *));\n\n    p_list->nb_args++;\n    p_list->args[p_list->nb_args - 1] = (char *)malloc(strlen(arg) + 1);\n    strcpy(p_list->args[p_list->nb_args - 1], arg);\n}\n\nvoid rh_config_SetArglist(generic_item *item, arg_list_t *arglist)\n{\n    if (item->type == TYPE_BOOL_EXPR)\n        item->item.bool_expr.expr_u.key_value.arg_list = arglist;\n    else if (item->type == TYPE_AFFECT)\n        item->item.affect.arg_list = arglist;\n}\n\nstatic const char *op2str(operator_t op)\n{\n    switch (op) {\n    case OP_EQUAL:\n        return \"==\";\n    case OP_DIFF:\n        return \"!=\";\n    case OP_GT:\n        return \">\";\n    case OP_GT_EQ:\n        return \">=\";\n    case OP_LT:\n        return \"<\";\n    case OP_LT_EQ:\n        return \"<=\";\n    case OP_CMD:\n        return \":\";\n    default:\n        return \"\";\n    }\n}\n\nstatic void print_bool_expr(FILE *output, type_bool_expr *bool_expr)\n{\n    int i;\n\n    switch (bool_expr->type) {\n    case BOOL_CONDITION:\n        fprintf(output, \"%s\", bool_expr->expr_u.key_value.varname);\n        fprintf(output, \" %s \", op2str(bool_expr->expr_u.key_value.op_type));\n        fprintf(output, \"%s\", bool_expr->expr_u.key_value.varvalue);\n\n        if ((bool_expr->expr_u.key_value.arg_list != NULL)\n            && (bool_expr->expr_u.key_value.arg_list->nb_args > 0)) {\n            fprintf(output, \" (\");\n            for (i = 0; i < bool_expr->expr_u.key_value.arg_list->nb_args;\n                 i++) {\n                if (i == 0)\n                    fprintf(output, \"%s\",\n                            
bool_expr->expr_u.key_value.arg_list->args[i]);\n                else\n                    fprintf(output, \", %s\",\n                            bool_expr->expr_u.key_value.arg_list->args[i]);\n            }\n            fprintf(output, \")\");\n        }\n        break;\n\n    case BOOL_UNARY:\n        if (bool_expr->oper == BOOL_OP_NOT)\n            fprintf(output, \"NOT (\");\n        else\n            fprintf(output, \"(\");\n\n        print_bool_expr(output, bool_expr->expr_u.members.expr1);\n        fprintf(output, \")\");\n        break;\n\n    case BOOL_BINARY:\n        fprintf(output, \"(\");\n\n        print_bool_expr(output, bool_expr->expr_u.members.expr1);\n\n        if (bool_expr->oper == BOOL_OP_AND)\n            fprintf(output, \") AND (\");\n        else if (bool_expr->oper == BOOL_OP_OR)\n            fprintf(output, \") OR (\");\n\n        print_bool_expr(output, bool_expr->expr_u.members.expr2);\n\n        fprintf(output, \")\");\n        break;\n    }\n}\n\nstatic void print_set(FILE *output, type_set *set)\n{\n    if (set->set_type == SET_SINGLETON)\n        fprintf(output, \"{%s}\", set->set_u.name);\n    else if (set->set_type == SET_NEGATION) {\n        fprintf(output, \"NOT (\");\n        print_set(output, set->set_u.op.set1);\n        fprintf(output, \")\");\n    } else {\n        fprintf(output, \"(\");\n        print_set(output, set->set_u.op.set1);\n        if (set->set_u.op.oper == SET_OP_UNION)\n            fprintf(output, \") UNION (\");\n        else if (set->set_u.op.oper == SET_OP_INTER)\n            fprintf(output, \") INTER (\");\n        print_set(output, set->set_u.op.set2);\n        fprintf(output, \")\");\n    }\n}\n\nunion {\n    char name[MAXSTRLEN];   /* for singleton set */\n    struct {\n        set_operator_t oper;\n        struct _type_set_ *set1;\n        struct _type_set_ *set2;\n    } op;   /* for union or insection set */\n} set_u;\n\n/**\n *  Displays the content of a list of blocks.\n */\nstatic void 
print_list_ident(FILE *output, list_items *list,\n                             unsigned int indent)\n{\n\n    generic_item *curr_item;\n    unsigned int i;\n\n    /* sanity check */\n    if (!list)\n        return;\n\n    curr_item = (*list);\n\n    while (curr_item) {\n\n        if (curr_item->type == TYPE_BLOCK) {\n            fprintf(output, \"%*s<BLOCK '%s'>\\n\", indent, \" \",\n                    curr_item->item.block.block_name);\n            print_list_ident(output, &curr_item->item.block.block_content,\n                             indent + 3);\n            fprintf(output, \"%*s</BLOCK '%s'>\\n\", indent, \" \",\n                    curr_item->item.block.block_name);\n        } else if (curr_item->type == TYPE_AFFECT) {\n            fprintf(output, \"%*s<AFFECT> %s='%s'\", indent, \" \",\n                    curr_item->item.affect.varname,\n                    curr_item->item.affect.varvalue);\n\n            if (curr_item->item.affect.arg_list) {\n                for (i = 0; i < curr_item->item.affect.arg_list->nb_args; i++) {\n                    if (i == 0)\n                        fprintf(output, \"('%s'\",\n                                curr_item->item.affect.arg_list->args[i]);\n                    else\n                        fprintf(output, \", '%s'\",\n                                curr_item->item.affect.arg_list->args[i]);\n                }\n                fprintf(output, \")\");\n            }\n            fprintf(output, \"</AFFECT>\\n\");\n\n        } else if (curr_item->type == TYPE_SET) {\n            fprintf(output, \"%*s\", indent, \" \");\n            /* class set */\n            print_set(output, &curr_item->item.set);\n            fprintf(output, \"\\n\");\n        } else if (curr_item->type == TYPE_BOOL_EXPR) {\n            fprintf(output, \"%*s\", indent, \" \");\n            /* boolean expression */\n            print_bool_expr(output, &curr_item->item.bool_expr);\n            fprintf(output, \"\\n\");\n        } else {\n      
      fprintf(output, \"/!\\\\ UNKNOWN ITEM TYPE %d\\n\", curr_item->type);\n        }\n\n        curr_item = curr_item->next;\n    }\n\n}\n\n/**\n *  Displays the content of a list of blocks.\n */\nvoid rh_config_print_list(FILE *output, list_items *list)\n{\n\n    print_list_ident(output, list, 0);\n\n}\n\nstatic void free_key_value(type_key_value *p_keyval)\n{\n    if (p_keyval->arg_list != NULL) {\n        int i;\n        if (p_keyval->arg_list->args) {\n            for (i = 0; i < p_keyval->arg_list->nb_args; i++)\n                free(p_keyval->arg_list->args[i]);\n\n            free(p_keyval->arg_list->args);\n        }\n        free(p_keyval->arg_list);\n        p_keyval->arg_list = NULL;\n    }\n}\n\nstatic void free_bool_expr_recurse(type_bool_expr *p_expr)\n{\n    if (p_expr->type == BOOL_CONDITION) {\n        free_key_value(&p_expr->expr_u.key_value);\n    } else if (p_expr->type == BOOL_UNARY) {\n        if (p_expr->expr_u.members.expr1) {\n            free_bool_expr_recurse(p_expr->expr_u.members.expr1);\n            free(p_expr->expr_u.members.expr1);\n        }\n\n    } else if (p_expr->type == BOOL_BINARY) {\n        if (p_expr->expr_u.members.expr1) {\n            free_bool_expr_recurse(p_expr->expr_u.members.expr1);\n            free(p_expr->expr_u.members.expr1);\n        }\n        if (p_expr->expr_u.members.expr2) {\n            free_bool_expr_recurse(p_expr->expr_u.members.expr2);\n            free(p_expr->expr_u.members.expr2);\n        }\n    }\n\n}\n\nstatic void free_set_recurse(type_set *p_set)\n{\n    if (p_set->set_type == SET_BINARY) {\n        if (p_set->set_u.op.set1) {\n            free_set_recurse(p_set->set_u.op.set1);\n            free(p_set->set_u.op.set1);\n        }\n        if (p_set->set_u.op.set2) {\n            free_set_recurse(p_set->set_u.op.set2);\n            free(p_set->set_u.op.set2);\n        }\n    } else if (p_set->set_type == SET_NEGATION) {\n        if (p_set->set_u.op.set1) {\n            
free_set_recurse(p_set->set_u.op.set1);\n            free(p_set->set_u.op.set1);\n        }\n    }\n}\n\nstatic void free_list_items_recurse(list_items *list)\n{\n    generic_item *curr_item;\n    generic_item *next_item;\n\n    /* sanity check */\n    if (!list)\n        return;\n\n    curr_item = (*list);\n\n    while (curr_item) {\n\n        next_item = curr_item->next;\n\n        if (curr_item->type == TYPE_BLOCK) {\n            free_list_items_recurse(&curr_item->item.block.block_content);\n        } else if (curr_item->type == TYPE_BOOL_EXPR) {\n            free_bool_expr_recurse(&curr_item->item.bool_expr);\n        } else if (curr_item->type == TYPE_SET) {\n            free_set_recurse(&curr_item->item.set);\n        } else if (curr_item->type == TYPE_AFFECT) {\n            free_key_value(&curr_item->item.affect);\n        }\n        free(curr_item);\n        curr_item = next_item;\n\n    }\n    return;\n}\n\n/**\n * config_free_list:\n * Free ressources for a list\n */\nvoid rh_config_free_list(list_items *list)\n{\n\n    free_list_items_recurse(list);\n    free(list);\n    return;\n}\n\n/**\n * Resolve an environment variable.\n */\nvoid rh_config_resolv_var(char *dstvalue, char *var)\n{\n    char *val = getenv(var + 1);    /* skip '$' */\n    if (val == NULL) {\n        fprintf(stderr, \"WARNING: environment variable %s is not defined.\\n\",\n                var + 1);\n        dstvalue[0] = '\\0';\n    } else {\n        rh_strncpy(dstvalue, val, MAXSTRLEN);\n    }\n}\n"
  },
  {
    "path": "src/cfg_parsing/analyze.h",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2004-2009 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n/**\n * \\file    analyze.h\n * \\author  $Author: leibovic $\n * \\date    $Date: 2008/07/04 07:57:35 $\n * \\version\t$Revision: 1.2 $\n * \\brief   Building the syntax tree.\n *\n * Build the structure that represents a config file.\n *\n */\n\n#ifndef CONFPARSER_H\n#define CONFPARSER_H\n\n#include <stdio.h>\n#include <glib.h>\n#include <stdbool.h>\n\n#ifndef rh_strncpy\n#define rh_strncpy(_s1, _s2, _sz) do { \\\n    if (_sz > 0) {                     \\\n        strncpy(_s1, _s2, _sz-1);      \\\n        (_s1)[_sz-1] = '\\0';           \\\n    }                                  \\\n} while (0)\n#endif\n\n#define MAXSTRLEN   1024\n\nextern GString *current_file;\n\n/* A program consists of several blocks,\n * each block consists of variables definitions\n * and subblocks.\n */\n\n/* forward declaration of generic item */\nstruct _generic_item_;\n\ntypedef enum {\n    TYPE_BLOCK,\n    TYPE_BOOL_EXPR,\n    TYPE_SET,\n    TYPE_AFFECT\n} type_item;\n\ntypedef enum {\n    OP_EQUAL,\n    OP_DIFF,\n    OP_GT,\n    OP_GT_EQ,\n    OP_LT,\n    OP_LT_EQ,\n    OP_CMD\n} operator_t;\n\ntypedef struct _arg_list_ {\n    unsigned int nb_args;\n    char **args;\n} arg_list_t;\n\ntypedef struct _type_key_value_ {\n    operator_t  op_type;\n    char        varname[MAXSTRLEN];\n    char        varvalue[MAXSTRLEN];\n    arg_list_t *arg_list;\n} type_key_value;\n\ntypedef struct _type_block_ {\n    char block_name[MAXSTRLEN];\n    char block_id[MAXSTRLEN];\n    struct _generic_item_ *block_content;\n} type_block;\n\ntypedef 
enum {\n    BOOL_CONDITION,\n    BOOL_UNARY,\n    BOOL_BINARY\n} expr_type_t;\n\ntypedef enum {\n    BOOL_OP_IDENTITY,\n    BOOL_OP_NOT,\n    BOOL_OP_AND,\n    BOOL_OP_OR\n} bool_operator_t;\n\ntypedef enum {\n    SET_OP_NOT,\n    SET_OP_UNION,\n    SET_OP_INTER\n} set_operator_t;\n\ntypedef struct _type_bool_expr_ {\n    expr_type_t type;\n    bool_operator_t oper;\n\n    union {\n        struct {\n            struct _type_bool_expr_ *expr1;\n            struct _type_bool_expr_ *expr2;\n        } members;\n        type_key_value key_value;\n    } expr_u;\n\n} type_bool_expr;\n\n/* describes unions/intersections of classes */\ntypedef struct _type_set_ {\n    enum { SET_SINGLETON, SET_NEGATION, SET_BINARY } set_type;\n\n    union {\n        char name[MAXSTRLEN];   /* for singleton set */\n        struct {\n            set_operator_t      oper;\n            struct _type_set_  *set1;\n            struct _type_set_  *set2;\n        } op;   /* for union or insection set */\n    } set_u;\n\n} type_set;\n\ntypedef struct _generic_item_ {\n    type_item type;\n\n    union {\n        type_block      block;\n        type_bool_expr  bool_expr;\n        type_key_value  affect;\n        type_set        set;\n    } item;\n\n    /* next item in the list */\n    struct _generic_item_ *next;\n\n    /* Line of this item */\n    unsigned int line;\n\n    /* was it read? 
*/\n    bool is_read;\n\n} generic_item;\n\ntypedef generic_item *list_items;\n\n/**\n *  create a list of items\n */\nlist_items *rh_config_CreateItemsList(void);\n\n/**\n *  Create a block item with the given content\n */\ngeneric_item *rh_config_CreateBlock(char *blockname, char *blockid,\n                                    list_items *list);\n\n/**\n *  Create an affectation (key=value)\n */\ngeneric_item *rh_config_CreateAffect(char *varname, char *varval);\n\n/*  ------ Bool expr management functions -------- */\n\n/**\n *  Create a key <op> value peer (condition)\n */\ngeneric_item *rh_config_CreateKeyValueExpr(char *varname, operator_t op,\n                                           char *varval);\n\n/**\n * Create a block with a boolean expression\n */\ngeneric_item *rh_config_CreateBoolExpr(char *blockname, char *title,\n                                       generic_item *item);\n\n/**\n * Create unary boolean expression from key/value or other boolean expression\n */\ngeneric_item *rh_config_CreateBoolExpr_Unary(bool_operator_t op,\n                                             generic_item *item);\n\n/**\n * Create binary boolean expression from 2 expressions\n */\ngeneric_item *rh_config_CreateBoolExpr_Binary(bool_operator_t op,\n                                              generic_item *expr1,\n                                              generic_item *expr2);\n\n/*  ------ Sets management functions -------- */\n\ngeneric_item *rh_config_CreateSet(char *blockname, char *label,\n                                  generic_item *set);\ngeneric_item *rh_config_CreateSet_Unary(set_operator_t op, generic_item *set);\ngeneric_item *rh_config_CreateSet_Binary(set_operator_t op,\n                                         generic_item *set1,\n                                         generic_item *set2);\ngeneric_item *rh_config_CreateSet_Singleton(char *set_name);\n\n/*  ------ Arglist management functions -------- */\n\narg_list_t 
*rh_config_CreateArgList(void);\nvoid rh_config_AddArg(arg_list_t *arglist, char *arg);\nvoid rh_config_SetArglist(generic_item *item, arg_list_t *arglist);\n\n/**\n *  Add an item to a list\n */\nvoid rh_config_AddItem(list_items *list, generic_item *item);\n\n/**\n *  Displays the content of a list of blocks.\n */\nvoid rh_config_print_list(FILE *output, list_items *list);\n\n/**\n * config_free_list:\n * Free ressources for a list\n */\nvoid rh_config_free_list(list_items *list);\n\n/**\n * Resolve an environment variable.\n */\nvoid rh_config_resolv_var(char *dstvalue, char *var);\n\n#endif\n"
  },
  {
    "path": "src/cfg_parsing/conf_lex.c",
    "content": "\n#line 3 \"conf_lex.c\"\n\n#define  YY_INT_ALIGNED short int\n\n/* A lexical scanner generated by flex */\n\n#define FLEX_SCANNER\n#define YY_FLEX_MAJOR_VERSION 2\n#define YY_FLEX_MINOR_VERSION 6\n#define YY_FLEX_SUBMINOR_VERSION 1\n#if YY_FLEX_SUBMINOR_VERSION > 0\n#define FLEX_BETA\n#endif\n\n/* First, we deal with  platform-specific or compiler-specific issues. */\n\n/* begin standard C headers. */\n#include <stdio.h>\n#include <string.h>\n#include <errno.h>\n#include <stdlib.h>\n\n/* end standard C headers. */\n\n/* flex integer type definitions */\n\n#ifndef FLEXINT_H\n#define FLEXINT_H\n\n/* C99 systems have <inttypes.h>. Non-C99 systems may or may not. */\n\n#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L\n\n/* C99 says to define __STDC_LIMIT_MACROS before including stdint.h,\n * if you want the limit (max/min) macros for int types. \n */\n#ifndef __STDC_LIMIT_MACROS\n#define __STDC_LIMIT_MACROS 1\n#endif\n\n#include <inttypes.h>\ntypedef int8_t flex_int8_t;\ntypedef uint8_t flex_uint8_t;\ntypedef int16_t flex_int16_t;\ntypedef uint16_t flex_uint16_t;\ntypedef int32_t flex_int32_t;\ntypedef uint32_t flex_uint32_t;\n#else\ntypedef signed char flex_int8_t;\ntypedef short int flex_int16_t;\ntypedef int flex_int32_t;\ntypedef unsigned char flex_uint8_t; \ntypedef unsigned short int flex_uint16_t;\ntypedef unsigned int flex_uint32_t;\n\n/* Limits of integral types. 
*/\n#ifndef INT8_MIN\n#define INT8_MIN               (-128)\n#endif\n#ifndef INT16_MIN\n#define INT16_MIN              (-32767-1)\n#endif\n#ifndef INT32_MIN\n#define INT32_MIN              (-2147483647-1)\n#endif\n#ifndef INT8_MAX\n#define INT8_MAX               (127)\n#endif\n#ifndef INT16_MAX\n#define INT16_MAX              (32767)\n#endif\n#ifndef INT32_MAX\n#define INT32_MAX              (2147483647)\n#endif\n#ifndef UINT8_MAX\n#define UINT8_MAX              (255U)\n#endif\n#ifndef UINT16_MAX\n#define UINT16_MAX             (65535U)\n#endif\n#ifndef UINT32_MAX\n#define UINT32_MAX             (4294967295U)\n#endif\n\n#endif /* ! C99 */\n\n#endif /* ! FLEXINT_H */\n\n/* TODO: this is always defined, so inline it */\n#define yyconst const\n\n#if defined(__GNUC__) && __GNUC__ >= 3\n#define yynoreturn __attribute__((__noreturn__))\n#else\n#define yynoreturn\n#endif\n\n/* Returned upon end-of-file. */\n#define YY_NULL 0\n\n/* Promotes a possibly negative, possibly signed char to an unsigned\n * integer for use as an array index.  If the signed char is negative,\n * we want to instead treat it as an 8-bit unsigned char, hence the\n * double cast.\n */\n#define YY_SC_TO_UI(c) ((unsigned int) (unsigned char) c)\n\n/* Enter a start condition.  This macro really ought to take a parameter,\n * but we do it the disgusting crufty way forced on us by the ()-less\n * definition of BEGIN.\n */\n#define BEGIN (yy_start) = 1 + 2 *\n\n/* Translate the current start state into a value that can be later handed\n * to BEGIN to return to the state.  The YYSTATE alias is for lex\n * compatibility.\n */\n#define YY_START (((yy_start) - 1) / 2)\n#define YYSTATE YY_START\n\n/* Action number for EOF rule of a given start state. */\n#define YY_STATE_EOF(state) (YY_END_OF_BUFFER + state + 1)\n\n/* Special action meaning \"start processing a new file\". */\n#define YY_NEW_FILE yyrestart(yyin  )\n\n#define YY_END_OF_BUFFER_CHAR 0\n\n/* Size of default input buffer. 
*/\n#ifndef YY_BUF_SIZE\n#ifdef __ia64__\n/* On IA-64, the buffer size is 16k, not 8k.\n * Moreover, YY_BUF_SIZE is 2*YY_READ_BUF_SIZE in the general case.\n * Ditto for the __ia64__ case accordingly.\n */\n#define YY_BUF_SIZE 32768\n#else\n#define YY_BUF_SIZE 16384\n#endif /* __ia64__ */\n#endif\n\n/* The state buf must be large enough to hold one state per character in the main buffer.\n */\n#define YY_STATE_BUF_SIZE   ((YY_BUF_SIZE + 2) * sizeof(yy_state_type))\n\n#ifndef YY_TYPEDEF_YY_BUFFER_STATE\n#define YY_TYPEDEF_YY_BUFFER_STATE\ntypedef struct yy_buffer_state *YY_BUFFER_STATE;\n#endif\n\n#ifndef YY_TYPEDEF_YY_SIZE_T\n#define YY_TYPEDEF_YY_SIZE_T\ntypedef size_t yy_size_t;\n#endif\n\nextern int yyleng;\n\nextern FILE *yyin, *yyout;\n\n#define EOB_ACT_CONTINUE_SCAN 0\n#define EOB_ACT_END_OF_FILE 1\n#define EOB_ACT_LAST_MATCH 2\n\n    #define YY_LESS_LINENO(n)\n    #define YY_LINENO_REWIND_TO(ptr)\n    \n/* Return all but the first \"n\" matched characters back to the input stream. */\n#define yyless(n) \\\n\tdo \\\n\t\t{ \\\n\t\t/* Undo effects of setting up yytext. 
*/ \\\n        yy_size_t yyless_macro_arg = (n); \\\n        YY_LESS_LINENO(yyless_macro_arg);\\\n\t\t*yy_cp = (yy_hold_char); \\\n\t\tYY_RESTORE_YY_MORE_OFFSET \\\n\t\t(yy_c_buf_p) = yy_cp = yy_bp + yyless_macro_arg - YY_MORE_ADJ; \\\n\t\tYY_DO_BEFORE_ACTION; /* set up yytext again */ \\\n\t\t} \\\n\twhile ( 0 )\n\n#define unput(c) yyunput( c, (yytext_ptr)  )\n\n#ifndef YY_STRUCT_YY_BUFFER_STATE\n#define YY_STRUCT_YY_BUFFER_STATE\nstruct yy_buffer_state\n\t{\n\tFILE *yy_input_file;\n\n\tchar *yy_ch_buf;\t\t/* input buffer */\n\tchar *yy_buf_pos;\t\t/* current position in input buffer */\n\n\t/* Size of input buffer in bytes, not including room for EOB\n\t * characters.\n\t */\n\tint yy_buf_size;\n\n\t/* Number of characters read into yy_ch_buf, not including EOB\n\t * characters.\n\t */\n\tint yy_n_chars;\n\n\t/* Whether we \"own\" the buffer - i.e., we know we created it,\n\t * and can realloc() it to grow it, and should free() it to\n\t * delete it.\n\t */\n\tint yy_is_our_buffer;\n\n\t/* Whether this is an \"interactive\" input source; if so, and\n\t * if we're using stdio for input, then we want to use getc()\n\t * instead of fread(), to make sure we stop fetching input after\n\t * each newline.\n\t */\n\tint yy_is_interactive;\n\n\t/* Whether we're considered to be at the beginning of a line.\n\t * If so, '^' rules will be active on the next match, otherwise\n\t * not.\n\t */\n\tint yy_at_bol;\n\n    int yy_bs_lineno; /**< The line count. */\n    int yy_bs_column; /**< The column count. */\n    \n\t/* Whether to try to fill the input buffer when we reach the\n\t * end of it.\n\t */\n\tint yy_fill_buffer;\n\n\tint yy_buffer_status;\n\n#define YY_BUFFER_NEW 0\n#define YY_BUFFER_NORMAL 1\n\t/* When an EOF's been seen but there's still some text to process\n\t * then we mark the buffer as YY_EOF_PENDING, to indicate that we\n\t * shouldn't try reading from the input source any more.  
We might\n\t * still have a bunch of tokens to match, though, because of\n\t * possible backing-up.\n\t *\n\t * When we actually see the EOF, we change the status to \"new\"\n\t * (via yyrestart()), so that the user can continue scanning by\n\t * just pointing yyin at a new input file.\n\t */\n#define YY_BUFFER_EOF_PENDING 2\n\n\t};\n#endif /* !YY_STRUCT_YY_BUFFER_STATE */\n\n/* Stack of input buffers. */\nstatic size_t yy_buffer_stack_top = 0; /**< index of top of stack. */\nstatic size_t yy_buffer_stack_max = 0; /**< capacity of stack. */\nstatic YY_BUFFER_STATE * yy_buffer_stack = NULL; /**< Stack as an array. */\n\n/* We provide macros for accessing buffer states in case in the\n * future we want to put the buffer states in a more general\n * \"scanner state\".\n *\n * Returns the top of the stack, or NULL.\n */\n#define YY_CURRENT_BUFFER ( (yy_buffer_stack) \\\n                          ? (yy_buffer_stack)[(yy_buffer_stack_top)] \\\n                          : NULL)\n\n/* Same as previous macro, but useful when we know that the buffer stack is not\n * NULL or when we need an lvalue. For internal use only.\n */\n#define YY_CURRENT_BUFFER_LVALUE (yy_buffer_stack)[(yy_buffer_stack_top)]\n\n/* yy_hold_char holds the character lost when yytext is formed. */\nstatic char yy_hold_char;\nstatic int yy_n_chars;\t\t/* number of characters read into yy_ch_buf */\nint yyleng;\n\n/* Points to current character in buffer. */\nstatic char *yy_c_buf_p = NULL;\nstatic int yy_init = 0;\t\t/* whether we need to initialize */\nstatic int yy_start = 0;\t/* start state number */\n\n/* Flag which is used to allow yywrap()'s to do buffer switches\n * instead of setting up a fresh yyin.  
A bit of a hack ...\n */\nstatic int yy_did_buffer_switch_on_eof;\n\nvoid yyrestart (FILE *input_file  );\nvoid yy_switch_to_buffer (YY_BUFFER_STATE new_buffer  );\nYY_BUFFER_STATE yy_create_buffer (FILE *file,int size  );\nvoid yy_delete_buffer (YY_BUFFER_STATE b  );\nvoid yy_flush_buffer (YY_BUFFER_STATE b  );\nvoid yypush_buffer_state (YY_BUFFER_STATE new_buffer  );\nvoid yypop_buffer_state (void );\n\nstatic void yyensure_buffer_stack (void );\nstatic void yy_load_buffer_state (void );\nstatic void yy_init_buffer (YY_BUFFER_STATE b,FILE *file  );\n\n#define YY_FLUSH_BUFFER yy_flush_buffer(YY_CURRENT_BUFFER )\n\nYY_BUFFER_STATE yy_scan_buffer (char *base,yy_size_t size  );\nYY_BUFFER_STATE yy_scan_string (yyconst char *yy_str  );\nYY_BUFFER_STATE yy_scan_bytes (yyconst char *bytes,int len  );\n\nvoid *yyalloc (yy_size_t  );\nvoid *yyrealloc (void *,yy_size_t  );\nvoid yyfree (void *  );\n\n#define yy_new_buffer yy_create_buffer\n\n#define yy_set_interactive(is_interactive) \\\n\t{ \\\n\tif ( ! YY_CURRENT_BUFFER ){ \\\n        yyensure_buffer_stack (); \\\n\t\tYY_CURRENT_BUFFER_LVALUE =    \\\n            yy_create_buffer(yyin,YY_BUF_SIZE ); \\\n\t} \\\n\tYY_CURRENT_BUFFER_LVALUE->yy_is_interactive = is_interactive; \\\n\t}\n\n#define yy_set_bol(at_bol) \\\n\t{ \\\n\tif ( ! 
YY_CURRENT_BUFFER ){\\\n        yyensure_buffer_stack (); \\\n\t\tYY_CURRENT_BUFFER_LVALUE =    \\\n            yy_create_buffer(yyin,YY_BUF_SIZE ); \\\n\t} \\\n\tYY_CURRENT_BUFFER_LVALUE->yy_at_bol = at_bol; \\\n\t}\n\n#define YY_AT_BOL() (YY_CURRENT_BUFFER_LVALUE->yy_at_bol)\n\n/* Begin user sect3 */\n\ntypedef unsigned char YY_CHAR;\n\nFILE *yyin = NULL, *yyout = NULL;\n\ntypedef int yy_state_type;\n\nextern int yylineno;\n\nint yylineno = 1;\n\nextern char *yytext;\n#ifdef yytext_ptr\n#undef yytext_ptr\n#endif\n#define yytext_ptr yytext\n\nstatic yy_state_type yy_get_previous_state (void );\nstatic yy_state_type yy_try_NUL_trans (yy_state_type current_state  );\nstatic int yy_get_next_buffer (void );\nstatic void yynoreturn yy_fatal_error (yyconst char* msg  );\n\n/* Done after the current pattern has been matched and before the\n * corresponding action - sets up yytext.\n */\n#define YY_DO_BEFORE_ACTION \\\n\t(yytext_ptr) = yy_bp; \\\n\tyyleng = (int) (yy_cp - yy_bp); \\\n\t(yy_hold_char) = *yy_cp; \\\n\t*yy_cp = '\\0'; \\\n\t(yy_c_buf_p) = yy_cp;\n\n#define YY_NUM_RULES 52\n#define YY_END_OF_BUFFER 53\n/* This struct is not used in this scanner,\n   but its presence is necessary. 
*/\nstruct yy_trans_info\n\t{\n\tflex_int32_t yy_verify;\n\tflex_int32_t yy_nxt;\n\t};\nstatic yyconst flex_int16_t yy_accept[121] =\n    {   0,\n        0,    0,    0,    0,    0,    0,    0,    0,    0,    0,\n        0,    0,    0,    0,    0,    0,    0,    0,   53,   51,\n       49,   50,   51,   51,   10,   51,   11,   34,   51,   32,\n       51,   34,   34,   33,   12,   14,   13,   21,   34,   35,\n       26,   30,   24,   21,   21,   21,   21,   21,   31,   22,\n       39,   39,   38,   37,   36,   44,   44,   43,   42,   41,\n       41,   40,    3,    7,    7,    6,    5,    4,    9,    9,\n        8,    0,   46,    0,   10,    0,   34,   29,    0,   45,\n       20,   34,   21,   34,   27,   28,   23,   25,   21,   21,\n       21,   17,   21,    0,    0,   48,   20,   34,   34,    0,\n\n       47,   16,   21,   15,   21,    0,   34,   21,   21,    0,\n       34,   19,   18,    0,   34,    0,   34,    1,    2,    0\n    } ;\n\nstatic yyconst YY_CHAR yy_ec[256] =\n    {   0,\n        1,    1,    1,    1,    1,    1,    1,    1,    2,    3,\n        1,    2,    2,    1,    1,    1,    1,    1,    1,    1,\n        1,    1,    1,    1,    1,    1,    1,    1,    1,    1,\n        1,    2,    4,    5,    6,    7,    8,    1,    9,   10,\n       11,    1,    1,   12,   13,   14,   15,   13,   13,   13,\n       13,   13,   13,   13,   13,   13,   13,    1,   16,   17,\n       18,   19,    1,    1,   20,   14,   21,   22,   23,   14,\n       14,   14,   24,   14,   14,   25,   14,   26,   27,   14,\n       14,   28,   14,   29,   30,   14,   14,   14,   14,   14,\n        1,   31,    1,    1,   14,    1,   32,   14,   33,   34,\n\n       35,   14,   14,   14,   36,   14,   14,   37,   14,   38,\n       39,   14,   14,   40,   14,   41,   42,   14,   14,   14,\n       14,   14,   43,    1,   44,    1,    1,    1,    1,    1,\n        1,    1,    1,    1,    1,    1,    1,    1,    1,    1,\n        1,    1,    1,    1,    1,    1,    1,    1,    1,    1,\n        
1,    1,    1,    1,    1,    1,    1,    1,    1,    1,\n        1,    1,    1,    1,    1,    1,    1,    1,    1,    1,\n        1,    1,    1,    1,    1,    1,    1,    1,    1,    1,\n        1,    1,    1,    1,    1,    1,    1,    1,    1,    1,\n        1,    1,    1,    1,    1,    1,    1,    1,    1,    1,\n\n        1,    1,    1,    1,    1,    1,    1,    1,    1,    1,\n        1,    1,    1,    1,    1,    1,    1,    1,    1,    1,\n        1,    1,    1,    1,    1,    1,    1,    1,    1,    1,\n        1,    1,    1,    1,    1,    1,    1,    1,    1,    1,\n        1,    1,    1,    1,    1,    1,    1,    1,    1,    1,\n        1,    1,    1,    1,    1\n    } ;\n\nstatic yyconst YY_CHAR yy_meta[45] =\n    {   0,\n        1,    2,    2,    2,    2,    2,    1,    1,    2,    2,\n        2,    2,    3,    3,    1,    2,    2,    2,    2,    3,\n        3,    3,    3,    3,    3,    3,    3,    3,    3,    3,\n        1,    3,    3,    3,    3,    3,    3,    3,    3,    3,\n        3,    3,    2,    2\n    } ;\n\nstatic yyconst flex_uint16_t yy_base[137] =\n    {   0,\n        0,    2,    5,    0,   49,    0,   92,   96,  100,  102,\n      104,  110,  112,  116,  123,  127,  131,  133,  240,  341,\n      341,  341,  236,   72,    0,  190,  341,    0,  180,  341,\n      173,  130,   86,  341,  341,  341,  341,  140,  160,  341,\n      121,  141,  131,  171,  118,  124,  142,  126,  341,  341,\n      341,  341,  341,  341,  341,  341,  341,  341,  341,  341,\n      341,  341,  341,  341,  341,  341,  341,  341,  341,  341,\n      341,  143,  341,  127,    0,  139,    0,  341,  121,  341,\n      159,  131,  147,  208,  341,  341,  341,  341,  155,  158,\n      162,  157,  168,  195,  117,  341,  200,  200,  251,  113,\n\n      341,  196,  206,  197,  210,  205,  213,  238,  220,  217,\n      235,  245,  246,  251,  252,  252,  256,  341,    0,  341,\n      295,  298,  301,  304,  307,  310,  313,   97,  316,  319,\n      322,  325,  328,  331, 
 334,  337\n    } ;\n\nstatic yyconst flex_int16_t yy_def[137] =\n    {   0,\n      121,  121,  120,    3,  120,    5,  122,  122,  123,  123,\n      124,  124,  121,  121,  125,  125,  126,  126,  120,  120,\n      120,  120,  127,  120,  128,  120,  120,  129,  120,  120,\n      130,  131,  129,  120,  120,  120,  120,  132,  129,  120,\n      120,  120,  120,  132,   44,   44,   44,   44,  120,  120,\n      120,  120,  120,  120,  120,  120,  120,  120,  120,  120,\n      120,  120,  120,  120,  120,  120,  120,  120,  120,  120,\n      120,  127,  120,  120,  128,  133,  129,  120,  130,  120,\n      134,  129,   44,  135,  120,  120,  120,  120,   44,   44,\n       44,   44,   44,  120,  133,  120,  134,  129,  135,  136,\n\n      120,   44,   44,   44,   44,  120,  129,   44,   44,  120,\n      129,   44,   44,  120,  129,  120,  129,  120,  129,    0,\n      120,  120,  120,  120,  120,  120,  120,  120,  120,  120,\n      120,  120,  120,  120,  120,  120\n    } ;\n\nstatic yyconst flex_uint16_t yy_nxt[386] =\n    {   0,\n      120,   21,   22,   21,   22,   20,   21,   22,   20,   20,\n       23,   20,   24,   20,   20,   20,   20,   20,   25,   26,\n       20,   20,   20,   20,   25,   25,   25,   25,   25,   25,\n       25,   25,   25,   25,   25,   20,   25,   25,   25,   25,\n       25,   25,   25,   25,   25,   25,   25,   27,   20,   28,\n       21,   22,   29,   30,   31,   32,   33,   34,   35,   36,\n       37,   28,   38,   39,   40,   41,   42,   43,   44,   38,\n       38,   38,   45,   38,   46,   47,   38,   38,   48,   28,\n       44,   38,   38,   38,   45,   38,   46,   47,   38,   38,\n       48,   49,   50,   52,   53,   74,   54,   52,   53,   75,\n\n       54,   57,   58,   57,   58,   61,   62,   74,   59,   82,\n       59,   61,   62,   21,   22,  101,   63,   21,   22,   96,\n       63,   82,   55,   80,   65,   66,   55,   67,   65,   66,\n       77,   67,   70,   71,   70,   71,   77,   77,   85,   86,\n       77,   96,   77,   
90,   77,   73,   77,   77,   88,   83,\n       91,   93,   94,   68,   77,   90,   98,   68,   87,   77,\n       77,   83,   91,   93,   94,   77,   77,   83,   98,   92,\n       77,   77,   83,   77,   84,   80,  102,   77,   77,   83,\n       83,   92,   83,   83,   83,   77,  103,   83,  102,   77,\n      104,  105,   83,   83,   83,   83,   89,   78,  103,   83,\n\n       77,   77,  104,  105,   76,   83,   77,   77,   89,  100,\n      101,  100,  100,  100,   77,  106,  100,  100,  100,  100,\n      107,   83,   83,  100,  100,  100,  100,  106,  108,  110,\n       77,   83,  107,   83,   83,   83,  109,  111,   73,  120,\n      108,  110,  120,   83,  120,  113,  114,   83,  109,  111,\n      100,  100,  100,  101,  100,  100,  100,  113,  114,  100,\n      100,  100,  100,   83,  115,  112,  100,  100,  100,  100,\n       83,   83,  116,  117,  118,   83,  115,  112,  119,  120,\n      120,  120,   83,   83,  116,  117,  118,  120,  120,  120,\n      119,  120,  120,  100,  100,   20,   20,   20,   51,   51,\n\n       51,   56,   56,   56,   60,   60,   60,   64,   64,   64,\n       69,   69,   69,   72,   72,   72,   77,  120,   77,   79,\n       79,   79,   81,  120,   81,   83,  120,   83,   95,   95,\n       95,   97,  120,   97,   99,   99,   99,  100,  100,  100,\n       19,  120,  120,  120,  120,  120,  120,  120,  120,  120,\n      120,  120,  120,  120,  120,  120,  120,  120,  120,  120,\n      120,  120,  120,  120,  120,  120,  120,  120,  120,  120,\n      120,  120,  120,  120,  120,  120,  120,  120,  120,  120,\n      120,  120,  120,  120,  120\n    } ;\n\nstatic yyconst flex_int16_t yy_chk[386] =\n    {   0,\n        0,    1,    1,    2,    2,    3,    3,    3,    3,    3,\n        3,    3,    3,    3,    3,    3,    3,    3,    3,    3,\n        3,    3,    3,    3,    3,    3,    3,    3,    3,    3,\n        3,    3,    3,    3,    3,    3,    3,    3,    3,    3,\n        3,    3,    3,    3,    3,    3,    3,    3,    3,    5,\n      
  5,    5,    5,    5,    5,    5,    5,    5,    5,    5,\n        5,    5,    5,    5,    5,    5,    5,    5,    5,    5,\n        5,    5,    5,    5,    5,    5,    5,    5,    5,    5,\n        5,    5,    5,    5,    5,    5,    5,    5,    5,    5,\n        5,    5,    5,    7,    7,   24,    7,    8,    8,  128,\n\n        8,    9,    9,   10,   10,   11,   11,   24,    9,   33,\n       10,   12,   12,   13,   13,  100,   13,   14,   14,   95,\n       14,   33,    7,   79,   15,   15,    8,   15,   16,   16,\n       32,   16,   17,   17,   18,   18,   32,   32,   41,   41,\n       38,   76,   32,   45,   32,   72,   38,   38,   43,   46,\n       46,   48,   74,   15,   38,   45,   82,   16,   42,   81,\n       32,   46,   46,   48,   74,   81,   81,   47,   82,   47,\n       38,   44,   83,   81,   39,   31,   89,   44,   44,   47,\n       89,   47,   92,   90,   83,   44,   90,   91,   89,   81,\n       91,   93,   89,   93,   92,   90,   44,   29,   90,   91,\n\n       97,   44,   91,   93,   26,   93,   97,   97,   44,   84,\n       84,   84,   84,   84,   97,   94,   84,   84,   84,   84,\n       98,  102,  104,   84,   84,   84,   84,   94,  103,  106,\n       97,  103,   98,  102,  104,  105,  105,  107,   23,   19,\n      103,  106,    0,  103,    0,  109,  110,  105,  105,  107,\n       84,   84,   99,   99,   99,   99,   99,  109,  110,   99,\n       99,   99,   99,  108,  111,  108,   99,   99,   99,   99,\n      112,  113,  114,  115,  116,  108,  111,  108,  117,    0,\n        0,    0,  112,  113,  114,  115,  116,    0,    0,    0,\n      117,    0,    0,   99,   99,  121,  121,  121,  122,  122,\n\n      122,  123,  123,  123,  124,  124,  124,  125,  125,  125,\n      126,  126,  126,  127,  127,  127,  129,    0,  129,  130,\n      130,  130,  131,    0,  131,  132,    0,  132,  133,  133,\n      133,  134,    0,  134,  135,  135,  135,  136,  136,  136,\n      120,  120,  120,  120,  120,  120,  120,  120,  120,  120,\n      120,  120,  
120,  120,  120,  120,  120,  120,  120,  120,\n      120,  120,  120,  120,  120,  120,  120,  120,  120,  120,\n      120,  120,  120,  120,  120,  120,  120,  120,  120,  120,\n      120,  120,  120,  120,  120\n    } ;\n\nstatic yy_state_type yy_last_accepting_state;\nstatic char *yy_last_accepting_cpos;\n\nextern int yy_flex_debug;\nint yy_flex_debug = 0;\n\n/* The intent behind this definition is that it'll catch\n * any uses of REJECT which flex missed.\n */\n#define REJECT reject_used_but_not_detected\n#define yymore() yymore_used_but_not_detected\n#define YY_MORE_ADJ 0\n#define YY_RESTORE_YY_MORE_OFFSET\nchar *yytext;\n#line 1 \"conf_lex.l\"\n#line 2 \"conf_lex.l\"\n/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2004-2009 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n#include \"config.h\"\n#include \"analyze.h\"\n#include \"conf_yacc.h\"\n\n#include <stdio.h>\n#include <errno.h>\n#include <stdlib.h>\n#include <libgen.h>\n#include <glib.h>\n\n#if HAVE_STRING_H\n#   include <string.h>\n#endif\n\n/* current line number */\nint yylineno;\n\n/* levels of brackets nbr */\nint accolades;\n\n/* level of parenthesis nbr */\nint parenthesis;\n\n/* Traitement des messages d'erreur */\nvoid set_error(const char *s);\n\n#define ERRLEN 1024\nchar err_str[ERRLEN]=\"\";\n\n/* Stockage des chaines\n*/\nGString *YY_PARSED_STRING;\n\nvoid YY_BUFFER_APPEND(const char *s) {\n    g_string_append(YY_PARSED_STRING, s);\n}\n\nvoid YY_BUFFER_RESET(void) {\n    g_string_set_size(YY_PARSED_STRING, 0);\n}\n\n/* includes management */\n#define FILE_LEN 1024\nGString *current_file;\n\n#define MAX_INCLUDE_DEPTH  10\nYY_BUFFER_STATE 
include_stack[MAX_INCLUDE_DEPTH];\nint include_prev_state;\n\n/* keep track of filenames and line numbers */\nunsigned int lines_stack[MAX_INCLUDE_DEPTH];\nGString *files_stack[MAX_INCLUDE_DEPTH];\n\nint include_stack_index = 0;\n\n\n/* initialisation du parser */\n#define YY_USER_INIT {          \\\n    unsigned int i;             \\\n    yylineno = 1;               \\\n    accolades = 0;              \\\n    parenthesis = 0;            \\\n    include_stack_index = 0;    \\\n    for (i = 0; i < MAX_INCLUDE_DEPTH; i++) { \\\n        lines_stack[i] = 0;     \\\n        if (files_stack[i] == NULL) \\\n            files_stack[i] = g_string_sized_new(FILE_LEN); \\\n    }                           \\\n    if (YY_PARSED_STRING == NULL) YY_PARSED_STRING = g_string_sized_new(MAXSTRLEN); \\\n    if (current_file == NULL) current_file = g_string_sized_new(FILE_LEN); \\\n    BEGIN YY_INIT; \\\n}\n\n#ifdef _DEBUG_PARSING\n#define DEBUG_LEX   printf\n#else\n/* do nothing */\nstatic void DEBUG_LEX( char * format, ... ) { return ; }\n#endif\n\n\n/* These functions are defined by bison/yacc but not used, which\n * causes a compiler warning. */\nstatic void yyunput(int c, char *buf_ptr) __attribute__((unused));\nstatic int input(void) __attribute__((unused));\n\n\n/* lettre posant probleme dans une chaine */\n/* comment est compose un identifiant */\n/* INCLUDE state is used for picking the name of the include file */\n\n#line 682 \"conf_lex.c\"\n\n#define INITIAL 0\n#define YY_INIT 1\n#define INBLOC 2\n#define STRING1 3\n#define STRING2 4\n#define ESC1 5\n#define INCLUDE 6\n#define INCL_STRING 7\n#define INCL_ESC 8\n\n#ifndef YY_NO_UNISTD_H\n/* Special case for \"unistd.h\", since it is non-ANSI. 
We include it way\n * down here because we want the user's section 1 to have been scanned first.\n * The user has a chance to override it with an option.\n */\n#include <unistd.h>\n#endif\n\n#ifndef YY_EXTRA_TYPE\n#define YY_EXTRA_TYPE void *\n#endif\n\nstatic int yy_init_globals (void );\n\n/* Accessor methods to globals.\n   These are made visible to non-reentrant scanners for convenience. */\n\nint yylex_destroy (void );\n\nint yyget_debug (void );\n\nvoid yyset_debug (int debug_flag  );\n\nYY_EXTRA_TYPE yyget_extra (void );\n\nvoid yyset_extra (YY_EXTRA_TYPE user_defined  );\n\nFILE *yyget_in (void );\n\nvoid yyset_in  (FILE * _in_str  );\n\nFILE *yyget_out (void );\n\nvoid yyset_out  (FILE * _out_str  );\n\n\t\t\tint yyget_leng (void );\n\nchar *yyget_text (void );\n\nint yyget_lineno (void );\n\nvoid yyset_lineno (int _line_number  );\n\n/* Macros after this point can all be overridden by user definitions in\n * section 1.\n */\n\n#ifndef YY_SKIP_YYWRAP\n#ifdef __cplusplus\nextern \"C\" int yywrap (void );\n#else\nextern int yywrap (void );\n#endif\n#endif\n\n#ifndef YY_NO_UNPUT\n    \n    static void yyunput (int c,char *buf_ptr  );\n    \n#endif\n\n#ifndef yytext_ptr\nstatic void yy_flex_strncpy (char *,yyconst char *,int );\n#endif\n\n#ifdef YY_NEED_STRLEN\nstatic int yy_flex_strlen (yyconst char * );\n#endif\n\n#ifndef YY_NO_INPUT\n\n#ifdef __cplusplus\nstatic int yyinput (void );\n#else\nstatic int input (void );\n#endif\n\n#endif\n\n/* Amount of stuff to slurp up with each read. */\n#ifndef YY_READ_BUF_SIZE\n#ifdef __ia64__\n/* On IA-64, the buffer size is 16k, not 8k */\n#define YY_READ_BUF_SIZE 16384\n#else\n#define YY_READ_BUF_SIZE 8192\n#endif /* __ia64__ */\n#endif\n\n/* Copy whatever the last rule matched to the standard output. 
*/\n#ifndef ECHO\n/* This used to be an fputs(), but since the string might contain NUL's,\n * we now use fwrite().\n */\n#define ECHO do { if (fwrite( yytext, (size_t) yyleng, 1, yyout )) {} } while (0)\n#endif\n\n/* Gets input and stuffs it into \"buf\".  number of characters read, or YY_NULL,\n * is returned in \"result\".\n */\n#ifndef YY_INPUT\n#define YY_INPUT(buf,result,max_size) \\\n\tif ( YY_CURRENT_BUFFER_LVALUE->yy_is_interactive ) \\\n\t\t{ \\\n\t\tint c = '*'; \\\n\t\tint n; \\\n\t\tfor ( n = 0; n < max_size && \\\n\t\t\t     (c = getc( yyin )) != EOF && c != '\\n'; ++n ) \\\n\t\t\tbuf[n] = (char) c; \\\n\t\tif ( c == '\\n' ) \\\n\t\t\tbuf[n++] = (char) c; \\\n\t\tif ( c == EOF && ferror( yyin ) ) \\\n\t\t\tYY_FATAL_ERROR( \"input in flex scanner failed\" ); \\\n\t\tresult = n; \\\n\t\t} \\\n\telse \\\n\t\t{ \\\n\t\terrno=0; \\\n\t\twhile ( (result = (int) fread(buf, 1, (yy_size_t) max_size, yyin)) == 0 && ferror(yyin)) \\\n\t\t\t{ \\\n\t\t\tif( errno != EINTR) \\\n\t\t\t\t{ \\\n\t\t\t\tYY_FATAL_ERROR( \"input in flex scanner failed\" ); \\\n\t\t\t\tbreak; \\\n\t\t\t\t} \\\n\t\t\terrno=0; \\\n\t\t\tclearerr(yyin); \\\n\t\t\t} \\\n\t\t}\\\n\\\n\n#endif\n\n/* No semi-colon after return; correct usage is to write \"yyterminate();\" -\n * we don't want an extra ';' after the \"return\" because that will cause\n * some compilers to complain about unreachable statements.\n */\n#ifndef yyterminate\n#define yyterminate() return YY_NULL\n#endif\n\n/* Number of entries by which start-condition stack grows. */\n#ifndef YY_START_STACK_INCR\n#define YY_START_STACK_INCR 25\n#endif\n\n/* Report a fatal error. 
*/\n#ifndef YY_FATAL_ERROR\n#define YY_FATAL_ERROR(msg) yy_fatal_error( msg )\n#endif\n\n/* end tables serialization structures and prototypes */\n\n/* Default declaration of generated scanner - a define so the user can\n * easily add parameters.\n */\n#ifndef YY_DECL\n#define YY_DECL_IS_OURS 1\n\nextern int yylex (void);\n\n#define YY_DECL int yylex (void)\n#endif /* !YY_DECL */\n\n/* Code executed at the beginning of each rule, after yytext and yyleng\n * have been set up.\n */\n#ifndef YY_USER_ACTION\n#define YY_USER_ACTION\n#endif\n\n/* Code executed at the end of each rule. */\n#ifndef YY_BREAK\n#define YY_BREAK /*LINTED*/break;\n#endif\n\n#define YY_RULE_SETUP \\\n\tYY_USER_ACTION\n\n/** The main scanner function which does all the work.\n */\nYY_DECL\n{\n\tyy_state_type yy_current_state;\n\tchar *yy_cp, *yy_bp;\n\tint yy_act;\n    \n\tif ( !(yy_init) )\n\t\t{\n\t\t(yy_init) = 1;\n\n#ifdef YY_USER_INIT\n\t\tYY_USER_INIT;\n#endif\n\n\t\tif ( ! (yy_start) )\n\t\t\t(yy_start) = 1;\t/* first start state */\n\n\t\tif ( ! yyin )\n\t\t\tyyin = stdin;\n\n\t\tif ( ! yyout )\n\t\t\tyyout = stdout;\n\n\t\tif ( ! YY_CURRENT_BUFFER ) {\n\t\t\tyyensure_buffer_stack ();\n\t\t\tYY_CURRENT_BUFFER_LVALUE =\n\t\t\t\tyy_create_buffer(yyin,YY_BUF_SIZE );\n\t\t}\n\n\t\tyy_load_buffer_state( );\n\t\t}\n\n\t{\n#line 120 \"conf_lex.l\"\n\n\n#line 911 \"conf_lex.c\"\n\n\twhile ( /*CONSTCOND*/1 )\t\t/* loops until end-of-file is reached */\n\t\t{\n\t\tyy_cp = (yy_c_buf_p);\n\n\t\t/* Support of yytext. 
*/\n\t\t*yy_cp = (yy_hold_char);\n\n\t\t/* yy_bp points to the position in yy_ch_buf of the start of\n\t\t * the current run.\n\t\t */\n\t\tyy_bp = yy_cp;\n\n\t\tyy_current_state = (yy_start);\nyy_match:\n\t\tdo\n\t\t\t{\n\t\t\tYY_CHAR yy_c = yy_ec[YY_SC_TO_UI(*yy_cp)] ;\n\t\t\tif ( yy_accept[yy_current_state] )\n\t\t\t\t{\n\t\t\t\t(yy_last_accepting_state) = yy_current_state;\n\t\t\t\t(yy_last_accepting_cpos) = yy_cp;\n\t\t\t\t}\n\t\t\twhile ( yy_chk[yy_base[yy_current_state] + yy_c] != yy_current_state )\n\t\t\t\t{\n\t\t\t\tyy_current_state = (int) yy_def[yy_current_state];\n\t\t\t\tif ( yy_current_state >= 121 )\n\t\t\t\t\tyy_c = yy_meta[(unsigned int) yy_c];\n\t\t\t\t}\n\t\t\tyy_current_state = yy_nxt[yy_base[yy_current_state] + (flex_int16_t) yy_c];\n\t\t\t++yy_cp;\n\t\t\t}\n\t\twhile ( yy_base[yy_current_state] != 341 );\n\nyy_find_action:\n\t\tyy_act = yy_accept[yy_current_state];\n\t\tif ( yy_act == 0 )\n\t\t\t{ /* have to back up */\n\t\t\tyy_cp = (yy_last_accepting_cpos);\n\t\t\tyy_current_state = (yy_last_accepting_state);\n\t\t\tyy_act = yy_accept[yy_current_state];\n\t\t\t}\n\n\t\tYY_DO_BEFORE_ACTION;\n\ndo_action:\t/* This label is used only to access EOF actions. 
*/\n\n\t\tswitch ( yy_act )\n\t{ /* beginning of action switch */\n\t\t\tcase 0: /* must back up */\n\t\t\t/* undo the effects of YY_DO_BEFORE_ACTION */\n\t\t\t*yy_cp = (yy_hold_char);\n\t\t\tyy_cp = (yy_last_accepting_cpos);\n\t\t\tyy_current_state = (yy_last_accepting_state);\n\t\t\tgoto yy_find_action;\n\ncase 1:\nYY_RULE_SETUP\n#line 122 \"conf_lex.l\"\n{/* Start reading name of included file */\n                        DEBUG_LEX(\"INCLUDE\\n\");\n                        BEGIN INCLUDE;\n                        include_prev_state = YY_INIT;\n                        /* not a token, return nothing */\n                     }\n\tYY_BREAK\ncase 2:\nYY_RULE_SETUP\n#line 129 \"conf_lex.l\"\n{/* Start reading name of included file */\n                        DEBUG_LEX(\"INCLUDE\\n\");\n                        BEGIN INCLUDE;\n                        include_prev_state = INBLOC;\n                        /* not a token, return nothing */\n                    }\n\tYY_BREAK\ncase 3:\nYY_RULE_SETUP\n#line 136 \"conf_lex.l\"\n{ /* start include file name */\n                      BEGIN INCL_STRING;\n                      DEBUG_LEX(\"file:<\");\n                      YY_BUFFER_RESET();\n                    }\n\tYY_BREAK\ncase 4:\nYY_RULE_SETUP\n#line 142 \"conf_lex.l\"\n{BEGIN INCL_ESC;}\n\tYY_BREAK\ncase 5:\nYY_RULE_SETUP\n#line 144 \"conf_lex.l\"\n{ /* include file read */\n                        unsigned int i;\n                        GString *new_file_path;\n                        DEBUG_LEX(\">\");\n\n                        if ( include_stack_index >= MAX_INCLUDE_DEPTH )\n                        {\n                           /* error */\n                           snprintf(err_str,ERRLEN,\"in \\\"%s\\\", line %d: includes nested too deeply\",current_file->str, yylineno);\n                           set_error(err_str);\n                           return _ERROR_;\n                        }\n\n                        /* replace environment variables */\n                    
    if (YY_PARSED_STRING->str[0] == '$')\n                        {\n                            /* included file is an environment variable */\n                            char *val = getenv(YY_PARSED_STRING->str + 1); /* skip '$' */\n                            if (val == NULL)\n                            {\n                               snprintf(err_str, ERRLEN, \"in \\\"%s\\\", line %d: environment variable '%s' is not set\",\n                                        current_file->str, yylineno, YY_PARSED_STRING->str + 1);\n                               set_error(err_str);\n                               return _ERROR_;\n                            }\n                            else if (strlen(val) >= MAXSTRLEN)\n                            {\n                               snprintf(err_str, ERRLEN, \"in \\\"%s\\\", line %d: \"\n                                        \"file name too long in include statement\",\n                                        current_file->str, yylineno);\n                               set_error(err_str);\n                               return _ERROR_;\n                            }\n                            g_string_assign(YY_PARSED_STRING, val);\n                        }\n\n                        include_stack[include_stack_index] = YY_CURRENT_BUFFER;\n                        lines_stack[include_stack_index] = yylineno;\n                        g_string_assign(files_stack[include_stack_index], current_file->str);\n\n                        /* relative path management */\n                        new_file_path = g_string_sized_new(FILE_LEN);\n\n                        /* 1) if the new path is absolute, nothing to do\n                         * 2) if there was no '/' in previous dir, the new path\n                         *  is relative to the current dir.\n                         */\n                        if ((YY_PARSED_STRING->str[0] == '/')\n                            || (strchr(current_file->str, '/') == NULL))\n       
                 {\n                            g_string_assign(new_file_path, YY_PARSED_STRING->str);\n                        }\n                        else\n                        {\n                            /* in any other case, path is relative to the current config file\n                             * directory */\n                            GString *tmp_buf;\n                            char *path;\n\n                            tmp_buf = g_string_new(current_file->str);\n\n                            path = dirname(tmp_buf->str);\n\n                            g_string_printf(new_file_path, \"%s/%s\", path, YY_PARSED_STRING->str);\n                            g_string_free(tmp_buf, TRUE);\n                        }\n\n                        /* loop detection */\n\n                        for ( i = 0; i <= include_stack_index; i++ )\n                        {\n                            if (!strcmp(files_stack[i]->str, new_file_path->str))\n                            {\n                               snprintf(err_str,ERRLEN,\"in \\\"%s\\\", line %d: include loop detected: \\\"%s\\\" already parsed\",\n                                        current_file->str, yylineno, new_file_path->str);\n                               set_error(err_str);\n                               g_string_free(new_file_path, TRUE);\n                               return _ERROR_;\n                            }\n                        }\n\n                        include_stack_index ++;\n\n                        yyin = fopen(new_file_path->str, \"r\");\n\n                        if ( yyin == NULL )\n                        {\n                           /* error */\n                           snprintf(err_str, ERRLEN, \"in \\\"%s\\\", line %d: error %d opening file \\\"%s\\\": %s\",\n                                    current_file->str, yylineno,\n                                    errno, new_file_path->str, strerror(errno));\n                           
set_error(err_str);\n                           g_string_free(new_file_path, TRUE);\n                           return _ERROR_;\n                        }\n\n                        yylineno = 1;\n                        g_string_assign(current_file, new_file_path->str);\n                        g_string_free(new_file_path, TRUE);\n\n                        /* change current buffer */\n                        yy_switch_to_buffer(yy_create_buffer(yyin,YY_BUF_SIZE ) );\n\n                        /* next state depends on the state before %include is met */\n                        BEGIN include_prev_state;\n                    }\n\tYY_BREAK\ncase 6:\n/* rule 6 can match eol */\nYY_RULE_SETUP\n#line 252 \"conf_lex.l\"\n{\n                            snprintf(err_str,ERRLEN,\"in \\\"%s\\\", line %d: missing closing quote.\",current_file->str, yylineno);\n                            set_error(err_str);\n                            yylineno++;\n                            return _ERROR_;\n                     }\n\tYY_BREAK\ncase 7:\nYY_RULE_SETUP\n#line 259 \"conf_lex.l\"\n{YY_BUFFER_APPEND(yytext); DEBUG_LEX(\"%c\",*yytext);/* caractere du fichier */}\n\tYY_BREAK\ncase 8:\n/* rule 8 can match eol */\nYY_RULE_SETUP\n#line 261 \"conf_lex.l\"\n{BEGIN INCL_STRING; yylineno++;}/* ignore un saut de ligne echappe*/\n\tYY_BREAK\ncase 9:\nYY_RULE_SETUP\n#line 262 \"conf_lex.l\"\n{DEBUG_LEX(\"%c\",*yytext);YY_BUFFER_APPEND(yytext);BEGIN INCL_STRING;/* caractere du fichier */}\n\tYY_BREAK\ncase YY_STATE_EOF(INITIAL):\ncase YY_STATE_EOF(YY_INIT):\ncase YY_STATE_EOF(INBLOC):\ncase YY_STATE_EOF(STRING1):\ncase YY_STATE_EOF(STRING2):\ncase YY_STATE_EOF(ESC1):\ncase YY_STATE_EOF(INCLUDE):\ncase YY_STATE_EOF(INCL_STRING):\ncase YY_STATE_EOF(INCL_ESC):\n#line 265 \"conf_lex.l\"\n{ /* end of included file */\n            DEBUG_LEX(\"<EOF>\\n\");\n\n            include_stack_index --;\n\n            if ( include_stack_index < 0 )\n            {\n                /* eof of all streams */\n    
            yyterminate();\n            }\n            else\n            {\n                fclose(yyin);\n                /*go down into stack */\n                yy_delete_buffer(YY_CURRENT_BUFFER );\n\n                yylineno = lines_stack[include_stack_index];\n                g_string_assign(current_file, files_stack[include_stack_index]->str);\n\n                yy_switch_to_buffer(include_stack[include_stack_index] );\n            }\n        }\n\tYY_BREAK\ncase 10:\nYY_RULE_SETUP\n#line 290 \"conf_lex.l\"\n{\n                    /* identifier */\n                    DEBUG_LEX(\"[bloc:%s]\\n\",yytext);\n                    rh_strncpy(yylval.str_val,yytext,MAXSTRLEN);\n                    return IDENTIFIER;\n                 }\n\tYY_BREAK\ncase 11:\nYY_RULE_SETUP\n#line 298 \"conf_lex.l\"\n{/* debut de bloc */\n                        DEBUG_LEX(\"BEGIN_BLOCK\\n\");\n                        BEGIN INBLOC;\n                        accolades++;\n                        return BEGIN_BLOCK;\n                 }\n\tYY_BREAK\ncase 12:\nYY_RULE_SETUP\n#line 305 \"conf_lex.l\"\n{\n                    DEBUG_LEX(\"(\");\n                    parenthesis++;\n                    return BEGIN_PARENTHESIS;\n             }\n\tYY_BREAK\ncase 13:\nYY_RULE_SETUP\n#line 311 \"conf_lex.l\"\n{DEBUG_LEX(\",  \"); return VALUE_SEPARATOR;}\n\tYY_BREAK\ncase 14:\nYY_RULE_SETUP\n#line 312 \"conf_lex.l\"\n{BEGIN INBLOC;  DEBUG_LEX(\")\\n\");\n                if ( parenthesis <= 0 )\n                    {\n                       /* error */\n                       snprintf(err_str,ERRLEN,\"in \\\"%s\\\", line %d: '%c' too much closing parenthesis\",current_file->str,yylineno,*yytext);\n                       set_error(err_str);\n                       return _ERROR_;\n                    }\n                    else\n                        parenthesis --;\n\n                    return END_PARENTHESIS;\n                }\n\tYY_BREAK\ncase 15:\nYY_RULE_SETUP\n#line 326 \"conf_lex.l\"\n{ 
DEBUG_LEX(\" NOT \"); return NOT; }\n\tYY_BREAK\ncase 16:\nYY_RULE_SETUP\n#line 327 \"conf_lex.l\"\n{ DEBUG_LEX(\" AND \"); return AND; }\n\tYY_BREAK\ncase 17:\nYY_RULE_SETUP\n#line 328 \"conf_lex.l\"\n{ DEBUG_LEX(\" OR \"); return OR; }\n\tYY_BREAK\ncase 18:\nYY_RULE_SETUP\n#line 330 \"conf_lex.l\"\n{ DEBUG_LEX(\" UNION \"); return UNION; }\n\tYY_BREAK\ncase 19:\nYY_RULE_SETUP\n#line 331 \"conf_lex.l\"\n{ DEBUG_LEX(\" INTER \"); return INTER; }\n\tYY_BREAK\ncase 20:\nYY_RULE_SETUP\n#line 333 \"conf_lex.l\"\n{\n                    /* environment variable */\n                    DEBUG_LEX(\"[VAR:%s]\",yytext);\n                    rh_strncpy(yylval.str_val,yytext,MAXSTRLEN);\n                    return ENV_VAR;\n                }\n\tYY_BREAK\ncase 21:\nYY_RULE_SETUP\n#line 340 \"conf_lex.l\"\n{\n                    /* identifier */\n                    DEBUG_LEX(\"[%s]\",yytext);\n                    rh_strncpy(yylval.str_val,yytext,MAXSTRLEN);\n                    return IDENTIFIER;\n                }\n\tYY_BREAK\ncase 22:\nYY_RULE_SETUP\n#line 348 \"conf_lex.l\"\n{   /* end of block */\n                    if ( accolades <= 0 )\n                    {\n                       /* error */\n                       snprintf(err_str,ERRLEN,\"in \\\"%s\\\", line %d: '%c' closing bracket outside a block\",current_file->str,yylineno,*yytext);\n                       set_error(err_str);\n                       return _ERROR_;\n                    }\n                    else\n                        accolades --;\n\n                    if ( accolades == 0 )\n                    {\n                        DEBUG_LEX(\"END_BLOCK\\n\");\n                        BEGIN YY_INIT;\n                        return END_BLOCK;\n                    }\n                    else\n                    {\n                        DEBUG_LEX(\"END_SUB_BLOCK\\n\");\n                        BEGIN INBLOC;\n                        return END_SUB_BLOCK;\n                    }\n\n                
}\n\tYY_BREAK\ncase 23:\nYY_RULE_SETUP\n#line 374 \"conf_lex.l\"\n{ DEBUG_LEX(\" EQUAL \"); return EQUAL; }\n\tYY_BREAK\ncase 24:\nYY_RULE_SETUP\n#line 375 \"conf_lex.l\"\n{ DEBUG_LEX(\" SUP \"); return GT; }\n\tYY_BREAK\ncase 25:\nYY_RULE_SETUP\n#line 376 \"conf_lex.l\"\n{ DEBUG_LEX(\" SUP_OR_EQUAL \"); return GT_EQ; }\n\tYY_BREAK\ncase 26:\nYY_RULE_SETUP\n#line 377 \"conf_lex.l\"\n{ DEBUG_LEX(\" INF  \"); return LT; }\n\tYY_BREAK\ncase 27:\nYY_RULE_SETUP\n#line 378 \"conf_lex.l\"\n{ DEBUG_LEX(\" INF_OR_EQUAL \"); return LT_EQ; }\n\tYY_BREAK\ncase 28:\nYY_RULE_SETUP\n#line 379 \"conf_lex.l\"\n{ DEBUG_LEX(\" DIFF \"); return DIFF; }\n\tYY_BREAK\ncase 29:\nYY_RULE_SETUP\n#line 380 \"conf_lex.l\"\n{ DEBUG_LEX(\" DIFF \"); return DIFF; }\n\tYY_BREAK\ncase 30:\nYY_RULE_SETUP\n#line 381 \"conf_lex.l\"\n{ DEBUG_LEX(\" AFFECT \"); return AFFECT; }\n\tYY_BREAK\ncase 31:\nYY_RULE_SETUP\n#line 383 \"conf_lex.l\"\n{\n                                /* sub-block */\n                                DEBUG_LEX(\"\\nBEGIN_SUB_BLOCK\\n\");\n                                BEGIN INBLOC;\n                                accolades++;\n                                return BEGIN_SUB_BLOCK;\n                            }\n\tYY_BREAK\ncase 32:\nYY_RULE_SETUP\n#line 392 \"conf_lex.l\"\n{BEGIN STRING1;DEBUG_LEX(\"value:<\");YY_BUFFER_RESET();} /* ouverture string 1 */\n\tYY_BREAK\ncase 33:\nYY_RULE_SETUP\n#line 393 \"conf_lex.l\"\n{BEGIN STRING2;DEBUG_LEX(\"value:<\");YY_BUFFER_RESET();} /* ouverture string 2 */\n\tYY_BREAK\ncase 34:\nYY_RULE_SETUP\n#line 395 \"conf_lex.l\"\n{/* valeur */DEBUG_LEX(\"[value:%s]\\n\",yytext);rh_strncpy(yylval.str_val,yytext,MAXSTRLEN); return NON_IDENTIFIER_VALUE;}\n\tYY_BREAK\ncase 35:\nYY_RULE_SETUP\n#line 397 \"conf_lex.l\"\n{DEBUG_LEX(\" end_AFFECT \"); return END_AFFECT; }\n\tYY_BREAK\ncase 36:\nYY_RULE_SETUP\n#line 400 \"conf_lex.l\"\n{BEGIN ESC1;}\n\tYY_BREAK\ncase 37:\nYY_RULE_SETUP\n#line 401 
\"conf_lex.l\"\n{DEBUG_LEX(\">\");rh_strncpy(yylval.str_val,YY_PARSED_STRING->str,MAXSTRLEN);BEGIN INBLOC;/* chaine finie */ return NON_IDENTIFIER_VALUE; }\n\tYY_BREAK\ncase 38:\n/* rule 38 can match eol */\nYY_RULE_SETUP\n#line 402 \"conf_lex.l\"\n{snprintf(err_str,ERRLEN,\"in \\\"%s\\\", line %d: missing closing quote.\",current_file->str,yylineno); set_error(err_str);yylineno++;return _ERROR_;}\n\tYY_BREAK\ncase 39:\nYY_RULE_SETUP\n#line 403 \"conf_lex.l\"\n{YY_BUFFER_APPEND(yytext); DEBUG_LEX(\"%c\",*yytext);/* caractere de la chaine */}\n\tYY_BREAK\ncase 40:\n/* rule 40 can match eol */\nYY_RULE_SETUP\n#line 405 \"conf_lex.l\"\n{BEGIN STRING1;yylineno++;}/* ignore un saut de ligne echappe*/\n\tYY_BREAK\ncase 41:\nYY_RULE_SETUP\n#line 406 \"conf_lex.l\"\n{DEBUG_LEX(\"%c\",*yytext);YY_BUFFER_APPEND(yytext);BEGIN STRING1;/* caractere de la chaine */}\n\tYY_BREAK\ncase 42:\nYY_RULE_SETUP\n#line 408 \"conf_lex.l\"\n{DEBUG_LEX(\">\");rh_strncpy(yylval.str_val,YY_PARSED_STRING->str,MAXSTRLEN);BEGIN INBLOC ;/* chaine finie */ return NON_IDENTIFIER_VALUE;}\n\tYY_BREAK\ncase 43:\n/* rule 43 can match eol */\nYY_RULE_SETUP\n#line 409 \"conf_lex.l\"\n{snprintf(err_str,ERRLEN,\"in \\\"%s\\\", line %d: closing quote missing.\",current_file->str,yylineno); set_error(err_str);yylineno++;return _ERROR_;}\n\tYY_BREAK\ncase 44:\nYY_RULE_SETUP\n#line 410 \"conf_lex.l\"\n{YY_BUFFER_APPEND(yytext);DEBUG_LEX(\"%c\",*yytext);/* caractere de la chaine */}\n\tYY_BREAK\ncase 45:\n*yy_cp = (yy_hold_char); /* undo effects of setting up yytext */\n(yy_c_buf_p) = yy_cp -= 1;\nYY_DO_BEFORE_ACTION; /* set up yytext again */\nYY_RULE_SETUP\n#line 412 \"conf_lex.l\"\nDEBUG_LEX(\"comment: \\\"%s\\\"\\n\", yytext);/* ignore */\n\tYY_BREAK\ncase 46:\n*yy_cp = (yy_hold_char); /* undo effects of setting up yytext */\n(yy_c_buf_p) = yy_cp -= 1;\nYY_DO_BEFORE_ACTION; /* set up yytext again */\nYY_RULE_SETUP\n#line 413 \"conf_lex.l\"\nDEBUG_LEX(\"comment: \\\"%s\\\"\\n\", yytext);/* ignore 
*/\n\tYY_BREAK\ncase 47:\n*yy_cp = (yy_hold_char); /* undo effects of setting up yytext */\n(yy_c_buf_p) = yy_cp -= 1;\nYY_DO_BEFORE_ACTION; /* set up yytext again */\nYY_RULE_SETUP\n#line 414 \"conf_lex.l\"\nDEBUG_LEX(\"comment: \\\"%s\\\"\\n\", yytext);/* ignore */\n\tYY_BREAK\ncase 48:\n*yy_cp = (yy_hold_char); /* undo effects of setting up yytext */\n(yy_c_buf_p) = yy_cp -= 1;\nYY_DO_BEFORE_ACTION; /* set up yytext again */\nYY_RULE_SETUP\n#line 415 \"conf_lex.l\"\nDEBUG_LEX(\"comment: \\\"%s\\\"\\n\", yytext);/* ignore */\n\tYY_BREAK\ncase 49:\nYY_RULE_SETUP\n#line 417 \"conf_lex.l\"\n;/* ignore */\n\tYY_BREAK\ncase 50:\n/* rule 50 can match eol */\nYY_RULE_SETUP\n#line 418 \"conf_lex.l\"\nyylineno++;/* ignore */\n\tYY_BREAK\ncase 51:\nYY_RULE_SETUP\n#line 420 \"conf_lex.l\"\n{ snprintf(err_str,ERRLEN,\"in \\\"%s\\\", line %d: '%c' unexpected\",current_file->str,yylineno,*yytext); set_error(err_str);return _ERROR_;}\n\tYY_BREAK\ncase 52:\nYY_RULE_SETUP\n#line 422 \"conf_lex.l\"\nECHO;\n\tYY_BREAK\n#line 1469 \"conf_lex.c\"\n\n\tcase YY_END_OF_BUFFER:\n\t\t{\n\t\t/* Amount of text matched not including the EOB char. */\n\t\tint yy_amount_of_matched_text = (int) (yy_cp - (yytext_ptr)) - 1;\n\n\t\t/* Undo the effects of YY_DO_BEFORE_ACTION. */\n\t\t*yy_cp = (yy_hold_char);\n\t\tYY_RESTORE_YY_MORE_OFFSET\n\n\t\tif ( YY_CURRENT_BUFFER_LVALUE->yy_buffer_status == YY_BUFFER_NEW )\n\t\t\t{\n\t\t\t/* We're scanning a new file or input source.  It's\n\t\t\t * possible that this happened because the user\n\t\t\t * just pointed yyin at a new source and called\n\t\t\t * yylex().  If so, then we have to assure\n\t\t\t * consistency between YY_CURRENT_BUFFER and our\n\t\t\t * globals.  
Here is the right place to do so, because\n\t\t\t * this is the first action (other than possibly a\n\t\t\t * back-up) that will match for the new input source.\n\t\t\t */\n\t\t\t(yy_n_chars) = YY_CURRENT_BUFFER_LVALUE->yy_n_chars;\n\t\t\tYY_CURRENT_BUFFER_LVALUE->yy_input_file = yyin;\n\t\t\tYY_CURRENT_BUFFER_LVALUE->yy_buffer_status = YY_BUFFER_NORMAL;\n\t\t\t}\n\n\t\t/* Note that here we test for yy_c_buf_p \"<=\" to the position\n\t\t * of the first EOB in the buffer, since yy_c_buf_p will\n\t\t * already have been incremented past the NUL character\n\t\t * (since all states make transitions on EOB to the\n\t\t * end-of-buffer state).  Contrast this with the test\n\t\t * in input().\n\t\t */\n\t\tif ( (yy_c_buf_p) <= &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars)] )\n\t\t\t{ /* This was really a NUL. */\n\t\t\tyy_state_type yy_next_state;\n\n\t\t\t(yy_c_buf_p) = (yytext_ptr) + yy_amount_of_matched_text;\n\n\t\t\tyy_current_state = yy_get_previous_state(  );\n\n\t\t\t/* Okay, we're now positioned to make the NUL\n\t\t\t * transition.  We couldn't have\n\t\t\t * yy_get_previous_state() go ahead and do it\n\t\t\t * for us because it doesn't know how to deal\n\t\t\t * with the possibility of jamming (and we don't\n\t\t\t * want to build jamming into it because then it\n\t\t\t * will run more slowly).\n\t\t\t */\n\n\t\t\tyy_next_state = yy_try_NUL_trans( yy_current_state );\n\n\t\t\tyy_bp = (yytext_ptr) + YY_MORE_ADJ;\n\n\t\t\tif ( yy_next_state )\n\t\t\t\t{\n\t\t\t\t/* Consume the NUL. 
*/\n\t\t\t\tyy_cp = ++(yy_c_buf_p);\n\t\t\t\tyy_current_state = yy_next_state;\n\t\t\t\tgoto yy_match;\n\t\t\t\t}\n\n\t\t\telse\n\t\t\t\t{\n\t\t\t\tyy_cp = (yy_c_buf_p);\n\t\t\t\tgoto yy_find_action;\n\t\t\t\t}\n\t\t\t}\n\n\t\telse switch ( yy_get_next_buffer(  ) )\n\t\t\t{\n\t\t\tcase EOB_ACT_END_OF_FILE:\n\t\t\t\t{\n\t\t\t\t(yy_did_buffer_switch_on_eof) = 0;\n\n\t\t\t\tif ( yywrap( ) )\n\t\t\t\t\t{\n\t\t\t\t\t/* Note: because we've taken care in\n\t\t\t\t\t * yy_get_next_buffer() to have set up\n\t\t\t\t\t * yytext, we can now set up\n\t\t\t\t\t * yy_c_buf_p so that if some total\n\t\t\t\t\t * hoser (like flex itself) wants to\n\t\t\t\t\t * call the scanner after we return the\n\t\t\t\t\t * YY_NULL, it'll still work - another\n\t\t\t\t\t * YY_NULL will get returned.\n\t\t\t\t\t */\n\t\t\t\t\t(yy_c_buf_p) = (yytext_ptr) + YY_MORE_ADJ;\n\n\t\t\t\t\tyy_act = YY_STATE_EOF(YY_START);\n\t\t\t\t\tgoto do_action;\n\t\t\t\t\t}\n\n\t\t\t\telse\n\t\t\t\t\t{\n\t\t\t\t\tif ( ! (yy_did_buffer_switch_on_eof) )\n\t\t\t\t\t\tYY_NEW_FILE;\n\t\t\t\t\t}\n\t\t\t\tbreak;\n\t\t\t\t}\n\n\t\t\tcase EOB_ACT_CONTINUE_SCAN:\n\t\t\t\t(yy_c_buf_p) =\n\t\t\t\t\t(yytext_ptr) + yy_amount_of_matched_text;\n\n\t\t\t\tyy_current_state = yy_get_previous_state(  );\n\n\t\t\t\tyy_cp = (yy_c_buf_p);\n\t\t\t\tyy_bp = (yytext_ptr) + YY_MORE_ADJ;\n\t\t\t\tgoto yy_match;\n\n\t\t\tcase EOB_ACT_LAST_MATCH:\n\t\t\t\t(yy_c_buf_p) =\n\t\t\t\t&YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars)];\n\n\t\t\t\tyy_current_state = yy_get_previous_state(  );\n\n\t\t\t\tyy_cp = (yy_c_buf_p);\n\t\t\t\tyy_bp = (yytext_ptr) + YY_MORE_ADJ;\n\t\t\t\tgoto yy_find_action;\n\t\t\t}\n\t\tbreak;\n\t\t}\n\n\tdefault:\n\t\tYY_FATAL_ERROR(\n\t\t\t\"fatal flex scanner internal error--no action found\" );\n\t} /* end of action switch */\n\t\t} /* end of scanning one token */\n\t} /* end of user's declarations */\n} /* end of yylex */\n\n/* yy_get_next_buffer - try to read in a new buffer\n *\n * Returns a code representing an action:\n 
*\tEOB_ACT_LAST_MATCH -\n *\tEOB_ACT_CONTINUE_SCAN - continue scanning from current position\n *\tEOB_ACT_END_OF_FILE - end of file\n */\nstatic int yy_get_next_buffer (void)\n{\n    \tchar *dest = YY_CURRENT_BUFFER_LVALUE->yy_ch_buf;\n\tchar *source = (yytext_ptr);\n\tyy_size_t number_to_move, i;\n\tint ret_val;\n\n\tif ( (yy_c_buf_p) > &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars) + 1] )\n\t\tYY_FATAL_ERROR(\n\t\t\"fatal flex scanner internal error--end of buffer missed\" );\n\n\tif ( YY_CURRENT_BUFFER_LVALUE->yy_fill_buffer == 0 )\n\t\t{ /* Don't try to fill the buffer, so this is an EOF. */\n\t\tif ( (yy_c_buf_p) - (yytext_ptr) - YY_MORE_ADJ == 1 )\n\t\t\t{\n\t\t\t/* We matched a single character, the EOB, so\n\t\t\t * treat this as a final EOF.\n\t\t\t */\n\t\t\treturn EOB_ACT_END_OF_FILE;\n\t\t\t}\n\n\t\telse\n\t\t\t{\n\t\t\t/* We matched some text prior to the EOB, first\n\t\t\t * process it.\n\t\t\t */\n\t\t\treturn EOB_ACT_LAST_MATCH;\n\t\t\t}\n\t\t}\n\n\t/* Try to read more data. */\n\n\t/* First move last chars to start of buffer. */\n\tnumber_to_move = (yy_size_t) ((yy_c_buf_p) - (yytext_ptr)) - 1;\n\n\tfor ( i = 0; i < number_to_move; ++i )\n\t\t*(dest++) = *(source++);\n\n\tif ( YY_CURRENT_BUFFER_LVALUE->yy_buffer_status == YY_BUFFER_EOF_PENDING )\n\t\t/* don't do the read, it's not guaranteed to return an EOF,\n\t\t * just force an EOF\n\t\t */\n\t\tYY_CURRENT_BUFFER_LVALUE->yy_n_chars = (yy_n_chars) = 0;\n\n\telse\n\t\t{\n\t\t\tint num_to_read =\n\t\t\tYY_CURRENT_BUFFER_LVALUE->yy_buf_size - number_to_move - 1;\n\n\t\twhile ( num_to_read <= 0 )\n\t\t\t{ /* Not enough room in the buffer - grow it. 
*/\n\n\t\t\t/* just a shorter name for the current buffer */\n\t\t\tYY_BUFFER_STATE b = YY_CURRENT_BUFFER_LVALUE;\n\n\t\t\tint yy_c_buf_p_offset =\n\t\t\t\t(int) ((yy_c_buf_p) - b->yy_ch_buf);\n\n\t\t\tif ( b->yy_is_our_buffer )\n\t\t\t\t{\n\t\t\t\tint new_size = b->yy_buf_size * 2;\n\n\t\t\t\tif ( new_size <= 0 )\n\t\t\t\t\tb->yy_buf_size += b->yy_buf_size / 8;\n\t\t\t\telse\n\t\t\t\t\tb->yy_buf_size *= 2;\n\n\t\t\t\tb->yy_ch_buf = (char *)\n\t\t\t\t\t/* Include room in for 2 EOB chars. */\n\t\t\t\t\tyyrealloc((void *) b->yy_ch_buf,(yy_size_t) (b->yy_buf_size + 2)  );\n\t\t\t\t}\n\t\t\telse\n\t\t\t\t/* Can't grow it, we don't own it. */\n\t\t\t\tb->yy_ch_buf = NULL;\n\n\t\t\tif ( ! b->yy_ch_buf )\n\t\t\t\tYY_FATAL_ERROR(\n\t\t\t\t\"fatal error - scanner input buffer overflow\" );\n\n\t\t\t(yy_c_buf_p) = &b->yy_ch_buf[yy_c_buf_p_offset];\n\n\t\t\tnum_to_read = YY_CURRENT_BUFFER_LVALUE->yy_buf_size -\n\t\t\t\t\t\tnumber_to_move - 1;\n\n\t\t\t}\n\n\t\tif ( num_to_read > YY_READ_BUF_SIZE )\n\t\t\tnum_to_read = YY_READ_BUF_SIZE;\n\n\t\t/* Read in more data. */\n\t\tYY_INPUT( (&YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[number_to_move]),\n\t\t\t(yy_n_chars), num_to_read );\n\n\t\tYY_CURRENT_BUFFER_LVALUE->yy_n_chars = (yy_n_chars);\n\t\t}\n\n\tif ( (yy_n_chars) == 0 )\n\t\t{\n\t\tif ( number_to_move == YY_MORE_ADJ )\n\t\t\t{\n\t\t\tret_val = EOB_ACT_END_OF_FILE;\n\t\t\tyyrestart(yyin  );\n\t\t\t}\n\n\t\telse\n\t\t\t{\n\t\t\tret_val = EOB_ACT_LAST_MATCH;\n\t\t\tYY_CURRENT_BUFFER_LVALUE->yy_buffer_status =\n\t\t\t\tYY_BUFFER_EOF_PENDING;\n\t\t\t}\n\t\t}\n\n\telse\n\t\tret_val = EOB_ACT_CONTINUE_SCAN;\n\n\tif ((int) ((yy_n_chars) + number_to_move) > YY_CURRENT_BUFFER_LVALUE->yy_buf_size) {\n\t\t/* Extend the array by 50%, plus the number we really need. */\n\t\tint new_size = (yy_n_chars) + number_to_move + ((yy_n_chars) >> 1);\n\t\tYY_CURRENT_BUFFER_LVALUE->yy_ch_buf = (char *) yyrealloc((void *) YY_CURRENT_BUFFER_LVALUE->yy_ch_buf,(yy_size_t) new_size  );\n\t\tif ( ! 
YY_CURRENT_BUFFER_LVALUE->yy_ch_buf )\n\t\t\tYY_FATAL_ERROR( \"out of dynamic memory in yy_get_next_buffer()\" );\n\t}\n\n\t(yy_n_chars) += number_to_move;\n\tYY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars)] = YY_END_OF_BUFFER_CHAR;\n\tYY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars) + 1] = YY_END_OF_BUFFER_CHAR;\n\n\t(yytext_ptr) = &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[0];\n\n\treturn ret_val;\n}\n\n/* yy_get_previous_state - get the state just before the EOB char was reached */\n\n    static yy_state_type yy_get_previous_state (void)\n{\n\tyy_state_type yy_current_state;\n\tchar *yy_cp;\n    \n\tyy_current_state = (yy_start);\n\n\tfor ( yy_cp = (yytext_ptr) + YY_MORE_ADJ; yy_cp < (yy_c_buf_p); ++yy_cp )\n\t\t{\n\t\tYY_CHAR yy_c = (*yy_cp ? yy_ec[YY_SC_TO_UI(*yy_cp)] : 1);\n\t\tif ( yy_accept[yy_current_state] )\n\t\t\t{\n\t\t\t(yy_last_accepting_state) = yy_current_state;\n\t\t\t(yy_last_accepting_cpos) = yy_cp;\n\t\t\t}\n\t\twhile ( yy_chk[yy_base[yy_current_state] + yy_c] != yy_current_state )\n\t\t\t{\n\t\t\tyy_current_state = (int) yy_def[yy_current_state];\n\t\t\tif ( yy_current_state >= 121 )\n\t\t\t\tyy_c = yy_meta[(unsigned int) yy_c];\n\t\t\t}\n\t\tyy_current_state = yy_nxt[yy_base[yy_current_state] + (flex_int16_t) yy_c];\n\t\t}\n\n\treturn yy_current_state;\n}\n\n/* yy_try_NUL_trans - try to make a transition on the NUL character\n *\n * synopsis\n *\tnext_state = yy_try_NUL_trans( current_state );\n */\n    static yy_state_type yy_try_NUL_trans  (yy_state_type yy_current_state )\n{\n\tint yy_is_jam;\n    \tchar *yy_cp = (yy_c_buf_p);\n\n\tYY_CHAR yy_c = 1;\n\tif ( yy_accept[yy_current_state] )\n\t\t{\n\t\t(yy_last_accepting_state) = yy_current_state;\n\t\t(yy_last_accepting_cpos) = yy_cp;\n\t\t}\n\twhile ( yy_chk[yy_base[yy_current_state] + yy_c] != yy_current_state )\n\t\t{\n\t\tyy_current_state = (int) yy_def[yy_current_state];\n\t\tif ( yy_current_state >= 121 )\n\t\t\tyy_c = yy_meta[(unsigned int) yy_c];\n\t\t}\n\tyy_current_state = 
yy_nxt[yy_base[yy_current_state] + (flex_int16_t) yy_c];\n\tyy_is_jam = (yy_current_state == 120);\n\n\t\treturn yy_is_jam ? 0 : yy_current_state;\n}\n\n#ifndef YY_NO_UNPUT\n\n    static void yyunput (int c, char * yy_bp )\n{\n\tchar *yy_cp;\n    \n    yy_cp = (yy_c_buf_p);\n\n\t/* undo effects of setting up yytext */\n\t*yy_cp = (yy_hold_char);\n\n\tif ( yy_cp < YY_CURRENT_BUFFER_LVALUE->yy_ch_buf + 2 )\n\t\t{ /* need to shift things up to make room */\n\t\t/* +2 for EOB chars. */\n\t\tint number_to_move = (yy_n_chars) + 2;\n\t\tchar *dest = &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[\n\t\t\t\t\tYY_CURRENT_BUFFER_LVALUE->yy_buf_size + 2];\n\t\tchar *source =\n\t\t\t\t&YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[number_to_move];\n\n\t\twhile ( source > YY_CURRENT_BUFFER_LVALUE->yy_ch_buf )\n\t\t\t*--dest = *--source;\n\n\t\tyy_cp += (int) (dest - source);\n\t\tyy_bp += (int) (dest - source);\n\t\tYY_CURRENT_BUFFER_LVALUE->yy_n_chars =\n\t\t\t(yy_n_chars) = (int) YY_CURRENT_BUFFER_LVALUE->yy_buf_size;\n\n\t\tif ( yy_cp < YY_CURRENT_BUFFER_LVALUE->yy_ch_buf + 2 )\n\t\t\tYY_FATAL_ERROR( \"flex scanner push-back overflow\" );\n\t\t}\n\n\t*--yy_cp = (char) c;\n\n\t(yytext_ptr) = yy_bp;\n\t(yy_hold_char) = *yy_cp;\n\t(yy_c_buf_p) = yy_cp;\n}\n\n#endif\n\n#ifndef YY_NO_INPUT\n#ifdef __cplusplus\n    static int yyinput (void)\n#else\n    static int input  (void)\n#endif\n\n{\n\tint c;\n    \n\t*(yy_c_buf_p) = (yy_hold_char);\n\n\tif ( *(yy_c_buf_p) == YY_END_OF_BUFFER_CHAR )\n\t\t{\n\t\t/* yy_c_buf_p now points to the character we want to return.\n\t\t * If this occurs *before* the EOB characters, then it's a\n\t\t * valid NUL; if not, then we've hit the end of the buffer.\n\t\t */\n\t\tif ( (yy_c_buf_p) < &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars)] )\n\t\t\t/* This was really a NUL. 
*/\n\t\t\t*(yy_c_buf_p) = '\\0';\n\n\t\telse\n\t\t\t{ /* need more input */\n\t\t\tint offset = (yy_c_buf_p) - (yytext_ptr);\n\t\t\t++(yy_c_buf_p);\n\n\t\t\tswitch ( yy_get_next_buffer(  ) )\n\t\t\t\t{\n\t\t\t\tcase EOB_ACT_LAST_MATCH:\n\t\t\t\t\t/* This happens because yy_g_n_b()\n\t\t\t\t\t * sees that we've accumulated a\n\t\t\t\t\t * token and flags that we need to\n\t\t\t\t\t * try matching the token before\n\t\t\t\t\t * proceeding.  But for input(),\n\t\t\t\t\t * there's no matching to consider.\n\t\t\t\t\t * So convert the EOB_ACT_LAST_MATCH\n\t\t\t\t\t * to EOB_ACT_END_OF_FILE.\n\t\t\t\t\t */\n\n\t\t\t\t\t/* Reset buffer status. */\n\t\t\t\t\tyyrestart(yyin );\n\n\t\t\t\t\t/*FALLTHROUGH*/\n\n\t\t\t\tcase EOB_ACT_END_OF_FILE:\n\t\t\t\t\t{\n\t\t\t\t\tif ( yywrap( ) )\n\t\t\t\t\t\treturn 0;\n\n\t\t\t\t\tif ( ! (yy_did_buffer_switch_on_eof) )\n\t\t\t\t\t\tYY_NEW_FILE;\n#ifdef __cplusplus\n\t\t\t\t\treturn yyinput();\n#else\n\t\t\t\t\treturn input();\n#endif\n\t\t\t\t\t}\n\n\t\t\t\tcase EOB_ACT_CONTINUE_SCAN:\n\t\t\t\t\t(yy_c_buf_p) = (yytext_ptr) + offset;\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\tc = *(unsigned char *) (yy_c_buf_p);\t/* cast for 8-bit char's */\n\t*(yy_c_buf_p) = '\\0';\t/* preserve yytext */\n\t(yy_hold_char) = *++(yy_c_buf_p);\n\n\treturn c;\n}\n#endif\t/* ifndef YY_NO_INPUT */\n\n/** Immediately switch to a different input stream.\n * @param input_file A readable stream.\n * \n * @note This function does not reset the start condition to @c INITIAL .\n */\n    void yyrestart  (FILE * input_file )\n{\n    \n\tif ( ! YY_CURRENT_BUFFER ){\n        yyensure_buffer_stack ();\n\t\tYY_CURRENT_BUFFER_LVALUE =\n            yy_create_buffer(yyin,YY_BUF_SIZE );\n\t}\n\n\tyy_init_buffer(YY_CURRENT_BUFFER,input_file );\n\tyy_load_buffer_state( );\n}\n\n/** Switch to a different input buffer.\n * @param new_buffer The new input buffer.\n * \n */\n    void yy_switch_to_buffer  (YY_BUFFER_STATE  new_buffer )\n{\n    \n\t/* TODO. 
We should be able to replace this entire function body\n\t * with\n\t *\t\tyypop_buffer_state();\n\t *\t\tyypush_buffer_state(new_buffer);\n     */\n\tyyensure_buffer_stack ();\n\tif ( YY_CURRENT_BUFFER == new_buffer )\n\t\treturn;\n\n\tif ( YY_CURRENT_BUFFER )\n\t\t{\n\t\t/* Flush out information for old buffer. */\n\t\t*(yy_c_buf_p) = (yy_hold_char);\n\t\tYY_CURRENT_BUFFER_LVALUE->yy_buf_pos = (yy_c_buf_p);\n\t\tYY_CURRENT_BUFFER_LVALUE->yy_n_chars = (yy_n_chars);\n\t\t}\n\n\tYY_CURRENT_BUFFER_LVALUE = new_buffer;\n\tyy_load_buffer_state( );\n\n\t/* We don't actually know whether we did this switch during\n\t * EOF (yywrap()) processing, but the only time this flag\n\t * is looked at is after yywrap() is called, so it's safe\n\t * to go ahead and always set it.\n\t */\n\t(yy_did_buffer_switch_on_eof) = 1;\n}\n\nstatic void yy_load_buffer_state  (void)\n{\n    \t(yy_n_chars) = YY_CURRENT_BUFFER_LVALUE->yy_n_chars;\n\t(yytext_ptr) = (yy_c_buf_p) = YY_CURRENT_BUFFER_LVALUE->yy_buf_pos;\n\tyyin = YY_CURRENT_BUFFER_LVALUE->yy_input_file;\n\t(yy_hold_char) = *(yy_c_buf_p);\n}\n\n/** Allocate and initialize an input buffer state.\n * @param file A readable stream.\n * @param size The character buffer size in bytes. When in doubt, use @c YY_BUF_SIZE.\n * \n * @return the allocated buffer state.\n */\n    YY_BUFFER_STATE yy_create_buffer  (FILE * file, int  size )\n{\n\tYY_BUFFER_STATE b;\n    \n\tb = (YY_BUFFER_STATE) yyalloc(sizeof( struct yy_buffer_state )  );\n\tif ( ! b )\n\t\tYY_FATAL_ERROR( \"out of dynamic memory in yy_create_buffer()\" );\n\n\tb->yy_buf_size = size;\n\n\t/* yy_ch_buf has to be 2 characters longer than the size given because\n\t * we need to put in 2 end-of-buffer characters.\n\t */\n\tb->yy_ch_buf = (char *) yyalloc((yy_size_t) (b->yy_buf_size + 2)  );\n\tif ( ! 
b->yy_ch_buf )\n\t\tYY_FATAL_ERROR( \"out of dynamic memory in yy_create_buffer()\" );\n\n\tb->yy_is_our_buffer = 1;\n\n\tyy_init_buffer(b,file );\n\n\treturn b;\n}\n\n/** Destroy the buffer.\n * @param b a buffer created with yy_create_buffer()\n * \n */\n    void yy_delete_buffer (YY_BUFFER_STATE  b )\n{\n    \n\tif ( ! b )\n\t\treturn;\n\n\tif ( b == YY_CURRENT_BUFFER ) /* Not sure if we should pop here. */\n\t\tYY_CURRENT_BUFFER_LVALUE = (YY_BUFFER_STATE) 0;\n\n\tif ( b->yy_is_our_buffer )\n\t\tyyfree((void *) b->yy_ch_buf  );\n\n\tyyfree((void *) b  );\n}\n\n/* Initializes or reinitializes a buffer.\n * This function is sometimes called more than once on the same buffer,\n * such as during a yyrestart() or at EOF.\n */\n    static void yy_init_buffer  (YY_BUFFER_STATE  b, FILE * file )\n\n{\n\tint oerrno = errno;\n    \n\tyy_flush_buffer(b );\n\n\tb->yy_input_file = file;\n\tb->yy_fill_buffer = 1;\n\n    /* If b is the current buffer, then yy_init_buffer was _probably_\n     * called from yyrestart() or through yy_get_next_buffer.\n     * In that case, we don't want to reset the lineno or column.\n     */\n    if (b != YY_CURRENT_BUFFER){\n        b->yy_bs_lineno = 1;\n        b->yy_bs_column = 0;\n    }\n\n        b->yy_is_interactive = file ? (isatty( fileno(file) ) > 0) : 0;\n    \n\terrno = oerrno;\n}\n\n/** Discard all buffered characters. On the next scan, YY_INPUT will be called.\n * @param b the buffer state to be flushed, usually @c YY_CURRENT_BUFFER.\n * \n */\n    void yy_flush_buffer (YY_BUFFER_STATE  b )\n{\n    \tif ( ! b )\n\t\treturn;\n\n\tb->yy_n_chars = 0;\n\n\t/* We always need two end-of-buffer characters.  The first causes\n\t * a transition to the end-of-buffer state.  
The second causes\n\t * a jam in that state.\n\t */\n\tb->yy_ch_buf[0] = YY_END_OF_BUFFER_CHAR;\n\tb->yy_ch_buf[1] = YY_END_OF_BUFFER_CHAR;\n\n\tb->yy_buf_pos = &b->yy_ch_buf[0];\n\n\tb->yy_at_bol = 1;\n\tb->yy_buffer_status = YY_BUFFER_NEW;\n\n\tif ( b == YY_CURRENT_BUFFER )\n\t\tyy_load_buffer_state( );\n}\n\n/** Pushes the new state onto the stack. The new state becomes\n *  the current state. This function will allocate the stack\n *  if necessary.\n *  @param new_buffer The new state.\n *  \n */\nvoid yypush_buffer_state (YY_BUFFER_STATE new_buffer )\n{\n    \tif (new_buffer == NULL)\n\t\treturn;\n\n\tyyensure_buffer_stack();\n\n\t/* This block is copied from yy_switch_to_buffer. */\n\tif ( YY_CURRENT_BUFFER )\n\t\t{\n\t\t/* Flush out information for old buffer. */\n\t\t*(yy_c_buf_p) = (yy_hold_char);\n\t\tYY_CURRENT_BUFFER_LVALUE->yy_buf_pos = (yy_c_buf_p);\n\t\tYY_CURRENT_BUFFER_LVALUE->yy_n_chars = (yy_n_chars);\n\t\t}\n\n\t/* Only push if top exists. Otherwise, replace top. */\n\tif (YY_CURRENT_BUFFER)\n\t\t(yy_buffer_stack_top)++;\n\tYY_CURRENT_BUFFER_LVALUE = new_buffer;\n\n\t/* copied from yy_switch_to_buffer. */\n\tyy_load_buffer_state( );\n\t(yy_did_buffer_switch_on_eof) = 1;\n}\n\n/** Removes and deletes the top of the stack, if present.\n *  The next element becomes the new top.\n *  \n */\nvoid yypop_buffer_state (void)\n{\n    \tif (!YY_CURRENT_BUFFER)\n\t\treturn;\n\n\tyy_delete_buffer(YY_CURRENT_BUFFER );\n\tYY_CURRENT_BUFFER_LVALUE = NULL;\n\tif ((yy_buffer_stack_top) > 0)\n\t\t--(yy_buffer_stack_top);\n\n\tif (YY_CURRENT_BUFFER) {\n\t\tyy_load_buffer_state( );\n\t\t(yy_did_buffer_switch_on_eof) = 1;\n\t}\n}\n\n/* Allocates the stack if it does not exist.\n *  Guarantees space for at least one push.\n */\nstatic void yyensure_buffer_stack (void)\n{\n\tint num_to_alloc;\n    \n\tif (!(yy_buffer_stack)) {\n\n\t\t/* First allocation is just for 2 elements, since we don't know if this\n\t\t * scanner will even need a stack. 
We use 2 instead of 1 to avoid an\n\t\t * immediate realloc on the next call.\n         */\n      num_to_alloc = 1; /* After all that talk, this was set to 1 anyways... */\n\t\t(yy_buffer_stack) = (struct yy_buffer_state**)yyalloc\n\t\t\t\t\t\t\t\t(num_to_alloc * sizeof(struct yy_buffer_state*)\n\t\t\t\t\t\t\t\t);\n\t\tif ( ! (yy_buffer_stack) )\n\t\t\tYY_FATAL_ERROR( \"out of dynamic memory in yyensure_buffer_stack()\" );\n\t\t\t\t\t\t\t\t  \n\t\tmemset((yy_buffer_stack), 0, num_to_alloc * sizeof(struct yy_buffer_state*));\n\t\t\t\t\n\t\t(yy_buffer_stack_max) = num_to_alloc;\n\t\t(yy_buffer_stack_top) = 0;\n\t\treturn;\n\t}\n\n\tif ((yy_buffer_stack_top) >= ((yy_buffer_stack_max)) - 1){\n\n\t\t/* Increase the buffer to prepare for a possible push. */\n\t\tyy_size_t grow_size = 8 /* arbitrary grow size */;\n\n\t\tnum_to_alloc = (yy_buffer_stack_max) + grow_size;\n\t\t(yy_buffer_stack) = (struct yy_buffer_state**)yyrealloc\n\t\t\t\t\t\t\t\t((yy_buffer_stack),\n\t\t\t\t\t\t\t\tnum_to_alloc * sizeof(struct yy_buffer_state*)\n\t\t\t\t\t\t\t\t);\n\t\tif ( ! (yy_buffer_stack) )\n\t\t\tYY_FATAL_ERROR( \"out of dynamic memory in yyensure_buffer_stack()\" );\n\n\t\t/* zero only the new slots.*/\n\t\tmemset((yy_buffer_stack) + (yy_buffer_stack_max), 0, grow_size * sizeof(struct yy_buffer_state*));\n\t\t(yy_buffer_stack_max) = num_to_alloc;\n\t}\n}\n\n/** Setup the input buffer state to scan directly from a user-specified character buffer.\n * @param base the character buffer\n * @param size the size in bytes of the character buffer\n * \n * @return the newly allocated buffer state object. \n */\nYY_BUFFER_STATE yy_scan_buffer  (char * base, yy_size_t  size )\n{\n\tYY_BUFFER_STATE b;\n    \n\tif ( size < 2 ||\n\t     base[size-2] != YY_END_OF_BUFFER_CHAR ||\n\t     base[size-1] != YY_END_OF_BUFFER_CHAR )\n\t\t/* They forgot to leave room for the EOB's. */\n\t\treturn NULL;\n\n\tb = (YY_BUFFER_STATE) yyalloc(sizeof( struct yy_buffer_state )  );\n\tif ( ! 
b )\n\t\tYY_FATAL_ERROR( \"out of dynamic memory in yy_scan_buffer()\" );\n\n\tb->yy_buf_size = (int) (size - 2);\t/* \"- 2\" to take care of EOB's */\n\tb->yy_buf_pos = b->yy_ch_buf = base;\n\tb->yy_is_our_buffer = 0;\n\tb->yy_input_file = NULL;\n\tb->yy_n_chars = b->yy_buf_size;\n\tb->yy_is_interactive = 0;\n\tb->yy_at_bol = 1;\n\tb->yy_fill_buffer = 0;\n\tb->yy_buffer_status = YY_BUFFER_NEW;\n\n\tyy_switch_to_buffer(b  );\n\n\treturn b;\n}\n\n/** Setup the input buffer state to scan a string. The next call to yylex() will\n * scan from a @e copy of @a str.\n * @param yystr a NUL-terminated string to scan\n * \n * @return the newly allocated buffer state object.\n * @note If you want to scan bytes that may contain NUL values, then use\n *       yy_scan_bytes() instead.\n */\nYY_BUFFER_STATE yy_scan_string (yyconst char * yystr )\n{\n    \n\treturn yy_scan_bytes(yystr,(int) strlen(yystr) );\n}\n\n/** Setup the input buffer state to scan the given bytes. The next call to yylex() will\n * scan from a @e copy of @a bytes.\n * @param yybytes the byte buffer to scan\n * @param _yybytes_len the number of bytes in the buffer pointed to by @a bytes.\n * \n * @return the newly allocated buffer state object.\n */\nYY_BUFFER_STATE yy_scan_bytes  (yyconst char * yybytes, int  _yybytes_len )\n{\n\tYY_BUFFER_STATE b;\n\tchar *buf;\n\tyy_size_t n;\n\tint i;\n    \n\t/* Get memory for full buffer, including space for trailing EOB's. */\n\tn = (yy_size_t) (_yybytes_len + 2);\n\tbuf = (char *) yyalloc(n  );\n\tif ( ! buf )\n\t\tYY_FATAL_ERROR( \"out of dynamic memory in yy_scan_bytes()\" );\n\n\tfor ( i = 0; i < _yybytes_len; ++i )\n\t\tbuf[i] = yybytes[i];\n\n\tbuf[_yybytes_len] = buf[_yybytes_len+1] = YY_END_OF_BUFFER_CHAR;\n\n\tb = yy_scan_buffer(buf,n );\n\tif ( ! b )\n\t\tYY_FATAL_ERROR( \"bad buffer in yy_scan_bytes()\" );\n\n\t/* It's okay to grow etc. 
this buffer, and we should throw it\n\t * away when we're done.\n\t */\n\tb->yy_is_our_buffer = 1;\n\n\treturn b;\n}\n\n#ifndef YY_EXIT_FAILURE\n#define YY_EXIT_FAILURE 2\n#endif\n\nstatic void yynoreturn yy_fatal_error (yyconst char* msg )\n{\n\t\t\t(void) fprintf( stderr, \"%s\\n\", msg );\n\texit( YY_EXIT_FAILURE );\n}\n\n/* Redefine yyless() so it works in section 3 code. */\n\n#undef yyless\n#define yyless(n) \\\n\tdo \\\n\t\t{ \\\n\t\t/* Undo effects of setting up yytext. */ \\\n        yy_size_t yyless_macro_arg = (n); \\\n        YY_LESS_LINENO(yyless_macro_arg);\\\n\t\tyytext[yyleng] = (yy_hold_char); \\\n\t\t(yy_c_buf_p) = yytext + yyless_macro_arg; \\\n\t\t(yy_hold_char) = *(yy_c_buf_p); \\\n\t\t*(yy_c_buf_p) = '\\0'; \\\n\t\tyyleng = yyless_macro_arg; \\\n\t\t} \\\n\twhile ( 0 )\n\n/* Accessor  methods (get/set functions) to struct members. */\n\n/** Get the current line number.\n * \n */\nint yyget_lineno  (void)\n{\n        \n    return yylineno;\n}\n\n/** Get the input stream.\n * \n */\nFILE *yyget_in  (void)\n{\n        return yyin;\n}\n\n/** Get the output stream.\n * \n */\nFILE *yyget_out  (void)\n{\n        return yyout;\n}\n\n/** Get the length of the current token.\n * \n */\nint yyget_leng  (void)\n{\n        return yyleng;\n}\n\n/** Get the current token.\n * \n */\n\nchar *yyget_text  (void)\n{\n        return yytext;\n}\n\n/** Set the current line number.\n * @param _line_number line number\n * \n */\nvoid yyset_lineno (int  _line_number )\n{\n    \n    yylineno = _line_number;\n}\n\n/** Set the input stream. 
This does not discard the current\n * input buffer.\n * @param _in_str A readable stream.\n * \n * @see yy_switch_to_buffer\n */\nvoid yyset_in (FILE *  _in_str )\n{\n        yyin = _in_str ;\n}\n\nvoid yyset_out (FILE *  _out_str )\n{\n        yyout = _out_str ;\n}\n\nint yyget_debug  (void)\n{\n        return yy_flex_debug;\n}\n\nvoid yyset_debug (int  _bdebug )\n{\n        yy_flex_debug = _bdebug ;\n}\n\nstatic int yy_init_globals (void)\n{\n        /* Initialization is the same as for the non-reentrant scanner.\n     * This function is called from yylex_destroy(), so don't allocate here.\n     */\n\n    (yy_buffer_stack) = NULL;\n    (yy_buffer_stack_top) = 0;\n    (yy_buffer_stack_max) = 0;\n    (yy_c_buf_p) = NULL;\n    (yy_init) = 0;\n    (yy_start) = 0;\n\n/* Defined in main.c */\n#ifdef YY_STDINIT\n    yyin = stdin;\n    yyout = stdout;\n#else\n    yyin = NULL;\n    yyout = NULL;\n#endif\n\n    /* For future reference: Set errno on error, since we are called by\n     * yylex_init()\n     */\n    return 0;\n}\n\n/* yylex_destroy is for both reentrant and non-reentrant scanners. */\nint yylex_destroy  (void)\n{\n    \n    /* Pop the buffer stack, destroying each element. */\n\twhile(YY_CURRENT_BUFFER){\n\t\tyy_delete_buffer(YY_CURRENT_BUFFER  );\n\t\tYY_CURRENT_BUFFER_LVALUE = NULL;\n\t\tyypop_buffer_state();\n\t}\n\n\t/* Destroy the stack itself. */\n\tyyfree((yy_buffer_stack) );\n\t(yy_buffer_stack) = NULL;\n\n    /* Reset the globals. This is important in a non-reentrant scanner so the next time\n     * yylex() is called, initialization will occur. 
*/\n    yy_init_globals( );\n\n    return 0;\n}\n\n/*\n * Internal utility routines.\n */\n\n#ifndef yytext_ptr\nstatic void yy_flex_strncpy (char* s1, yyconst char * s2, int n )\n{\n\t\t\n\tint i;\n\tfor ( i = 0; i < n; ++i )\n\t\ts1[i] = s2[i];\n}\n#endif\n\n#ifdef YY_NEED_STRLEN\nstatic int yy_flex_strlen (yyconst char * s )\n{\n\tint n;\n\tfor ( n = 0; s[n]; ++n )\n\t\t;\n\n\treturn n;\n}\n#endif\n\nvoid *yyalloc (yy_size_t  size )\n{\n\t\t\treturn malloc(size);\n}\n\nvoid *yyrealloc  (void * ptr, yy_size_t  size )\n{\n\t\t\n\t/* The cast to (char *) in the following accommodates both\n\t * implementations that use char* generic pointers, and those\n\t * that use void* generic pointers.  It works with the latter\n\t * because both ANSI C and C++ allow castless assignment from\n\t * any pointer type to void*, and deal with argument conversions\n\t * as though doing an assignment.\n\t */\n\treturn realloc(ptr, size);\n}\n\nvoid yyfree (void * ptr )\n{\n\t\t\tfree( (char *) ptr );\t/* see yyrealloc() for (char *) cast */\n}\n\n#define YYTABLES_NAME \"yytables\"\n\n#line 422 \"conf_lex.l\"\n\n\n\nint yywrap(void){\n    return 1;\n}\n\nvoid yyreset(void){\n    YY_FLUSH_BUFFER;\n    YY_USER_INIT;\n}\n\nvoid yy_set_current_file(const char *file)\n{\n    g_string_assign(current_file, file);\n}\n\n"
  },
  {
    "path": "src/cfg_parsing/conf_lex.l",
    "content": "%{\n/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2004-2009 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n#include \"config.h\"\n#include \"analyze.h\"\n#include \"conf_yacc.h\"\n\n#include <stdio.h>\n#include <errno.h>\n#include <stdlib.h>\n#include <libgen.h>\n#include <glib.h>\n\n#if HAVE_STRING_H\n#   include <string.h>\n#endif\n\n/* current line number */\nint yylineno;\n\n/* levels of brackets nbr */\nint accolades;\n\n/* level of parenthesis nbr */\nint parenthesis;\n\n/* Traitement des messages d'erreur */\nvoid set_error(const char *s);\n\n#define ERRLEN 1024\nchar err_str[ERRLEN]=\"\";\n\n/* Stockage des chaines\n*/\nGString *YY_PARSED_STRING;\n\nvoid YY_BUFFER_APPEND(const char *s) {\n    g_string_append(YY_PARSED_STRING, s);\n}\n\nvoid YY_BUFFER_RESET(void) {\n    g_string_set_size(YY_PARSED_STRING, 0);\n}\n\n/* includes management */\n#define FILE_LEN 1024\nGString *current_file;\n\n#define MAX_INCLUDE_DEPTH  10\nYY_BUFFER_STATE include_stack[MAX_INCLUDE_DEPTH];\nint include_prev_state;\n\n/* keep track of filenames and line numbers */\nunsigned int lines_stack[MAX_INCLUDE_DEPTH];\nGString *files_stack[MAX_INCLUDE_DEPTH];\n\nint include_stack_index = 0;\n\n\n/* initialisation du parser */\n#define YY_USER_INIT {          \\\n    unsigned int i;             \\\n    yylineno = 1;               \\\n    accolades = 0;              \\\n    parenthesis = 0;            \\\n    include_stack_index = 0;    \\\n    for (i = 0; i < MAX_INCLUDE_DEPTH; i++) { \\\n        lines_stack[i] = 0;     \\\n        if (files_stack[i] == NULL) \\\n            files_stack[i] = g_string_sized_new(FILE_LEN); \\\n    } 
                          \\\n    if (YY_PARSED_STRING == NULL) YY_PARSED_STRING = g_string_sized_new(MAXSTRLEN); \\\n    if (current_file == NULL) current_file = g_string_sized_new(FILE_LEN); \\\n    BEGIN YY_INIT; \\\n}\n\n#ifdef _DEBUG_PARSING\n#define DEBUG_LEX   printf\n#else\n/* do nothing */\nstatic void DEBUG_LEX( char * format, ... ) { return ; }\n#endif\n\n\n/* These functions are defined by bison/yacc but not used, which\n * causes a compiler warning. */\nstatic void yyunput(int c, char *buf_ptr) __attribute__((unused));\nstatic int input(void) __attribute__((unused));\n\n\n%}\n\n\nSPACE        [ \\t\\r\\f]\nNL           [\\n]\nVAL_CHAR     [^ \\t\\r\\n\\f\"'#(),;=<>!{}]\nCOMMENT  #.*$\nCOMMENT2 \\/\\/.*$\n/* lettre posant probleme dans une chaine */\nSTRING_CHAR       [^\\n]\n/* comment est compose un identifiant */\nLETTER          [a-zA-Z_.]\nIDENTIFIER_CHAR    [a-zA-Z0-9_.\\-]\n\n/* INCLUDE state is used for picking the name of the include file */\n%START  YY_INIT INBLOC STRING1 STRING2 ESC1 INCLUDE INCL_STRING INCL_ESC\n\n%%\n\n<YY_INIT>\"%include\" {/* Start reading name of included file */\n                        DEBUG_LEX(\"INCLUDE\\n\");\n                        BEGIN INCLUDE;\n                        include_prev_state = YY_INIT;\n                        /* not a token, return nothing */\n                     }\n\n<INBLOC>\"%include\" {/* Start reading name of included file */\n                        DEBUG_LEX(\"INCLUDE\\n\");\n                        BEGIN INCLUDE;\n                        include_prev_state = INBLOC;\n                        /* not a token, return nothing */\n                    }\n\n<INCLUDE>\"\\\"\"       { /* start include file name */\n                      BEGIN INCL_STRING;\n                      DEBUG_LEX(\"file:<\");\n                      YY_BUFFER_RESET();\n                    }\n\n<INCL_STRING>\\\\     {BEGIN INCL_ESC;}\n\n<INCL_STRING>\"\\\"\"   { /* include file read */\n                        unsigned int 
i;\n                        GString *new_file_path;\n                        DEBUG_LEX(\">\");\n\n                        if ( include_stack_index >= MAX_INCLUDE_DEPTH )\n                        {\n                           /* error */\n                           snprintf(err_str,ERRLEN,\"in \\\"%s\\\", line %d: includes nested too deeply\",current_file->str, yylineno);\n                           set_error(err_str);\n                           return _ERROR_;\n                        }\n\n                        /* replace environment variables */\n                        if (YY_PARSED_STRING->str[0] == '$')\n                        {\n                            /* included file is an environment variable */\n                            char *val = getenv(YY_PARSED_STRING->str + 1); /* skip '$' */\n                            if (val == NULL)\n                            {\n                               snprintf(err_str, ERRLEN, \"in \\\"%s\\\", line %d: environment variable '%s' is not set\",\n                                        current_file->str, yylineno, YY_PARSED_STRING->str + 1);\n                               set_error(err_str);\n                               return _ERROR_;\n                            }\n                            else if (strlen(val) >= MAXSTRLEN)\n                            {\n                               snprintf(err_str, ERRLEN, \"in \\\"%s\\\", line %d: \"\n                                        \"file name too long in include statement\",\n                                        current_file->str, yylineno);\n                               set_error(err_str);\n                               return _ERROR_;\n                            }\n                            g_string_assign(YY_PARSED_STRING, val);\n                        }\n\n                        include_stack[include_stack_index] = YY_CURRENT_BUFFER;\n                        lines_stack[include_stack_index] = yylineno;\n                        
g_string_assign(files_stack[include_stack_index], current_file->str);\n\n                        /* relative path management */\n                        new_file_path = g_string_sized_new(FILE_LEN);\n\n                        /* 1) if the new path is absolute, nothing to do\n                         * 2) if there was no '/' in previous dir, the new path\n                         *  is relative to the current dir.\n                         */\n                        if ((YY_PARSED_STRING->str[0] == '/')\n                            || (strchr(current_file->str, '/') == NULL))\n                        {\n                            g_string_assign(new_file_path, YY_PARSED_STRING->str);\n                        }\n                        else\n                        {\n                            /* in any other case, path is relative to the current config file\n                             * directory */\n                            GString *tmp_buf;\n                            char *path;\n\n                            tmp_buf = g_string_new(current_file->str);\n\n                            path = dirname(tmp_buf->str);\n\n                            g_string_printf(new_file_path, \"%s/%s\", path, YY_PARSED_STRING->str);\n                            g_string_free(tmp_buf, TRUE);\n                        }\n\n                        /* loop detection */\n\n                        for ( i = 0; i <= include_stack_index; i++ )\n                        {\n                            if (!strcmp(files_stack[i]->str, new_file_path->str))\n                            {\n                               snprintf(err_str,ERRLEN,\"in \\\"%s\\\", line %d: include loop detected: \\\"%s\\\" already parsed\",\n                                        current_file->str, yylineno, new_file_path->str);\n                               set_error(err_str);\n                               g_string_free(new_file_path, TRUE);\n                               return _ERROR_;\n               
             }\n                        }\n\n                        include_stack_index ++;\n\n                        yyin = fopen(new_file_path->str, \"r\");\n\n                        if ( yyin == NULL )\n                        {\n                           /* error */\n                           snprintf(err_str, ERRLEN, \"in \\\"%s\\\", line %d: error %d opening file \\\"%s\\\": %s\",\n                                    current_file->str, yylineno,\n                                    errno, new_file_path->str, strerror(errno));\n                           set_error(err_str);\n                           g_string_free(new_file_path, TRUE);\n                           return _ERROR_;\n                        }\n\n                        yylineno = 1;\n                        g_string_assign(current_file, new_file_path->str);\n                        g_string_free(new_file_path, TRUE);\n\n                        /* change current buffer */\n                        yy_switch_to_buffer( yy_create_buffer( yyin, YY_BUF_SIZE ) );\n\n                        /* next state depends on the state before %include is met */\n                        BEGIN include_prev_state;\n                    }\n\n\n<INCL_STRING>\\n      {\n                            snprintf(err_str,ERRLEN,\"in \\\"%s\\\", line %d: missing closing quote.\",current_file->str, yylineno);\n                            set_error(err_str);\n                            yylineno++;\n                            return _ERROR_;\n                     }\n\n<INCL_STRING>.      {YY_BUFFER_APPEND(yytext); DEBUG_LEX(\"%c\",*yytext);/* caractere du fichier */}\n\n<INCL_ESC>\\n        {BEGIN INCL_STRING; yylineno++;}/* ignore un saut de ligne echappe*/\n<INCL_ESC>.         
{DEBUG_LEX(\"%c\",*yytext);YY_BUFFER_APPEND(yytext);BEGIN INCL_STRING;/* caractere du fichier */}\n\n\n<<EOF>> { /* end of included file */\n            DEBUG_LEX(\"<EOF>\\n\");\n\n            include_stack_index --;\n\n            if ( include_stack_index < 0 )\n            {\n                /* eof of all streams */\n                yyterminate();\n            }\n            else\n            {\n                fclose(yyin);\n                /*go down into stack */\n                yy_delete_buffer( YY_CURRENT_BUFFER );\n\n                yylineno = lines_stack[include_stack_index];\n                g_string_assign(current_file, files_stack[include_stack_index]->str);\n\n                yy_switch_to_buffer( include_stack[include_stack_index] );\n            }\n        }\n\n\n\n<YY_INIT>{LETTER}({IDENTIFIER_CHAR})* {\n                    /* identifier */\n                    DEBUG_LEX(\"[bloc:%s]\\n\",yytext);\n                    rh_strncpy(yylval.str_val,yytext,MAXSTRLEN);\n                    return IDENTIFIER;\n                 }\n\n\n<YY_INIT>\"{\"        {/* debut de bloc */\n                        DEBUG_LEX(\"BEGIN_BLOCK\\n\");\n                        BEGIN INBLOC;\n                        accolades++;\n                        return BEGIN_BLOCK;\n                 }\n\n<INBLOC>\"(\" {\n                    DEBUG_LEX(\"(\");\n                    parenthesis++;\n                    return BEGIN_PARENTHESIS;\n             }\n\n<INBLOC>\",\"     {DEBUG_LEX(\",  \"); return VALUE_SEPARATOR;}\n<INBLOC>\")\"    {BEGIN INBLOC;  DEBUG_LEX(\")\\n\");\n                if ( parenthesis <= 0 )\n                    {\n                       /* error */\n                       snprintf(err_str,ERRLEN,\"in \\\"%s\\\", line %d: '%c' too much closing parenthesis\",current_file->str,yylineno,*yytext);\n                       set_error(err_str);\n                       return _ERROR_;\n                    }\n                    else\n                        parenthesis 
--;\n\n                    return END_PARENTHESIS;\n                }\n\n<INBLOC>\"not\"  { DEBUG_LEX(\" NOT \"); return NOT; }\n<INBLOC>\"and\"  { DEBUG_LEX(\" AND \"); return AND; }\n<INBLOC>\"or\"  { DEBUG_LEX(\" OR \"); return OR; }\n\n<INBLOC>\"union\"  { DEBUG_LEX(\" UNION \"); return UNION; }\n<INBLOC>\"inter\"  { DEBUG_LEX(\" INTER \"); return INTER; }\n\n<INBLOC>\"$\"{LETTER}({IDENTIFIER_CHAR})* {\n                    /* environment variable */\n                    DEBUG_LEX(\"[VAR:%s]\",yytext);\n                    rh_strncpy(yylval.str_val,yytext,MAXSTRLEN);\n                    return ENV_VAR;\n                }\n\n<INBLOC>{LETTER}({IDENTIFIER_CHAR})* {\n                    /* identifier */\n                    DEBUG_LEX(\"[%s]\",yytext);\n                    rh_strncpy(yylval.str_val,yytext,MAXSTRLEN);\n                    return IDENTIFIER;\n                }\n\n\n<INBLOC>\"}\"     {   /* end of block */\n                    if ( accolades <= 0 )\n                    {\n                       /* error */\n                       snprintf(err_str,ERRLEN,\"in \\\"%s\\\", line %d: '%c' closing bracket outside a block\",current_file->str,yylineno,*yytext);\n                       set_error(err_str);\n                       return _ERROR_;\n                    }\n                    else\n                        accolades --;\n\n                    if ( accolades == 0 )\n                    {\n                        DEBUG_LEX(\"END_BLOCK\\n\");\n                        BEGIN YY_INIT;\n                        return END_BLOCK;\n                    }\n                    else\n                    {\n                        DEBUG_LEX(\"END_SUB_BLOCK\\n\");\n                        BEGIN INBLOC;\n                        return END_SUB_BLOCK;\n                    }\n\n                }\n\n<INBLOC>\"==\"  { DEBUG_LEX(\" EQUAL \"); return EQUAL; }\n<INBLOC>\">\"  { DEBUG_LEX(\" SUP \"); return GT; }\n<INBLOC>\">=\"  { DEBUG_LEX(\" SUP_OR_EQUAL \"); return GT_EQ; 
}\n<INBLOC>\"<\"  { DEBUG_LEX(\" INF  \"); return LT; }\n<INBLOC>\"<=\"  { DEBUG_LEX(\" INF_OR_EQUAL \"); return LT_EQ; }\n<INBLOC>\"<>\"  { DEBUG_LEX(\" DIFF \"); return DIFF; }\n<INBLOC>\"!=\"  { DEBUG_LEX(\" DIFF \"); return DIFF; }\n<INBLOC>\"=\"  { DEBUG_LEX(\" AFFECT \"); return AFFECT; }\n\n<INBLOC>\"{\"    {\n                                /* sub-block */\n                                DEBUG_LEX(\"\\nBEGIN_SUB_BLOCK\\n\");\n                                BEGIN INBLOC;\n                                accolades++;\n                                return BEGIN_SUB_BLOCK;\n                            }\n\n\n<INBLOC>\"\\\"\"           {BEGIN STRING1;DEBUG_LEX(\"value:<\");YY_BUFFER_RESET();} /* ouverture string 1 */\n<INBLOC>\"'\"            {BEGIN STRING2;DEBUG_LEX(\"value:<\");YY_BUFFER_RESET();} /* ouverture string 2 */\n\n<INBLOC>({VAL_CHAR})+  {/* valeur */DEBUG_LEX(\"[value:%s]\\n\",yytext);rh_strncpy(yylval.str_val,yytext,MAXSTRLEN); return NON_IDENTIFIER_VALUE;}\n\n<INBLOC>\";\"     {DEBUG_LEX(\" end_AFFECT \"); return END_AFFECT; }\n\n\n<STRING1>\\\\     {BEGIN ESC1;}\n<STRING1>\"\\\"\"   {DEBUG_LEX(\">\");rh_strncpy(yylval.str_val,YY_PARSED_STRING->str,MAXSTRLEN);BEGIN INBLOC;/* chaine finie */ return NON_IDENTIFIER_VALUE; }\n<STRING1>\\n      {snprintf(err_str,ERRLEN,\"in \\\"%s\\\", line %d: missing closing quote.\",current_file->str,yylineno); set_error(err_str);yylineno++;return _ERROR_;}\n<STRING1>.      {YY_BUFFER_APPEND(yytext); DEBUG_LEX(\"%c\",*yytext);/* caractere de la chaine */}\n\n<ESC1>\\n        {BEGIN STRING1;yylineno++;}/* ignore un saut de ligne echappe*/\n<ESC1>.         
{DEBUG_LEX(\"%c\",*yytext);YY_BUFFER_APPEND(yytext);BEGIN STRING1;/* caractere de la chaine */}\n\n<STRING2>\"'\"    {DEBUG_LEX(\">\");rh_strncpy(yylval.str_val,YY_PARSED_STRING->str,MAXSTRLEN);BEGIN INBLOC ;/* chaine finie */ return NON_IDENTIFIER_VALUE;}\n<STRING2>\\n     {snprintf(err_str,ERRLEN,\"in \\\"%s\\\", line %d: closing quote missing.\",current_file->str,yylineno); set_error(err_str);yylineno++;return _ERROR_;}\n<STRING2>.      {YY_BUFFER_APPEND(yytext);DEBUG_LEX(\"%c\",*yytext);/* caractere de la chaine */}\n\n<INBLOC>{COMMENT}   DEBUG_LEX(\"comment: \\\"%s\\\"\\n\", yytext);/* ignore */\n<YY_INIT>{COMMENT}  DEBUG_LEX(\"comment: \\\"%s\\\"\\n\", yytext);/* ignore */\n<INBLOC>{COMMENT2}  DEBUG_LEX(\"comment: \\\"%s\\\"\\n\", yytext);/* ignore */\n<YY_INIT>{COMMENT2} DEBUG_LEX(\"comment: \\\"%s\\\"\\n\", yytext);/* ignore */\n\n{SPACE}        ;/* ignore */\n{NL}          yylineno++;/* ignore */\n\n. { snprintf(err_str,ERRLEN,\"in \\\"%s\\\", line %d: '%c' unexpected\",current_file->str,yylineno,*yytext); set_error(err_str);return _ERROR_;}\n\n%%\n\nint yywrap(void){\n    return 1;\n}\n\nvoid yyreset(void){\n    YY_FLUSH_BUFFER;\n    YY_USER_INIT;\n}\n\nvoid yy_set_current_file(const char *file)\n{\n    g_string_assign(current_file, file);\n}\n"
  },
  {
    "path": "src/cfg_parsing/conf_yacc.c",
    "content": "/* A Bison parser, made by GNU Bison 3.0.4.  */\n\n/* Bison implementation for Yacc-like parsers in C\n\n   Copyright (C) 1984, 1989-1990, 2000-2015 Free Software Foundation, Inc.\n\n   This program is free software: you can redistribute it and/or modify\n   it under the terms of the GNU General Public License as published by\n   the Free Software Foundation, either version 3 of the License, or\n   (at your option) any later version.\n\n   This program is distributed in the hope that it will be useful,\n   but WITHOUT ANY WARRANTY; without even the implied warranty of\n   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n   GNU General Public License for more details.\n\n   You should have received a copy of the GNU General Public License\n   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */\n\n/* As a special exception, you may create a larger work that contains\n   part or all of the Bison parser skeleton and distribute that work\n   under terms of your choice, so long as that work isn't itself a\n   parser generator using the skeleton or a modified version thereof\n   as a parser skeleton.  Alternatively, if you modify or redistribute\n   the parser skeleton itself, you may (at your option) remove this\n   special exception, which will cause the skeleton and the resulting\n   Bison output files to be licensed under the GNU General Public\n   License without this special exception.\n\n   This special exception was added by the Free Software Foundation in\n   version 2.2 of Bison.  */\n\n/* C LALR(1) parser skeleton written by Richard Stallman, by\n   simplifying the original so-called \"semantic\" parser.  */\n\n/* All symbols defined below should begin with yy or YY, to avoid\n   infringing on user name space.  
This should be done even for local\n   variables, as they might otherwise be expanded by user macros.\n   There are some unavoidable exceptions within include files to\n   define necessary library symbols; they are noted \"INFRINGES ON\n   USER NAME SPACE\" below.  */\n\n/* Identify Bison output.  */\n#define YYBISON 1\n\n/* Bison version.  */\n#define YYBISON_VERSION \"3.0.4\"\n\n/* Skeleton name.  */\n#define YYSKELETON_NAME \"yacc.c\"\n\n/* Pure parsers.  */\n#define YYPURE 0\n\n/* Push parsers.  */\n#define YYPUSH 0\n\n/* Pull parsers.  */\n#define YYPULL 1\n\n\n\n\n/* Copy the first part of user declarations.  */\n#line 1 \"conf_yacc.y\" /* yacc.c:339  */\n\n\n/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2004-2009 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n\n#include \"config.h\"\n#include \"analyze.h\"\n\n#include <stdio.h>\n\n#if HAVE_STRING_H\n#   include <string.h>\n#endif\n\n    int yylex(void);\n    void yyerror(const char *);\n    extern int yylineno;\n    extern char * yytext;\n\n    list_items * program_result=NULL;\n\n\t/* stock le message d'erreur donne par le lexer */\n    char local_errormsg[1024]=\"\";\n\n    /* stock le message d'erreur complet */\n    char extern_errormsg[1024]=\"\";\n\n#ifdef _DEBUG_PARSING\n#define DEBUG_YACC   rh_config_print_list\n#else\n/* do nothing */\nstatic void DEBUG_YACC( FILE * output, list_items * list ) {return ;}\n#endif\n\n\n\n#line 115 \"conf_yacc.c\" /* yacc.c:339  */\n\n# ifndef YY_NULLPTR\n#  if defined __cplusplus && 201103L <= __cplusplus\n#   define YY_NULLPTR nullptr\n#  else\n#   define YY_NULLPTR 0\n#  endif\n# endif\n\n/* Enabling verbose error 
messages.  */\n#ifdef YYERROR_VERBOSE\n# undef YYERROR_VERBOSE\n# define YYERROR_VERBOSE 1\n#else\n# define YYERROR_VERBOSE 1\n#endif\n\n/* In a future release of Bison, this section will be replaced\n   by #include \"y.tab.h\".  */\n#ifndef YY_YY_CONF_YACC_H_INCLUDED\n# define YY_YY_CONF_YACC_H_INCLUDED\n/* Debug traces.  */\n#ifndef YYDEBUG\n# define YYDEBUG 0\n#endif\n#if YYDEBUG\nextern int yydebug;\n#endif\n\n/* Token type.  */\n#ifndef YYTOKENTYPE\n# define YYTOKENTYPE\n  enum yytokentype\n  {\n    _ERROR_ = 258,\n    BEGIN_BLOCK = 259,\n    END_BLOCK = 260,\n    END_AFFECT = 261,\n    BEGIN_SUB_BLOCK = 262,\n    END_SUB_BLOCK = 263,\n    BEGIN_PARENTHESIS = 264,\n    END_PARENTHESIS = 265,\n    VALUE_SEPARATOR = 266,\n    AFFECT = 267,\n    EQUAL = 268,\n    DIFF = 269,\n    GT = 270,\n    GT_EQ = 271,\n    LT = 272,\n    LT_EQ = 273,\n    AND = 274,\n    OR = 275,\n    NOT = 276,\n    UNION = 277,\n    INTER = 278,\n    IDENTIFIER = 279,\n    NON_IDENTIFIER_VALUE = 280,\n    ENV_VAR = 281\n  };\n#endif\n/* Tokens.  */\n#define _ERROR_ 258\n#define BEGIN_BLOCK 259\n#define END_BLOCK 260\n#define END_AFFECT 261\n#define BEGIN_SUB_BLOCK 262\n#define END_SUB_BLOCK 263\n#define BEGIN_PARENTHESIS 264\n#define END_PARENTHESIS 265\n#define VALUE_SEPARATOR 266\n#define AFFECT 267\n#define EQUAL 268\n#define DIFF 269\n#define GT 270\n#define GT_EQ 271\n#define LT 272\n#define LT_EQ 273\n#define AND 274\n#define OR 275\n#define NOT 276\n#define UNION 277\n#define INTER 278\n#define IDENTIFIER 279\n#define NON_IDENTIFIER_VALUE 280\n#define ENV_VAR 281\n\n/* Value type.  */\n#if ! defined YYSTYPE && ! 
defined YYSTYPE_IS_DECLARED\n\nunion YYSTYPE\n{\n#line 52 \"conf_yacc.y\" /* yacc.c:355  */\n\n    char         str_val[MAXSTRLEN];\n    list_items              *  list;\n    generic_item            *  item;\n    arg_list_t\t            *  arg_list;\n\n#line 214 \"conf_yacc.c\" /* yacc.c:355  */\n};\n\ntypedef union YYSTYPE YYSTYPE;\n# define YYSTYPE_IS_TRIVIAL 1\n# define YYSTYPE_IS_DECLARED 1\n#endif\n\n\nextern YYSTYPE yylval;\n\nint yyparse (void);\n\n#endif /* !YY_YY_CONF_YACC_H_INCLUDED  */\n\n/* Copy the second part of user declarations.  */\n\n#line 231 \"conf_yacc.c\" /* yacc.c:358  */\n\n#ifdef short\n# undef short\n#endif\n\n#ifdef YYTYPE_UINT8\ntypedef YYTYPE_UINT8 yytype_uint8;\n#else\ntypedef unsigned char yytype_uint8;\n#endif\n\n#ifdef YYTYPE_INT8\ntypedef YYTYPE_INT8 yytype_int8;\n#else\ntypedef signed char yytype_int8;\n#endif\n\n#ifdef YYTYPE_UINT16\ntypedef YYTYPE_UINT16 yytype_uint16;\n#else\ntypedef unsigned short int yytype_uint16;\n#endif\n\n#ifdef YYTYPE_INT16\ntypedef YYTYPE_INT16 yytype_int16;\n#else\ntypedef short int yytype_int16;\n#endif\n\n#ifndef YYSIZE_T\n# ifdef __SIZE_TYPE__\n#  define YYSIZE_T __SIZE_TYPE__\n# elif defined size_t\n#  define YYSIZE_T size_t\n# elif ! 
defined YYSIZE_T\n#  include <stddef.h> /* INFRINGES ON USER NAME SPACE */\n#  define YYSIZE_T size_t\n# else\n#  define YYSIZE_T unsigned int\n# endif\n#endif\n\n#define YYSIZE_MAXIMUM ((YYSIZE_T) -1)\n\n#ifndef YY_\n# if defined YYENABLE_NLS && YYENABLE_NLS\n#  if ENABLE_NLS\n#   include <libintl.h> /* INFRINGES ON USER NAME SPACE */\n#   define YY_(Msgid) dgettext (\"bison-runtime\", Msgid)\n#  endif\n# endif\n# ifndef YY_\n#  define YY_(Msgid) Msgid\n# endif\n#endif\n\n#ifndef YY_ATTRIBUTE\n# if (defined __GNUC__                                               \\\n      && (2 < __GNUC__ || (__GNUC__ == 2 && 96 <= __GNUC_MINOR__)))  \\\n     || defined __SUNPRO_C && 0x5110 <= __SUNPRO_C\n#  define YY_ATTRIBUTE(Spec) __attribute__(Spec)\n# else\n#  define YY_ATTRIBUTE(Spec) /* empty */\n# endif\n#endif\n\n#ifndef YY_ATTRIBUTE_PURE\n# define YY_ATTRIBUTE_PURE   YY_ATTRIBUTE ((__pure__))\n#endif\n\n#ifndef YY_ATTRIBUTE_UNUSED\n# define YY_ATTRIBUTE_UNUSED YY_ATTRIBUTE ((__unused__))\n#endif\n\n#if !defined _Noreturn \\\n     && (!defined __STDC_VERSION__ || __STDC_VERSION__ < 201112)\n# if defined _MSC_VER && 1200 <= _MSC_VER\n#  define _Noreturn __declspec (noreturn)\n# else\n#  define _Noreturn YY_ATTRIBUTE ((__noreturn__))\n# endif\n#endif\n\n/* Suppress unused-variable warnings by \"using\" E.  */\n#if ! defined lint || defined __GNUC__\n# define YYUSE(E) ((void) (E))\n#else\n# define YYUSE(E) /* empty */\n#endif\n\n#if defined __GNUC__ && 407 <= __GNUC__ * 100 + __GNUC_MINOR__\n/* Suppress an incorrect diagnostic about yylval being uninitialized.  
*/\n# define YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN \\\n    _Pragma (\"GCC diagnostic push\") \\\n    _Pragma (\"GCC diagnostic ignored \\\"-Wuninitialized\\\"\")\\\n    _Pragma (\"GCC diagnostic ignored \\\"-Wmaybe-uninitialized\\\"\")\n# define YY_IGNORE_MAYBE_UNINITIALIZED_END \\\n    _Pragma (\"GCC diagnostic pop\")\n#else\n# define YY_INITIAL_VALUE(Value) Value\n#endif\n#ifndef YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN\n# define YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN\n# define YY_IGNORE_MAYBE_UNINITIALIZED_END\n#endif\n#ifndef YY_INITIAL_VALUE\n# define YY_INITIAL_VALUE(Value) /* Nothing. */\n#endif\n\n\n#if ! defined yyoverflow || YYERROR_VERBOSE\n\n/* The parser invokes alloca or malloc; define the necessary symbols.  */\n\n# ifdef YYSTACK_USE_ALLOCA\n#  if YYSTACK_USE_ALLOCA\n#   ifdef __GNUC__\n#    define YYSTACK_ALLOC __builtin_alloca\n#   elif defined __BUILTIN_VA_ARG_INCR\n#    include <alloca.h> /* INFRINGES ON USER NAME SPACE */\n#   elif defined _AIX\n#    define YYSTACK_ALLOC __alloca\n#   elif defined _MSC_VER\n#    include <malloc.h> /* INFRINGES ON USER NAME SPACE */\n#    define alloca _alloca\n#   else\n#    define YYSTACK_ALLOC alloca\n#    if ! defined _ALLOCA_H && ! defined EXIT_SUCCESS\n#     include <stdlib.h> /* INFRINGES ON USER NAME SPACE */\n      /* Use EXIT_SUCCESS as a witness for stdlib.h.  */\n#     ifndef EXIT_SUCCESS\n#      define EXIT_SUCCESS 0\n#     endif\n#    endif\n#   endif\n#  endif\n# endif\n\n# ifdef YYSTACK_ALLOC\n   /* Pacify GCC's 'empty if-body' warning.  */\n#  define YYSTACK_FREE(Ptr) do { /* empty */; } while (0)\n#  ifndef YYSTACK_ALLOC_MAXIMUM\n    /* The OS might guarantee only one guard page at the bottom of the stack,\n       and a page size can be as small as 4096 bytes.  So we cannot safely\n       invoke alloca (N) if N exceeds 4096.  Use a slightly smaller number\n       to allow for a few compiler-allocated temporary stack slots.  
*/\n#   define YYSTACK_ALLOC_MAXIMUM 4032 /* reasonable circa 2006 */\n#  endif\n# else\n#  define YYSTACK_ALLOC YYMALLOC\n#  define YYSTACK_FREE YYFREE\n#  ifndef YYSTACK_ALLOC_MAXIMUM\n#   define YYSTACK_ALLOC_MAXIMUM YYSIZE_MAXIMUM\n#  endif\n#  if (defined __cplusplus && ! defined EXIT_SUCCESS \\\n       && ! ((defined YYMALLOC || defined malloc) \\\n             && (defined YYFREE || defined free)))\n#   include <stdlib.h> /* INFRINGES ON USER NAME SPACE */\n#   ifndef EXIT_SUCCESS\n#    define EXIT_SUCCESS 0\n#   endif\n#  endif\n#  ifndef YYMALLOC\n#   define YYMALLOC malloc\n#   if ! defined malloc && ! defined EXIT_SUCCESS\nvoid *malloc (YYSIZE_T); /* INFRINGES ON USER NAME SPACE */\n#   endif\n#  endif\n#  ifndef YYFREE\n#   define YYFREE free\n#   if ! defined free && ! defined EXIT_SUCCESS\nvoid free (void *); /* INFRINGES ON USER NAME SPACE */\n#   endif\n#  endif\n# endif\n#endif /* ! defined yyoverflow || YYERROR_VERBOSE */\n\n\n#if (! defined yyoverflow \\\n     && (! defined __cplusplus \\\n         || (defined YYSTYPE_IS_TRIVIAL && YYSTYPE_IS_TRIVIAL)))\n\n/* A type that is properly aligned for any stack member.  */\nunion yyalloc\n{\n  yytype_int16 yyss_alloc;\n  YYSTYPE yyvs_alloc;\n};\n\n/* The size of the maximum gap between one aligned stack and the next.  */\n# define YYSTACK_GAP_MAXIMUM (sizeof (union yyalloc) - 1)\n\n/* The size of an array large to enough to hold all stacks, each with\n   N elements.  */\n# define YYSTACK_BYTES(N) \\\n     ((N) * (sizeof (yytype_int16) + sizeof (YYSTYPE)) \\\n      + YYSTACK_GAP_MAXIMUM)\n\n# define YYCOPY_NEEDED 1\n\n/* Relocate STACK from its old location to the new one.  The\n   local variables YYSIZE and YYSTACKSIZE give the old and new number of\n   elements in the stack, and YYPTR gives the new location of the\n   stack.  Advance YYPTR to a properly aligned location for the next\n   stack.  
*/\n# define YYSTACK_RELOCATE(Stack_alloc, Stack)                           \\\n    do                                                                  \\\n      {                                                                 \\\n        YYSIZE_T yynewbytes;                                            \\\n        YYCOPY (&yyptr->Stack_alloc, Stack, yysize);                    \\\n        Stack = &yyptr->Stack_alloc;                                    \\\n        yynewbytes = yystacksize * sizeof (*Stack) + YYSTACK_GAP_MAXIMUM; \\\n        yyptr += yynewbytes / sizeof (*yyptr);                          \\\n      }                                                                 \\\n    while (0)\n\n#endif\n\n#if defined YYCOPY_NEEDED && YYCOPY_NEEDED\n/* Copy COUNT objects from SRC to DST.  The source and destination do\n   not overlap.  */\n# ifndef YYCOPY\n#  if defined __GNUC__ && 1 < __GNUC__\n#   define YYCOPY(Dst, Src, Count) \\\n      __builtin_memcpy (Dst, Src, (Count) * sizeof (*(Src)))\n#  else\n#   define YYCOPY(Dst, Src, Count)              \\\n      do                                        \\\n        {                                       \\\n          YYSIZE_T yyi;                         \\\n          for (yyi = 0; yyi < (Count); yyi++)   \\\n            (Dst)[yyi] = (Src)[yyi];            \\\n        }                                       \\\n      while (0)\n#  endif\n# endif\n#endif /* !YYCOPY_NEEDED */\n\n/* YYFINAL -- State number of the termination state.  */\n#define YYFINAL  7\n/* YYLAST -- Last index in YYTABLE.  */\n#define YYLAST   112\n\n/* YYNTOKENS -- Number of terminals.  */\n#define YYNTOKENS  27\n/* YYNNTS -- Number of nonterminals.  */\n#define YYNNTS  15\n/* YYNRULES -- Number of rules.  */\n#define YYNRULES  43\n/* YYNSTATES -- Number of states.  */\n#define YYNSTATES  95\n\n/* YYTRANSLATE[YYX] -- Symbol number corresponding to YYX as returned\n   by yylex, with out-of-bounds checking.  
*/\n#define YYUNDEFTOK  2\n#define YYMAXUTOK   281\n\n#define YYTRANSLATE(YYX)                                                \\\n  ((unsigned int) (YYX) <= YYMAXUTOK ? yytranslate[YYX] : YYUNDEFTOK)\n\n/* YYTRANSLATE[TOKEN-NUM] -- Symbol number corresponding to TOKEN-NUM\n   as returned by yylex, without out-of-bounds checking.  */\nstatic const yytype_uint8 yytranslate[] =\n{\n       0,     2,     2,     2,     2,     2,     2,     2,     2,     2,\n       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,\n       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,\n       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,\n       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,\n       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,\n       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,\n       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,\n       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,\n       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,\n       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,\n       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,\n       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,\n       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,\n       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,\n       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,\n       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,\n       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,\n       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,\n       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,\n       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,\n       2,     2,     2,     2,     2,     2,     2,     2,     
2,     2,\n       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,\n       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,\n       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,\n       2,     2,     2,     2,     2,     2,     1,     2,     3,     4,\n       5,     6,     7,     8,     9,    10,    11,    12,    13,    14,\n      15,    16,    17,    18,    19,    20,    21,    22,    23,    24,\n      25,    26\n};\n\n#if YYDEBUG\n  /* YYRLINE[YYN] -- Source line where rule number YYN was defined.  */\nstatic const yytype_uint8 yyrline[] =\n{\n       0,   101,   101,   105,   106,   110,   111,   115,   116,   120,\n     121,   125,   126,   127,   131,   134,   135,   139,   140,   141,\n     142,   143,   144,   148,   149,   153,   154,   155,   159,   160,\n     161,   162,   163,   164,   168,   169,   170,   171,   172,   176,\n     177,   178,   179,   180\n};\n#endif\n\n#if YYDEBUG || YYERROR_VERBOSE || 1\n/* YYTNAME[SYMBOL-NUM] -- String name of the symbol SYMBOL-NUM.\n   First, the terminals, then, starting at YYNTOKENS, nonterminals.  */\nstatic const char *const yytname[] =\n{\n  \"$end\", \"error\", \"$undefined\", \"_ERROR_\", \"BEGIN_BLOCK\", \"END_BLOCK\",\n  \"END_AFFECT\", \"BEGIN_SUB_BLOCK\", \"END_SUB_BLOCK\", \"BEGIN_PARENTHESIS\",\n  \"END_PARENTHESIS\", \"VALUE_SEPARATOR\", \"AFFECT\", \"EQUAL\", \"DIFF\", \"GT\",\n  \"GT_EQ\", \"LT\", \"LT_EQ\", \"AND\", \"OR\", \"NOT\", \"UNION\", \"INTER\",\n  \"IDENTIFIER\", \"NON_IDENTIFIER_VALUE\", \"ENV_VAR\", \"$accept\", \"program\",\n  \"listblock\", \"block\", \"listitems\", \"definition\", \"value\", \"affect\",\n  \"extended_affect\", \"key_value\", \"arglist\", \"extended_key_value\",\n  \"expression\", \"set\", \"subblock\", YY_NULLPTR\n};\n#endif\n\n# ifdef YYPRINT\n/* YYTOKNUM[NUM] -- (External) token number corresponding to the\n   (internal) symbol number NUM (which must be that of a token).  
*/\nstatic const yytype_uint16 yytoknum[] =\n{\n       0,   256,   257,   258,   259,   260,   261,   262,   263,   264,\n     265,   266,   267,   268,   269,   270,   271,   272,   273,   274,\n     275,   276,   277,   278,   279,   280,   281\n};\n# endif\n\n#define YYPACT_NINF -25\n\n#define yypact_value_is_default(Yystate) \\\n  (!!((Yystate) == (-25)))\n\n#define YYTABLE_NINF -1\n\n#define yytable_value_is_error(Yytable_value) \\\n  0\n\n  /* YYPACT[STATE-NUM] -- Index in YYTABLE of the portion describing\n     STATE-NUM.  */\nstatic const yytype_int8 yypact[] =\n{\n      -3,     1,    24,   -25,    -3,     2,    66,   -25,   -25,    -1,\n      74,     2,     4,   -25,   -25,     2,    -2,   -10,    39,   -25,\n     -25,   -25,   -10,    82,    19,    41,    42,    86,    65,   -25,\n      61,    22,   -25,   -25,   -25,   -25,    43,   -25,     7,   -25,\n      84,    72,    62,    19,    52,   -25,    81,   -10,   -10,   -10,\n     -10,   -10,   -10,   -10,   -25,   -10,   -25,    54,    54,   -25,\n      52,    52,    54,    59,    42,   101,    69,   104,   -10,   -25,\n     -25,    76,    52,   -25,    80,   -25,   -25,   -25,   -25,   -25,\n     -25,    95,    84,    88,    88,    81,    81,    54,   -25,   -25,\n     -25,   -25,   -25,   -25,   -25\n};\n\n  /* YYDEFACT[STATE-NUM] -- Default reduction number in state STATE-NUM.\n     Performed when YYTABLE does not specify something else to do.  Zero\n     means the default is an error.  
*/\nstatic const yytype_uint8 yydefact[] =\n{\n       4,     0,     0,     2,     4,     8,     0,     1,     3,     0,\n       0,     8,     0,     9,    10,     8,     8,     0,     0,     6,\n       7,    16,     0,     0,     0,     0,    38,     0,    26,    28,\n       0,     0,    11,    12,    13,    14,     8,    24,     0,     5,\n      38,     0,     0,     0,     0,    29,    35,     0,     0,     0,\n       0,     0,     0,     0,    41,     0,    42,     0,     0,    43,\n       0,     0,     0,     0,     0,     0,     0,     0,     0,    31,\n      34,     0,     0,    38,     0,    17,    18,    19,    20,    21,\n      22,     0,     0,    32,    33,    36,    37,     0,    39,    40,\n      15,    23,    30,    27,    25\n};\n\n  /* YYPGOTO[NTERM-NUM].  */\nstatic const yytype_int8 yypgoto[] =\n{\n     -25,   -25,   107,   -25,    -7,   -25,   -15,   -25,   -25,   -25,\n      57,   -24,   -16,   -13,   -25\n};\n\n  /* YYDEFGOTO[NTERM-NUM].  */\nstatic const yytype_int8 yydefgoto[] =\n{\n      -1,     2,     3,     4,    10,    11,    37,    12,    13,    28,\n      38,    29,    41,    42,    14\n};\n\n  /* YYTABLE[YYPACT[STATE-NUM]] -- What to do in state STATE-NUM.  If\n     positive, shift that token.  If negative, reduce the rule whose\n     number is the opposite.  If YYTABLE_NINF, syntax error.  
*/\nstatic const yytype_uint8 yytable[] =\n{\n      30,    45,    35,    31,    20,     5,    16,    24,    23,    27,\n      21,    17,    46,    22,    32,    33,    34,    67,    68,    25,\n      66,     1,    26,    18,     7,     6,     9,    71,    24,    65,\n      59,    46,    74,    75,    76,    77,    78,    79,    80,    45,\n      25,    83,    84,    40,    60,    61,    36,    85,    86,    16,\n      43,    47,    62,    91,    17,    48,    49,    50,    51,    52,\n      53,    72,    44,    62,    63,    40,    18,    64,    87,    56,\n      15,    71,    70,    44,    55,    63,    73,    89,    82,    19,\n      57,    58,    69,    82,    60,    61,    92,    39,    57,    58,\n      93,    57,    58,    47,    54,    57,    58,    48,    49,    50,\n      51,    52,    53,    60,    61,    94,    68,    57,    58,    88,\n      90,     8,    81\n};\n\nstatic const yytype_uint8 yycheck[] =\n{\n      16,    25,    17,    16,    11,     4,     7,     9,    15,    16,\n       6,    12,    25,     9,    24,    25,    26,    10,    11,    21,\n      36,    24,    24,    24,     0,    24,    24,    43,     9,    36,\n       8,    44,    47,    48,    49,    50,    51,    52,    53,    63,\n      21,    57,    58,    24,    22,    23,     7,    60,    61,     7,\n       9,     9,     9,    68,    12,    13,    14,    15,    16,    17,\n      18,     9,    21,     9,    21,    24,    24,    24,     9,     8,\n       4,    87,    10,    21,     9,    21,    24,     8,    24,     5,\n      19,    20,    10,    24,    22,    23,    10,     5,    19,    20,\n      10,    19,    20,     9,     8,    19,    20,    13,    14,    15,\n      16,    17,    18,    22,    23,    10,    11,    19,    20,     8,\n       6,     4,    55\n};\n\n  /* YYSTOS[STATE-NUM] -- The (internal number of the) accessing\n     symbol of state STATE-NUM.  
*/\nstatic const yytype_uint8 yystos[] =\n{\n       0,    24,    28,    29,    30,     4,    24,     0,    29,    24,\n      31,    32,    34,    35,    41,     4,     7,    12,    24,     5,\n      31,     6,     9,    31,     9,    21,    24,    31,    36,    38,\n      39,    40,    24,    25,    26,    33,     7,    33,    37,     5,\n      24,    39,    40,     9,    21,    38,    40,     9,    13,    14,\n      15,    16,    17,    18,     8,     9,     8,    19,    20,     8,\n      22,    23,     9,    21,    24,    31,    39,    10,    11,    10,\n      10,    39,     9,    24,    33,    33,    33,    33,    33,    33,\n      33,    37,    24,    39,    39,    40,    40,     9,     8,     8,\n       6,    33,    10,    10,    10\n};\n\n  /* YYR1[YYN] -- Symbol number of symbol that rule YYN derives.  */\nstatic const yytype_uint8 yyr1[] =\n{\n       0,    27,    28,    29,    29,    30,    30,    31,    31,    32,\n      32,    33,    33,    33,    34,    35,    35,    36,    36,    36,\n      36,    36,    36,    37,    37,    38,    38,    38,    39,    39,\n      39,    39,    39,    39,    40,    40,    40,    40,    40,    41,\n      41,    41,    41,    41\n};\n\n  /* YYR2[YYN] -- Number of symbols on the right hand side of rule YYN.  
*/\nstatic const yytype_uint8 yyr2[] =\n{\n       0,     2,     1,     2,     0,     5,     4,     2,     0,     1,\n       1,     1,     1,     1,     3,     5,     2,     3,     3,     3,\n       3,     3,     3,     3,     1,     4,     1,     4,     1,     2,\n       4,     3,     3,     3,     3,     2,     3,     3,     1,     5,\n       5,     4,     4,     4\n};\n\n\n#define yyerrok         (yyerrstatus = 0)\n#define yyclearin       (yychar = YYEMPTY)\n#define YYEMPTY         (-2)\n#define YYEOF           0\n\n#define YYACCEPT        goto yyacceptlab\n#define YYABORT         goto yyabortlab\n#define YYERROR         goto yyerrorlab\n\n\n#define YYRECOVERING()  (!!yyerrstatus)\n\n#define YYBACKUP(Token, Value)                                  \\\ndo                                                              \\\n  if (yychar == YYEMPTY)                                        \\\n    {                                                           \\\n      yychar = (Token);                                         \\\n      yylval = (Value);                                         \\\n      YYPOPSTACK (yylen);                                       \\\n      yystate = *yyssp;                                         \\\n      goto yybackup;                                            \\\n    }                                                           \\\n  else                                                          \\\n    {                                                           \\\n      yyerror (YY_(\"syntax error: cannot back up\")); \\\n      YYERROR;                                                  \\\n    }                                                           \\\nwhile (0)\n\n/* Error token number */\n#define YYTERROR        1\n#define YYERRCODE       256\n\n\n\n/* Enable debugging if requested.  
*/\n#if YYDEBUG\n\n# ifndef YYFPRINTF\n#  include <stdio.h> /* INFRINGES ON USER NAME SPACE */\n#  define YYFPRINTF fprintf\n# endif\n\n# define YYDPRINTF(Args)                        \\\ndo {                                            \\\n  if (yydebug)                                  \\\n    YYFPRINTF Args;                             \\\n} while (0)\n\n/* This macro is provided for backward compatibility. */\n#ifndef YY_LOCATION_PRINT\n# define YY_LOCATION_PRINT(File, Loc) ((void) 0)\n#endif\n\n\n# define YY_SYMBOL_PRINT(Title, Type, Value, Location)                    \\\ndo {                                                                      \\\n  if (yydebug)                                                            \\\n    {                                                                     \\\n      YYFPRINTF (stderr, \"%s \", Title);                                   \\\n      yy_symbol_print (stderr,                                            \\\n                  Type, Value); \\\n      YYFPRINTF (stderr, \"\\n\");                                           \\\n    }                                                                     \\\n} while (0)\n\n\n/*----------------------------------------.\n| Print this symbol's value on YYOUTPUT.  |\n`----------------------------------------*/\n\nstatic void\nyy_symbol_value_print (FILE *yyoutput, int yytype, YYSTYPE const * const yyvaluep)\n{\n  FILE *yyo = yyoutput;\n  YYUSE (yyo);\n  if (!yyvaluep)\n    return;\n# ifdef YYPRINT\n  if (yytype < YYNTOKENS)\n    YYPRINT (yyoutput, yytoknum[yytype], *yyvaluep);\n# endif\n  YYUSE (yytype);\n}\n\n\n/*--------------------------------.\n| Print this symbol on YYOUTPUT.  |\n`--------------------------------*/\n\nstatic void\nyy_symbol_print (FILE *yyoutput, int yytype, YYSTYPE const * const yyvaluep)\n{\n  YYFPRINTF (yyoutput, \"%s %s (\",\n             yytype < YYNTOKENS ? 
\"token\" : \"nterm\", yytname[yytype]);\n\n  yy_symbol_value_print (yyoutput, yytype, yyvaluep);\n  YYFPRINTF (yyoutput, \")\");\n}\n\n/*------------------------------------------------------------------.\n| yy_stack_print -- Print the state stack from its BOTTOM up to its |\n| TOP (included).                                                   |\n`------------------------------------------------------------------*/\n\nstatic void\nyy_stack_print (yytype_int16 *yybottom, yytype_int16 *yytop)\n{\n  YYFPRINTF (stderr, \"Stack now\");\n  for (; yybottom <= yytop; yybottom++)\n    {\n      int yybot = *yybottom;\n      YYFPRINTF (stderr, \" %d\", yybot);\n    }\n  YYFPRINTF (stderr, \"\\n\");\n}\n\n# define YY_STACK_PRINT(Bottom, Top)                            \\\ndo {                                                            \\\n  if (yydebug)                                                  \\\n    yy_stack_print ((Bottom), (Top));                           \\\n} while (0)\n\n\n/*------------------------------------------------.\n| Report that the YYRULE is going to be reduced.  |\n`------------------------------------------------*/\n\nstatic void\nyy_reduce_print (yytype_int16 *yyssp, YYSTYPE *yyvsp, int yyrule)\n{\n  unsigned long int yylno = yyrline[yyrule];\n  int yynrhs = yyr2[yyrule];\n  int yyi;\n  YYFPRINTF (stderr, \"Reducing stack by rule %d (line %lu):\\n\",\n             yyrule - 1, yylno);\n  /* The symbols being reduced.  
*/\n  for (yyi = 0; yyi < yynrhs; yyi++)\n    {\n      YYFPRINTF (stderr, \"   $%d = \", yyi + 1);\n      yy_symbol_print (stderr,\n                       yystos[yyssp[yyi + 1 - yynrhs]],\n                       &(yyvsp[(yyi + 1) - (yynrhs)])\n                                              );\n      YYFPRINTF (stderr, \"\\n\");\n    }\n}\n\n# define YY_REDUCE_PRINT(Rule)          \\\ndo {                                    \\\n  if (yydebug)                          \\\n    yy_reduce_print (yyssp, yyvsp, Rule); \\\n} while (0)\n\n/* Nonzero means print parse trace.  It is left uninitialized so that\n   multiple parsers can coexist.  */\nint yydebug;\n#else /* !YYDEBUG */\n# define YYDPRINTF(Args)\n# define YY_SYMBOL_PRINT(Title, Type, Value, Location)\n# define YY_STACK_PRINT(Bottom, Top)\n# define YY_REDUCE_PRINT(Rule)\n#endif /* !YYDEBUG */\n\n\n/* YYINITDEPTH -- initial size of the parser's stacks.  */\n#ifndef YYINITDEPTH\n# define YYINITDEPTH 200\n#endif\n\n/* YYMAXDEPTH -- maximum size the stacks can grow to (effective only\n   if the built-in stack extension method is used).\n\n   Do not make this value too large; the results are undefined if\n   YYSTACK_ALLOC_MAXIMUM < YYSTACK_BYTES (YYMAXDEPTH)\n   evaluated with infinite-precision integer arithmetic.  */\n\n#ifndef YYMAXDEPTH\n# define YYMAXDEPTH 10000\n#endif\n\n\n#if YYERROR_VERBOSE\n\n# ifndef yystrlen\n#  if defined __GLIBC__ && defined _STRING_H\n#   define yystrlen strlen\n#  else\n/* Return the length of YYSTR.  */\nstatic YYSIZE_T\nyystrlen (const char *yystr)\n{\n  YYSIZE_T yylen;\n  for (yylen = 0; yystr[yylen]; yylen++)\n    continue;\n  return yylen;\n}\n#  endif\n# endif\n\n# ifndef yystpcpy\n#  if defined __GLIBC__ && defined _STRING_H && defined _GNU_SOURCE\n#   define yystpcpy stpcpy\n#  else\n/* Copy YYSRC to YYDEST, returning the address of the terminating '\\0' in\n   YYDEST.  
*/\nstatic char *\nyystpcpy (char *yydest, const char *yysrc)\n{\n  char *yyd = yydest;\n  const char *yys = yysrc;\n\n  while ((*yyd++ = *yys++) != '\\0')\n    continue;\n\n  return yyd - 1;\n}\n#  endif\n# endif\n\n# ifndef yytnamerr\n/* Copy to YYRES the contents of YYSTR after stripping away unnecessary\n   quotes and backslashes, so that it's suitable for yyerror.  The\n   heuristic is that double-quoting is unnecessary unless the string\n   contains an apostrophe, a comma, or backslash (other than\n   backslash-backslash).  YYSTR is taken from yytname.  If YYRES is\n   null, do not copy; instead, return the length of what the result\n   would have been.  */\nstatic YYSIZE_T\nyytnamerr (char *yyres, const char *yystr)\n{\n  if (*yystr == '\"')\n    {\n      YYSIZE_T yyn = 0;\n      char const *yyp = yystr;\n\n      for (;;)\n        switch (*++yyp)\n          {\n          case '\\'':\n          case ',':\n            goto do_not_strip_quotes;\n\n          case '\\\\':\n            if (*++yyp != '\\\\')\n              goto do_not_strip_quotes;\n            /* Fall through.  */\n          default:\n            if (yyres)\n              yyres[yyn] = *yyp;\n            yyn++;\n            break;\n\n          case '\"':\n            if (yyres)\n              yyres[yyn] = '\\0';\n            return yyn;\n          }\n    do_not_strip_quotes: ;\n    }\n\n  if (! yyres)\n    return yystrlen (yystr);\n\n  return yystpcpy (yyres, yystr) - yyres;\n}\n# endif\n\n/* Copy into *YYMSG, which is of size *YYMSG_ALLOC, an error message\n   about the unexpected token YYTOKEN for the state stack whose top is\n   YYSSP.\n\n   Return 0 if *YYMSG was successfully written.  Return 1 if *YYMSG is\n   not large enough to hold the message.  In that case, also set\n   *YYMSG_ALLOC to the required number of bytes.  Return 2 if the\n   required number of bytes is too large to store.  
*/\nstatic int\nyysyntax_error (YYSIZE_T *yymsg_alloc, char **yymsg,\n                yytype_int16 *yyssp, int yytoken)\n{\n  YYSIZE_T yysize0 = yytnamerr (YY_NULLPTR, yytname[yytoken]);\n  YYSIZE_T yysize = yysize0;\n  enum { YYERROR_VERBOSE_ARGS_MAXIMUM = 5 };\n  /* Internationalized format string. */\n  const char *yyformat = YY_NULLPTR;\n  /* Arguments of yyformat. */\n  char const *yyarg[YYERROR_VERBOSE_ARGS_MAXIMUM];\n  /* Number of reported tokens (one for the \"unexpected\", one per\n     \"expected\"). */\n  int yycount = 0;\n\n  /* There are many possibilities here to consider:\n     - If this state is a consistent state with a default action, then\n       the only way this function was invoked is if the default action\n       is an error action.  In that case, don't check for expected\n       tokens because there are none.\n     - The only way there can be no lookahead present (in yychar) is if\n       this state is a consistent state with a default action.  Thus,\n       detecting the absence of a lookahead is sufficient to determine\n       that there is no unexpected or expected token to report.  In that\n       case, just report a simple \"syntax error\".\n     - Don't assume there isn't a lookahead just because this state is a\n       consistent state with a default action.  There might have been a\n       previous inconsistent state, consistent state with a non-default\n       action, or user semantic action that manipulated yychar.\n     - Of course, the expected token list depends on states to have\n       correct lookahead information, and it depends on the parser not\n       to perform extra reductions after fetching a lookahead from the\n       scanner and before detecting a syntax error.  Thus, state merging\n       (from LALR or IELR) and default reductions corrupt the expected\n       token list.  
However, the list is correct for canonical LR with\n       one exception: it will still contain any token that will not be\n       accepted due to an error action in a later state.\n  */\n  if (yytoken != YYEMPTY)\n    {\n      int yyn = yypact[*yyssp];\n      yyarg[yycount++] = yytname[yytoken];\n      if (!yypact_value_is_default (yyn))\n        {\n          /* Start YYX at -YYN if negative to avoid negative indexes in\n             YYCHECK.  In other words, skip the first -YYN actions for\n             this state because they are default actions.  */\n          int yyxbegin = yyn < 0 ? -yyn : 0;\n          /* Stay within bounds of both yycheck and yytname.  */\n          int yychecklim = YYLAST - yyn + 1;\n          int yyxend = yychecklim < YYNTOKENS ? yychecklim : YYNTOKENS;\n          int yyx;\n\n          for (yyx = yyxbegin; yyx < yyxend; ++yyx)\n            if (yycheck[yyx + yyn] == yyx && yyx != YYTERROR\n                && !yytable_value_is_error (yytable[yyx + yyn]))\n              {\n                if (yycount == YYERROR_VERBOSE_ARGS_MAXIMUM)\n                  {\n                    yycount = 1;\n                    yysize = yysize0;\n                    break;\n                  }\n                yyarg[yycount++] = yytname[yyx];\n                {\n                  YYSIZE_T yysize1 = yysize + yytnamerr (YY_NULLPTR, yytname[yyx]);\n                  if (! 
(yysize <= yysize1\n                         && yysize1 <= YYSTACK_ALLOC_MAXIMUM))\n                    return 2;\n                  yysize = yysize1;\n                }\n              }\n        }\n    }\n\n  switch (yycount)\n    {\n# define YYCASE_(N, S)                      \\\n      case N:                               \\\n        yyformat = S;                       \\\n      break\n      YYCASE_(0, YY_(\"syntax error\"));\n      YYCASE_(1, YY_(\"syntax error, unexpected %s\"));\n      YYCASE_(2, YY_(\"syntax error, unexpected %s, expecting %s\"));\n      YYCASE_(3, YY_(\"syntax error, unexpected %s, expecting %s or %s\"));\n      YYCASE_(4, YY_(\"syntax error, unexpected %s, expecting %s or %s or %s\"));\n      YYCASE_(5, YY_(\"syntax error, unexpected %s, expecting %s or %s or %s or %s\"));\n# undef YYCASE_\n    }\n\n  {\n    YYSIZE_T yysize1 = yysize + yystrlen (yyformat);\n    if (! (yysize <= yysize1 && yysize1 <= YYSTACK_ALLOC_MAXIMUM))\n      return 2;\n    yysize = yysize1;\n  }\n\n  if (*yymsg_alloc < yysize)\n    {\n      *yymsg_alloc = 2 * yysize;\n      if (! (yysize <= *yymsg_alloc\n             && *yymsg_alloc <= YYSTACK_ALLOC_MAXIMUM))\n        *yymsg_alloc = YYSTACK_ALLOC_MAXIMUM;\n      return 1;\n    }\n\n  /* Avoid sprintf, as that infringes on the user's name space.\n     Don't have undefined behavior even if the translation\n     produced a string with the wrong number of \"%s\"s.  */\n  {\n    char *yyp = *yymsg;\n    int yyi = 0;\n    while ((*yyp = *yyformat) != '\\0')\n      if (*yyp == '%' && yyformat[1] == 's' && yyi < yycount)\n        {\n          yyp += yytnamerr (yyp, yyarg[yyi++]);\n          yyformat += 2;\n        }\n      else\n        {\n          yyp++;\n          yyformat++;\n        }\n  }\n  return 0;\n}\n#endif /* YYERROR_VERBOSE */\n\n/*-----------------------------------------------.\n| Release the memory associated to this symbol.  
|\n`-----------------------------------------------*/\n\nstatic void\nyydestruct (const char *yymsg, int yytype, YYSTYPE *yyvaluep)\n{\n  YYUSE (yyvaluep);\n  if (!yymsg)\n    yymsg = \"Deleting\";\n  YY_SYMBOL_PRINT (yymsg, yytype, yyvaluep, yylocationp);\n\n  YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN\n  YYUSE (yytype);\n  YY_IGNORE_MAYBE_UNINITIALIZED_END\n}\n\n\n\n\n/* The lookahead symbol.  */\nint yychar;\n\n/* The semantic value of the lookahead symbol.  */\nYYSTYPE yylval;\n/* Number of syntax errors so far.  */\nint yynerrs;\n\n\n/*----------.\n| yyparse.  |\n`----------*/\n\nint\nyyparse (void)\n{\n    int yystate;\n    /* Number of tokens to shift before error messages enabled.  */\n    int yyerrstatus;\n\n    /* The stacks and their tools:\n       'yyss': related to states.\n       'yyvs': related to semantic values.\n\n       Refer to the stacks through separate pointers, to allow yyoverflow\n       to reallocate them elsewhere.  */\n\n    /* The state stack.  */\n    yytype_int16 yyssa[YYINITDEPTH];\n    yytype_int16 *yyss;\n    yytype_int16 *yyssp;\n\n    /* The semantic value stack.  */\n    YYSTYPE yyvsa[YYINITDEPTH];\n    YYSTYPE *yyvs;\n    YYSTYPE *yyvsp;\n\n    YYSIZE_T yystacksize;\n\n  int yyn;\n  int yyresult;\n  /* Lookahead token as an internal (translated) token number.  */\n  int yytoken = 0;\n  /* The variables used to return semantic value and location from the\n     action routines.  */\n  YYSTYPE yyval;\n\n#if YYERROR_VERBOSE\n  /* Buffer for error messages, and its allocated size.  */\n  char yymsgbuf[128];\n  char *yymsg = yymsgbuf;\n  YYSIZE_T yymsg_alloc = sizeof yymsgbuf;\n#endif\n\n#define YYPOPSTACK(N)   (yyvsp -= (N), yyssp -= (N))\n\n  /* The number of symbols on the RHS of the reduced rule.\n     Keep to zero when no symbol should be popped.  
*/\n  int yylen = 0;\n\n  yyssp = yyss = yyssa;\n  yyvsp = yyvs = yyvsa;\n  yystacksize = YYINITDEPTH;\n\n  YYDPRINTF ((stderr, \"Starting parse\\n\"));\n\n  yystate = 0;\n  yyerrstatus = 0;\n  yynerrs = 0;\n  yychar = YYEMPTY; /* Cause a token to be read.  */\n  goto yysetstate;\n\n/*------------------------------------------------------------.\n| yynewstate -- Push a new state, which is found in yystate.  |\n`------------------------------------------------------------*/\n yynewstate:\n  /* In all cases, when you get here, the value and location stacks\n     have just been pushed.  So pushing a state here evens the stacks.  */\n  yyssp++;\n\n yysetstate:\n  *yyssp = yystate;\n\n  if (yyss + yystacksize - 1 <= yyssp)\n    {\n      /* Get the current used size of the three stacks, in elements.  */\n      YYSIZE_T yysize = yyssp - yyss + 1;\n\n#ifdef yyoverflow\n      {\n        /* Give user a chance to reallocate the stack.  Use copies of\n           these so that the &'s don't force the real ones into\n           memory.  */\n        YYSTYPE *yyvs1 = yyvs;\n        yytype_int16 *yyss1 = yyss;\n\n        /* Each stack pointer address is followed by the size of the\n           data in use in that stack, in bytes.  This used to be a\n           conditional around just the two extra args, but that might\n           be undefined if yyoverflow is a macro.  */\n        yyoverflow (YY_(\"memory exhausted\"),\n                    &yyss1, yysize * sizeof (*yyssp),\n                    &yyvs1, yysize * sizeof (*yyvsp),\n                    &yystacksize);\n\n        yyss = yyss1;\n        yyvs = yyvs1;\n      }\n#else /* no yyoverflow */\n# ifndef YYSTACK_RELOCATE\n      goto yyexhaustedlab;\n# else\n      /* Extend the stack our own way.  
*/\n      if (YYMAXDEPTH <= yystacksize)\n        goto yyexhaustedlab;\n      yystacksize *= 2;\n      if (YYMAXDEPTH < yystacksize)\n        yystacksize = YYMAXDEPTH;\n\n      {\n        yytype_int16 *yyss1 = yyss;\n        union yyalloc *yyptr =\n          (union yyalloc *) YYSTACK_ALLOC (YYSTACK_BYTES (yystacksize));\n        if (! yyptr)\n          goto yyexhaustedlab;\n        YYSTACK_RELOCATE (yyss_alloc, yyss);\n        YYSTACK_RELOCATE (yyvs_alloc, yyvs);\n#  undef YYSTACK_RELOCATE\n        if (yyss1 != yyssa)\n          YYSTACK_FREE (yyss1);\n      }\n# endif\n#endif /* no yyoverflow */\n\n      yyssp = yyss + yysize - 1;\n      yyvsp = yyvs + yysize - 1;\n\n      YYDPRINTF ((stderr, \"Stack size increased to %lu\\n\",\n                  (unsigned long int) yystacksize));\n\n      if (yyss + yystacksize - 1 <= yyssp)\n        YYABORT;\n    }\n\n  YYDPRINTF ((stderr, \"Entering state %d\\n\", yystate));\n\n  if (yystate == YYFINAL)\n    YYACCEPT;\n\n  goto yybackup;\n\n/*-----------.\n| yybackup.  |\n`-----------*/\nyybackup:\n\n  /* Do appropriate processing given the current state.  Read a\n     lookahead token if we need one and don't already have one.  */\n\n  /* First try to decide what to do without reference to lookahead token.  */\n  yyn = yypact[yystate];\n  if (yypact_value_is_default (yyn))\n    goto yydefault;\n\n  /* Not known => get a lookahead token if don't already have one.  */\n\n  /* YYCHAR is either YYEMPTY or YYEOF or a valid lookahead symbol.  */\n  if (yychar == YYEMPTY)\n    {\n      YYDPRINTF ((stderr, \"Reading a token: \"));\n      yychar = yylex ();\n    }\n\n  if (yychar <= YYEOF)\n    {\n      yychar = yytoken = YYEOF;\n      YYDPRINTF ((stderr, \"Now at end of input.\\n\"));\n    }\n  else\n    {\n      yytoken = YYTRANSLATE (yychar);\n      YY_SYMBOL_PRINT (\"Next token is\", yytoken, &yylval, &yylloc);\n    }\n\n  /* If the proper action on seeing token YYTOKEN is to reduce or to\n     detect an error, take that action.  
*/\n  yyn += yytoken;\n  if (yyn < 0 || YYLAST < yyn || yycheck[yyn] != yytoken)\n    goto yydefault;\n  yyn = yytable[yyn];\n  if (yyn <= 0)\n    {\n      if (yytable_value_is_error (yyn))\n        goto yyerrlab;\n      yyn = -yyn;\n      goto yyreduce;\n    }\n\n  /* Count tokens shifted since error; after three, turn off error\n     status.  */\n  if (yyerrstatus)\n    yyerrstatus--;\n\n  /* Shift the lookahead token.  */\n  YY_SYMBOL_PRINT (\"Shifting\", yytoken, &yylval, &yylloc);\n\n  /* Discard the shifted token.  */\n  yychar = YYEMPTY;\n\n  yystate = yyn;\n  YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN\n  *++yyvsp = yylval;\n  YY_IGNORE_MAYBE_UNINITIALIZED_END\n\n  goto yynewstate;\n\n\n/*-----------------------------------------------------------.\n| yydefault -- do the default action for the current state.  |\n`-----------------------------------------------------------*/\nyydefault:\n  yyn = yydefact[yystate];\n  if (yyn == 0)\n    goto yyerrlab;\n  goto yyreduce;\n\n\n/*-----------------------------.\n| yyreduce -- Do a reduction.  |\n`-----------------------------*/\nyyreduce:\n  /* yyn is the number of a rule to reduce with.  */\n  yylen = yyr2[yyn];\n\n  /* If YYLEN is nonzero, implement the default value of the action:\n     '$$ = $1'.\n\n     Otherwise, the following line sets YYVAL to garbage.\n     This behavior is undocumented and Bison\n     users should not rely upon it.  Assigning to YYVAL\n     unconditionally makes the parser a bit smaller, and it avoids a\n     GCC warning that YYVAL may be used uninitialized.  
*/\n  yyval = yyvsp[1-yylen];\n\n\n  YY_REDUCE_PRINT (yyn);\n  switch (yyn)\n    {\n        case 2:\n#line 101 \"conf_yacc.y\" /* yacc.c:1646  */\n    {DEBUG_YACC(stderr,(yyvsp[0].list));program_result=(yyvsp[0].list);}\n#line 1370 \"conf_yacc.c\" /* yacc.c:1646  */\n    break;\n\n  case 3:\n#line 105 \"conf_yacc.y\" /* yacc.c:1646  */\n    {rh_config_AddItem((yyvsp[0].list),(yyvsp[-1].item));(yyval.list)=(yyvsp[0].list);}\n#line 1376 \"conf_yacc.c\" /* yacc.c:1646  */\n    break;\n\n  case 4:\n#line 106 \"conf_yacc.y\" /* yacc.c:1646  */\n    {(yyval.list)=rh_config_CreateItemsList();}\n#line 1382 \"conf_yacc.c\" /* yacc.c:1646  */\n    break;\n\n  case 5:\n#line 110 \"conf_yacc.y\" /* yacc.c:1646  */\n    {(yyval.item)=rh_config_CreateBlock((yyvsp[-4].str_val),(yyvsp[-3].str_val),(yyvsp[-1].list));}\n#line 1388 \"conf_yacc.c\" /* yacc.c:1646  */\n    break;\n\n  case 6:\n#line 111 \"conf_yacc.y\" /* yacc.c:1646  */\n    {(yyval.item)=rh_config_CreateBlock((yyvsp[-3].str_val),NULL,(yyvsp[-1].list));}\n#line 1394 \"conf_yacc.c\" /* yacc.c:1646  */\n    break;\n\n  case 7:\n#line 115 \"conf_yacc.y\" /* yacc.c:1646  */\n    {rh_config_AddItem((yyvsp[0].list),(yyvsp[-1].item));(yyval.list)=(yyvsp[0].list);}\n#line 1400 \"conf_yacc.c\" /* yacc.c:1646  */\n    break;\n\n  case 8:\n#line 116 \"conf_yacc.y\" /* yacc.c:1646  */\n    {(yyval.list)=rh_config_CreateItemsList();}\n#line 1406 \"conf_yacc.c\" /* yacc.c:1646  */\n    break;\n\n  case 11:\n#line 125 \"conf_yacc.y\" /* yacc.c:1646  */\n    {strcpy((yyval.str_val),(yyvsp[0].str_val));}\n#line 1412 \"conf_yacc.c\" /* yacc.c:1646  */\n    break;\n\n  case 12:\n#line 126 \"conf_yacc.y\" /* yacc.c:1646  */\n    {strcpy((yyval.str_val),(yyvsp[0].str_val));}\n#line 1418 \"conf_yacc.c\" /* yacc.c:1646  */\n    break;\n\n  case 13:\n#line 127 \"conf_yacc.y\" /* yacc.c:1646  */\n    {rh_config_resolv_var((yyval.str_val),(yyvsp[0].str_val));}\n#line 1424 \"conf_yacc.c\" /* yacc.c:1646  */\n    break;\n\n  case 14:\n#line 131 
\"conf_yacc.y\" /* yacc.c:1646  */\n    {(yyval.item)=rh_config_CreateAffect((yyvsp[-2].str_val), (yyvsp[0].str_val));}\n#line 1430 \"conf_yacc.c\" /* yacc.c:1646  */\n    break;\n\n  case 15:\n#line 134 \"conf_yacc.y\" /* yacc.c:1646  */\n    {rh_config_SetArglist( (yyvsp[-4].item), (yyvsp[-2].arg_list) ); (yyval.item)=(yyvsp[-4].item);}\n#line 1436 \"conf_yacc.c\" /* yacc.c:1646  */\n    break;\n\n  case 16:\n#line 135 \"conf_yacc.y\" /* yacc.c:1646  */\n    {(yyval.item)=(yyvsp[-1].item);}\n#line 1442 \"conf_yacc.c\" /* yacc.c:1646  */\n    break;\n\n  case 17:\n#line 139 \"conf_yacc.y\" /* yacc.c:1646  */\n    {(yyval.item)=rh_config_CreateKeyValueExpr((yyvsp[-2].str_val),OP_EQUAL, (yyvsp[0].str_val));}\n#line 1448 \"conf_yacc.c\" /* yacc.c:1646  */\n    break;\n\n  case 18:\n#line 140 \"conf_yacc.y\" /* yacc.c:1646  */\n    {(yyval.item)=rh_config_CreateKeyValueExpr((yyvsp[-2].str_val),OP_DIFF, (yyvsp[0].str_val));}\n#line 1454 \"conf_yacc.c\" /* yacc.c:1646  */\n    break;\n\n  case 19:\n#line 141 \"conf_yacc.y\" /* yacc.c:1646  */\n    {(yyval.item)=rh_config_CreateKeyValueExpr((yyvsp[-2].str_val),OP_GT, (yyvsp[0].str_val));}\n#line 1460 \"conf_yacc.c\" /* yacc.c:1646  */\n    break;\n\n  case 20:\n#line 142 \"conf_yacc.y\" /* yacc.c:1646  */\n    {(yyval.item)=rh_config_CreateKeyValueExpr((yyvsp[-2].str_val),OP_GT_EQ, (yyvsp[0].str_val));}\n#line 1466 \"conf_yacc.c\" /* yacc.c:1646  */\n    break;\n\n  case 21:\n#line 143 \"conf_yacc.y\" /* yacc.c:1646  */\n    {(yyval.item)=rh_config_CreateKeyValueExpr((yyvsp[-2].str_val),OP_LT, (yyvsp[0].str_val));}\n#line 1472 \"conf_yacc.c\" /* yacc.c:1646  */\n    break;\n\n  case 22:\n#line 144 \"conf_yacc.y\" /* yacc.c:1646  */\n    {(yyval.item)=rh_config_CreateKeyValueExpr((yyvsp[-2].str_val),OP_LT_EQ, (yyvsp[0].str_val));}\n#line 1478 \"conf_yacc.c\" /* yacc.c:1646  */\n    break;\n\n  case 23:\n#line 148 \"conf_yacc.y\" /* yacc.c:1646  */\n    {rh_config_AddArg( (yyvsp[-2].arg_list), (yyvsp[0].str_val) ); 
(yyval.arg_list)=(yyvsp[-2].arg_list);}\n#line 1484 \"conf_yacc.c\" /* yacc.c:1646  */\n    break;\n\n  case 24:\n#line 149 \"conf_yacc.y\" /* yacc.c:1646  */\n    {(yyval.arg_list)=rh_config_CreateArgList(); rh_config_AddArg((yyval.arg_list),(yyvsp[0].str_val));}\n#line 1490 \"conf_yacc.c\" /* yacc.c:1646  */\n    break;\n\n  case 25:\n#line 153 \"conf_yacc.y\" /* yacc.c:1646  */\n    {rh_config_SetArglist( (yyvsp[-3].item), (yyvsp[-1].arg_list) ); (yyval.item)=(yyvsp[-3].item);}\n#line 1496 \"conf_yacc.c\" /* yacc.c:1646  */\n    break;\n\n  case 26:\n#line 154 \"conf_yacc.y\" /* yacc.c:1646  */\n    {(yyval.item)=(yyvsp[0].item);}\n#line 1502 \"conf_yacc.c\" /* yacc.c:1646  */\n    break;\n\n  case 27:\n#line 155 \"conf_yacc.y\" /* yacc.c:1646  */\n    { (yyval.item)=rh_config_CreateKeyValueExpr((yyvsp[-3].str_val),OP_CMD, (yyvsp[-1].str_val));  }\n#line 1508 \"conf_yacc.c\" /* yacc.c:1646  */\n    break;\n\n  case 28:\n#line 159 \"conf_yacc.y\" /* yacc.c:1646  */\n    {(yyval.item)=(yyvsp[0].item);}\n#line 1514 \"conf_yacc.c\" /* yacc.c:1646  */\n    break;\n\n  case 29:\n#line 160 \"conf_yacc.y\" /* yacc.c:1646  */\n    { (yyval.item)=rh_config_CreateBoolExpr_Unary( BOOL_OP_NOT, (yyvsp[0].item) ); }\n#line 1520 \"conf_yacc.c\" /* yacc.c:1646  */\n    break;\n\n  case 30:\n#line 161 \"conf_yacc.y\" /* yacc.c:1646  */\n    { (yyval.item)=rh_config_CreateBoolExpr_Unary( BOOL_OP_NOT, (yyvsp[-1].item) ); }\n#line 1526 \"conf_yacc.c\" /* yacc.c:1646  */\n    break;\n\n  case 31:\n#line 162 \"conf_yacc.y\" /* yacc.c:1646  */\n    { (yyval.item)=(yyvsp[-1].item); }\n#line 1532 \"conf_yacc.c\" /* yacc.c:1646  */\n    break;\n\n  case 32:\n#line 163 \"conf_yacc.y\" /* yacc.c:1646  */\n    { (yyval.item)=rh_config_CreateBoolExpr_Binary( BOOL_OP_AND, (yyvsp[-2].item), (yyvsp[0].item) ); }\n#line 1538 \"conf_yacc.c\" /* yacc.c:1646  */\n    break;\n\n  case 33:\n#line 164 \"conf_yacc.y\" /* yacc.c:1646  */\n    { (yyval.item)=rh_config_CreateBoolExpr_Binary( BOOL_OP_OR, 
(yyvsp[-2].item), (yyvsp[0].item) ); }\n#line 1544 \"conf_yacc.c\" /* yacc.c:1646  */\n    break;\n\n  case 34:\n#line 168 \"conf_yacc.y\" /* yacc.c:1646  */\n    { (yyval.item)=(yyvsp[-1].item); }\n#line 1550 \"conf_yacc.c\" /* yacc.c:1646  */\n    break;\n\n  case 35:\n#line 169 \"conf_yacc.y\" /* yacc.c:1646  */\n    { (yyval.item)=rh_config_CreateSet_Unary( SET_OP_NOT, (yyvsp[0].item) ); }\n#line 1556 \"conf_yacc.c\" /* yacc.c:1646  */\n    break;\n\n  case 36:\n#line 170 \"conf_yacc.y\" /* yacc.c:1646  */\n    { (yyval.item)=rh_config_CreateSet_Binary( SET_OP_UNION, (yyvsp[-2].item), (yyvsp[0].item) ); }\n#line 1562 \"conf_yacc.c\" /* yacc.c:1646  */\n    break;\n\n  case 37:\n#line 171 \"conf_yacc.y\" /* yacc.c:1646  */\n    { (yyval.item)=rh_config_CreateSet_Binary( SET_OP_INTER, (yyvsp[-2].item), (yyvsp[0].item) ); }\n#line 1568 \"conf_yacc.c\" /* yacc.c:1646  */\n    break;\n\n  case 38:\n#line 172 \"conf_yacc.y\" /* yacc.c:1646  */\n    { (yyval.item)=rh_config_CreateSet_Singleton( (yyvsp[0].str_val) ); }\n#line 1574 \"conf_yacc.c\" /* yacc.c:1646  */\n    break;\n\n  case 39:\n#line 176 \"conf_yacc.y\" /* yacc.c:1646  */\n    {(yyval.item)=rh_config_CreateBlock((yyvsp[-4].str_val),(yyvsp[-3].str_val),(yyvsp[-1].list));}\n#line 1580 \"conf_yacc.c\" /* yacc.c:1646  */\n    break;\n\n  case 40:\n#line 177 \"conf_yacc.y\" /* yacc.c:1646  */\n    {(yyval.item)=rh_config_CreateBoolExpr((yyvsp[-4].str_val),(yyvsp[-3].str_val),(yyvsp[-1].item));}\n#line 1586 \"conf_yacc.c\" /* yacc.c:1646  */\n    break;\n\n  case 41:\n#line 178 \"conf_yacc.y\" /* yacc.c:1646  */\n    {(yyval.item)=rh_config_CreateBlock((yyvsp[-3].str_val),NULL,(yyvsp[-1].list));}\n#line 1592 \"conf_yacc.c\" /* yacc.c:1646  */\n    break;\n\n  case 42:\n#line 179 \"conf_yacc.y\" /* yacc.c:1646  */\n    {(yyval.item)=rh_config_CreateBoolExpr((yyvsp[-3].str_val),NULL,(yyvsp[-1].item));}\n#line 1598 \"conf_yacc.c\" /* yacc.c:1646  */\n    break;\n\n  case 43:\n#line 180 \"conf_yacc.y\" /* 
yacc.c:1646  */\n    {(yyval.item)=rh_config_CreateSet((yyvsp[-3].str_val),NULL,(yyvsp[-1].item));}\n#line 1604 \"conf_yacc.c\" /* yacc.c:1646  */\n    break;\n\n\n#line 1608 \"conf_yacc.c\" /* yacc.c:1646  */\n      default: break;\n    }\n  /* User semantic actions sometimes alter yychar, and that requires\n     that yytoken be updated with the new translation.  We take the\n     approach of translating immediately before every use of yytoken.\n     One alternative is translating here after every semantic action,\n     but that translation would be missed if the semantic action invokes\n     YYABORT, YYACCEPT, or YYERROR immediately after altering yychar or\n     if it invokes YYBACKUP.  In the case of YYABORT or YYACCEPT, an\n     incorrect destructor might then be invoked immediately.  In the\n     case of YYERROR or YYBACKUP, subsequent parser actions might lead\n     to an incorrect destructor call or verbose syntax error message\n     before the lookahead is translated.  */\n  YY_SYMBOL_PRINT (\"-> $$ =\", yyr1[yyn], &yyval, &yyloc);\n\n  YYPOPSTACK (yylen);\n  yylen = 0;\n  YY_STACK_PRINT (yyss, yyssp);\n\n  *++yyvsp = yyval;\n\n  /* Now 'shift' the result of the reduction.  Determine what state\n     that goes to, based on the state we popped back to and the rule\n     number reduced by.  */\n\n  yyn = yyr1[yyn];\n\n  yystate = yypgoto[yyn - YYNTOKENS] + *yyssp;\n  if (0 <= yystate && yystate <= YYLAST && yycheck[yystate] == *yyssp)\n    yystate = yytable[yystate];\n  else\n    yystate = yydefgoto[yyn - YYNTOKENS];\n\n  goto yynewstate;\n\n\n/*--------------------------------------.\n| yyerrlab -- here on detecting error.  |\n`--------------------------------------*/\nyyerrlab:\n  /* Make sure we have latest lookahead translation.  See comments at\n     user semantic actions for why this is necessary.  */\n  yytoken = yychar == YYEMPTY ? YYEMPTY : YYTRANSLATE (yychar);\n\n  /* If not already recovering from an error, report this error.  
*/\n  if (!yyerrstatus)\n    {\n      ++yynerrs;\n#if ! YYERROR_VERBOSE\n      yyerror (YY_(\"syntax error\"));\n#else\n# define YYSYNTAX_ERROR yysyntax_error (&yymsg_alloc, &yymsg, \\\n                                        yyssp, yytoken)\n      {\n        char const *yymsgp = YY_(\"syntax error\");\n        int yysyntax_error_status;\n        yysyntax_error_status = YYSYNTAX_ERROR;\n        if (yysyntax_error_status == 0)\n          yymsgp = yymsg;\n        else if (yysyntax_error_status == 1)\n          {\n            if (yymsg != yymsgbuf)\n              YYSTACK_FREE (yymsg);\n            yymsg = (char *) YYSTACK_ALLOC (yymsg_alloc);\n            if (!yymsg)\n              {\n                yymsg = yymsgbuf;\n                yymsg_alloc = sizeof yymsgbuf;\n                yysyntax_error_status = 2;\n              }\n            else\n              {\n                yysyntax_error_status = YYSYNTAX_ERROR;\n                yymsgp = yymsg;\n              }\n          }\n        yyerror (yymsgp);\n        if (yysyntax_error_status == 2)\n          goto yyexhaustedlab;\n      }\n# undef YYSYNTAX_ERROR\n#endif\n    }\n\n\n\n  if (yyerrstatus == 3)\n    {\n      /* If just tried and failed to reuse lookahead token after an\n         error, discard it.  */\n\n      if (yychar <= YYEOF)\n        {\n          /* Return failure if at end of input.  */\n          if (yychar == YYEOF)\n            YYABORT;\n        }\n      else\n        {\n          yydestruct (\"Error: discarding\",\n                      yytoken, &yylval);\n          yychar = YYEMPTY;\n        }\n    }\n\n  /* Else will try to reuse lookahead token after shifting the error\n     token.  */\n  goto yyerrlab1;\n\n\n/*---------------------------------------------------.\n| yyerrorlab -- error raised explicitly by YYERROR.  
|\n`---------------------------------------------------*/\nyyerrorlab:\n\n  /* Pacify compilers like GCC when the user code never invokes\n     YYERROR and the label yyerrorlab therefore never appears in user\n     code.  */\n  if (/*CONSTCOND*/ 0)\n     goto yyerrorlab;\n\n  /* Do not reclaim the symbols of the rule whose action triggered\n     this YYERROR.  */\n  YYPOPSTACK (yylen);\n  yylen = 0;\n  YY_STACK_PRINT (yyss, yyssp);\n  yystate = *yyssp;\n  goto yyerrlab1;\n\n\n/*-------------------------------------------------------------.\n| yyerrlab1 -- common code for both syntax error and YYERROR.  |\n`-------------------------------------------------------------*/\nyyerrlab1:\n  yyerrstatus = 3;      /* Each real token shifted decrements this.  */\n\n  for (;;)\n    {\n      yyn = yypact[yystate];\n      if (!yypact_value_is_default (yyn))\n        {\n          yyn += YYTERROR;\n          if (0 <= yyn && yyn <= YYLAST && yycheck[yyn] == YYTERROR)\n            {\n              yyn = yytable[yyn];\n              if (0 < yyn)\n                break;\n            }\n        }\n\n      /* Pop the current state because it cannot handle the error token.  */\n      if (yyssp == yyss)\n        YYABORT;\n\n\n      yydestruct (\"Error: popping\",\n                  yystos[yystate], yyvsp);\n      YYPOPSTACK (1);\n      yystate = *yyssp;\n      YY_STACK_PRINT (yyss, yyssp);\n    }\n\n  YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN\n  *++yyvsp = yylval;\n  YY_IGNORE_MAYBE_UNINITIALIZED_END\n\n\n  /* Shift the error token.  */\n  YY_SYMBOL_PRINT (\"Shifting\", yystos[yyn], yyvsp, yylsp);\n\n  yystate = yyn;\n  goto yynewstate;\n\n\n/*-------------------------------------.\n| yyacceptlab -- YYACCEPT comes here.  |\n`-------------------------------------*/\nyyacceptlab:\n  yyresult = 0;\n  goto yyreturn;\n\n/*-----------------------------------.\n| yyabortlab -- YYABORT comes here.  
|\n`-----------------------------------*/\nyyabortlab:\n  yyresult = 1;\n  goto yyreturn;\n\n#if !defined yyoverflow || YYERROR_VERBOSE\n/*-------------------------------------------------.\n| yyexhaustedlab -- memory exhaustion comes here.  |\n`-------------------------------------------------*/\nyyexhaustedlab:\n  yyerror (YY_(\"memory exhausted\"));\n  yyresult = 2;\n  /* Fall through.  */\n#endif\n\nyyreturn:\n  if (yychar != YYEMPTY)\n    {\n      /* Make sure we have latest lookahead translation.  See comments at\n         user semantic actions for why this is necessary.  */\n      yytoken = YYTRANSLATE (yychar);\n      yydestruct (\"Cleanup: discarding lookahead\",\n                  yytoken, &yylval);\n    }\n  /* Do not reclaim the symbols of the rule whose action triggered\n     this YYABORT or YYACCEPT.  */\n  YYPOPSTACK (yylen);\n  YY_STACK_PRINT (yyss, yyssp);\n  while (yyssp != yyss)\n    {\n      yydestruct (\"Cleanup: popping\",\n                  yystos[*yyssp], yyvsp);\n      YYPOPSTACK (1);\n    }\n#ifndef yyoverflow\n  if (yyss != yyssa)\n    YYSTACK_FREE (yyss);\n#endif\n#if YYERROR_VERBOSE\n  if (yymsg != yymsgbuf)\n    YYSTACK_FREE (yymsg);\n#endif\n  return yyresult;\n}\n#line 184 \"conf_yacc.y\" /* yacc.c:1906  */\n\n\nvoid yyerror(const char *s)\n{\n    int rc;\n    if (local_errormsg[0] && s[0])\n        rc = snprintf(extern_errormsg, 1024, \"%s (%s) at '%s' line %d in '%s'\",\n                      local_errormsg, s, (yytext?yytext:\"???\"), yylineno,\n                      current_file->str);\n    else if (local_errormsg[0])\n        rc = snprintf(extern_errormsg, 1024, \"%s at '%s' line %d in '%s'\",\n                      local_errormsg, (yytext?yytext:\"???\"), yylineno,\n                      current_file->str);\n    else if (s[0])\n        rc = snprintf(extern_errormsg, 1024, \"%s at '%s' line %d in '%s'\",\n                      s, (yytext?yytext:\"???\"), yylineno, current_file->str);\n    else\n        rc = 
snprintf(extern_errormsg, 1024,\n                      \"Syntax error at '%s' line %d in '%s'\",\n                      (yytext?yytext:\"???\"), yylineno, current_file->str);\n\n    if (rc >= sizeof(extern_errormsg)) {\n        snprintf(extern_errormsg + sizeof(extern_errormsg) - 4, 4, \"...\");\n    }\n}\n\nvoid set_error(const char * s)\n{\n\trh_strncpy(local_errormsg, s, 1024);\n}\n"
  },
  {
    "path": "src/cfg_parsing/conf_yacc.h",
    "content": "/* A Bison parser, made by GNU Bison 3.0.4.  */\n\n/* Bison interface for Yacc-like parsers in C\n\n   Copyright (C) 1984, 1989-1990, 2000-2015 Free Software Foundation, Inc.\n\n   This program is free software: you can redistribute it and/or modify\n   it under the terms of the GNU General Public License as published by\n   the Free Software Foundation, either version 3 of the License, or\n   (at your option) any later version.\n\n   This program is distributed in the hope that it will be useful,\n   but WITHOUT ANY WARRANTY; without even the implied warranty of\n   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n   GNU General Public License for more details.\n\n   You should have received a copy of the GNU General Public License\n   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */\n\n/* As a special exception, you may create a larger work that contains\n   part or all of the Bison parser skeleton and distribute that work\n   under terms of your choice, so long as that work isn't itself a\n   parser generator using the skeleton or a modified version thereof\n   as a parser skeleton.  Alternatively, if you modify or redistribute\n   the parser skeleton itself, you may (at your option) remove this\n   special exception, which will cause the skeleton and the resulting\n   Bison output files to be licensed under the GNU General Public\n   License without this special exception.\n\n   This special exception was added by the Free Software Foundation in\n   version 2.2 of Bison.  */\n\n#ifndef YY_YY_CONF_YACC_H_INCLUDED\n# define YY_YY_CONF_YACC_H_INCLUDED\n/* Debug traces.  */\n#ifndef YYDEBUG\n# define YYDEBUG 0\n#endif\n#if YYDEBUG\nextern int yydebug;\n#endif\n\n/* Token type.  
*/\n#ifndef YYTOKENTYPE\n# define YYTOKENTYPE\n  enum yytokentype\n  {\n    _ERROR_ = 258,\n    BEGIN_BLOCK = 259,\n    END_BLOCK = 260,\n    END_AFFECT = 261,\n    BEGIN_SUB_BLOCK = 262,\n    END_SUB_BLOCK = 263,\n    BEGIN_PARENTHESIS = 264,\n    END_PARENTHESIS = 265,\n    VALUE_SEPARATOR = 266,\n    AFFECT = 267,\n    EQUAL = 268,\n    DIFF = 269,\n    GT = 270,\n    GT_EQ = 271,\n    LT = 272,\n    LT_EQ = 273,\n    AND = 274,\n    OR = 275,\n    NOT = 276,\n    UNION = 277,\n    INTER = 278,\n    IDENTIFIER = 279,\n    NON_IDENTIFIER_VALUE = 280,\n    ENV_VAR = 281\n  };\n#endif\n/* Tokens.  */\n#define _ERROR_ 258\n#define BEGIN_BLOCK 259\n#define END_BLOCK 260\n#define END_AFFECT 261\n#define BEGIN_SUB_BLOCK 262\n#define END_SUB_BLOCK 263\n#define BEGIN_PARENTHESIS 264\n#define END_PARENTHESIS 265\n#define VALUE_SEPARATOR 266\n#define AFFECT 267\n#define EQUAL 268\n#define DIFF 269\n#define GT 270\n#define GT_EQ 271\n#define LT 272\n#define LT_EQ 273\n#define AND 274\n#define OR 275\n#define NOT 276\n#define UNION 277\n#define INTER 278\n#define IDENTIFIER 279\n#define NON_IDENTIFIER_VALUE 280\n#define ENV_VAR 281\n\n/* Value type.  */\n#if ! defined YYSTYPE && ! defined YYSTYPE_IS_DECLARED\n\nunion YYSTYPE\n{\n#line 52 \"conf_yacc.y\" /* yacc.c:1909  */\n\n    char         str_val[MAXSTRLEN];\n    list_items              *  list;\n    generic_item            *  item;\n    arg_list_t\t            *  arg_list;\n\n#line 113 \"conf_yacc.h\" /* yacc.c:1909  */\n};\n\ntypedef union YYSTYPE YYSTYPE;\n# define YYSTYPE_IS_TRIVIAL 1\n# define YYSTYPE_IS_DECLARED 1\n#endif\n\n\nextern YYSTYPE yylval;\n\nint yyparse (void);\n\n#endif /* !YY_YY_CONF_YACC_H_INCLUDED  */\n"
  },
  {
    "path": "src/cfg_parsing/conf_yacc.y",
    "content": "%{\n\n/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2004-2009 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n\n#include \"config.h\"\n#include \"analyze.h\"\n\n#include <stdio.h>\n\n#if HAVE_STRING_H\n#   include <string.h>\n#endif\n\n    int yylex(void);\n    void yyerror(const char *);\n    extern int yylineno;\n    extern char * yytext;\n\n    list_items * program_result=NULL;\n\n\t/* stock le message d'erreur donne par le lexer */\n    char local_errormsg[1024]=\"\";\n\n    /* stock le message d'erreur complet */\n    char extern_errormsg[1024]=\"\";\n\n#ifdef _DEBUG_PARSING\n#define DEBUG_YACC   rh_config_print_list\n#else\n/* do nothing */\nstatic void DEBUG_YACC( FILE * output, list_items * list ) {return ;}\n#endif\n\n\n%}\n\n%error-verbose\n\n%union {\n    char         str_val[MAXSTRLEN];\n    list_items              *  list;\n    generic_item            *  item;\n    arg_list_t\t            *  arg_list;\n};\n\n%token _ERROR_\n%token BEGIN_BLOCK\n%token END_BLOCK\n%token END_AFFECT\n%token BEGIN_SUB_BLOCK\n%token END_SUB_BLOCK\n%token BEGIN_PARENTHESIS\n%token END_PARENTHESIS\n%token VALUE_SEPARATOR\n%token AFFECT\n%token EQUAL\n%token DIFF\n%token GT\n%token GT_EQ\n%token LT\n%token LT_EQ\n%token AND\n%token OR\n%token NOT\n%token UNION\n%token INTER\n%token <str_val> IDENTIFIER\n%token <str_val> NON_IDENTIFIER_VALUE\n%token <str_val> ENV_VAR\n\n%type <str_val> value\n%type <list> listblock\n%type <list> listitems\n%type <item> block\n%type <item> definition\n%type <item> expression\n%type <item> subblock\n%type <item> key_value\n%type <item> extended_key_value\n%type <item> affect\n%type <item> 
extended_affect\n%type <item> set\n%type <arg_list> arglist\n\n\n%%\n\nprogram: listblock {DEBUG_YACC(stderr,$1);program_result=$1;}\n    ;\n\nlistblock:\n    block listblock {rh_config_AddItem($2,$1);$$=$2;}\n    | {$$=rh_config_CreateItemsList();}\n    ;\n\nblock:\n    IDENTIFIER IDENTIFIER BEGIN_BLOCK listitems END_BLOCK {$$=rh_config_CreateBlock($1,$2,$4);}\n    | IDENTIFIER BEGIN_BLOCK listitems END_BLOCK {$$=rh_config_CreateBlock($1,NULL,$3);}\n    ;\n\nlistitems:\n    definition listitems   {rh_config_AddItem($2,$1);$$=$2;}\n    |                      {$$=rh_config_CreateItemsList();}\n    ;\n\ndefinition:\n    extended_affect\n    | subblock\n    ;\n\nvalue:\n\tIDENTIFIER\t{strcpy($$,$1);}\n\t| NON_IDENTIFIER_VALUE {strcpy($$,$1);}\n    | ENV_VAR {rh_config_resolv_var($$,$1);}\n\t;\n\naffect:\n    IDENTIFIER AFFECT value {$$=rh_config_CreateAffect($1, $3);}\n\nextended_affect:\n    affect BEGIN_PARENTHESIS arglist END_PARENTHESIS END_AFFECT {rh_config_SetArglist( $1, $3 ); $$=$1;}\n    | affect END_AFFECT {$$=$1;}\n    ;\n\nkey_value:\n    IDENTIFIER EQUAL value {$$=rh_config_CreateKeyValueExpr($1,OP_EQUAL, $3);}\n    | IDENTIFIER DIFF value {$$=rh_config_CreateKeyValueExpr($1,OP_DIFF, $3);}\n    | IDENTIFIER GT value {$$=rh_config_CreateKeyValueExpr($1,OP_GT, $3);}\n    | IDENTIFIER GT_EQ value {$$=rh_config_CreateKeyValueExpr($1,OP_GT_EQ, $3);}\n    | IDENTIFIER LT value {$$=rh_config_CreateKeyValueExpr($1,OP_LT, $3);}\n    | IDENTIFIER LT_EQ value {$$=rh_config_CreateKeyValueExpr($1,OP_LT_EQ, $3);}\n    ;\n\narglist:\n\targlist VALUE_SEPARATOR value {rh_config_AddArg( $1, $3 ); $$=$1;}\n\t| value {$$=rh_config_CreateArgList(); rh_config_AddArg($$,$1);}\n\t;\n\nextended_key_value:\n    key_value BEGIN_PARENTHESIS arglist END_PARENTHESIS {rh_config_SetArglist( $1, $3 ); $$=$1;}\n    | key_value {$$=$1;}\n    | /*function definition*/ IDENTIFIER BEGIN_PARENTHESIS value END_PARENTHESIS { $$=rh_config_CreateKeyValueExpr($1,OP_CMD, $3);  }\n    
;\n\nexpression:\n    extended_key_value {$$=$1;}\n    | NOT extended_key_value { $$=rh_config_CreateBoolExpr_Unary( BOOL_OP_NOT, $2 ); }\n    | NOT BEGIN_PARENTHESIS expression END_PARENTHESIS { $$=rh_config_CreateBoolExpr_Unary( BOOL_OP_NOT, $3 ); }\n    | BEGIN_PARENTHESIS expression END_PARENTHESIS { $$=$2; }\n    | expression AND expression { $$=rh_config_CreateBoolExpr_Binary( BOOL_OP_AND, $1, $3 ); }\n    | expression OR expression { $$=rh_config_CreateBoolExpr_Binary( BOOL_OP_OR, $1, $3 ); }\n    ;\n\nset:\n    BEGIN_PARENTHESIS set END_PARENTHESIS { $$=$2; }\n    | NOT set       { $$=rh_config_CreateSet_Unary( SET_OP_NOT, $2 ); }\n    | set UNION set { $$=rh_config_CreateSet_Binary( SET_OP_UNION, $1, $3 ); }\n    | set INTER set { $$=rh_config_CreateSet_Binary( SET_OP_INTER, $1, $3 ); }\n    | IDENTIFIER    { $$=rh_config_CreateSet_Singleton( $1 ); }\n    ;\n\nsubblock:\n    IDENTIFIER IDENTIFIER BEGIN_SUB_BLOCK listitems END_SUB_BLOCK {$$=rh_config_CreateBlock($1,$2,$4);}\n    | IDENTIFIER IDENTIFIER BEGIN_SUB_BLOCK expression END_SUB_BLOCK {$$=rh_config_CreateBoolExpr($1,$2,$4);}\n    | IDENTIFIER BEGIN_SUB_BLOCK listitems END_SUB_BLOCK {$$=rh_config_CreateBlock($1,NULL,$3);}\n    | IDENTIFIER BEGIN_SUB_BLOCK expression END_SUB_BLOCK {$$=rh_config_CreateBoolExpr($1,NULL,$3);}\n    | IDENTIFIER BEGIN_SUB_BLOCK set END_SUB_BLOCK {$$=rh_config_CreateSet($1,NULL,$3);}\n    ;\n\n\n%%\n\nvoid yyerror(const char *s)\n{\n    int rc;\n    if (local_errormsg[0] && s[0])\n        rc = snprintf(extern_errormsg, 1024, \"%s (%s) at '%s' line %d in '%s'\",\n                      local_errormsg, s, (yytext?yytext:\"???\"), yylineno,\n                      current_file->str);\n    else if (local_errormsg[0])\n        rc = snprintf(extern_errormsg, 1024, \"%s at '%s' line %d in '%s'\",\n                      local_errormsg, (yytext?yytext:\"???\"), yylineno,\n                      current_file->str);\n    else if (s[0])\n        rc = snprintf(extern_errormsg, 1024, \"%s 
at '%s' line %d in '%s'\",\n                      s, (yytext?yytext:\"???\"), yylineno, current_file->str);\n    else\n        rc = snprintf(extern_errormsg, 1024,\n                      \"Syntax error at '%s' line %d in '%s'\",\n                      (yytext?yytext:\"???\"), yylineno, current_file->str);\n\n    if (rc >= sizeof(extern_errormsg)) {\n        snprintf(extern_errormsg + sizeof(extern_errormsg) - 4, 4, \"...\");\n    }\n}\n\nvoid set_error(const char * s)\n{\n\trh_strncpy(local_errormsg, s, 1024);\n}\n"
  },
  {
    "path": "src/cfg_parsing/config_parsing.c",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2004-2009 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n#ifdef HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n\n#include \"config_parsing.h\"\n#include \"analyze.h\"\n#include <stdio.h>\n#include <errno.h>\n\n#if HAVE_STRING_H\n#include <string.h>\n#endif\n\n/* case unsensitivity */\n#define STRNCMP   strncasecmp\n\ntypedef struct config_struct_t {\n    /* Syntax tree */\n    list_items *syntax_tree;\n\n} config_struct_t;\n\n/***************************************\n * ACCESS TO EXTERNAL VARIABLES\n ***************************************/\n\n/* lexer input file */\nextern FILE *yyin;\n\n/* main parsing function */\nint yyparse();\n\n/* parser reinitialization */\nvoid yyreset(void);\n\n/* returns the file currently parsed (for tracing in case of error) */\nvoid yy_set_current_file(char *file);\n\n/* free parser resources */\nint yylex_destroy(void);\n\n/* the global program structure, set after parsing */\nextern list_items *program_result;\n\n/* error message */\nextern char extern_errormsg[1024];\n\n/* config_ParseFile:\n * Reads the content of a configuration file and\n * stores it in a memory structure.\n */\nconfig_file_t rh_config_ParseFile(char *file_path)\n{\n    FILE *configuration_file;\n    config_struct_t *output_struct;\n\n    /* Inits error message */\n\n    extern_errormsg[0] = '\\0';\n\n    /* Sanity check */\n\n    if (!file_path || !file_path[0]) {\n        strcpy(extern_errormsg, \"Invalid arguments\");\n        return NULL;\n    }\n\n    /* First, opens the file. 
*/\n\n    configuration_file = fopen(file_path, \"r\");\n\n    if (!configuration_file) {\n        strcpy(extern_errormsg, strerror(errno));\n        return NULL;\n    }\n\n    /* Then, parse the file. */\n    program_result = NULL;\n\n    yyreset();\n\n    yy_set_current_file(file_path);\n    yyin = configuration_file;\n\n    if (yyparse()) {\n        yylex_destroy();\n        fclose(configuration_file);\n        return NULL;\n    }\n\n    /* Finally, build the output struct. */\n\n    output_struct = (config_struct_t *)malloc(sizeof(config_struct_t));\n\n    if (!output_struct) {\n        strcpy(extern_errormsg, strerror(errno));\n        yylex_destroy();\n        fclose(configuration_file);\n        return NULL;\n    }\n\n    output_struct->syntax_tree = program_result;\n\n    yylex_destroy();\n    fclose(configuration_file);\n    return (config_file_t) output_struct;\n}\n\n/* If config_ParseFile returns a NULL pointer,\n * config_GetErrorMsg returns a detailed message\n * to indicate the reason for this error.\n */\nchar *rh_config_GetErrorMsg(void)\n{\n    return extern_errormsg;\n}\n\n/**\n * config_Print:\n * Print the content of the syntax tree\n * to a file.\n */\nvoid rh_config_Print(FILE *output, config_file_t config)\n{\n    /* sanity check */\n    if (!config)\n        return;\n\n    rh_config_print_list(output, ((config_struct_t *)config)->syntax_tree);\n}\n\n/**\n * config_Free:\n * Free the memory structure that store the configuration.\n */\n\nvoid rh_config_Free(config_file_t config)\n{\n    config_struct_t *config_struct = (config_struct_t *)config;\n\n    if (!config_struct)\n        return;\n\n    rh_config_free_list(config_struct->syntax_tree);\n\n    free(config_struct);\n\n    return;\n}\n\n/**\n * config_GetNbBlocks:\n * Indicates how many blocks are defined into the config file.\n */\nint rh_config_GetNbBlocks(config_file_t config)\n{\n\n    config_struct_t *config_struct = (config_struct_t *)config;\n\n    if (!config_struct)\n        
return -EFAULT;\n\n    /* is list empty? */\n    if (!(*config_struct->syntax_tree)) {\n        return 0;\n    }\n    /* count how many items are in the list */\n    else {\n        /* there is at least one item: the first */\n        generic_item *curr_block = (*config_struct->syntax_tree);\n        int           nb = 1;\n\n        while ((curr_block = curr_block->next) != NULL) {\n            nb++;\n        }\n\n        return nb;\n    }\n}\n\n/* retrieves a given block from the config file, from its index */\nconfig_item_t rh_config_GetBlockByIndex(config_file_t config,\n                                        unsigned int block_no)\n{\n    config_struct_t *config_struct = (config_struct_t *)config;\n    generic_item    *curr_block;\n    unsigned int     i;\n\n    if (!config_struct->syntax_tree || !(*config_struct->syntax_tree))\n        return NULL;\n\n    for (i = 0, curr_block = (*config_struct->syntax_tree);\n         curr_block != NULL; curr_block = curr_block->next, i++) {\n        if (i == block_no)\n            return (config_item_t) curr_block;\n    }\n\n    /* not found */\n    return NULL;\n}\n\n/* Return the name of a block */\nchar *rh_config_GetBlockName(config_item_t block)\n{\n    generic_item *curr_block = (generic_item *)block;\n\n    if (!curr_block || (curr_block->type != TYPE_BLOCK))\n        return NULL;\n\n    return curr_block->item.block.block_name;\n}\n\n/**\n * Return the block identifier, if it exists\n */\nchar *rh_config_GetBlockId(config_item_t block)\n{\n    generic_item *curr_block = (generic_item *)block;\n\n    if (!curr_block || (curr_block->type != TYPE_BLOCK))\n        return NULL;\n\n    return curr_block->item.block.block_id;\n}\n\n/* Indicates how many items are defines in a block */\nint rh_config_GetNbItems(config_item_t block)\n{\n    generic_item *the_block = (generic_item *)block;\n\n    if (!the_block || (the_block->type != TYPE_BLOCK))\n        return -1;\n\n    /* check if list is empty */\n    if 
(!(the_block->item.block.block_content)) {\n        return 0;\n    }\n    /* count the number of items in the list */\n    else {\n        /* there is at least one item: the first */\n        generic_item *curr_block = the_block->item.block.block_content;\n        int nb = 1;\n\n        while ((curr_block = curr_block->next) != NULL) {\n            nb++;\n        }\n\n        return nb;\n    }\n\n}\n\n/**\n * Count how many items with the given name are defined in a block\n */\nint rh_config_CountItemNames(config_item_t block, const char *name)\n{\n    generic_item *the_block = (generic_item *)block;\n\n    if (!the_block || (the_block->type != TYPE_BLOCK))\n        return -1;\n\n    /* is the list empty */\n    if (!(the_block->item.block.block_content)) {\n        return 0;\n    } else {\n        /* count items */\n        generic_item *curr_item;\n        int           nb = 0;\n\n        for (curr_item = the_block->item.block.block_content;\n             curr_item != NULL; curr_item = curr_item->next) {\n            switch (rh_config_ItemType((config_item_t) curr_item)) {\n            case CONFIG_ITEM_BLOCK:\n                if (!STRNCMP(curr_item->item.block.block_name, name, MAXSTRLEN))\n                    nb++;\n                break;\n            case CONFIG_ITEM_VAR:\n                if (!STRNCMP(curr_item->item.affect.varname, name, MAXSTRLEN))\n                    nb++;\n                break;\n            default:\n                return -1;\n            }\n        }\n\n        return nb;\n    }\n\n}\n\n/**\n * Count how many blocks with the given name are in config file\n */\nint rh_config_CountBlockNames(config_file_t cfg, const char *name)\n{\n    config_struct_t *config_struct = (config_struct_t *)cfg;\n    generic_item    *curr_block;\n    unsigned int     i;\n    unsigned int     count = 0;\n\n    if (!config_struct->syntax_tree || !(*config_struct->syntax_tree))\n        return -1;\n\n    for (i = 0, curr_block = (*config_struct->syntax_tree);\n  
       curr_block != NULL; curr_block = curr_block->next, i++) {\n        if (!strcasecmp(curr_block->item.block.block_name, name))\n            count++;\n    }\n\n    return count;\n}\n\n/* retrieves a given block from the config file, from its index */\nconfig_item_t rh_config_GetItemByIndex(config_item_t block,\n                                       unsigned int item_no)\n{\n    generic_item *the_block = (generic_item *)block;\n    generic_item *curr_item;\n    unsigned int i;\n\n    if (!the_block || (the_block->type != TYPE_BLOCK))\n        return NULL;\n\n    for (i = 0, curr_item = the_block->item.block.block_content;\n         curr_item != NULL; curr_item = curr_item->next, i++) {\n        if (i == item_no)\n            return (config_item_t) curr_item;\n    }\n\n    /* not found */\n    return NULL;\n}\n\n/* indicates which type of item it is */\nconfig_item_type rh_config_ItemType(config_item_t item)\n{\n    generic_item *the_item = (generic_item *)item;\n\n    switch (the_item->type) {\n    case TYPE_BLOCK:\n        return CONFIG_ITEM_BLOCK;\n    case TYPE_AFFECT:\n        return CONFIG_ITEM_VAR;\n    case TYPE_BOOL_EXPR:\n        return CONFIG_ITEM_BOOL_EXPR;\n    case TYPE_SET:\n        return CONFIG_ITEM_SET;\n    default:\n        return 0;\n    }\n}\n\n/* indicates which type of block content */\nconfig_item_type rh_config_ContentType(config_item_t block)\n{\n    generic_item *item = (generic_item *)block;\n\n    if (item->type != TYPE_BLOCK)\n        return 0;\n\n    if (item->item.block.block_content == NULL)\n        return 0;\n\n    return rh_config_ItemType((config_item_t)item->item.block.block_content);\n}\n\n/* Retrieves a key-value peer from a CONFIG_ITEM_VAR */\nint rh_config_GetKeyValue(config_item_t item,\n                          char **var_name, char **var_value,\n                          int *have_extra_args)\n{\n    generic_item *var = (generic_item *)item;\n\n    if (rh_config_ItemType(item) != CONFIG_ITEM_VAR) {\n        
strcpy(extern_errormsg,\n               \"Expression needs to be interpreted as a <key>=<value> expression, but it is not.\");\n        return -1;\n    }\n\n    *var_name = var->item.affect.varname;\n    *var_value = var->item.affect.varvalue;\n\n    *have_extra_args =\n        ((var->item.affect.arg_list != NULL)\n         && (var->item.affect.arg_list->nb_args > 0));\n\n    return 0;\n}\n\n/* Returns the number of arguments */\nint rh_config_GetExtraArgs(config_item_t item, char ***p_extra_arg_array)\n{\n    generic_item *var = (generic_item *)item;\n    arg_list_t   *arglist = NULL;\n\n    if (!var)\n        return -1;\n\n    if ((var->type == TYPE_BOOL_EXPR)\n        && (var->item.bool_expr.type == BOOL_CONDITION)) {\n        arglist = var->item.bool_expr.expr_u.key_value.arg_list;\n    } else if (var->type == TYPE_AFFECT) {\n        arglist = var->item.affect.arg_list;\n    } else {\n        return -1;\n    }\n\n    if (arglist) {\n        *p_extra_arg_array = arglist->args;\n        return arglist->nb_args;\n    } else {\n        *p_extra_arg_array = NULL;\n        return 0;\n    }\n}\n\nstatic const char *cfg_item_name(generic_item *item)\n{\n    switch (rh_config_ItemType((config_item_t)item)) {\n    case CONFIG_ITEM_BLOCK:\n        return item->item.block.block_name;\n    case CONFIG_ITEM_VAR:\n        return item->item.affect.varname;\n    default:\n        return NULL;\n    }\n}\n\n/* Get an item from a list with the given name,\n * and mark the item as read. 
*/\nstatic generic_item *GetItemFromList(generic_item *list, const char *name,\n                                     bool *ensure_unique)\n{\n    generic_item *curr;\n    generic_item *save = NULL;\n\n    for (curr = list; curr != NULL; curr = curr->next) {\n        const char *item_name = cfg_item_name(curr);\n\n        if (item_name == NULL)  /*unnamed item */\n            continue;\n\n        if (!STRNCMP(item_name, name, MAXSTRLEN)) {\n            if (!(*ensure_unique)) {\n                curr->is_read = true;\n                /* return first match */\n                return curr;\n            }\n\n            /* must check unicity */\n            if (save != NULL) {\n                *ensure_unique = false;\n                curr->is_read = true;\n                return curr;    /* return conflicting item */\n            } else\n                save = curr;\n        }\n    }\n    if (save != NULL)\n        save->is_read = true;\n    return save;\n}\n\n/* Returns the block with the specified name.\n * This name can be \"BLOCK::SUBBLOCK::SUBBLOCK\" */\nconfig_item_t rh_config_FindItemByName(config_file_t config, const char *name,\n                                       bool *ensure_unique)\n{\n    config_struct_t *config_struct = (config_struct_t *)config;\n    generic_item    *block;\n    generic_item    *list;\n    char            *separ;\n    char            *current;\n    char             tmp_name[MAXSTRLEN];\n\n    /* cannot be found if empty */\n    if (!config_struct->syntax_tree || !(*config_struct->syntax_tree))\n        return NULL;\n\n    list = *config_struct->syntax_tree;\n\n    rh_strncpy(tmp_name, name, MAXSTRLEN);\n    current = tmp_name;\n\n    while (current) {\n        /* first, split the name into BLOCK/SUBBLOC/SUBBLOC */\n        separ = strstr(current, \"::\");\n\n        /* it is a whole name */\n        if (!separ)\n            return (config_item_t) GetItemFromList(list, current,\n                                                   
ensure_unique);\n        else {\n            /* split the name */\n            *separ = '\\0';\n\n            if ((separ - tmp_name) < MAXSTRLEN - 2)\n                separ += 2;\n            else\n                return NULL;    /* overflow */\n\n            block = GetItemFromList(list, current, ensure_unique);\n\n            /* not found or not a block ? */\n            if (!block || (block->type != TYPE_BLOCK))\n                return NULL;\n\n            list = block->item.block.block_content;\n\n            /* \"::\" was found, must have something after */\n            current = separ;\n        }\n    }\n\n    /* not found */\n    return NULL;\n\n}\n\n/* Directly returns the value of the key with the specified name.\n * This name can be \"BLOCK::SUBBLOCK::SUBBLOCK::VARNAME\"\n */\nchar *rh_config_FindKeyValueByName(config_file_t config, const char *key_name,\n                                   bool *ensure_unique)\n{\n    generic_item *var;\n\n    var = (generic_item *)rh_config_FindItemByName(config, key_name,\n                                                   ensure_unique);\n\n    if (!var || (rh_config_ItemType((config_item_t) var) != CONFIG_ITEM_VAR))\n        return NULL;\n    else\n        return var->item.bool_expr.expr_u.key_value.varvalue;\n\n}\n\n/* Returns a block or variable with the specified name from the given block\" */\nconfig_item_t rh_config_GetItemByName(config_item_t block, const char *name,\n                                      bool *ensure_unique)\n{\n    generic_item  *curr_block = (generic_item *)block;\n    generic_item  *list;\n    char          *separ;\n    char          *current;\n    char           tmp_name[MAXSTRLEN];\n\n    /* cannot be found if empty or non block */\n    if (!curr_block || (curr_block->type != TYPE_BLOCK))\n        return NULL;\n\n    list = curr_block->item.block.block_content;\n\n    rh_strncpy(tmp_name, name, MAXSTRLEN);\n    current = tmp_name;\n\n    while (current) {\n        /* first, split the name 
into BLOCK/SUBBLOC/SUBBLOC */\n        separ = strstr(current, \"::\");\n\n        /* it is a whole name */\n        if (!separ)\n            return (config_item_t) GetItemFromList(list, current,\n                                                   ensure_unique);\n        else {\n            /* split the name */\n            *separ = '\\0';\n\n            if ((separ - tmp_name) < MAXSTRLEN - 2)\n                separ += 2;\n            else\n                return NULL;    /* overflow */\n\n            curr_block = GetItemFromList(list, current, ensure_unique);\n\n            /* not found or not a block ? */\n            if (!curr_block || (curr_block->type != TYPE_BLOCK))\n                return NULL;\n\n            list = curr_block->item.block.block_content;\n\n            /* \"::\" was found, must have something after */\n            current = separ;\n        }\n    }\n\n    /* not found */\n    return NULL;\n\n}\n\n/* Directly returns the value of the key with the specified name\n * relative to the given block.\n */\nchar *rh_config_GetKeyValueByName(config_item_t block, const char *key_name,\n                                  bool *ensure_unique)\n{\n    generic_item *var;\n\n    var = (generic_item *)rh_config_GetItemByName(block, key_name,\n                                                  ensure_unique);\n\n    if (!var || (rh_config_ItemType((config_item_t) var) != CONFIG_ITEM_VAR)) {\n        strcpy(extern_errormsg, \"item not found (or is not a parameter)\");\n        return NULL;\n    }\n\n    return var->item.affect.varvalue;\n}\n\n/* Get item line */\nint rh_config_GetItemLine(config_item_t item)\n{\n    generic_item *curr = (generic_item *)item;\n\n    return curr->line;\n}\n\n/* Indicate if the item has been read */\nbool rh_config_IsRead(config_item_t item)\n{\n    generic_item *curr = (generic_item *)item;\n\n    return curr->is_read;\n}\n"
  },
  {
    "path": "src/cfg_parsing/rbh_boolexpr.c",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2008, 2009, 2010 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n/**\n *  Module for configuration management and parsing.\n */\n#ifdef HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n\n#include \"rbh_cfg_helpers.h\"\n#include \"rbh_boolexpr.h\"\n#include \"rbh_misc.h\"\n#include \"analyze.h\"\n#include \"status_manager.h\"\n#include \"rbh_logs.h\"\n\n/**\n *  convert the syntaxic code for comparator to the configuration equivalent code\n */\nstatic inline compare_direction_t syntax2conf_comparator(operator_t op)\n{\n    switch (op) {\n    case OP_EQUAL:\n        return COMP_EQUAL;\n    case OP_DIFF:\n        return COMP_DIFF;\n    case OP_GT:\n        return COMP_GRTHAN;\n    case OP_GT_EQ:\n        return COMP_GRTHAN_EQ;\n    case OP_LT:\n        return COMP_LSTHAN;\n    case OP_LT_EQ:\n        return COMP_LSTHAN_EQ;\n    case OP_CMD:\n    default:\n        return COMP_NONE;\n\n    }\n}\n\n/**\n *  convert the syntaxic code for unary boolean operator to the configuration equivalent code\n */\nstatic inline bool_op_t syntax2conf_boolop(bool_operator_t boolop)\n{\n    switch (boolop) {\n    case BOOL_OP_NOT:\n        return BOOL_NOT;\n    case BOOL_OP_AND:\n        return BOOL_AND;\n    case BOOL_OP_OR:\n        return BOOL_OR;\n    default:\n        return BOOL_ERR;\n    }\n}\n\nstatic int process_any_level_condition(char *regexpr, char *err_msg)\n{\n    char   *curr = strstr(regexpr, \"**\");\n    size_t  len  = strlen(regexpr);\n\n    /* characters before and after '**' can only be '/' */\n    for (curr = strstr(regexpr, \"**\"); curr != NULL;\n         curr = 
strstr(curr + 2, \"**\")) {\n        if (curr > regexpr) {\n            char *prev = curr - 1;\n            /* check character before '**' */\n            if (*prev != '/') {\n                sprintf(err_msg,\n                        \"Character before and after '**' must be a '/' in '%s'\",\n                        regexpr);\n                return EINVAL;\n            }\n        }\n        /* - last char is 'regexpr + len - 1'\n         * - curr + 2 is the first char after '**'\n         */\n        if ((curr + 2) <= (regexpr + len - 1)) {\n            /* check the character after '**' */\n            if (curr[2] != '/') {\n                sprintf(err_msg,\n                        \"Character before and after '**' must be a '/' in '%s'\",\n                        regexpr);\n                return EINVAL;\n            }\n        }\n    }\n\n    for (curr = strchr(regexpr, '*'); curr != NULL;\n         curr = strchr(curr + 2, '*')) {\n        if (curr[1] != '*') {\n            sprintf(err_msg,\n                    \"Single wildcard '*' cannot be used in the same expression as double wildcard '**' in '%s'\",\n                    regexpr);\n            return EINVAL;\n        }\n    }\n\n    /* non escaped '?' 
must be replaced by '[!/]'\n     * '**' must be replaced by '*'\n     */\n    str_subst(regexpr, \"?\", \"[!/]\");\n    str_subst(regexpr, \"**\", \"*\");\n\n    return 0;\n}\n\n#if (!defined (_LUSTRE) || !defined(_HAVE_FID))\n#define XATTR_NEED ATTR_MASK_fullpath\n#else\n#define XATTR_NEED 0\n#endif\n\n/** criteria parsing */\nstatic struct criteria_descr_t {\n    const char     *name;\n    uint32_t        std_attr_mask;\n    cfg_param_type  type;\n    int             parsing_flags;\n    int             crit_flags;\n} const criteria_descr[] = {\n    [CRITERIA_TREE] = {\"tree\", ATTR_MASK_fullpath, PT_STRING,\n                       PFLG_ALLOW_ANY_DEPTH | PFLG_NOT_EMPTY\n                       | PFLG_REMOVE_FINAL_SLASH, 0},\n    [CRITERIA_PATH] = {\"path\", ATTR_MASK_fullpath, PT_STRING,\n                       PFLG_ALLOW_ANY_DEPTH | PFLG_NOT_EMPTY\n                       | PFLG_REMOVE_FINAL_SLASH, 0},\n    [CRITERIA_NAME] = {\"name\", ATTR_MASK_name, PT_STRING,\n                           PFLG_NOT_EMPTY | PFLG_NO_SLASH, 0},\n    [CRITERIA_INAME] = {\"iname\", ATTR_MASK_name, PT_STRING,\n                          PFLG_NOT_EMPTY | PFLG_NO_SLASH, CMP_FLG_INSENSITIVE},\n    [CRITERIA_TYPE] = {\"type\", ATTR_MASK_type, PT_TYPE, 0, 0},\n    [CRITERIA_OWNER] = {\"owner\", ATTR_MASK_uid, PT_STRING, PFLG_NOT_EMPTY, 0},\n    [CRITERIA_GROUP] = {\"group\", ATTR_MASK_gid, PT_STRING, PFLG_NOT_EMPTY, 0},\n#ifdef _LUSTRE\n    [CRITERIA_PROJID] = {\"projid\", ATTR_MASK_projid, PT_INT,\n                         PFLG_POSITIVE | PFLG_COMPARABLE, 0},\n#endif\n    [CRITERIA_SIZE] = {\"size\", ATTR_MASK_size, PT_SIZE,\n                       PFLG_POSITIVE | PFLG_COMPARABLE, 0},\n    [CRITERIA_DEPTH] = {\"depth\", ATTR_MASK_depth, PT_INT,\n                        PFLG_POSITIVE | PFLG_COMPARABLE, 0},\n    [CRITERIA_DIRCOUNT] = {\"dircount\", ATTR_MASK_dircount, PT_INT,\n                           PFLG_POSITIVE | PFLG_COMPARABLE, 0},\n    [CRITERIA_NLINK] = {\"nlink\", ATTR_MASK_nlink, 
PT_INT,\n                            PFLG_POSITIVE | PFLG_COMPARABLE, 0},\n    [CRITERIA_LAST_ACCESS] = {\"last_access\", ATTR_MASK_last_access, PT_DURATION,\n                              PFLG_POSITIVE | PFLG_COMPARABLE, 0},\n    [CRITERIA_LAST_MOD] = {\"last_mod\", ATTR_MASK_last_mod, PT_DURATION,\n                           PFLG_POSITIVE | PFLG_COMPARABLE, 0},\n    [CRITERIA_LAST_MDCHANGE] =\n        {\"last_mdchange\", ATTR_MASK_last_mdchange, PT_DURATION,\n         PFLG_POSITIVE | PFLG_COMPARABLE, 0},\n    [CRITERIA_CREATION] = {\"creation\", ATTR_MASK_creation_time, PT_DURATION,\n                           PFLG_POSITIVE | PFLG_COMPARABLE, 0},\n    /* needs a 'remove' status manager */\n    [CRITERIA_RMTIME] = {\"rm_time\", ATTR_MASK_rm_time, PT_DURATION,\n                         PFLG_POSITIVE | PFLG_COMPARABLE | PFLG_STATUS, 0},\n#ifdef _LUSTRE\n    [CRITERIA_POOL] = {\"ost_pool\", ATTR_MASK_stripe_info, PT_STRING, 0, 0},\n    [CRITERIA_OST] =\n        {\"ost_index\", ATTR_MASK_stripe_items, PT_INT, PFLG_POSITIVE, 0},\n#endif\n    [CRITERIA_FILECLASS] = {\"fileclass\", ATTR_MASK_fileclass, PT_STRING,\n                            PFLG_NO_SLASH, CMP_FLG_INSENSITIVE},\n    /* status mask is context dependent */\n    [CRITERIA_STATUS] =\n        {\"status\", 0, PT_STRING, PFLG_STATUS | PFLG_NO_WILDCARDS,\n         CMP_FLG_INSENSITIVE},\n    /* /!\\ str2criteria relies on the fact that CRITERIA_XATTR is after\n     * the last standard criteria */\n    [CRITERIA_XATTR] = {XATTR_PREFIX, XATTR_NEED, PT_STRING, PFLG_XATTR, 0},\n\n    /* CRITERIA_SM_INFO: type and flags are provided by status managers\n     * (sm_info_def_t) */\n};\n\nconst char *criteria2str(compare_criteria_t crit)\n{\n    if (crit > CRITERIA_XATTR)\n        return \"?\";\n\n    return criteria_descr[crit].name;\n}\n\ncompare_criteria_t str2criteria(const char *str, const struct sm_instance *smi,\n                                const sm_info_def_t **def, unsigned int *idx)\n{\n    int i;\n\n    /* 
special case of XATTR criteria: xattr.<attr_name> */\n    if (!strncasecmp(str, XATTR_PREFIX \".\", strlen(XATTR_PREFIX \".\")))\n        return CRITERIA_XATTR;\n\n    for (i = 0; i < CRITERIA_XATTR; i++)\n        if (!strcasecmp(str, criteria_descr[i].name))\n            return i;\n\n    i = sm_attr_get(smi, NULL, str, NULL, def, idx);\n    if (i == 0)\n        /* found this criteria in SM info */\n        return CRITERIA_SM_INFO;\n\n    return NO_CRITERIA;\n}\n\nunsigned int str2lru_attr(const char *str, const struct sm_instance *smi)\n{\n    const sm_info_def_t *def;\n    unsigned int idx;\n    int rc;\n\n    if (!strcasecmp(str, criteria2str(CRITERIA_LAST_ACCESS)))\n        return ATTR_INDEX_last_access;\n    else if (!strcasecmp(str, criteria2str(CRITERIA_LAST_MOD)))\n        return ATTR_INDEX_last_mod;\n    else if (!strcasecmp(str, criteria2str(CRITERIA_LAST_MDCHANGE)))\n        return ATTR_INDEX_last_mdchange;\n    else if (!strcasecmp(str, criteria2str(CRITERIA_CREATION)))\n        return ATTR_INDEX_creation_time;\n    else if (!strcasecmp(str, criteria2str(CRITERIA_RMTIME)))\n        return ATTR_INDEX_rm_time;\n    else if (!strcasecmp(str, criteria2str(CRITERIA_SIZE)))\n        return ATTR_INDEX_size;\n    else if (!strcasecmp(str, \"none\"))\n        return LRU_ATTR_NONE;\n\n    rc = sm_attr_get(smi, NULL, str, NULL, &def, &idx);\n    if (rc < 0)\n        return LRU_ATTR_INVAL;\n    else if (def->crit_type != PT_DURATION)\n        return LRU_ATTR_INVAL;\n\n    return idx;\n}\n\n#define CHECK_INT_VALUE(_v, _flg) do {\\\n                if (((_flg) & PFLG_POSITIVE) && ((_v) < 0)) { \\\n                    sprintf(err_msg, \"Positive value expected for %s criteria\",\\\n                            key_value->varname); \\\n                    return EINVAL; \\\n                } \\\n                if (((_flg) & PFLG_NOT_NULL) && ((_v) == 0)) { \\\n                    sprintf(err_msg, \"Null value not allowed for %s criteria\", \\\n                          
  key_value->varname); \\\n                    return EINVAL; \\\n                } \\\n            } while (0)\n\nstatic int criteria2condition(const type_key_value *key_value,\n                              compare_triplet_t *p_triplet,\n                              attr_mask_t *p_attr_mask, char *err_msg,\n                              compare_criteria_t crit, cfg_param_type type,\n                              attr_mask_t attr_mask, int flags,\n                              const sm_instance_t *smi)\n{\n    /* unexpected status in this context */\n    if (flags & PFLG_STATUS) {\n        if (smi == NULL) {\n            sprintf(err_msg, \"'%s' criteria is not expected in this context\",\n                    key_value->varname);\n            return EINVAL;\n        } else if (!strcasecmp(key_value->varname, \"status\"))\n            /* status attribute */\n            attr_mask_set_index(p_attr_mask,\n                                ATTR_INDEX_FLG_STATUS | smi->smi_index);\n        else if (smi->sm->flags & SM_DELETED)\n            /* attribute for deleted entries (e.g. 
rm_time) */\n            *p_attr_mask = attr_mask_or(p_attr_mask, &attr_mask);\n        else {\n            /* this status manager does not support deleted entries */\n            sprintf(err_msg,\n                    \"Attribute '%s' only applies to deleted entries but status manager '%s' does not manage deleted entries\",\n                    key_value->varname, smi->sm->name);\n            return EINVAL;\n        }\n    } else\n        *p_attr_mask = attr_mask_or(p_attr_mask, &attr_mask);\n\n    if (crit == CRITERIA_SM_INFO) {\n        rh_strncpy(p_triplet->attr_name, key_value->varname,\n                   sizeof(p_triplet->attr_name));\n    }\n\n    p_triplet->crit = crit;\n    p_triplet->op = syntax2conf_comparator(key_value->op_type);\n\n    switch (type) {\n    case PT_STRING:\n        if ((flags & PFLG_NOT_EMPTY) && EMPTY_STRING(key_value->varvalue)) {\n            sprintf(err_msg, \"non-empty string expected for %s parameter\",\n                    key_value->varname);\n            return EINVAL;\n        }\n        if ((flags & PFLG_NO_SLASH) && SLASH_IN(key_value->varvalue)) {\n            sprintf(err_msg, \"no slash (/) expected in %s parameter\",\n                    key_value->varname);\n            return EINVAL;\n        }\n\n        if (global_config.uid_gid_as_numbers &&\n            (crit == CRITERIA_OWNER || crit == CRITERIA_GROUP)) {\n            db_type_u value;\n\n            if (crit == CRITERIA_OWNER) {\n                if (set_uid_val(key_value->varvalue, &value))\n                    return EINVAL;\n            } else {\n                if (set_gid_val(key_value->varvalue, &value))\n                    return EINVAL;\n            }\n\n            p_triplet->val.integer = value.val_int;\n\n            if (p_triplet->op == COMP_LIKE)\n                p_triplet->op = COMP_EQUAL;\n            else if (p_triplet->op == COMP_UNLIKE)\n                p_triplet->op = COMP_DIFF;\n\n            return 0;\n        }\n\n        /* in case the string 
contains regexpr, those comparators\n         * are changed to LIKE / UNLIKE */\n        if (WILDCARDS_IN(key_value->varvalue)) {\n            if (flags & PFLG_NO_WILDCARDS) {\n                sprintf(err_msg, \"No wildcard is allowed in %s criteria\",\n                        key_value->varname);\n                return EINVAL;\n            }\n\n            if (p_triplet->op == COMP_EQUAL)\n                p_triplet->op = COMP_LIKE;\n            else if (p_triplet->op == COMP_DIFF)\n                p_triplet->op = COMP_UNLIKE;\n        }\n\n        rh_strncpy(p_triplet->val.str, key_value->varvalue,\n                   sizeof(p_triplet->val.str));\n\n        if (flags & PFLG_XATTR) {\n            char *p_xattr = strchr(key_value->varname, '.');\n            p_xattr++;\n            rh_strncpy(p_triplet->attr_name, p_xattr,\n                       sizeof(p_triplet->attr_name));\n        }\n        /* PFLG_STATUS flag means the attibute is only allowed\n         * in a policy scope. This is the case of 'status',\n         * but also 'rm_time'... 
*/\n        else if ((flags & PFLG_STATUS)\n                 && !strcasecmp(key_value->varname, \"status\")) {\n\n            if ((get_status_str(smi->sm, p_triplet->val.str) == NULL)\n                && (strlen(p_triplet->val.str) > 0)) {\n                char tmp[RBH_NAME_MAX];\n\n                /* non empty config parameter with NULL match\n                 * => invalid status name */\n                sprintf(err_msg, \"Invalid status '%s' for '%s' status manager: \"\n                        \"allowed values are %s\",\n                        key_value->varvalue, smi->sm->name,\n                        allowed_status_str(smi->sm, tmp, sizeof(tmp)));\n                return EINVAL;\n            }\n        } else if (ANY_LEVEL_MATCH(p_triplet->val.str)) {\n            /* don't care for xattr and status value */\n            if (flags & PFLG_ALLOW_ANY_DEPTH) {\n                int rc;\n\n                /* check the expression and adapt it to fnmatch */\n                rc = process_any_level_condition(p_triplet->val.str, err_msg);\n                if (rc)\n                    return rc;\n                p_triplet->flags |= CMP_FLG_ANY_LEVEL;\n            } else {\n                sprintf(err_msg,\n                        \"double star wildcard (**) not expected in %s parameter\",\n                        key_value->varname);\n                return EINVAL;\n            }\n        }\n        if ((flags & PFLG_REMOVE_FINAL_SLASH)\n            && FINAL_SLASH(p_triplet->val.str))\n            REMOVE_FINAL_SLASH(p_triplet->val.str);\n\n        break;\n\n    case PT_SIZE:\n        /* a size is expected */\n        p_triplet->val.size = str2size(key_value->varvalue);\n        if (p_triplet->val.size == -1LL) {\n            sprintf(err_msg, \"%s criteria: invalid format for size: '%s'\",\n                    key_value->varname, key_value->varvalue);\n            return EINVAL;\n        }\n        CHECK_INT_VALUE(p_triplet->val.size, flags);\n        break;\n\n    case 
PT_INT:\n        p_triplet->val.integer = str2int(key_value->varvalue);\n        if (p_triplet->val.integer == -1) {\n            sprintf(err_msg, \"%s criteria: integer expected: '%s'\",\n                    key_value->varname, key_value->varvalue);\n            return EINVAL;\n        }\n        CHECK_INT_VALUE(p_triplet->val.integer, flags);\n        break;\n\n    case PT_BOOL:\n        p_triplet->val.integer = str2bool(key_value->varvalue);\n        if (p_triplet->val.integer == -1) {\n            sprintf(err_msg,\n                    \"%s criteria: boolean expected (0, 1, true, false, yes, no, enabled, disabled): '%s'\",\n                    key_value->varname, key_value->varvalue);\n            return EINVAL;\n        }\n        break;\n\n    case PT_DURATION:\n        p_triplet->val.duration = str2duration(key_value->varvalue);\n        if (p_triplet->val.duration == -1) {\n            sprintf(err_msg, \"%s criteria: duration expected: '%s'\",\n                    key_value->varname, key_value->varvalue);\n            return EINVAL;\n        }\n        CHECK_INT_VALUE(p_triplet->val.duration, flags);\n        break;\n\n    case PT_TYPE:\n        p_triplet->val.type = str2type(key_value->varvalue);\n        if (p_triplet->val.type == TYPE_NONE) {\n            strcpy(err_msg, \"Illegal condition on type: file, directory, \"\n                   \"symlink, chr, blk, fifo or sock expected.\");\n            return EINVAL;\n        }\n        break;\n\n    default:\n        sprintf(err_msg, \"Unsupported criteria type for '%s'\",\n                key_value->varname);\n        return ENOTSUP;\n    }\n\n    /* > or < for a non comparable item */\n    if (!(flags & PFLG_COMPARABLE)\n        && (p_triplet->op != COMP_EQUAL)\n        && (p_triplet->op != COMP_DIFF)\n        && (p_triplet->op != COMP_LIKE)\n        && (p_triplet->op != COMP_UNLIKE)) {\n        sprintf(err_msg,\n                \"Illegal comparator for %s criteria: == or != expected\",\n                
key_value->varname);\n        return EINVAL;\n    }\n\n    return 0;\n}\n\n/**\n *  interpret and check a condition.\n */\nstatic int interpret_condition(type_key_value *key_value,\n                               compare_triplet_t *p_triplet,\n                               attr_mask_t *p_attr_mask, char *err_msg,\n                               const sm_instance_t *smi)\n{\n    const struct criteria_descr_t *pcrit;\n    const sm_info_def_t *def;\n    unsigned int idx;\n    attr_mask_t tmp = null_mask;\n    /* check the name for the condition */\n    compare_criteria_t crit = str2criteria(key_value->varname, smi, &def, &idx);\n\n    if (crit == NO_CRITERIA) {\n        sprintf(err_msg, \"Unknown or unsupported criteria '%s'\",\n                key_value->varname);\n        return EINVAL;\n    }\n\n    /* lighten the following line of code */\n    pcrit = &criteria_descr[crit];\n\n    p_triplet->flags = pcrit->crit_flags;\n\n    if (crit == CRITERIA_SM_INFO) {\n        cfg_param_type t = def->crit_type;\n        int pflags = (t == PT_DURATION || t == PT_SIZE || t == PT_INT\n                      || t == PT_INT64 || t == PT_FLOAT) ? 
PFLG_COMPARABLE : 0;\n\n        attr_mask_set_index(&tmp, idx);\n\n        return criteria2condition(key_value, p_triplet, p_attr_mask, err_msg,\n                                  crit, t, tmp, pflags, smi);\n    } else {\n        tmp.std = pcrit->std_attr_mask;\n\n        return criteria2condition(key_value, p_triplet, p_attr_mask, err_msg,\n                                  crit, pcrit->type, tmp, pcrit->parsing_flags,\n                                  smi);\n    }\n}\n\n/**\n * Set attribute value in attrs, given the criteria name and\n * the text representation of the value.\n */\nint set_attr_value_from_strings(const char *name, const char *val,\n                                attr_set_t *attrs, const struct sm_instance *smi)\n{\n    const struct criteria_descr_t *pcrit;\n    const sm_info_def_t *def;\n    unsigned int idx;\n    attr_mask_t tmp = null_mask;\n    compare_criteria_t crit;\n    char err[1024];\n    int rc;\n\n    compare_triplet_t cond = {0};\n    type_key_value kv = {.op_type = OP_EQUAL};\n\n    /* check the name of the attribute */\n    crit = str2criteria(name, smi, &def, &idx);\n\n    if (crit == NO_CRITERIA) {\n        DisplayLog(LVL_CRIT, __func__, \"Unknown or unsupported criteria '%s'\",\n                   name);\n        return -EINVAL;\n    }\n\n    pcrit = &criteria_descr[crit];\n\n    if (crit == CRITERIA_SM_INFO) {\n        cfg_param_type t = def->crit_type;\n        int pflags = (t == PT_DURATION || t == PT_SIZE || t == PT_INT\n                      || t == PT_INT64 || t == PT_FLOAT) ? 
PFLG_COMPARABLE : 0;\n\n        rh_strncpy(kv.varname, def->user_name, sizeof(kv.varname));\n        rh_strncpy(kv.varvalue, val, sizeof(kv.varvalue));\n\n        attr_mask_set_index(&tmp, idx);\n        rc = criteria2condition(&kv, &cond, &tmp, err, crit, t, tmp, pflags,\n                                smi);\n        if (rc) {\n            DisplayLog(LVL_CRIT, __func__, \"Failed to parse value '%s'\", val);\n            return -EINVAL;\n        }\n\n        rc = set_sm_info(smi, attrs,\n                         attr2sminfo_index(idx) - smi->sm_info_offset,\n                         &cond.val);\n        if (rc) {\n            DisplayLog(LVL_CRIT, __func__, \"Failed to assign value in attribute set\");\n            return rc;\n        }\n\n    } else if (crit == CRITERIA_STATUS) {\n        /* always str value */\n        sm_status_ensure_alloc(&attrs->attr_values.sm_status);\n        STATUS_ATTR(attrs, smi->smi_index) = val;\n        attr_mask_set_index(&attrs->attr_mask,\n                            ATTR_INDEX_FLG_STATUS | smi->smi_index);\n\n    } else {\n        rh_strncpy(kv.varname, pcrit->name, sizeof(kv.varname));\n        rh_strncpy(kv.varvalue, val, sizeof(kv.varvalue));\n\n        tmp.std = pcrit->std_attr_mask;\n\n        /*  Parse value according to criteria type */\n        rc = criteria2condition(&kv, &cond, &tmp, err, crit, pcrit->type, tmp,\n                                  pcrit->parsing_flags, smi);\n        if (rc) {\n            DisplayLog(LVL_CRIT, __func__, \"Failed to parse value '%s'\", val);\n            return -EINVAL;\n        }\n\n        /* set attr value ... 
*/\n        DisplayLog(LVL_CRIT, __func__, \"Attibute %s not supported in %s\",\n                   kv.varname, __func__);\n        return -ENOTSUP;\n    }\n    return 0;\n}\n\n/**\n *  Recursive function for building boolean expression.\n */\nstatic int build_bool_expr(type_bool_expr *p_in_bool_expr,\n                           bool_node_t *p_out_node,\n                           attr_mask_t *p_attr_mask, char *err_msg,\n                           const sm_instance_t *smi)\n{\n    int rc;\n\n    switch (p_in_bool_expr->type) {\n    case BOOL_CONDITION:\n        p_out_node->node_type = NODE_CONDITION;\n        p_out_node->content_u.condition =\n            (compare_triplet_t *)malloc(sizeof(compare_triplet_t));\n        if (!p_out_node->content_u.condition)\n            goto errmem;\n\n        rc = interpret_condition(&p_in_bool_expr->expr_u.key_value,\n                                 p_out_node->content_u.condition, p_attr_mask,\n                                 err_msg, smi);\n        if (rc)\n            goto freecondition;\n        return 0;\n\n        break;\n\n    case BOOL_UNARY:\n\n        /* in case of identity, directly return sub expression */\n        if (p_in_bool_expr->oper == BOOL_OP_IDENTITY)\n            return build_bool_expr(p_in_bool_expr->expr_u.members.expr1,\n                                   p_out_node, p_attr_mask, err_msg, smi);\n\n        p_out_node->node_type = NODE_UNARY_EXPR;\n        p_out_node->content_u.bool_expr.bool_op =\n            syntax2conf_boolop(p_in_bool_expr->oper);\n        if (p_out_node->content_u.bool_expr.bool_op == BOOL_ERR) {\n            strcpy(err_msg, \"Unexpected boolean operator in expression\");\n            return EINVAL;\n        }\n\n        p_out_node->content_u.bool_expr.owner = 1;\n        p_out_node->content_u.bool_expr.expr1 =\n            (bool_node_t *)malloc(sizeof(bool_node_t));\n        if (!p_out_node->content_u.bool_expr.expr1)\n            goto errmem;\n        
p_out_node->content_u.bool_expr.expr2 = NULL;\n\n        rc = build_bool_expr(p_in_bool_expr->expr_u.members.expr1,\n                             p_out_node->content_u.bool_expr.expr1, p_attr_mask,\n                             err_msg, smi);\n        if (rc)\n            goto free_expr1;\n        return 0;\n\n        break;\n\n    case BOOL_BINARY:\n\n        p_out_node->node_type = NODE_BINARY_EXPR;\n        p_out_node->content_u.bool_expr.bool_op =\n            syntax2conf_boolop(p_in_bool_expr->oper);\n\n        if (p_out_node->content_u.bool_expr.bool_op == BOOL_ERR) {\n            strcpy(err_msg, \"Unexpected boolean operator in expression\");\n            return EINVAL;\n        }\n\n        p_out_node->content_u.bool_expr.owner = 1;\n        p_out_node->content_u.bool_expr.expr1 =\n            (bool_node_t *)malloc(sizeof(bool_node_t));\n        if (!p_out_node->content_u.bool_expr.expr1)\n            goto errmem;\n        rc = build_bool_expr(p_in_bool_expr->expr_u.members.expr1,\n                             p_out_node->content_u.bool_expr.expr1, p_attr_mask,\n                             err_msg, smi);\n\n        if (rc)\n            goto free_expr1;\n\n        p_out_node->content_u.bool_expr.expr2 =\n            (bool_node_t *)malloc(sizeof(bool_node_t));\n        if (!p_out_node->content_u.bool_expr.expr2)\n            goto errmem;\n        rc = build_bool_expr(p_in_bool_expr->expr_u.members.expr2,\n                             p_out_node->content_u.bool_expr.expr2, p_attr_mask,\n                             err_msg, smi);\n\n        if (rc)\n            goto free_expr2;\n\n        return 0;\n\n        break;\n\n    default:\n        sprintf(err_msg, \"Invalid boolean node type %d while parsing\",\n                p_in_bool_expr->type);\n        return EINVAL;\n    }\n\n errmem:\n    strcpy(err_msg, \"Could not allocate memory\");\n    return ENOMEM;\n\n freecondition:\n    free(p_out_node->content_u.condition);\n    p_out_node->content_u.condition = 
NULL;\n    return rc;\n\n free_expr2:\n    free(p_out_node->content_u.bool_expr.expr2);\n    p_out_node->content_u.bool_expr.expr2 = NULL;\n free_expr1:\n    free(p_out_node->content_u.bool_expr.expr1);\n    p_out_node->content_u.bool_expr.expr1 = NULL;\n    return rc;\n}\n\n/** Create a boolean condition */\nint CreateBoolCond(bool_node_t *p_out_node, compare_direction_t compar,\n                   compare_criteria_t crit, compare_value_t val,\n                   enum compare_flags flags)\n{\n    p_out_node->node_type = NODE_CONDITION;\n    p_out_node->content_u.condition =\n        (compare_triplet_t *)malloc(sizeof(compare_triplet_t));\n    if (!p_out_node->content_u.condition)\n        return -ENOMEM;\n    memset(p_out_node->content_u.condition, 0, sizeof(compare_triplet_t));\n    p_out_node->content_u.condition->flags = flags;\n    p_out_node->content_u.condition->crit = crit;\n    p_out_node->content_u.condition->op = compar;\n    p_out_node->content_u.condition->val = val;\n    return 0;\n}\n\n/** Append a boolean condition with bool op = AND */\nint AppendBoolCond(bool_node_t *p_in_out_node, compare_direction_t compar,\n                   compare_criteria_t crit, compare_value_t val,\n                   enum compare_flags flags)\n{\n    bool_node_t copy_prev = *p_in_out_node;\n    int rc = 0;\n\n    p_in_out_node->node_type = NODE_BINARY_EXPR;\n    p_in_out_node->content_u.bool_expr.bool_op = BOOL_AND;\n\n    /* bool expr will be allocated */\n    p_in_out_node->content_u.bool_expr.owner = 1;\n\n    /* first expression = the previous expression */\n    p_in_out_node->content_u.bool_expr.expr1 =\n        (bool_node_t *)malloc(sizeof(bool_node_t));\n    if (!p_in_out_node->content_u.bool_expr.expr1)\n        return -ENOMEM;\n    *p_in_out_node->content_u.bool_expr.expr1 = copy_prev;\n\n    /* second expression = the appended value */\n    p_in_out_node->content_u.bool_expr.expr2 =\n        (bool_node_t *)malloc(sizeof(bool_node_t));\n    if 
(!p_in_out_node->content_u.bool_expr.expr2) {\n        rc = -ENOMEM;\n        goto free_expr1;\n    }\n\n    /* expr2 is a triplet */\n    rc = CreateBoolCond(p_in_out_node->content_u.bool_expr.expr2, compar,\n                        crit, val, flags);\n    if (rc)\n        goto free_expr2;\n\n    return 0;\n\n free_expr2:\n    free(p_in_out_node->content_u.bool_expr.expr2);\n free_expr1:\n    FreeBoolExpr(p_in_out_node->content_u.bool_expr.expr1, true);\n    return rc;\n}\n\nint ConstantBoolExpr(bool constant, bool_node_t *p_bool_node)\n{\n    if (!p_bool_node)\n        return EINVAL;\n\n    p_bool_node->node_type = NODE_CONSTANT;\n    p_bool_node->content_u.constant = constant;\n    return 0;\n}\n\n/**\n * Build a policy boolean expression from the given block\n * \\param smi(in) when specifying a policy scope, indicate the\n *          related status manager ('status' criteria is policy dependent).\n */\nint GetBoolExpr(config_item_t block, const char *block_name,\n                bool_node_t *p_bool_node, attr_mask_t *p_attr_mask,\n                char *err_msg, const sm_instance_t *smi)\n{\n    generic_item *curr_block = (generic_item *)block;\n    generic_item *subitem;\n    int rc;\n\n    /* initialize attr mask */\n    *p_attr_mask = null_mask;\n\n    /* check it is a block */\n    if (!curr_block || (curr_block->type != TYPE_BLOCK)) {\n        sprintf(err_msg, \"'%s' is expected to be a block\", block_name);\n        return EINVAL;\n    }\n\n    /* Check the block contains something  */\n    if (!curr_block->item.block.block_content) {\n        sprintf(err_msg, \"'%s' block is empty, line %d\", block_name,\n                rh_config_GetItemLine(block));\n        return ENOENT;\n    }\n\n    /* check bloc content */\n    subitem = curr_block->item.block.block_content;\n\n    if (subitem->type != TYPE_BOOL_EXPR) {\n        sprintf(err_msg, \"Boolean expression expected in block '%s', line %d\",\n                block_name, 
rh_config_GetItemLine((config_item_t) subitem));\n        return EINVAL;\n    }\n\n    if (subitem->next) {\n        sprintf(err_msg,\n                \"A single boolean expression is expected in block '%s', line %d\",\n                block_name, rh_config_GetItemLine((config_item_t) subitem));\n        return EINVAL;\n    }\n\n    /* now we can analyze the boolean expression */\n    rc = build_bool_expr(&subitem->item.bool_expr, p_bool_node, p_attr_mask,\n                         err_msg, smi);\n    if (rc)\n        sprintf(err_msg + strlen(err_msg), \", line %d\",\n                rh_config_GetItemLine((config_item_t) subitem));\n\n    return rc;\n\n}\n\n/**\n *  Recursive function for freeing boolean expression.\n *  TODO: check these functions, in particular the 'owner'\n *        system, when an expression is a sub-part of another.\n */\nint FreeBoolExpr(bool_node_t *p_expr, bool free_top_node)\n{\n    if (p_expr == NULL)\n        return -EFAULT;\n\n    switch (p_expr->node_type) {\n    case NODE_CONSTANT:\n        /* nothing to free */\n        break;\n\n    case NODE_CONDITION:\n        free(p_expr->content_u.condition);\n        break;\n\n    case NODE_UNARY_EXPR:\n        if (p_expr->content_u.bool_expr.owner)\n            FreeBoolExpr(p_expr->content_u.bool_expr.expr1, true);\n        break;\n\n    case NODE_BINARY_EXPR:\n        if (p_expr->content_u.bool_expr.owner) {\n            FreeBoolExpr(p_expr->content_u.bool_expr.expr1, true);\n            FreeBoolExpr(p_expr->content_u.bool_expr.expr2, true);\n        }\n        break;\n    }\n\n    if (free_top_node)\n        free(p_expr);\n\n    return 0;\n}\n\n/**\n *  Recursive function for building boolean expression, from a union/intersection\n *  of defined classes.\n */\nstatic int build_set_expr(type_set *p_in_set,\n                          bool_node_t *p_out_node, attr_mask_t *p_attr_mask,\n                          const policies_t *policies, char *err_msg)\n{\n    int i, rc;\n\n    if 
(p_in_set->set_type == SET_SINGLETON) {\n        /* get class from its name */\n        for (i = 0; i < policies->fileset_count; i++) {\n            if (!strcasecmp(policies->fileset_list[i].fileset_id,\n                            p_in_set->set_u.name)) {\n                /* found */\n                *p_out_node = policies->fileset_list[i].definition;\n                *p_attr_mask =\n                    attr_mask_or(p_attr_mask,\n                                 &policies->fileset_list[i].attr_mask);\n                /* top level expression is not owner of the content */\n                p_out_node->content_u.bool_expr.owner = 0;\n                return 0;\n            }\n        }\n        sprintf(err_msg, \"FileClass '%s' is undefined\", p_in_set->set_u.name);\n        return ENOENT;\n    } else if (p_in_set->set_type == SET_NEGATION) {\n        p_out_node->node_type = NODE_UNARY_EXPR;\n\n        if (p_in_set->set_u.op.oper != SET_OP_NOT) {\n            strcpy(err_msg, \"Unexpected set operator in unary expression\");\n            return EINVAL;\n        }\n        p_out_node->content_u.bool_expr.bool_op = BOOL_NOT;\n\n        p_out_node->content_u.bool_expr.owner = 0;\n        p_out_node->content_u.bool_expr.expr1\n            = (bool_node_t *)malloc(sizeof(bool_node_t));\n        if (!p_out_node->content_u.bool_expr.expr1)\n            goto errmem;\n\n        p_out_node->content_u.bool_expr.expr2 = NULL;\n\n        rc = build_set_expr(p_in_set->set_u.op.set1,\n                            p_out_node->content_u.bool_expr.expr1,\n                            p_attr_mask, policies, err_msg);\n        if (rc)\n            goto free_set1;\n    } else {    /* not a singleton: Union or Inter or Negation */\n\n        p_out_node->node_type = NODE_BINARY_EXPR;\n\n        if (p_in_set->set_u.op.oper == SET_OP_UNION)\n            /* entry matches one class OR the other */\n            p_out_node->content_u.bool_expr.bool_op = BOOL_OR;\n        else if 
(p_in_set->set_u.op.oper == SET_OP_INTER)\n            /* entry matches one class AND the other */\n            p_out_node->content_u.bool_expr.bool_op = BOOL_AND;\n        else {\n            strcpy(err_msg, \"Unexpected set operator in expression\");\n            return EINVAL;\n        }\n\n        p_out_node->content_u.bool_expr.owner = 0;\n        p_out_node->content_u.bool_expr.expr1\n            = (bool_node_t *)malloc(sizeof(bool_node_t));\n        if (!p_out_node->content_u.bool_expr.expr1)\n            goto errmem;\n        rc = build_set_expr(p_in_set->set_u.op.set1,\n                            p_out_node->content_u.bool_expr.expr1,\n                            p_attr_mask, policies, err_msg);\n\n        if (rc)\n            goto free_set1;\n\n        p_out_node->content_u.bool_expr.expr2\n            = (bool_node_t *)malloc(sizeof(bool_node_t));\n        if (!p_out_node->content_u.bool_expr.expr2)\n            goto errmem;\n        rc = build_set_expr(p_in_set->set_u.op.set2,\n                            p_out_node->content_u.bool_expr.expr2,\n                            p_attr_mask, policies, err_msg);\n        if (rc)\n            goto free_set2;\n    }\n\n    return 0;\n\n errmem:\n    sprintf(err_msg, \"Could not allocate memory\");\n    return ENOMEM;\n\n free_set2:\n    free(p_out_node->content_u.bool_expr.expr2);\n free_set1:\n    free(p_out_node->content_u.bool_expr.expr1);\n    return rc;\n\n}\n\n/**\n * Build a policy boolean expression from a union/intersection of fileclasses\n */\nint GetSetExpr(config_item_t block, const char *block_name,\n               bool_node_t *p_bool_node, attr_mask_t *p_attr_mask,\n               const policies_t *policies, char *err_msg)\n{\n    generic_item *curr_block = (generic_item *)block;\n    generic_item *subitem;\n    int rc;\n\n    /* initialize attr mask */\n    *p_attr_mask = null_mask;\n\n    /* check it is a block */\n    if (!curr_block || (curr_block->type != TYPE_BLOCK)) {\n        
sprintf(err_msg, \"'%s' is expected to be a block\", block_name);\n        return EINVAL;\n    }\n\n    /* Check the block contains something  */\n    if (!curr_block->item.block.block_content) {\n        sprintf(err_msg, \"'%s' block is empty, line %d\", block_name,\n                rh_config_GetItemLine(block));\n        return ENOENT;\n    }\n\n    /* check bloc content */\n    subitem = curr_block->item.block.block_content;\n\n    if (subitem->type != TYPE_SET) {\n        sprintf(err_msg,\n                \"Union/intersection/negation of classes expected in block '%s', line %d\",\n                block_name, rh_config_GetItemLine((config_item_t) subitem));\n        return EINVAL;\n    }\n\n    if (subitem->next) {\n        sprintf(err_msg,\n                \"A single expression is expected in block '%s', line %d\",\n                block_name, rh_config_GetItemLine((config_item_t) subitem));\n        return EINVAL;\n    }\n\n    /* now we can analyze the union/intersection */\n    rc = build_set_expr(&subitem->item.set, p_bool_node, p_attr_mask,\n                        policies, err_msg);\n    if (rc)\n        sprintf(err_msg + strlen(err_msg), \", line %d\",\n                rh_config_GetItemLine((config_item_t) subitem));\n\n    return rc;\n\n}\n\nconst char *op2str(compare_direction_t comp)\n{\n    switch (comp) {\n    case COMP_GRTHAN:\n        return \">\";\n    case COMP_GRTHAN_EQ:\n        return \">=\";\n    case COMP_LSTHAN:\n        return \"<\";\n    case COMP_LSTHAN_EQ:\n        return \"<=\";\n    case COMP_EQUAL:\n        return \"==\";\n    case COMP_DIFF:\n        return \"<>\";\n    case COMP_LIKE:\n        return \" =~ \";\n    case COMP_UNLIKE:\n        return \" !~ \";\n    case COMP_NONE:\n        return \"?\";\n    }\n    return \"?\";\n}   /* op2str */\n\nstatic int print_condition(const compare_triplet_t *p_triplet, char *out_str,\n                           size_t str_size)\n{\n    char tmp_buff[256];\n\n    switch (p_triplet->crit) 
{\n        /* str values */\n    case CRITERIA_TREE:\n    case CRITERIA_PATH:\n    case CRITERIA_NAME:\n    case CRITERIA_INAME:\n    case CRITERIA_FILECLASS:\n#ifdef _LUSTRE\n    case CRITERIA_POOL:\n#endif\n        return snprintf(out_str, str_size, \"%s %s \\\"%s\\\"\",\n                        criteria2str(p_triplet->crit), op2str(p_triplet->op),\n                        p_triplet->val.str);\n\n    case CRITERIA_TYPE:\n        return snprintf(out_str, str_size, \"%s %s \\\"%s\\\"\",\n                        criteria2str(p_triplet->crit), op2str(p_triplet->op),\n                        type2str(p_triplet->val.type));\n\n        /* int values */\n    case CRITERIA_DEPTH:\n#ifdef _LUSTRE\n    case CRITERIA_OST:\n#endif\n    case CRITERIA_DIRCOUNT:\n    case CRITERIA_NLINK:\n        return snprintf(out_str, str_size, \"%s %s %d\",\n                        criteria2str(p_triplet->crit), op2str(p_triplet->op),\n                        p_triplet->val.integer);\n\n    case CRITERIA_SIZE:\n        FormatFileSize(tmp_buff, 256, p_triplet->val.size);\n        return snprintf(out_str, str_size, \"%s %s %s\",\n                        criteria2str(p_triplet->crit), op2str(p_triplet->op),\n                        tmp_buff);\n\n        /* UID/GID: str or int */\n    case CRITERIA_OWNER:\n    case CRITERIA_GROUP:\n        if (global_config.uid_gid_as_numbers)\n            return snprintf(out_str, str_size, \"%s %s %d\",\n                            criteria2str(p_triplet->crit),\n                            op2str(p_triplet->op), p_triplet->val.integer);\n        else\n            return snprintf(out_str, str_size, \"%s %s \\\"%s\\\"\",\n                            criteria2str(p_triplet->crit),\n                            op2str(p_triplet->op), p_triplet->val.str);\n\n        /* duration values */\n\n    case CRITERIA_LAST_ACCESS:\n    case CRITERIA_LAST_MOD:\n    case CRITERIA_LAST_MDCHANGE:\n    case CRITERIA_CREATION:\n    case CRITERIA_RMTIME:\n        
FormatDurationFloat(tmp_buff, 256, p_triplet->val.duration);\n        return snprintf(out_str, str_size, \"%s %s %s\",\n                        criteria2str(p_triplet->crit), op2str(p_triplet->op),\n                        tmp_buff);\n\n    case CRITERIA_XATTR:\n        return snprintf(out_str, str_size, XATTR_PREFIX \".%s %s %s\",\n                        p_triplet->attr_name, op2str(p_triplet->op),\n                        p_triplet->val.str);\n    default:\n        return -EINVAL;\n    }\n}\n\n/**\n * Print a boolean expression to a string.\n * @return a negative value on error\n *         else, the number of chars written.\n */\nint BoolExpr2str(bool_node_t *p_bool_node, char *out_str, size_t str_size)\n{\n    size_t written = 0;\n    int rc;\n\n    switch (p_bool_node->node_type) {\n    case NODE_UNARY_EXPR:\n\n        /* only BOOL_NOT is supported as unary operator */\n        if (p_bool_node->content_u.bool_expr.bool_op != BOOL_NOT)\n            return -EINVAL;\n        written = snprintf(out_str, str_size, \"NOT (\");\n        rc = BoolExpr2str(p_bool_node->content_u.bool_expr.expr1,\n                          out_str + written, str_size - written);\n        if (rc < 0)\n            return rc;\n        written += rc;\n        written += snprintf(out_str + written, str_size - written, \")\");\n        return written;\n\n    case NODE_BINARY_EXPR:\n        written = snprintf(out_str, str_size, \"(\");\n        rc = BoolExpr2str(p_bool_node->content_u.bool_expr.expr1,\n                          out_str + written, str_size - written);\n        if (rc < 0)\n            return rc;\n        written += rc;\n        if (p_bool_node->content_u.bool_expr.bool_op == BOOL_OR)\n            written +=\n                snprintf(out_str + written, str_size - written, \") OR (\");\n        else\n            written +=\n                snprintf(out_str + written, str_size - written, \") AND (\");\n\n        rc = BoolExpr2str(p_bool_node->content_u.bool_expr.expr2,\n           
               out_str + written, str_size - written);\n        if (rc < 0)\n            return rc;\n        written += rc;\n\n        written += snprintf(out_str + written, str_size - written, \")\");\n        return written;\n\n    case NODE_CONDITION:\n        return print_condition(p_bool_node->content_u.condition, out_str,\n                               str_size);\n\n    case NODE_CONSTANT:\n        return snprintf(out_str, str_size, \"%s\",\n                        bool2str(p_bool_node->content_u.constant));\n    }\n\n    return -EINVAL;\n}\n\n/**\n * Compare 2 boolean expressions\n * @return TRUE if expression structure changed.\n * @return FALSE if they have the same structure,\n * @return  -1 on error.\n */\nint compare_boolexpr(const bool_node_t *expr1, const bool_node_t *expr2)\n{\n    if (expr1->node_type != expr2->node_type)\n        return 1;\n\n    switch (expr1->node_type) {\n    case NODE_UNARY_EXPR:\n        if (expr1->content_u.bool_expr.bool_op !=\n            expr2->content_u.bool_expr.bool_op)\n            return true;\n\n        return compare_boolexpr(expr1->content_u.bool_expr.expr1,\n                                expr2->content_u.bool_expr.expr1);\n\n    case NODE_BINARY_EXPR:\n        if (expr1->content_u.bool_expr.bool_op !=\n            expr2->content_u.bool_expr.bool_op)\n            return true;\n\n        return compare_boolexpr(expr1->content_u.bool_expr.expr1,\n                                expr2->content_u.bool_expr.expr1)\n               || compare_boolexpr(expr1->content_u.bool_expr.expr2,\n                                   expr2->content_u.bool_expr.expr2);\n\n    case NODE_CONDITION:\n        /* compare criteria */\n        if (expr1->content_u.condition->crit !=\n            expr2->content_u.condition->crit)\n            return true;\n\n        /* compare operator, except for custom cmd and xattr */\n        if ((expr1->content_u.condition->crit != CRITERIA_XATTR)\n            && (expr1->content_u.condition->op !=\n     
           expr2->content_u.condition->op))\n            return true;\n\n        /* same structure */\n        return false;\n\n    case NODE_CONSTANT:\n        /* same structure, just a value change */\n        return false;\n    }\n\n    /* should not happen */\n    RBH_BUG(\"Unexpected node_type in boolean expression\");\n    return -1;\n}   /* compare_boolexpr */\n\n#define RELOAD_TAG \"ReloadExpr\"\n\n/**\n * Update the numerical values of a boolean expression.\n * /!\\ compare_boolexpr() must have returned 0 (else, unguarantied behavior).\n * @param tgt Boolean expression to be updated\n * @param src Boolean expression to take values from.\n * @return TRUE if expression values have been changed\n * @return FALSE if nothing has been changed\n */\nbool update_boolexpr(bool_node_t *tgt, const bool_node_t *src)\n{\n    compare_triplet_t *p_triplet1;\n    compare_triplet_t *p_triplet2;\n    char tmp_buff1[256];\n    char tmp_buff2[256];\n    bool rc;\n\n    switch (tgt->node_type) {\n    case NODE_CONSTANT:\n        if (tgt->content_u.constant != src->content_u.constant) {\n            DisplayLog(LVL_EVENT, RELOAD_TAG,\n                       \"Value updated: %s -> %s\",\n                       bool2str(src->content_u.constant),\n                       bool2str(tgt->content_u.constant));\n            tgt->content_u.constant = src->content_u.constant;\n            return true;\n        } else\n            return false;\n\n    case NODE_UNARY_EXPR:\n        return update_boolexpr(tgt->content_u.bool_expr.expr1,\n                               src->content_u.bool_expr.expr1);\n\n    case NODE_BINARY_EXPR:\n        rc = update_boolexpr(tgt->content_u.bool_expr.expr1,\n                             src->content_u.bool_expr.expr1);\n        if (update_boolexpr\n            (tgt->content_u.bool_expr.expr2, src->content_u.bool_expr.expr2))\n            rc = true;\n        return rc;\n\n    case NODE_CONDITION:\n\n        p_triplet1 = tgt->content_u.condition;\n        
p_triplet2 = src->content_u.condition;\n\n        switch (p_triplet1->crit) {\n        case CRITERIA_SIZE:\n            if (p_triplet1->val.size != p_triplet2->val.size) {\n                FormatFileSize(tmp_buff1, 256, p_triplet1->val.size);\n                FormatFileSize(tmp_buff2, 256, p_triplet2->val.size);\n\n                DisplayLog(LVL_EVENT, RELOAD_TAG,\n                           \"Criteria value updated: (%s %s %s) -> (%s %s %s)\",\n                           criteria2str(CRITERIA_SIZE), op2str(p_triplet1->op),\n                           tmp_buff1, criteria2str(CRITERIA_SIZE),\n                           op2str(p_triplet2->op), tmp_buff2);\n                p_triplet1->val.size = p_triplet2->val.size;\n                return true;\n            } else\n                return false;\n\n            /* integer conditions */\n        case CRITERIA_DEPTH:\n#ifdef _LUSTRE\n        case CRITERIA_OST:\n#endif\n        case CRITERIA_DIRCOUNT:\n            if (p_triplet1->val.integer != p_triplet2->val.integer) {\n                DisplayLog(LVL_EVENT, RELOAD_TAG,\n                           \"Criteria value updated: (%s %s %d) -> (%s %s %d)\",\n                           criteria2str(p_triplet1->crit),\n                           op2str(p_triplet1->op), p_triplet1->val.integer,\n                           criteria2str(p_triplet2->crit),\n                           op2str(p_triplet2->op), p_triplet2->val.integer);\n                p_triplet1->val.integer = p_triplet2->val.integer;\n                return true;\n            } else\n                return false;\n\n            /* duration conditions */\n        case CRITERIA_LAST_ACCESS:\n        case CRITERIA_LAST_MOD:\n        case CRITERIA_LAST_MDCHANGE:\n        case CRITERIA_CREATION:\n            if (p_triplet1->val.duration != p_triplet2->val.duration) {\n                FormatDurationFloat(tmp_buff1, 256, p_triplet1->val.duration);\n                FormatDurationFloat(tmp_buff2, 256, 
p_triplet2->val.duration);\n                DisplayLog(LVL_EVENT, RELOAD_TAG,\n                           \"Criteria value updated: (%s %s %s) -> (%s %s %s)\",\n                           criteria2str(p_triplet1->crit),\n                           op2str(p_triplet1->op), tmp_buff1,\n                           criteria2str(p_triplet2->crit),\n                           op2str(p_triplet2->op), tmp_buff2);\n                p_triplet1->val.duration = p_triplet2->val.duration;\n                return true;\n            } else\n                return false;\n\n        case CRITERIA_TYPE:\n            if (p_triplet1->val.type != p_triplet2->val.type) {\n                DisplayLog(LVL_EVENT, RELOAD_TAG,\n                           \"Criteria value updated: (%s %s %s) -> (%s %s %s)\",\n                           criteria2str(p_triplet1->crit),\n                           op2str(p_triplet1->op),\n                           type2str(p_triplet1->val.type),\n                           criteria2str(p_triplet2->crit),\n                           op2str(p_triplet2->op),\n                           type2str(p_triplet2->val.type));\n                p_triplet1->val.type = p_triplet2->val.type;\n                return true;\n            } else\n                return false;\n\n            /* unmodifiable conditions */\n        case CRITERIA_TREE:\n        case CRITERIA_PATH:\n        case CRITERIA_NAME:\n        case CRITERIA_INAME:\n        case CRITERIA_FILECLASS:\n#ifdef _LUSTRE\n        case CRITERIA_POOL:\n#endif\n            if (strcmp(p_triplet1->val.str, p_triplet2->val.str)) {\n                DisplayLog(LVL_MAJOR, RELOAD_TAG,\n                           \"Condition changed on attribute '%s' but this cannot be modified dynamically\",\n                           criteria2str(p_triplet1->crit));\n            }\n            return false;\n\n        case CRITERIA_OWNER:\n        case CRITERIA_GROUP:\n            if ((global_config.uid_gid_as_numbers &&\n                 
p_triplet1->val.integer != p_triplet2->val.integer) ||\n                (!global_config.uid_gid_as_numbers &&\n                 strcmp(p_triplet1->val.str, p_triplet2->val.str))) {\n                DisplayLog(LVL_MAJOR, RELOAD_TAG,\n                           \"Condition changed on attribute '%s' but this cannot be modified dynamically\",\n                           criteria2str(p_triplet1->crit));\n            }\n            return false;\n\n        case CRITERIA_XATTR:\n            if (strcmp(p_triplet1->val.str, p_triplet2->val.str)\n                || strcmp(p_triplet1->attr_name, p_triplet2->attr_name)) {\n                DisplayLog(LVL_MAJOR, RELOAD_TAG,\n                           \"xattr condition changed, but it cannot be modified dynamically\");\n            }\n            return false;\n\n        default:\n            DisplayLog(LVL_CRIT, RELOAD_TAG,\n                       \"Unsupported attribute: %s\",\n                       criteria2str(p_triplet1->crit));\n        }\n\n        break;\n    }\n\n    /* should not happen */\n    RBH_BUG(\"Unexpected node_type in boolean expression\");\n    return -1;\n}   /* update_boolexpr */\n"
  },
  {
    "path": "src/cfg_parsing/rbh_cfg.c",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2008, 2009, 2010 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n/**\n *  Module for configuration management and parsing.\n */\n#ifdef HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n\n#include \"rbh_cfg.h\"\n#include \"rbh_misc.h\"\n#include \"rbh_logs.h\"\n#include \"analyze.h\"\n#include <errno.h>\n\n/* get config handlers definition */\n#include \"global_config.h\"\n#include \"update_params.h\"\n#include \"entry_processor.h\"\n#include \"fs_scan_main.h\"\n#include \"chglog_reader.h\"\n#include \"policy_rules.h\"\n#include \"policy_run.h\"\n#include \"status_manager.h\"\n\nchar config_file[RBH_PATH_MAX] = \"\";\n\nstruct mod_cfgs {\n    mod_cfg_funcs_t *funcs;\n    int flags;\n} cfglist[] = {\n    {&global_cfg_hdlr,     MODULE_MASK_ALWAYS},\n    {&log_cfg_hdlr,        MODULE_MASK_ALWAYS},\n    {&updt_params_hdlr,    MODULE_MASK_ALWAYS},\n    {&lmgr_cfg_hdlr,       MODULE_MASK_ALWAYS},\n    {&entry_proc_cfg_hdlr, MODULE_MASK_ENTRY_PROCESSOR},\n    {&fs_scan_cfg_hdlr,    MODULE_MASK_FS_SCAN},\n#ifdef HAVE_CHANGELOGS\n    {&cl_reader_cfg_hdlr,  MODULE_MASK_EVENT_HDLR},\n#endif\n    /* TODO manage fileclasses separately? 
*/\n    /* must always be called before smi_cfg, run_cfg */\n    {&policies_cfg_hdlr,   MODULE_MASK_ALWAYS},\n    {&smi_cfg_hdlr,        MODULE_MASK_ALWAYS},\n    {&policy_run_cfg_hdlr, MODULE_MASK_POLICY_RUN},\n\n    {NULL, 0}\n};\n\n#define RELOAD_TAG \"ReloadCfg\"\n\nstatic int rbh_cfg_read_set(int module_mask, char *file_path, char *err_msg_out,\n                            bool reload)\n{\n    config_file_t syntax_tree;\n    int           rc, rc_final = 0;\n    char          msg_buf[2048] = \"\";\n    const struct mod_cfgs *p_curr;\n\n    /* First, Parse the configuration file */\n    syntax_tree = rh_config_ParseFile(file_path);\n\n    if (syntax_tree == NULL) {\n        strcpy(err_msg_out, rh_config_GetErrorMsg());\n        return EINVAL;\n    }\n#ifdef _DEBUG_PARSING\n    rh_config_Print(stdout, syntax_tree);\n#endif\n\n    /* Set defaults to the structure, then load values from syntax tree */\n    for (p_curr = &cfglist[0]; p_curr->funcs != NULL; p_curr++) {\n        void *cfg;\n\n        /* Only initialize modules with flag MODULE_MASK_ALWAYS\n         * or matching 'module_mask' parameter. 
*/\n        if ((p_curr->flags != MODULE_MASK_ALWAYS)\n            && !(p_curr->flags & module_mask))\n            continue;\n\n        cfg = p_curr->funcs->new();\n        if (cfg == NULL) {\n            rc_final = ENOMEM;\n            sprintf(err_msg_out,\n                    \"Not enough memory to allocate configuration for %s\",\n                    p_curr->funcs->module_name);\n            if (reload) {\n                DisplayLog(LVL_CRIT, RELOAD_TAG, \"%s\", err_msg_out);\n                continue;\n            } else\n                goto config_free;\n        }\n\n        p_curr->funcs->set_default(cfg);\n\n        DisplayLog(LVL_DEBUG, \"CfgLoader\", \"Loading %s config\",\n                   p_curr->funcs->module_name);\n\n        rc = p_curr->funcs->read(syntax_tree, cfg, msg_buf);\n        if (rc != 0) {\n            rc_final = rc;\n            sprintf(err_msg_out,\n                    \"Error %d reading %s configuration:\\n%s\",\n                    rc, p_curr->funcs->module_name, msg_buf);\n\n            if (reload) {\n                DisplayLog(LVL_CRIT, RELOAD_TAG, \"%s\", err_msg_out);\n                continue;\n            } else\n                goto config_free;\n        }\n\n        rc = p_curr->funcs->set_config(cfg, reload);\n        if (rc != 0) {\n            rc_final = rc;\n            sprintf(err_msg_out,\n                    \"Error %d setting %s configuration:\\n%s\",\n                    rc, p_curr->funcs->module_name, msg_buf);\n            p_curr->funcs->free(cfg);\n\n            if (reload) {\n                DisplayLog(LVL_CRIT, RELOAD_TAG, \"%s\", err_msg_out);\n                continue;\n            } else\n                goto config_free;\n        }\n        /* When reloading, the configuration can be freed\n         * as each module copy new values in the old config structure.\n         */\n        if (reload)\n            p_curr->funcs->free(cfg);\n        else\n            /* just free the top level handler */\n            
free(cfg);\n    }\n\n config_free:\n    /* free config file resources */\n    rh_config_Free(syntax_tree);\n\n    return rc_final;\n}\n\n/**\n * Read robinhood's configuration file and fill config struct.\n * if everything is OK, returns 0 and fills the structure\n * else, returns an error code and sets a contextual error message\n * in err_msg_out.\n */\nint rbh_cfg_load(int module_mask, char *file_path, char *err_msg_out)\n{\n    rh_strncpy(config_file, file_path, sizeof(config_file));\n    return rbh_cfg_read_set(module_mask, file_path, err_msg_out, false);\n}\n\n/**\n * Reload robinhood's configuration file (the one used for last call to rbh_cfg_load())\n * and change only parameters that can be modified on the fly.\n */\nint rbh_cfg_reload(int curr_module_mask)\n{\n    char tmp[2048];\n\n    return rbh_cfg_read_set(curr_module_mask, config_file, tmp, true);\n}\n\n/* returns the path to process config file */\nconst char *config_file_path(void)\n{\n    return config_file;\n}\n\n/**\n * Write a documented template of configuration file.\n * returns 0 on success, else it returns a posix error code.\n */\nint rbh_cfg_write_template(FILE *stream)\n{\n    const struct mod_cfgs *p_module;\n\n    fprintf(stream, \"##########################################\\n\");\n    fprintf(stream, \"# Robinhood configuration file template  #\\n\");\n    fprintf(stream, \"##########################################\\n\\n\");\n\n    for (p_module = &cfglist[0]; p_module->funcs != NULL; p_module++) {\n        fprintf(stream, \"# %s configuration\\n\", p_module->funcs->module_name);\n        p_module->funcs->write_template(stream);\n        fprintf(stream, \"\\n\");\n    }\n\n    return 0;\n}\n\nint rbh_cfg_write_default(FILE *stream)\n{\n    const struct mod_cfgs *p_module;\n\n    fprintf(stream, \"# Default configuration values\\n\");\n\n    for (p_module = &cfglist[0]; p_module->funcs != NULL; p_module++) {\n        p_module->funcs->write_default(stream);\n        fprintf(stream, 
\"\\n\");\n    }\n\n    return 0;\n}\n\n#define INDENT_STEP 4\nvoid print_begin_block(FILE *output, unsigned int indent,\n                       const char *blockname, const char *id)\n{\n    char *indent_char = (indent ? \" \" : \"\");\n\n    if (id)\n        fprintf(output, \"%*s%s\\t%s\\n\", indent * INDENT_STEP, indent_char,\n                blockname, id);\n    else\n        fprintf(output, \"%*s%s\\n\", indent * INDENT_STEP, indent_char,\n                blockname);\n    fprintf(output, \"%*s{\\n\", indent * INDENT_STEP, indent_char);\n}\n\nvoid print_end_block(FILE *output, unsigned int indent)\n{\n    char *indent_char = (indent ? \" \" : \"\");\n    fprintf(output, \"%*s}\\n\", indent * INDENT_STEP, indent_char);\n}\n\nvoid print_line(FILE *output, unsigned int indent, const char *format, ...)\n{\n    va_list arglist;\n    char *indent_char = (indent ? \" \" : \"\");\n\n    fprintf(output, \"%*s\", indent * INDENT_STEP, indent_char);\n\n    va_start(arglist, format);\n    vfprintf(output, format, arglist);\n    va_end(arglist);\n\n    fprintf(output, \"\\n\");\n}\n"
  },
  {
    "path": "src/cfg_parsing/rbh_cfg_helpers.c",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2008, 2009, 2010 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n/**\n *  Module for configuration management and parsing.\n */\n#ifdef HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n\n#include \"rbh_cfg_helpers.h\"\n#include \"rbh_misc.h\"\n#include \"rbh_logs.h\"\n\nstatic inline bool is_stdname(const char *name)\n{\n    return (!strcasecmp(name, \"stdout\")\n            || !strcasecmp(name, \"stderr\")\n            || !strcasecmp(name, \"syslog\"));\n}\n\n/** get param with the given name and check for existence and unicity */\nstatic int get_cfg_param(config_item_t block, const char *block_name,\n                         const char *var_name, param_flags_t flags,\n                         char **pname, char **pvalue, int *pextra,\n                         config_item_t *pitem, char *err_msg)\n{\n    bool unique = true;\n    int rc;\n\n    *pitem = rh_config_GetItemByName(block, var_name, &unique);\n    if (!*pitem) {\n        if (flags & PFLG_MANDATORY)\n            sprintf(err_msg,\n                    \"Missing mandatory parameter '%s' in block '%s', line %d\",\n                    var_name, block_name, rh_config_GetItemLine(block));\n        /* return ENOENT in any case */\n        return ENOENT;\n    } else if (!unique) {\n        sprintf(err_msg,\n                \"Duplicate definition of parameter '%s' found in block '%s', line %d.\",\n                var_name, block_name, rh_config_GetItemLine(*pitem));\n        return EEXIST;\n    }\n\n    rc = rh_config_GetKeyValue(*pitem, pname, pvalue, pextra);\n    if (rc)\n        sprintf(err_msg,\n                
\"Error retrieving parameter value for '%s::%s', line %d:\\n%s\",\n                block_name, var_name, rh_config_GetItemLine(*pitem),\n                rh_config_GetErrorMsg());\n    return rc;\n}\n\n/**\n * Misc. tools for config parsing\n */\nint GetStringParam(config_item_t block, const char *block_name,\n                   const char *var_name, param_flags_t flags, char *target,\n                   unsigned int target_size, char ***extra_args_tab,\n                   unsigned int *nb_extra_args, char *err_msg)\n{\n    config_item_t curr_item;\n    int rc;\n    int extra = 0;\n    char *name;\n    char *value;\n    gsize sz;\n\n    err_msg[0] = '\\0';\n\n    if (nb_extra_args)\n        *nb_extra_args = 0;\n    if (extra_args_tab)\n        *extra_args_tab = NULL;\n\n    rc = get_cfg_param(block, block_name, var_name, flags, &name, &value,\n                       &extra, &curr_item, err_msg);\n    if (rc)\n        return rc;\n\n    sz = g_strlcpy(target, value, target_size);\n    if (sz >= target_size) {\n        sprintf(err_msg, \"Option too long for parameter '%s::%s', line %d\",\n                block_name, var_name, rh_config_GetItemLine(curr_item));\n        return EINVAL;\n    }\n\n    if (extra) {\n        if (!extra_args_tab || !nb_extra_args) {\n            sprintf(err_msg,\n                    \"Unexpected options for parameter '%s::%s', line %d\",\n                    block_name, var_name, rh_config_GetItemLine(curr_item));\n            return EINVAL;\n        } else {\n            *nb_extra_args = rh_config_GetExtraArgs(curr_item, extra_args_tab);\n        }\n    }\n\n    /* checks */\n\n    /* empty string? */\n    if ((flags & PFLG_NOT_EMPTY) && EMPTY_STRING(target)) {\n        sprintf(err_msg, \"Unexpected empty parameter '%s::%s', line %d\",\n                block_name, var_name, rh_config_GetItemLine(curr_item));\n        return EINVAL;\n    }\n\n    /* are stdio names allowed ? 
*/\n    if ((flags & PFLG_STDIO_ALLOWED) && is_stdname(target))\n        return 0;\n\n    if ((flags & PFLG_ABSOLUTE_PATH) && !IS_ABSOLUTE_PATH(target)) {\n        sprintf(err_msg,\n                \"Absolute path expected for parameter '%s::%s', line %d\",\n                block_name, var_name, rh_config_GetItemLine(curr_item));\n        return EINVAL;\n    }\n\n    if ((flags & PFLG_NO_WILDCARDS) && WILDCARDS_IN(target)) {\n        sprintf(err_msg, \"Wildcards are not allowed in '%s::%s', line %d\",\n                block_name, var_name, rh_config_GetItemLine(curr_item));\n        return EINVAL;\n    }\n\n    if (flags & PFLG_MAIL) {\n        char *arob = strchr(target, '@');\n\n        /* Check there is an arobase, and this arobase has text before and\n         * after. */\n        if ((arob == NULL) || (arob == target) || (*(arob + 1) == '\\0')) {\n            sprintf(err_msg, \"Invalid mail address in '%s::%s', line %d\",\n                    block_name, var_name, rh_config_GetItemLine(curr_item));\n            return EINVAL;\n        }\n    }\n\n    if ((flags & PFLG_REMOVE_FINAL_SLASH) && FINAL_SLASH(target))\n        REMOVE_FINAL_SLASH(target);\n\n    return 0;\n\n}\n\nint GetCommandParam(config_item_t block, const char *block_name,\n                    const char *var_name, param_flags_t flags, char ***target,\n                    char ***extra_args_tab, unsigned int *nb_extra_args,\n                    char *err_msg)\n{\n    config_item_t curr_item;\n    int rc, ac;\n    int extra = 0;\n    char *name;\n    char *value;\n    GError *err_desc;\n\n    err_msg[0] = '\\0';\n\n    if (nb_extra_args)\n        *nb_extra_args = 0;\n    if (extra_args_tab)\n        *extra_args_tab = NULL;\n\n    rc = get_cfg_param(block, block_name, var_name, flags, &name, &value,\n                       &extra, &curr_item, err_msg);\n    if (rc)\n        return rc;\n\n    /* Early check */\n    if ((flags & PFLG_NO_WILDCARDS) && WILDCARDS_IN(value)) {\n        sprintf(err_msg, 
\"Wildcards are not allowed in '%s::%s', line %d\",\n                block_name, var_name, rh_config_GetItemLine(curr_item));\n        return EINVAL;\n    }\n\n    /* free previous array */\n    if (*target != NULL) {\n        g_strfreev(*target);\n        *target = NULL;\n    }\n\n    /* Split argv */\n    if (value[0] != '\\0') {\n        rc = g_shell_parse_argv(value, &ac, target, &err_desc);\n        if (!rc) {\n            sprintf(err_msg, \"Cannot parse '%s': %s\", value, err_desc->message);\n            g_error_free(err_desc);\n            return EINVAL;\n        }\n        if (ac == 0) {\n            g_strfreev(*target);\n            *target = NULL;\n        }\n    }\n\n    if (extra) {\n        if (!extra_args_tab || !nb_extra_args) {\n            sprintf(err_msg,\n                    \"Unexpected options for parameter '%s::%s', line %d\",\n                    block_name, var_name, rh_config_GetItemLine(curr_item));\n            return EINVAL;\n        } else {\n            *nb_extra_args = rh_config_GetExtraArgs(curr_item, extra_args_tab);\n        }\n    }\n\n    /* Post checks */\n\n    /* empty string? 
*/\n    if ((flags & PFLG_NOT_EMPTY) && *target == NULL) {\n        sprintf(err_msg, \"Unexpected empty parameter '%s::%s', line %d\",\n                block_name, var_name, rh_config_GetItemLine(curr_item));\n        return EINVAL;\n    }\n\n    return 0;\n\n}\n\nint GetBoolParam(config_item_t block, const char *block_name,\n                 const char *var_name, param_flags_t flags, bool *target,\n                 char ***extra_args_tab, unsigned int *nb_extra_args,\n                 char *err_msg)\n{\n    config_item_t curr_item;\n    int rc, extra;\n    char *name;\n    char *value;\n    int tmp_bool;\n\n    err_msg[0] = '\\0';\n\n    rc = get_cfg_param(block, block_name, var_name, flags, &name, &value,\n                       &extra, &curr_item, err_msg);\n    if (rc)\n        return rc;\n\n    tmp_bool = str2bool(value);\n    if (tmp_bool == -1) {\n        sprintf(err_msg,\n                \"Invalid value for '%s::%s', line %d: boolean expected (0, 1, true, false, yes, no, enabled, disabled)\",\n                block_name, var_name, rh_config_GetItemLine(curr_item));\n        return EINVAL;\n    }\n    *target = (tmp_bool != 0);\n\n    if (extra) {\n        if (!extra_args_tab || !nb_extra_args) {\n            sprintf(err_msg,\n                    \"Unexpected options for parameter '%s::%s', line %d\",\n                    block_name, var_name, rh_config_GetItemLine(curr_item));\n            return EINVAL;\n        } else {\n            *nb_extra_args = rh_config_GetExtraArgs(curr_item, extra_args_tab);\n        }\n    }\n\n    return 0;\n}\n\n/**\n *  Retrieve a duration parameter and check its format\n *  @return 0 on success\n *          ENOENT if the parameter does not exist in the block\n *          EINVAL if the parameter does not satisfy restrictions\n */\nint GetDurationParam(config_item_t block, const char *block_name,\n                     const char *var_name, param_flags_t flags, time_t *target,\n                     char ***extra_args_tab,\n      
               unsigned int *nb_extra_args, char *err_msg)\n{\n    config_item_t curr_item;\n    int rc, extra;\n    time_t timeval;\n    char *name;\n    char *value;\n\n    err_msg[0] = '\\0';\n\n    if (nb_extra_args)\n        *nb_extra_args = 0;\n    if (extra_args_tab)\n        *extra_args_tab = NULL;\n\n    rc = get_cfg_param(block, block_name, var_name, flags, &name, &value,\n                       &extra, &curr_item, err_msg);\n    if (rc)\n        return rc;\n\n    timeval = str2duration(value);\n    if (timeval == -1) {\n        sprintf(err_msg,\n                \"Invalid value for '%s::%s', line %d: duration expected. Eg: 10s\",\n                block_name, var_name, rh_config_GetItemLine(curr_item));\n        return EINVAL;\n    }\n\n    if ((flags & PFLG_POSITIVE) && (timeval < 0)) {\n        sprintf(err_msg, \"Positive value expected for '%s::%s', line %d.\",\n                block_name, var_name, rh_config_GetItemLine(curr_item));\n        return EINVAL;\n    }\n    if ((flags & PFLG_NOT_NULL) && (timeval == 0)) {\n        sprintf(err_msg, \"'%s::%s' must not be null, line %d.\", block_name,\n                var_name, rh_config_GetItemLine(curr_item));\n        return EINVAL;\n    }\n\n    *target = timeval;\n\n    if (extra) {\n        if (!extra_args_tab || !nb_extra_args) {\n            sprintf(err_msg,\n                    \"Unexpected options for parameter '%s::%s', line %d\",\n                    block_name, var_name, rh_config_GetItemLine(curr_item));\n            return EINVAL;\n        } else {\n            *nb_extra_args = rh_config_GetExtraArgs(curr_item, extra_args_tab);\n        }\n    }\n\n    return 0;\n}\n\n/**\n *  Retrieve a size parameter and check its format\n *  @return 0 on success\n *          ENOENT if the parameter does not exist in the block\n *          EINVAL if the parameter does not satisfy restrictions\n */\nint GetSizeParam(config_item_t block, const char *block_name,\n                 const char *var_name, 
param_flags_t flags,\n                 unsigned long long *target, char ***extra_args_tab,\n                 unsigned int *nb_extra_args, char *err_msg)\n{\n    config_item_t curr_item;\n    int rc;\n    int extra = 0;\n    unsigned long long sizeval;\n    char *name;\n    char *value;\n\n    err_msg[0] = '\\0';\n\n    if (nb_extra_args)\n        *nb_extra_args = 0;\n    if (extra_args_tab)\n        *extra_args_tab = NULL;\n\n    rc = get_cfg_param(block, block_name, var_name, flags, &name, &value,\n                       &extra, &curr_item, err_msg);\n    if (rc)\n        return rc;\n\n    sizeval = str2size(value);\n    if (sizeval == (unsigned long long)-1) {\n        sprintf(err_msg,\n                \"Invalid value for '%s::%s', line %d: size expected. Eg: 10MB\",\n                block_name, var_name, rh_config_GetItemLine(curr_item));\n        return EINVAL;\n    }\n\n    if ((flags & PFLG_NOT_NULL) && (sizeval == 0)) {\n        sprintf(err_msg, \"'%s::%s' must not be null, line %d.\", block_name,\n                var_name, rh_config_GetItemLine(curr_item));\n        return EINVAL;\n    }\n\n    *target = sizeval;\n\n    if (extra) {\n        if (!extra_args_tab || !nb_extra_args) {\n            sprintf(err_msg,\n                    \"Unexpected options for parameter '%s::%s', line %d\",\n                    block_name, var_name, rh_config_GetItemLine(curr_item));\n            return EINVAL;\n        } else {\n            *nb_extra_args = rh_config_GetExtraArgs(curr_item, extra_args_tab);\n        }\n    }\n\n    return 0;\n}\n\n/**\n *  Retrieve an integer parameter and check its format\n *  @return 0 on success\n *          ENOENT if the parameter does not exist in the block\n *          EINVAL if the parameter does not satisfy restrictions\n */\nint GetIntParam(config_item_t block, const char *block_name,\n                const char *var_name, param_flags_t flags, int *target,\n                char ***extra_args_tab, unsigned int *nb_extra_args,\n         
       char *err_msg)\n{\n    config_item_t curr_item;\n    int rc, extra, intval, nb_read;\n    char *name;\n    char *value;\n    char tmpbuf[256];\n\n    err_msg[0] = '\\0';\n\n    if (nb_extra_args)\n        *nb_extra_args = 0;\n    if (extra_args_tab)\n        *extra_args_tab = NULL;\n\n    rc = get_cfg_param(block, block_name, var_name, flags, &name, &value,\n                       &extra, &curr_item, err_msg);\n    if (rc)\n        return rc;\n\n    nb_read = sscanf(value, \"%d%256s\", &intval, tmpbuf);\n    if (nb_read < 1) {\n        sprintf(err_msg,\n                \"Invalid value for '%s::%s', line %d: integer expected.\",\n                block_name, var_name, rh_config_GetItemLine(curr_item));\n        return EINVAL;\n    }\n    if ((nb_read > 1) && (tmpbuf[0] != '\\0')) {\n        sprintf(err_msg,\n                \"Invalid value for '%s::%s', line %d: extra characters '%s' found after integer %d.\",\n                block_name, var_name, rh_config_GetItemLine(curr_item), tmpbuf,\n                intval);\n        return EINVAL;\n    }\n\n    if ((flags & PFLG_POSITIVE) && (intval < 0)) {\n        sprintf(err_msg, \"Positive value expected for '%s::%s', line %d.\",\n                block_name, var_name, rh_config_GetItemLine(curr_item));\n        return EINVAL;\n    }\n    if ((flags & PFLG_NOT_NULL) && (intval == 0)) {\n        sprintf(err_msg, \"'%s::%s' must not be null, line %d.\", block_name,\n                var_name, rh_config_GetItemLine(curr_item));\n        return EINVAL;\n    }\n\n    *target = intval;\n\n    if (extra) {\n        if (!extra_args_tab || !nb_extra_args) {\n            sprintf(err_msg,\n                    \"Unexpected options for parameter '%s::%s', line %d\",\n                    block_name, var_name, rh_config_GetItemLine(curr_item));\n            return EINVAL;\n        } else {\n            *nb_extra_args = rh_config_GetExtraArgs(curr_item, extra_args_tab);\n        }\n    }\n\n    return 0;\n}\n\n/**\n *  Retrieve a 
long integer parameter and check its format.\n *  (a suffix can be used in config file).\n *  @return 0 on success\n *          ENOENT if the parameter does not exist in the block\n *          EINVAL if the parameter does not satisfy restrictions\n */\nint GetInt64Param(config_item_t block, const char *block_name,\n                  const char *var_name, param_flags_t flags, uint64_t *target,\n                  char ***extra_args_tab, unsigned int *nb_extra_args,\n                  char *err_msg)\n{\n    config_item_t curr_item;\n    int rc, extra, nb_read;\n    uint64_t intval;\n    char *name;\n    char *value;\n    char tmpbuf[256];\n\n    err_msg[0] = '\\0';\n\n    if (nb_extra_args)\n        *nb_extra_args = 0;\n    if (extra_args_tab)\n        *extra_args_tab = NULL;\n\n    rc = get_cfg_param(block, block_name, var_name, flags, &name, &value,\n                       &extra, &curr_item, err_msg);\n    if (rc)\n        return rc;\n\n    nb_read = sscanf(value, \"%\" SCNu64 \"%256s\", &intval, tmpbuf);\n    if (nb_read < 1) {\n        sprintf(err_msg,\n                \"Invalid value for '%s::%s', line %d: integer expected.\",\n                block_name, var_name, rh_config_GetItemLine(curr_item));\n        return EINVAL;\n    }\n    if ((nb_read > 1) && (tmpbuf[0] != '\\0')) {\n        /* check suffix */\n        if (!strcasecmp(tmpbuf, \"k\"))\n            intval *= 1000ULL;  /* thousand */\n        else if (!strcasecmp(tmpbuf, \"M\"))\n            intval *= 1000000ULL;   /* million */\n        else if (!strcasecmp(tmpbuf, \"G\"))\n            intval *= 1000000000ULL;    /* billion */\n        else if (!strcasecmp(tmpbuf, \"T\"))\n            intval *= 1000000000000ULL; /* trillion */\n        else {\n            sprintf(err_msg, \"Invalid suffix for '%s::%s', line %d: '%s'. 
\"\n                    \"Only 'k', 'M', 'G' or 'T' are allowed.\",\n                    block_name, var_name, rh_config_GetItemLine(curr_item),\n                    tmpbuf);\n            return EINVAL;\n        }\n    }\n\n    if ((flags & PFLG_NOT_NULL) && (intval == 0)) {\n        sprintf(err_msg, \"'%s::%s' must not be null, line %d.\", block_name,\n                var_name, rh_config_GetItemLine(curr_item));\n        return EINVAL;\n    }\n\n    *target = intval;\n\n    if (extra) {\n        if (!extra_args_tab || !nb_extra_args) {\n            sprintf(err_msg,\n                    \"Unexpected options for parameter '%s::%s', line %d\",\n                    block_name, var_name, rh_config_GetItemLine(curr_item));\n            return EINVAL;\n        } else {\n            *nb_extra_args = rh_config_GetExtraArgs(curr_item, extra_args_tab);\n        }\n    }\n\n    return 0;\n}\n\n/**\n *  Retrieve a float parameter and check its format\n *  @return 0 on success\n *          ENOENT if the parameter does not exist in the block\n *          EINVAL if the parameter does not satisfy restrictions\n */\nint GetFloatParam(config_item_t block, const char *block_name,\n                  const char *var_name, param_flags_t flags, double *target,\n                  char ***extra_args_tab, unsigned int *nb_extra_args,\n                  char *err_msg)\n{\n    config_item_t curr_item;\n    int rc, extra, nb_read;\n    double val;\n    char *name;\n    char *value;\n    char tmpbuf[256];\n\n    err_msg[0] = '\\0';\n\n    if (nb_extra_args)\n        *nb_extra_args = 0;\n    if (extra_args_tab)\n        *extra_args_tab = NULL;\n\n    rc = get_cfg_param(block, block_name, var_name, flags, &name, &value,\n                       &extra, &curr_item, err_msg);\n    if (rc)\n        return rc;\n\n    nb_read = sscanf(value, \"%lf%255s\", &val, tmpbuf);\n    if (nb_read < 1) {\n        sprintf(err_msg, \"Invalid value for '%s::%s', line %d: float expected.\",\n                
block_name, var_name, rh_config_GetItemLine(curr_item));\n        return EINVAL;\n    }\n    if (nb_read > 1) {\n        /* no postfix sign allowed, or '%' allowed */\n        if ((!(flags & PFLG_ALLOW_PCT_SIGN) && (tmpbuf[0] != '\\0'))\n             ||((flags & PFLG_ALLOW_PCT_SIGN) && (strcmp(tmpbuf, \"%\") != 0))) {\n            sprintf(err_msg,\n                    \"Invalid value for '%s::%s', line %d: extra characters '%s'\"\n                    \" found after float %.2f.\",\n                    block_name, var_name, rh_config_GetItemLine(curr_item),\n                    tmpbuf, val);\n            return EINVAL;\n        }\n    }\n\n    if ((flags & PFLG_POSITIVE) && (val < 0.0)) {\n        sprintf(err_msg, \"Positive value expected for '%s::%s', line %d.\",\n                block_name, var_name, rh_config_GetItemLine(curr_item));\n        return EINVAL;\n    }\n    if ((flags & PFLG_NOT_NULL) && (val == 0.0)) {\n        sprintf(err_msg, \"'%s::%s' must not be null, line %d.\", block_name,\n                var_name, rh_config_GetItemLine(curr_item));\n        return EINVAL;\n    }\n\n    *target = val;\n\n    if (extra) {\n        if (!extra_args_tab || !nb_extra_args) {\n            sprintf(err_msg,\n                    \"Unexpected options for parameter '%s::%s', line %d\",\n                    block_name, var_name, rh_config_GetItemLine(curr_item));\n            return EINVAL;\n        } else {\n            *nb_extra_args = rh_config_GetExtraArgs(curr_item, extra_args_tab);\n        }\n    }\n\n    return 0;\n\n}\n\n/**\n * Check that no unknown parameter or block is found.\n * @param param_array NULL terminated array of allowed parameters.\n */\nvoid CheckUnknownParameters(config_item_t block, const char *block_name,\n                            const char *const *param_array)\n{\n    int i, j;\n\n    for (i = 0; i < rh_config_GetNbItems(block); i++) {\n        config_item_t curr_item = rh_config_GetItemByIndex(block, i);\n\n        /* no warning if the 
value or block was queried */\n        if (rh_config_IsRead(curr_item))\n            continue;\n\n        if (rh_config_ItemType(curr_item) == CONFIG_ITEM_VAR) {\n            char *name;\n            char *value;\n            int args_flg;\n            bool found = false;\n\n            if (rh_config_GetKeyValue(curr_item, &name, &value,\n                                      &args_flg) == 0) {\n                for (j = 0; param_array[j] != NULL; j++) {\n                    if (!strcasecmp(param_array[j], name)) {\n                        found = true;\n                        break;\n                    }\n                }\n\n                if (!found)\n                    DisplayLog(LVL_CRIT, \"Config Check\",\n                               \"WARNING: unknown parameter '%s' in block '%s' line %d\",\n                               name, block_name,\n                               rh_config_GetItemLine(curr_item));\n            }\n        } else if (rh_config_ItemType(curr_item) == CONFIG_ITEM_BLOCK) {\n            char *name;\n            bool found = false;\n\n            name = rh_config_GetBlockName(curr_item);\n\n            if (name != NULL) {\n                for (j = 0; param_array[j] != NULL; j++) {\n                    if (!strcasecmp(param_array[j], name)) {\n                        found = true;\n                        break;\n                    }\n                }\n\n                if (!found)\n                    DisplayLog(LVL_CRIT, \"Config Check\",\n                               \"WARNING: unknown block '%s' as sub-block of '%s' line %d\",\n                               name, block_name,\n                               rh_config_GetItemLine(curr_item));\n            }\n        }\n    }\n}\n\n#define cfg_is_err(_rc, _flgs) (((_rc) != 0 && (_rc) != ENOENT) || \\\n                               ((_rc) == ENOENT && ((_flgs) & PFLG_MANDATORY)))\n\nint read_scalar_params(config_item_t block, const char *block_name,\n                       const 
cfg_param_t *params, char *msgout)\n{\n    int i;\n    int rc = 0;\n\n    /* read all expected parameters */\n    for (i = 0; params[i].name != NULL; i++) {\n        switch (params[i].type) {\n        case PT_STRING:\n            rc = GetStringParam(block, block_name, params[i].name,\n                                params[i].flags, (char *)params[i].ptr,\n                                params[i].ptrsize, NULL, NULL, msgout);\n            if cfg_is_err\n                (rc, params[i].flags)\n                    return rc;\n            break;\n\n        case PT_CMD:\n            rc = GetCommandParam(block, block_name, params[i].name,\n                                 params[i].flags, (char ***)params[i].ptr,\n                                 NULL, NULL, msgout);\n            if cfg_is_err\n                (rc, params[i].flags)\n                    return rc;\n            break;\n\n        case PT_BOOL:\n            rc = GetBoolParam(block, block_name, params[i].name,\n                              params[i].flags, (bool *) params[i].ptr,\n                              NULL, NULL, msgout);\n            if cfg_is_err\n                (rc, params[i].flags)\n                    return rc;\n            break;\n\n        case PT_DURATION:\n            rc = GetDurationParam(block, block_name, params[i].name,\n                                  params[i].flags, (time_t *)params[i].ptr,\n                                  NULL, NULL, msgout);\n            if cfg_is_err\n                (rc, params[i].flags)\n                    return rc;\n            break;\n\n        case PT_SIZE:\n            rc = GetSizeParam(block, block_name, params[i].name,\n                              params[i].flags,\n                              (unsigned long long *)params[i].ptr,\n                              NULL, NULL, msgout);\n            if cfg_is_err\n                (rc, params[i].flags)\n                    return rc;\n            break;\n\n        case PT_INT:\n            rc = 
GetIntParam(block, block_name, params[i].name,\n                             params[i].flags, (int *)params[i].ptr,\n                             NULL, NULL, msgout);\n            if cfg_is_err\n                (rc, params[i].flags)\n                    return rc;\n            break;\n\n        case PT_INT64:\n            rc = GetInt64Param(block, block_name, params[i].name,\n                               params[i].flags, (uint64_t *)params[i].ptr,\n                               NULL, NULL, msgout);\n            if cfg_is_err\n                (rc, params[i].flags)\n                    return rc;\n            break;\n\n        case PT_FLOAT:\n            rc = GetFloatParam(block, block_name, params[i].name,\n                               params[i].flags, (double *)params[i].ptr,\n                               NULL, NULL, msgout);\n            if cfg_is_err\n                (rc, params[i].flags)\n                    return rc;\n            break;\n\n        case PT_TYPE:\n            sprintf(msgout, \"Unexpected type for %s parameter (type)\",\n                    params[i].name);\n            return EINVAL;\n        }\n    }\n    return 0;\n}\n\n/**\n * Common helper for get_cfg_block and get_cfg_subblock.\n * check the returned item is a block and set the\n * returned value\n */\nstatic int _check_and_set_return_block(config_item_t check_item,\n                                       config_item_t *return_item,\n                                       const char *name, bool unique,\n                                       char *msg_out)\n{\n    *return_item = NULL;\n    if (check_item == NULL) {\n        sprintf(msg_out, \"Missing configuration block '%s'\", name);\n        return ENOENT;\n    } else if (!unique) {\n        sprintf(msg_out, \"Found duplicate of block '%s' line %d.\", name,\n                rh_config_GetItemLine(check_item));\n        return EEXIST;\n    }\n\n    if (rh_config_ItemType(check_item) != CONFIG_ITEM_BLOCK) {\n        sprintf(msg_out, 
\"A block is expected for '%s' item, line %d\",\n                name, rh_config_GetItemLine(check_item));\n        return EINVAL;\n    }\n    *return_item = check_item;\n    return 0;\n}\n\nint get_cfg_block(config_file_t config, const char *name, config_item_t *item,\n                  char *msg_out)\n{\n    bool unique = true;\n    config_item_t block = rh_config_FindItemByName(config, name, &unique);\n\n    return _check_and_set_return_block(block, item, name, unique, msg_out);\n}\n\nint get_cfg_subblock(config_item_t block, const char *name,\n                     config_item_t *returnblock, char *msg_out)\n{\n    bool unique = true;\n    config_item_t subblock = rh_config_GetItemByName(block, name, &unique);\n\n    return _check_and_set_return_block(subblock, returnblock, name, unique,\n                                       msg_out);\n}\n"
  },
  {
    "path": "src/chglog_reader/Makefile.am",
    "content": "AM_CFLAGS= $(CC_OPT) $(DB_CFLAGS) $(PURPOSE_CFLAGS)\n\nnoinst_LTLIBRARIES=libchglog_rd.la\n\nlibchglog_rd_la_SOURCES= chglog_reader_config.c chglog_reader.c\n\n\nindent:\n\t$(top_srcdir)/scripts/indent.sh\n"
  },
  {
    "path": "src/chglog_reader/chglog_reader.c",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2009, 2010 CEA/DAM\n * Copyright 2013 Cray Inc. All Rights Reserved.\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n/**\n * \\file    chglog_reader.c\n * \\author  Th. Leibovici\n * \\brief   Lustre MDT Changelog processing.\n */\n\n#ifdef HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n\n#include \"Memory.h\"\n#include \"rbh_logs.h\"\n#include \"entry_processor.h\"\n#include \"entry_proc_hash.h\"\n#include \"rbh_misc.h\"\n#include \"global_config.h\"\n#include \"rbh_cfg_helpers.h\"\n#include \"chglog_reader.h\"\n\n#include <pthread.h>\n#include <errno.h>\n#include <unistd.h>\n#include <stdlib.h>\n#include <glib.h>\n#include \"lustre_extended_types.h\"\n\n#ifdef _LLAPI_FORKS\n#include <signal.h>\n#include <sys/wait.h>\n#endif\n\n/* for logs */\n#define CHGLOG_TAG  \"ChangeLog\"\n\nstruct rec_stats {\n    /** index of the record */\n    uint64_t        rec_id;\n    /** timestamp of the record */\n    struct timeval  rec_time;\n    /** when the record reached the current processing step */\n    struct timeval  step_time;\n\n    /* to compute speed between reports */\n    uint64_t        last_report_rec_id;\n    struct timeval  last_report_rec_time;\n};\n\n/** convert changelog record stats to timeval */\nstatic void timeval_from_rec(struct timeval *tv, CL_REC_TYPE *logrec)\n{\n    tv->tv_sec = (time_t)cltime2sec(logrec->cr_time);\n    tv->tv_usec = (time_t)cltime2nsec(logrec->cr_time) / 1000;\n}\n\n/** update record stats */\nstatic void update_rec_stats(struct rec_stats *rs, CL_REC_TYPE *logrec)\n{\n    rs->rec_id = logrec->cr_index;\n    timeval_from_rec(&rs->rec_time, 
logrec);\n    gettimeofday(&rs->step_time, NULL);\n\n    /* if no record has been reported, save this one - 1 as the previous last */\n    if (rs->last_report_rec_id == 0) {\n        rs->last_report_rec_id = rs->rec_id - 1;\n        rs->last_report_rec_time = rs->rec_time;\n    }\n}\n\n/* reader thread info, one per MDT */\ntypedef struct reader_thr_info_t {\n    /** reader thread index */\n    unsigned int thr_index;\n\n    /** thread id */\n    pthread_t thr_id;\n\n    /** open information */\n    char *mdtdevice;\n    int flags;\n\n    /** nbr of records read by this thread */\n    unsigned long long nb_read;\n\n    /** number of records of interest (ie. not MARK, IOCTL, ...) */\n    unsigned long long interesting_records;\n\n    /** number of suppressed/merged records */\n    unsigned long long suppressed_records;\n\n    /** last record read from the changelog */\n    struct rec_stats last_read;\n    /** last record pushed to the pipeline */\n    struct rec_stats last_push;\n    /** last record commited to the DB */\n    struct rec_stats last_commit;\n    /** last commit id saved to the DB */\n    struct rec_stats last_commit_update;\n    /** last record cleared from the changelog */\n    struct rec_stats last_clear;\n\n    /* number of times the changelog has been reopened */\n    unsigned int nb_reopen;\n\n    /** thread was asked to stop */\n    unsigned int force_stop:1;\n\n    /** log handler */\n    void *chglog_hdlr;\n\n    /** Queue of pending changelogs to push to the pipeline. */\n    struct rh_list_head op_queue;\n    unsigned int op_queue_count;\n\n    /** Store the ops for easier access. Each element in the hash\n     * table is also in the op_queue list. This hash table doesn't\n     * need a lock per slot since there is only one reader. The\n     * slot counts won't be used either. 
*/\n    struct id_hash *id_hash;\n\n    ull_t cl_counters[CL_LAST]; /* since program start time */\n    ull_t cl_reported[CL_LAST]; /* last reported stat (for incremental diff) */\n    time_t last_report;\n\n    unsigned int last_reopen;\n\n    /** On pre LU-1331 versions of Lustre, a CL_RENAME is always\n     * followed by a CL_EXT, however these may not be\n     * contiguous. Temporarily store the CL_RENAME changelog until we\n     * get the CL_EXT. */\n    CL_REC_TYPE *cl_rename;\n\n} reader_thr_info_t;\n\nextern chglog_reader_config_t cl_reader_config;\nstatic run_flags_t behavior_flags = 0;\n\n/* stop reading logs when reaching end of file? */\n#define one_shot (behavior_flags & RUNFLG_ONCE)\n\n/** array of reader info */\nstatic reader_thr_info_t *reader_info = NULL;\n\n/**\n * Close the changelog for a thread.\n */\nstatic int log_close(reader_thr_info_t *p_info)\n{\n    int rc;\n\n    /* close the log and clear input buffers */\n    rc = llapi_changelog_fini(&p_info->chglog_hdlr);\n\n    if (rc)\n        DisplayLog(LVL_CRIT, CHGLOG_TAG, \"Error %d closing changelog: %s\",\n                   rc, strerror(abs(rc)));\n\n    return abs(rc);\n}\n\n/**\n * Free allocated structures in op_extra_info_t field.\n */\nstatic void free_extra_info(void *ptr)\n{\n    op_extra_info_t *p_info = (op_extra_info_t *)ptr;\n\n    if (p_info->is_changelog_record && p_info->log_record.p_log_rec) {\n        llapi_changelog_free(&p_info->log_record.p_log_rec);\n    }\n}\n\nstatic void free_extra_info2(void *ptr)\n{\n    op_extra_info_t *p_info = (op_extra_info_t *)ptr;\n\n    if (p_info->is_changelog_record && p_info->log_record.p_log_rec) {\n        /* if this is a locally allocated record, just \"free\" it */\n        free(p_info->log_record.p_log_rec);\n        p_info->log_record.p_log_rec = NULL;\n    }\n}\n\n/**\n * Clear the changelogs up to the last committed number seen.\n */\nstatic int clear_changelog_records(reader_thr_info_t *p_info)\n{\n    int rc;\n    const char 
*reader_id;\n\n    if (p_info->last_commit.rec_id == 0) {\n        /* No record was ever committed. Stop here because calling\n         * llapi_changelog_clear() with record 0 will clear all\n         * records, leading to a potential record loss. */\n        return 0;\n    }\n\n    reader_id = cl_reader_config.mdt_def[p_info->thr_index].reader_id;\n\n    DisplayLog(LVL_DEBUG, CHGLOG_TAG,\n               \"%s: acknowledging ChangeLog records up to #%\"PRIu64,\n               p_info->mdtdevice, p_info->last_commit.rec_id);\n\n    DisplayLog(LVL_FULL, CHGLOG_TAG, \"llapi_changelog_clear('%s', '%s', %\"PRIu64\")\",\n               p_info->mdtdevice, reader_id,\n               p_info->last_commit.rec_id);\n\n    rc = llapi_changelog_clear(p_info->mdtdevice, reader_id,\n                               p_info->last_commit.rec_id);\n\n    if (rc) {\n        DisplayLog(LVL_CRIT, CHGLOG_TAG,\n                   \"ERROR: llapi_changelog_clear(\\\"%s\\\", \\\"%s\\\", %\"PRIu64\") \"\n                   \"returned %d\", p_info->mdtdevice, reader_id,\n                   p_info->last_commit.rec_id, rc);\n        return rc;\n    }\n\n    /* update info about last cleared record */\n    p_info->last_clear.rec_id = p_info->last_commit.rec_id;\n    p_info->last_clear.rec_time =  p_info->last_commit.rec_time;\n    gettimeofday(&p_info->last_clear.step_time, NULL);\n\n    return 0;\n}\n\n/**\n * Store the information about changelog processing\n */\nstatic int store_rec_stats(lmgr_t *lmgr, const reader_thr_info_t *info,\n                           const char *var_prefix, const struct rec_stats *rs)\n{\n    const char *mdt = cl_reader_config.mdt_def[info->thr_index].mdt_name;\n    char *var = NULL;\n    char *val = NULL;\n    int rc;\n\n    /* Don't override previous values in the DB if no record has been\n     * processed by current robinhood instance. 
*/\n    if (rs->rec_id == 0)\n        return 0;\n\n    if (asprintf(&var, \"%s_%s\", var_prefix, mdt) == -1 || var == NULL)\n        return errno ? -errno : -ENOMEM;\n\n    /* Format of value is rec_id:record_time(epoch.us):step_time(epoch.us) */\n    if (asprintf(&val, \"%\"PRIu64\":%lu.%lu:%lu.%lu\", rs->rec_id,\n                 rs->rec_time.tv_sec, rs->rec_time.tv_usec,\n                 rs->step_time.tv_sec, rs->step_time.tv_usec)  == -1\n        || val == NULL) {\n        rc = errno ? -errno : -ENOMEM;\n        goto out;\n    }\n\n    if (ListMgr_SetVar(lmgr, var, val)) {\n        DisplayLog(LVL_MAJOR, CHGLOG_TAG,\n                   \"Failed to save %s record stats for %s\", var_prefix, mdt);\n        rc = -EIO;\n        goto out;\n    }\n\n    rc = 0;\n\nout:\n    free(var);\n    free(val);\n    return rc;\n}\n\n/**\n * Store the last processed record (i.e. commited to the DB)\n * at regular interval.\n * @return true if the record id was saved, false in other cases.\n */\nstatic bool store_last_commit(lmgr_t *lmgr, reader_thr_info_t *info, bool force)\n{\n    int64_t delta_id = info->last_commit.rec_id\n                       - info->last_commit_update.rec_id;\n    time_t delta_sec = info->last_commit.step_time.tv_sec\n                       - info->last_commit_update.step_time.tv_sec;\n\n    /** check update delays */\n    if (!force && delta_id < cl_reader_config.commit_update_max_delta\n        && delta_sec < cl_reader_config.commit_update_max_delay)\n        return false;\n\n    if (store_rec_stats(lmgr, info, CL_LAST_COMMITTED_REC, &info->last_commit))\n        return false;\n\n    info->last_commit_update.rec_id = info->last_commit.rec_id;\n    info->last_commit_update.rec_time = info->last_commit.rec_time;\n    gettimeofday(&info->last_commit_update.step_time, NULL);\n\n    return true;\n}\n\n/** drop all old changelog stats */\nstatic void drop_deprecated_changelog_vars(lmgr_t *lmgr, const char *mdt)\n{\n    char *var = NULL;\n    int i;\n\n    if 
(asprintf(&var, \"%s_%s\", CL_LAST_COMMITTED_OLD, mdt) == -1\n        || var == NULL)\n        return;\n    ListMgr_SetVar(lmgr, var, NULL);\n    free(var);\n\n    ListMgr_SetVar(lmgr, CL_LAST_READ_REC_ID_OLD, NULL);\n    ListMgr_SetVar(lmgr, CL_LAST_READ_REC_TIME_OLD, NULL);\n    ListMgr_SetVar(lmgr, CL_LAST_READ_TIME_OLD, NULL);\n    ListMgr_SetVar(lmgr, CL_LAST_COMMITTED_OLD, NULL);\n    ListMgr_SetVar(lmgr, CL_DIFF_INTERVAL_OLD, NULL);\n\n    for (i = 0; i < CL_LAST; i++) {\n        if (asprintf(&var, \"%s_%s\", CL_COUNT_PREFIX_OLD, changelog_type2str(i))\n            == -1 || var == NULL)\n            continue;\n        ListMgr_SetVar(lmgr, var, NULL);\n        free(var);\n\n        if (asprintf(&var, \"%s_%s\", CL_DIFF_PREFIX_OLD, changelog_type2str(i))\n            == -1 || var == NULL)\n            continue;\n        ListMgr_SetVar(lmgr, var, NULL);\n        free(var);\n    }\n}\n\n\n/**\n * Retrieve the old variable of last committed changelog record,\n * and store it as the new name.\n * @return 0 if the information is not available.\n */\nstatic uint64_t retrieve_old_commit(lmgr_t *lmgr, const reader_thr_info_t *info)\n{\n    const char *mdt = cl_reader_config.mdt_def[info->thr_index].mdt_name;\n    char *var = NULL;\n    char val_str[128];\n    struct rec_stats rs = {0};\n    uint64_t rec_id;\n\n    if (asprintf(&var, \"%s_%s\", CL_LAST_COMMITTED_OLD, mdt) == -1\n        || var == NULL)\n        return 0;\n\n    if (ListMgr_GetVar(lmgr, var, val_str, sizeof(val_str)) != DB_SUCCESS) {\n        free(var);\n        return 0;\n    }\n\n    rec_id = str2bigint(val_str);\n    if (rec_id == -1LL)\n        rec_id = 0;\n\n    DisplayLog(LVL_EVENT, CHGLOG_TAG, \"Old variable '%s' detected: replacing \"\n               \"it by '%s_%s'\", var, CL_LAST_COMMITTED_REC, mdt);\n    free(var);\n    rs.rec_id = rec_id;\n\n    if (store_rec_stats(lmgr, info, CL_LAST_COMMITTED_REC, &rs) == 0)\n        /* don't drop old variables if the new one could not be set */\n        
drop_deprecated_changelog_vars(lmgr, mdt);\n\n    return rec_id;\n}\n\n/**\n * Retrieve last committed record for the given reader.\n * @return 0 if the information is not available.\n */\nstatic uint64_t retrieve_last_commit(lmgr_t *lmgr,\n                                     const reader_thr_info_t *info)\n{\n    const char *mdt = cl_reader_config.mdt_def[info->thr_index].mdt_name;\n    char val_str[MAX_VAR_LEN];\n    int64_t last_rec;\n    char *var = NULL;\n    char *sv = NULL;\n    char *tok;\n\n    if (asprintf(&var, \"%s_%s\", CL_LAST_COMMITTED_REC, mdt) == -1\n        || var == NULL)\n        return 0;\n\n    if (ListMgr_GetVar(lmgr, var, val_str, sizeof(val_str)) != DB_SUCCESS) {\n        free(var);\n        /* try with the old name */\n        return retrieve_old_commit(lmgr, info);\n    }\n    free(var);\n\n    tok = strtok_r(val_str, \":\", &sv);\n    if (tok == NULL)\n        return 0;\n\n    last_rec = str2bigint(tok);\n    if (last_rec == -1LL)\n        last_rec = 0;\n\n    return last_rec;\n}\n\n/**\n * DB callback function: this is called when a given ChangeLog record\n * has been successfully applied to the database.\n */\nstatic int log_record_callback(lmgr_t *lmgr, struct entry_proc_op_t *pop,\n                               void *param)\n{\n    reader_thr_info_t *info = (reader_thr_info_t *)param;\n    CL_REC_TYPE *logrec = pop->extra_info.log_record.p_log_rec;\n    bool saved;\n    int rc;\n\n    /** Check that a log record is set for this entry\n     * (should always be the case).\n     */\n    if (!pop->extra_info.is_changelog_record || (logrec == NULL)) {\n        DisplayLog(LVL_CRIT, CHGLOG_TAG, \"Error: log record callback function\"\n                   \" has been called for a non-changelog entry\");\n        return EINVAL;\n    }\n\n    /* update info about the last committed record */\n    update_rec_stats(&info->last_commit, logrec);\n\n    /* Save the last committed record so robinhood doesn't get old records\n     * when restarting 
(especially if there are multiple changelog readers). */\n    saved = store_last_commit(lmgr, info, false);\n\n    /* batching llapi_changelog_clear() calls.\n     * clear the record in any of those cases:\n     *      - batch_ack_count = 1 (i.e. acknowledge every record).\n     *      - we reached the last pushed record.\n     *      - if the delta to last cleared record is high enough.\n     * do nothing in all other cases:\n     */\n    if ((cl_reader_config.batch_ack_count > 1)\n        && (logrec->cr_index < info->last_push.rec_id)\n        && ((logrec->cr_index - info->last_clear.rec_id)\n            < cl_reader_config.batch_ack_count)) {\n        DisplayLog(LVL_FULL, CHGLOG_TAG, \"callback - %s cl_record: %llu, \"\n                   \"last_cleared: %\"PRIu64\", last_pushed: %\"PRIu64,\n                   info->mdtdevice, logrec->cr_index,\n                   info->last_clear.rec_id, info->last_push.rec_id);\n        /* do nothing, don't clear log now */\n        return 0;\n    }\n\n    rc = clear_changelog_records(info);\n\n    /* Always save the last commit after clearing records. This avoids\n     * clearing records twice. 
*/\n    if (!saved)\n        store_last_commit(lmgr, info, true);\n\n    return rc;\n}\n\n#ifdef _LUSTRE_HSM\nstatic const char *get_event_name(unsigned int cl_event)\n{\n    static const char * const event_name[] = {\n        \"archive\", \"restore\", \"cancel\", \"release\", \"remove\", \"state\",\n    };\n\n    if (cl_event >= G_N_ELEMENTS(event_name))\n        return \"unknown\";\n    else\n        return event_name[cl_event];\n}\n#endif\n\n#define CL_BASE_FORMAT \"%s: %llu %02d%-5s %u.%09u 0x%x%s t=\"DFID\n#define CL_BASE_ARG(_mdt, _rec_) (_mdt), (_rec_)->cr_index, (_rec_)->cr_type, \\\n                                 changelog_type2str((_rec_)->cr_type),        \\\n                                 (uint32_t)cltime2sec((_rec_)->cr_time),      \\\n                                 cltime2nsec((_rec_)->cr_time),               \\\n                                 (_rec_)->cr_flags & CLF_FLAGMASK, flag_buff, \\\n                                 PFID(&(_rec_)->cr_tfid)\n#define CL_NAME_FORMAT \"p=\"DFID\" %.*s\"\n#define CL_NAME_ARG(_rec_) PFID(&(_rec_)->cr_pfid), (_rec_)->cr_namelen, \\\n        rh_get_cl_cr_name(_rec_)\n\n#if defined(HAVE_CHANGELOG_EXTEND_REC) || defined(HAVE_FLEX_CL)\n#define CL_EXT_FORMAT   \"s=\"DFID\" sp=\"DFID\" %.*s\"\n#endif\n\n/* Dump a single record. */\nstatic void dump_record(int debug_level, const char *mdt,\n                        const CL_REC_TYPE *rec)\n{\n    char flag_buff[256] = \"\";\n    char record_str[RBH_PATH_MAX] = \"\";\n    char *curr = record_str;\n    int len;\n    int left = sizeof(record_str);\n\n    /* No need to go further if the log level is not right. 
*/\n    if (EMPTY_STRING(log_config.changelogs_file) &&\n        log_config.debug_level < debug_level)\n        return;\n\n#ifdef _LUSTRE_HSM\n    if (rec->cr_type == CL_HSM)\n        g_snprintf(flag_buff, sizeof(flag_buff), \"(%s%s,rc=%d)\",\n                   get_event_name(hsm_get_cl_event(rec->cr_flags)),\n                   hsm_get_cl_flags(rec->\n                                    cr_flags) & CLF_HSM_DIRTY ? \",dirty\" : \"\",\n                   hsm_get_cl_error(rec->cr_flags));\n#endif\n\n    len = snprintf(curr, left, CL_BASE_FORMAT, CL_BASE_ARG(mdt, rec));\n    curr += len;\n    left -= len;\n    if (left > 0 && rec->cr_namelen) {\n        /* this record has a 'name' field. */\n        len = snprintf(curr, left, \" \" CL_NAME_FORMAT, CL_NAME_ARG(rec));\n        curr += len;\n        left -= len;\n    }\n\n    if (left > 0) {\n#if defined(HAVE_FLEX_CL)\n        /* Newer versions. The cr_sfid is not directly in the\n         * changelog record anymore. CLF_RENAME is always present for\n         * backward compatibility; it describes the format of the\n         * record, but the rename extension will be zero'ed for\n         * non-rename records...\n         */\n        if (rec->cr_flags & CLF_RENAME) {\n            struct changelog_ext_rename *cr_rename;\n\n            cr_rename = changelog_rec_rename((CL_REC_TYPE *)rec);\n            if (fid_is_sane(&cr_rename->cr_sfid)) {\n                len = snprintf(curr, left, \" \" CL_EXT_FORMAT,\n                               PFID(&cr_rename->cr_sfid),\n                               PFID(&cr_rename->cr_spfid),\n                               (int)changelog_rec_snamelen((CL_REC_TYPE *)rec),\n                               changelog_rec_sname((CL_REC_TYPE *)rec));\n                curr += len;\n                left -= len;\n            }\n        }\n        if (rec->cr_flags & CLF_JOBID) {\n            struct changelog_ext_jobid *jobid =\n                changelog_rec_jobid((CL_REC_TYPE *)rec);\n\n            if 
(jobid->cr_jobid[0] != '\\0') {\n                len = snprintf(curr, left, \" J=%s\", jobid->cr_jobid);\n                curr += len;\n                left -= len;\n            }\n        }\n#elif defined(HAVE_CHANGELOG_EXTEND_REC)\n        if (fid_is_sane(&rec->cr_sfid)) {\n            len = snprintf(curr, left, \" \" CL_EXT_FORMAT,\n                           PFID(&rec->cr_sfid),\n                           PFID(&rec->cr_spfid),\n                           changelog_rec_snamelen((CL_REC_TYPE *)rec),\n                           changelog_rec_sname((CL_REC_TYPE *)rec));\n            curr += len;\n            left -= len;\n        }\n#endif\n    }\n\n    if (left <= 0)\n        record_str[RBH_PATH_MAX - 1] = '\\0';\n\n    DisplayLog(debug_level, CHGLOG_TAG, \"%s\", record_str);\n    DisplayChangelogs(\"%s\", record_str);\n}\n\n/* Dumps the nth most recent entries in the queue. If -1, dump them\n * all. */\nstatic void dump_op_queue(reader_thr_info_t *p_info, int debug_level, int num)\n{\n    entry_proc_op_t *op;\n\n    if (log_config.debug_level < debug_level || num == 0)\n        return;\n\n    rh_list_for_each_entry_reverse(op, &p_info->op_queue, list) {\n        dump_record(debug_level, p_info->mdtdevice,\n                    op->extra_info.log_record.p_log_rec);\n\n        if (num != -1) {\n            num--;\n            if (num == 0)\n                return;\n        }\n    }\n}\n\n/** extract parent_id and name attributes from the changelog record */\nstatic void set_name(CL_REC_TYPE *logrec, entry_proc_op_t *p_op)\n{\n    /* is there entry name in log rec? */\n    if (logrec->cr_namelen == 0) {\n        ATTR(&p_op->fs_attrs, name)[0] = 0;\n        return;\n    }\n    ATTR_MASK_SET(&p_op->fs_attrs, name);\n    rh_strncpy(ATTR(&p_op->fs_attrs, name), rh_get_cl_cr_name(logrec),\n               MIN2(sizeof(ATTR(&p_op->fs_attrs, name)),\n                    logrec->cr_namelen + 1));\n\n    /* parent id is always set when name is (Cf. 
comment in lfs.c) */\n    if (fid_is_sane(&logrec->cr_pfid)) {\n        ATTR_MASK_SET(&p_op->fs_attrs, parent_id);\n        ATTR(&p_op->fs_attrs, parent_id) = logrec->cr_pfid;\n\n        ATTR_MASK_SET(&p_op->fs_attrs, path_update);\n        ATTR(&p_op->fs_attrs, path_update) = time(NULL);\n    } else {\n        DisplayLog(LVL_MAJOR, CHGLOG_TAG, \"Error: insane parent fid \" DFID\n                   \" in %s changelog record (namelen=%u)\",\n                   PFID(&logrec->cr_pfid),\n                   changelog_type2str(logrec->cr_type), logrec->cr_namelen);\n    }\n}\n\n/* Push the oldest (all=FALSE) or all (all=TRUE) entries into the pipeline. */\nstatic void process_op_queue(reader_thr_info_t *p_info, bool push_all)\n{\n    time_t oldest = time(NULL) - cl_reader_config.queue_max_age;\n    CL_REC_TYPE *rec;\n\n    DisplayLog(LVL_FULL, CHGLOG_TAG, \"processing changelog queue\");\n\n    while (!rh_list_empty(&p_info->op_queue)) {\n        entry_proc_op_t *op =\n            rh_list_first_entry(&p_info->op_queue, entry_proc_op_t, list);\n\n        /* Stop when the queue is below our limit, and when the oldest\n         * element is still new enough. */\n        if (!push_all &&\n            (p_info->op_queue_count < cl_reader_config.queue_max_size) &&\n            (op->timestamp.changelog_inserted > oldest))\n            break;\n\n        rh_list_del(&op->list);\n        rh_list_del(&op->id_hash_list);\n\n        rec = op->extra_info.log_record.p_log_rec;\n        DisplayLog(LVL_FULL, CHGLOG_TAG, \"pushing cl record #%llu: age=%ld\",\n                   rec->cr_index,\n                   time(NULL) - op->timestamp.changelog_inserted);\n\n        /* Set parent_id+name from changelog record info, as they are used\n         * in pipeline for stage locking. 
*/\n        set_name(rec, op);\n        /* Push the entry to the pipeline */\n        EntryProcessor_Push(op);\n\n        update_rec_stats(&p_info->last_push, rec);\n        p_info->op_queue_count--;\n    }\n}\n\n/* Flags to insert_into_hash. */\n#define PLR_FLG_FREE2       0x0001  /* must free changelog record\n                                       on completion */\n#define CHECK_IF_LAST_ENTRY 0x0002  /* check whether the unlinked file is\n                                       the last one. */\n#define GET_FID_FROM_DB     0x0004  /* fid is not valid, get it from DB */\n\n/* Insert the operation into the internal hash table. */\nstatic int insert_into_hash(reader_thr_info_t *p_info, CL_REC_TYPE *p_rec,\n                            unsigned int flags)\n{\n    entry_proc_op_t *op;\n    struct id_hash_slot *slot;\n\n    op = EntryProcessor_Get();\n    if (!op) {\n        DisplayLog(LVL_CRIT, CHGLOG_TAG,\n                   \"CRITICAL ERROR: EntryProcessor_Get failed to allocate a new op\");\n        return -1;\n    }\n\n    /* first, it will check if it already exists in database */\n    op->pipeline_stage = entry_proc_descr.GET_INFO_DB;\n\n    /* set log record */\n    op->extra_info_is_set = 1;\n    op->extra_info.is_changelog_record = 1;\n    op->extra_info.log_record.p_log_rec = p_rec;\n\n    /* set mdt name */\n    op->extra_info.log_record.mdt =\n        cl_reader_config.mdt_def[p_info->thr_index].mdt_name;\n\n    if (flags & PLR_FLG_FREE2)\n        op->extra_info_free_func = free_extra_info2;\n    else\n        op->extra_info_free_func = free_extra_info;\n\n    /* if the unlink record is not tagged as last unlink,\n     * always check the previous value of nlink in DB */\n    op->check_if_last_entry = (p_rec->cr_type == CL_UNLINK)\n        && !(p_rec->cr_flags & CLF_UNLINK_LAST);\n    op->get_fid_from_db = !!(flags & GET_FID_FROM_DB);\n\n    /* set callback function + args */\n    op->callback_func = log_record_callback;\n    op->callback_param = p_info;\n\n  
  /* Set entry ID */\n    if (!op->get_fid_from_db)\n        EntryProcessor_SetEntryId(op, &p_rec->cr_tfid);\n\n    /* Add the entry on the pending queue ... */\n    op->timestamp.changelog_inserted = time(NULL);\n    rh_list_add_tail(&op->list, &p_info->op_queue);\n    p_info->op_queue_count++;\n\n    /* ... and the hash table. */\n    slot = get_hash_slot(p_info->id_hash, &op->entry_id);\n    rh_list_add_tail(&op->id_hash_list, &slot->list);\n\n    return 0;\n}\n\n/* Describes which records can be safely ignored. By default a record\n * is never ignored. It is only necessary to add an entry in this\n * table if the record may be skipped (and thus has a mask defined) or\n * if it can be skipped altogether. */\nstatic const struct {\n    enum { IGNORE_NEVER = 0,    /* default */\n        IGNORE_MASK,    /* mask must be set, and record has a FID */\n        IGNORE_CANCEL,  /* record could cancel a previous one\n                           (e.g. CREATE/UNLINK sequence) */\n        IGNORE_ALWAYS\n    } ignore;\n    unsigned int ignore_mask;\n} record_filters[CL_LAST] = {\n\n    /* Record we don't care about. */\n    [CL_MARK] = { .ignore = IGNORE_ALWAYS },\n#ifdef _HAVE_CL_IOCTL /* replaced by CL_LAYOUT in Lustre 2.5 */\n    [CL_IOCTL] = { .ignore = IGNORE_ALWAYS },\n#endif\n\n    /* Similar operation (data changes). For instance, if the current\n     * operation is a CLOSE, drop it if we find a previous\n     * TRUNC/CLOSE/MTIME or CREATE for the same FID. */\n    [CL_TRUNC] = { IGNORE_MASK, 1<<CL_TRUNC | 1<<CL_CLOSE | 1<<CL_MTIME\n                   | 1<<CL_CREATE },\n    [CL_CLOSE] = { IGNORE_MASK, 1<<CL_TRUNC | 1<<CL_CLOSE | 1<<CL_MTIME\n                   | 1<<CL_CREATE },\n    [CL_MTIME] = { IGNORE_MASK, 1<<CL_TRUNC | 1<<CL_CLOSE | 1<<CL_MTIME\n                   | 1<<CL_CREATE | 1<<CL_MKNOD | 1<<CL_MKDIR },\n\n    /* Similar operations (metadata changes). 
*/\n    [CL_CTIME] = { IGNORE_MASK, 1<<CL_CTIME | 1<<CL_SETATTR | 1<<CL_CREATE\n                   | 1<<CL_MKNOD | 1<<CL_MKDIR },\n    [CL_SETATTR] = { IGNORE_MASK, 1<<CL_CTIME | 1<<CL_SETATTR | 1<<CL_CREATE\n                   | 1<<CL_MKNOD | 1<<CL_MKDIR },\n\n    /* Note: no need to check UNLINK_LAST or HSM flags: if unlink comes just\n     * after create, there was no HARDLINK or HSM event in between, so we can\n     * safely cancel the create without missing anything. */\n    [CL_UNLINK] = { IGNORE_CANCEL, 1<<CL_CREATE | 1<<CL_MKNOD },\n    [CL_RMDIR] = { IGNORE_CANCEL, 1<<CL_MKDIR },\n};\n\n#ifdef _LUSTRE_HSM\n/* Special case for CL records is reserved to HSM\n *\n */\nstatic bool can_ignore_hsm_record(reader_thr_info_t *p_info,\n                              const CL_REC_TYPE *logrec_in)\n{\n    entry_proc_op_t *op, *t1;\n    struct id_hash_slot *slot;\n    char flag_buff[256] = \"\";\n\n    slot = get_hash_slot(p_info->id_hash, &logrec_in->cr_tfid);\n    rh_list_for_each_entry_safe_reverse(op, t1, &slot->list, id_hash_list) {\n        CL_REC_TYPE *logrec = op->extra_info.log_record.p_log_rec;\n\n        /* fid not matching, check next records */\n        if (!entry_id_equal(&logrec->cr_tfid, &logrec_in->cr_tfid))\n            continue;\n\n        DisplayLog(LVL_FULL, CHGLOG_TAG,\n                   \"    checking against previous HSM record \"CL_BASE_FORMAT,\n                   CL_BASE_ARG(p_info->mdtdevice, logrec));\n        if (hsm_get_cl_event(logrec->cr_flags) == HE_STATE) {\n            DisplayLog(LVL_FULL, CHGLOG_TAG,\n                       \"    removing previous HSM record \"CL_BASE_FORMAT,\n                       CL_BASE_ARG(p_info->mdtdevice, logrec));\n            /* free and remove previous record */\n            rh_list_del(&op->list);\n            rh_list_del(&op->id_hash_list);\n            p_info->op_queue_count--;\n            EntryProcessor_Release(op);\n            /* removed record was previously counted as interesting */\n            
p_info->interesting_records--;\n\n            return false;\n        }\n    }\n    return false;\n}\n#endif\n\n/* Decides whether a new changelog record can be ignored. Ignoring a\n * record should not impact the database state, however the gain is to:\n *  - reduce contention on pipeline stages with constraints,\n *  - reduce the number of DB and FS requests.\n *\n * Returns TRUE or FALSE.\n */\nstatic bool can_ignore_record(reader_thr_info_t *p_info,\n                              const CL_REC_TYPE *logrec_in)\n{\n    entry_proc_op_t *op, *t1;\n    unsigned int ignore_mask;\n    struct id_hash_slot *slot;\n    char flag_buff[256] = \"\";\n\n#ifdef _LUSTRE_HSM\n    // Function for handling duplicate HSM events\n    if (logrec_in->cr_type == CL_HSM) {\n        return can_ignore_hsm_record(p_info, logrec_in);\n    }\n#endif\n\n    if (record_filters[logrec_in->cr_type].ignore == IGNORE_NEVER)\n        return false;\n\n    if (record_filters[logrec_in->cr_type].ignore == IGNORE_ALWAYS) {\n        DisplayChangelogs(\"(ignored redundant record %s:%llu)\",\n                          p_info->mdtdevice, logrec_in->cr_index);\n        return true;\n    }\n\n    DisplayLog(LVL_FULL, CHGLOG_TAG, \"Incoming record \"CL_BASE_FORMAT,\n               CL_BASE_ARG(p_info->mdtdevice, logrec_in));\n    /* The ignore field is IGNORE_MASK. At that point, the FID in the\n     * changelog record must be set. All the changelog record with the\n     * same FID will go into the same bucket, so parse that slot\n     * instead of the whole op_queue list. 
*/\n    slot = get_hash_slot(p_info->id_hash, &logrec_in->cr_tfid);\n    ignore_mask = record_filters[logrec_in->cr_type].ignore_mask;\n\n    rh_list_for_each_entry_safe_reverse(op, t1, &slot->list, id_hash_list) {\n        CL_REC_TYPE *logrec = op->extra_info.log_record.p_log_rec;\n\n        /* fid not matching, check next records */\n        if (!entry_id_equal(&logrec->cr_tfid, &logrec_in->cr_tfid))\n            continue;\n\n        DisplayLog(LVL_FULL, CHGLOG_TAG,\n                   \"    checking against previous record \"CL_BASE_FORMAT,\n                   CL_BASE_ARG(p_info->mdtdevice, logrec));\n\n        if (record_filters[logrec_in->cr_type].ignore == IGNORE_CANCEL) {\n            /* If there is a non-cancellable record in between, we cannot merge\n             * and cancel the whole sequence. */\n            if ((ignore_mask & (1 << logrec->cr_type)) == 0) {\n                DisplayLog(LVL_FULL, CHGLOG_TAG, \"-> Significant record \"\n                   \"between create/unlink sequence: peer must be kept\");\n                return false;\n            }\n            /* create/unlink sequence: can be cancelled */\n            DisplayLog(LVL_FULL, CHGLOG_TAG,\n                       \"-> Log peer to be cancelled\");\n            DisplayChangelogs(\"(dropped log peer %s:%llu; %s:%llu)\",\n                              p_info->mdtdevice, logrec->cr_index,\n                              p_info->mdtdevice, logrec_in->cr_index);\n            /* free and remove previous record */\n            rh_list_del(&op->list);\n            rh_list_del(&op->id_hash_list);\n            p_info->op_queue_count--;\n            EntryProcessor_Release(op);\n            /* removed record was previously counted as interesting */\n            p_info->interesting_records--;\n            /* ignore second record as well */\n            return true;\n        }\n\n        /* the only remaining case is ignore mask */\n        assert(record_filters[logrec_in->cr_type].ignore == 
IGNORE_MASK);\n\n        /* If the type of record matches what we're looking for, and\n         * it's for the same FID, then we can ignore the new\n         * record. */\n        if (ignore_mask & (1 << logrec->cr_type)) {\n            DisplayLog(LVL_FULL, CHGLOG_TAG, \"-> Ignored\");\n\n            /* if the matching record is n, and ignored record is n+1,\n             * acknownledging(n) can also acknownledge(n+1),\n             * as they refer to the same entry.\n             */\n            if (logrec_in->cr_index == logrec->cr_index + 1) {\n                DisplayLog(LVL_FULL, CHGLOG_TAG,\n                           \"acknowledging %llu will acknowledge %llu too\",\n                           logrec->cr_index, logrec_in->cr_index);\n                logrec->cr_index++;\n            }\n\n            DisplayChangelogs(\"(ignored redundant record %s:%llu)\",\n                              p_info->mdtdevice, logrec_in->cr_index);\n            return true;\n        }\n    }\n\n    return false;\n}\n\n/**\n * Convert rename flags to unlink flags, depending on Lustre client/server\n * versions.\n * @param[in]     flags            cr_flags from rename changelog record.\n * @param[in,out] pipeline_flags   indicate if specific processing is needed\n *                                 in pipeline.\n */\nstatic uint16_t cl_rename2unlink_flags(uint16_t flags,\n                                       unsigned int *pipeline_flags)\n{\n    uint16_t retflg = 0;\n\n#ifdef CLF_RENAME_LAST\n    /* The client support LU-1331 (since CLF_RENAME_LAST is\n     * defined) but that may not be the case of the server. 
*/\n    if (cl_reader_config.mds_has_lu1331) {\n        if (flags & CLF_RENAME_LAST)\n            retflg |= CLF_UNLINK_LAST;\n#ifdef CLF_RENAME_LAST_EXISTS\n        if (flags & CLF_RENAME_LAST_EXISTS)\n            retflg |= CLF_UNLINK_HSM_EXISTS;\n#endif\n\n    } else\n#endif\n    {\n        /* CLF_RENAME_LAST is not supported in this version of the\n         * client and/or the server. The pipeline will have to\n         * decide whether this is the last entry or not. */\n        *pipeline_flags |= CHECK_IF_LAST_ENTRY;\n    }\n\n    if (!cl_reader_config.mds_has_lu543) {\n        /* The server doesn't tell whether the rename operation will\n         * remove a file. */\n        *pipeline_flags |= GET_FID_FROM_DB;\n    }\n\n    return retflg;\n}\n\n/**\n * Create a fake unlink changelog record that will be used to remove a\n * file that is overriden during a rename operation.\n *\n * rec_in is a changelog of type CL_RENAME (if rename is recorded with\n * one changelog record) or CL_EXT (if rename is recorded with\n * CL_RENAME+CL_EXT). This function is called because the rename\n * operation is deleting the destination, so we need to insert a fake\n * CL_UNLINK into the pipeline for that operation.\n */\nstatic CL_REC_TYPE *create_fake_unlink_record(const reader_thr_info_t *p_info,\n                                              CL_REC_TYPE *rec_in,\n                                              unsigned int *insert_flags)\n{\n    CL_REC_TYPE *rec;\n    size_t name_len;\n\n    /* Build a simple changelog record with no extension (jobid, rename...).\n     * So, just allocate enough space for the record and the source name. 
*/\n    name_len = strlen(rh_get_cl_cr_name(rec_in));\n    rec = MemAlloc(sizeof(CL_REC_TYPE) + name_len + 1);\n    if (rec == NULL)\n        return NULL;\n\n    /* Copy the fix part of the changelog structure */\n    memcpy(rec, rec_in, sizeof(CL_REC_TYPE));\n\n    /* set target flags before using any accessor on it */\n    rec->cr_flags = cl_rename2unlink_flags(rec_in->cr_flags, insert_flags);\n\n    /* record has to be freed */\n    *insert_flags |= PLR_FLG_FREE2;\n\n    /* unlinked entry is the target name */\n    memcpy(rh_get_cl_cr_name(rec), rh_get_cl_cr_name(rec_in), name_len);\n    rh_get_cl_cr_name(rec)[name_len] = 0;   /* terminate string */\n    rec->cr_namelen = name_len + 1;\n\n    rec->cr_type = CL_UNLINK;\n    rec->cr_index = rec_in->cr_index - 1;\n\n    DisplayLog(LVL_DEBUG, CHGLOG_TAG,\n               \"Unlink: object=\" DFID \", name=%.*s, flags=%#x\",\n               PFID(&rec->cr_tfid), rec->cr_namelen,\n               rh_get_cl_cr_name(rec), rec->cr_flags);\n\n    return rec;\n}\n\n#if defined(HAVE_CHANGELOG_EXTEND_REC) || defined(HAVE_FLEX_CL)\n/**\n * Create a fake rename record to ensure compatibility with older\n * Lustre records.\n *\n * rec_in is a single rename record of type CL_RENAME; Lustre won't\n * issue a CL_EXT record for this rename. But RH's pipeline expects a\n * CL_RENAME followed by a CL_EXT record. So this function creates an\n * old fashion CL_RENAME that will be followed by a CL_EXT.\n *\n * This is only used if LU-1331 fix is present on the Lustre server.\n */\nstatic CL_REC_TYPE *create_fake_rename_record(const reader_thr_info_t *p_info,\n                                              CL_REC_TYPE *rec_in)\n{\n    CL_REC_TYPE *rec;\n    size_t sname_len;\n\n    /* Build a simple changelog record with no extension (jobid, rename...).\n     * So, just allocate enough space for the record and the source name. 
*/\n    sname_len = changelog_rec_snamelen(rec_in);\n    rec = MemAlloc(sizeof(CL_REC_TYPE) + sname_len + 1);\n    if (rec == NULL)\n        return NULL;\n\n    /* Copy the fix part of the changelog structure */\n    memcpy(rec, rec_in, sizeof(CL_REC_TYPE));\n\n    /* set target flags before using any accessor on it */\n    rec->cr_flags = 0;  /* simplest record */\n\n    rec->cr_namelen = sname_len + 1;    /* add 1 for final NULL-byte */\n    memcpy(rh_get_cl_cr_name(rec), changelog_rec_sname(rec_in), sname_len);\n    rh_get_cl_cr_name(rec)[sname_len] = 0;  /* terminate string */\n\n    /* we don't want to acknowledge this record as long as the 2\n     * records are not processed. acknowledge n-1 instead */\n    rec->cr_index = rec_in->cr_index - 1;\n\n#ifdef HAVE_FLEX_CL\n    {\n        const struct changelog_ext_rename *cr_ren_in =\n            changelog_rec_rename(rec_in);\n\n        rec->cr_tfid = cr_ren_in->cr_sfid;  /* the renamed fid */\n        rec->cr_pfid = cr_ren_in->cr_spfid; /* the source parent */\n    }\n#else\n    rec->cr_tfid = rec_in->cr_sfid; /* the renamed fid */\n    rec->cr_pfid = rec_in->cr_spfid;    /* the source parent */\n#endif\n\n    return rec;\n}\n#endif\n\n/**\n * This handles a single log record.\n */\nstatic int process_log_rec(reader_thr_info_t *p_info, CL_REC_TYPE *p_rec)\n{\n    unsigned int opnum;\n\n    /* display the log record in debug mode */\n    dump_record(LVL_DEBUG, p_info->mdtdevice, p_rec);\n\n    /* update stats */\n    opnum = p_rec->cr_type;\n    if (opnum < CL_LAST)\n        p_info->cl_counters[opnum]++;\n    else {\n        DisplayLog(LVL_CRIT, CHGLOG_TAG,\n                   \"Log record type %d out of bounds.\", opnum);\n        return EINVAL;\n    }\n\n    /* This record might be of interest. But try to check whether it\n     * might create a duplicate operation anyway. 
*/\n    if (can_ignore_record(p_info, p_rec)) {\n        DisplayLog(LVL_FULL, CHGLOG_TAG, \"Ignoring event %s\",\n                   changelog_type2str(opnum));\n        p_info->suppressed_records++;\n        llapi_changelog_free(&p_rec);\n        goto done;\n    }\n\n    p_info->interesting_records++;\n\n    if (p_rec->cr_type == CL_RENAME) {\n        /* Ensure there is no pending rename. */\n        if (p_info->cl_rename) {\n            /* Should never happen. */\n            DisplayLog(LVL_CRIT, CHGLOG_TAG,\n                       \"Got 2 CL_RENAME in a row without a CL_EXT.\");\n            dump_record(LVL_CRIT, p_info->mdtdevice, p_rec);\n            dump_op_queue(p_info, LVL_CRIT, 32);\n\n            /* Discarding bogus entry. */\n            llapi_changelog_free(&p_info->cl_rename);\n            p_info->cl_rename = NULL;\n        }\n#if defined(HAVE_CHANGELOG_EXTEND_REC) || defined(HAVE_FLEX_CL)\n        /* extended record: 1 single RENAME record per rename op;\n         * there is no EXT. */\n        if (rh_is_rename_one_record(p_rec)) {\n            CL_REC_TYPE *p_rec2;\n#ifdef HAVE_FLEX_CL\n            struct changelog_ext_rename *cr_ren;\n#endif\n\n            /* The MDS sent an extended record, so we have both LU-543\n             * and LU-1331. 
*/\n            if (!cl_reader_config.mds_has_lu543 ||\n                !cl_reader_config.mds_has_lu1331) {\n                DisplayLog(LVL_EVENT, CHGLOG_TAG,\n                           \"LU-1331 is fixed in this version of Lustre.\");\n\n                cl_reader_config.mds_has_lu543 = true;\n                cl_reader_config.mds_has_lu1331 = true;\n            }\n\n            if (!FID_IS_ZERO(&p_rec->cr_tfid)) {\n                CL_REC_TYPE *unlink;\n                unsigned int insert_flags;\n\n                unlink = create_fake_unlink_record(p_info,\n                                                   p_rec, &insert_flags);\n                if (unlink) {\n                    insert_into_hash(p_info, unlink, insert_flags);\n                } else {\n                    DisplayLog(LVL_CRIT, CHGLOG_TAG,\n                               \"Could not allocate an UNLINK record.\");\n                }\n            }\n#ifdef HAVE_FLEX_CL\n            cr_ren = changelog_rec_rename(p_rec);\n            DisplayLog(LVL_DEBUG, CHGLOG_TAG,\n                       \"Rename: object=\" DFID \", old parent/name=\" DFID\n                       \"/%.*s, new parent/name=\" DFID \"/%.*s\",\n                       PFID(&cr_ren->cr_sfid), PFID(&cr_ren->cr_spfid),\n                       (int)changelog_rec_snamelen(p_rec),\n                       changelog_rec_sname(p_rec), PFID(&p_rec->cr_pfid),\n                       p_rec->cr_namelen, rh_get_cl_cr_name(p_rec));\n#else\n            DisplayLog(LVL_DEBUG, CHGLOG_TAG,\n                       \"Rename: object=\" DFID \", old parent/name=\" DFID\n                       \"/%s, new parent/name=\" DFID \"/%.*s\",\n                       PFID(&p_rec->cr_sfid), PFID(&p_rec->cr_spfid),\n                       changelog_rec_sname(p_rec), PFID(&p_rec->cr_pfid),\n                       p_rec->cr_namelen, rh_get_cl_cr_name(p_rec));\n#endif\n\n            /* Ensure compatibility with older Lustre versions:\n             * push RNMFRM to remove the 
old path from NAMES table.\n             * push RNMTO to add target path information.\n             */\n            /* 1) build & push RNMFRM */\n            p_rec2 = create_fake_rename_record(p_info, p_rec);\n            insert_into_hash(p_info, p_rec2, PLR_FLG_FREE2);\n\n            /* 2) update RNMTO */\n            p_rec->cr_type = CL_EXT;    /* CL_RENAME -> CL_RNMTO */\n#ifdef HAVE_FLEX_CL\n            p_rec->cr_tfid = cr_ren->cr_sfid;   /* removed fid -> renamed fid */\n#else\n            p_rec->cr_tfid = p_rec->cr_sfid;    /* removed fid -> renamed fid */\n#endif\n            insert_into_hash(p_info, p_rec, 0);\n        } else\n#endif\n        {\n            /* This CL_RENAME is followed by CL_EXT, so keep it until\n             * then. */\n            p_info->cl_rename = p_rec;\n        }\n    } else if (p_rec->cr_type == CL_EXT) {\n\n        if (!p_info->cl_rename) {\n            /* Should never happen. */\n            DisplayLog(LVL_CRIT, CHGLOG_TAG, \"Got CL_EXT without a CL_RENAME.\");\n            dump_record(LVL_CRIT, p_info->mdtdevice, p_rec);\n            dump_op_queue(p_info, LVL_CRIT, 32);\n\n            /* Discarding bogus entry. */\n            llapi_changelog_free(&p_rec);\n\n            goto done;\n        }\n\n        if (!cl_reader_config.mds_has_lu543 &&\n            (FID_IS_ZERO(&p_rec->cr_tfid) ||\n             !entry_id_equal(&p_info->cl_rename->cr_tfid, &p_rec->cr_tfid))) {\n            /* tfid if 0, or the two fids are different, so we have LU-543. */\n            cl_reader_config.mds_has_lu543 = true;\n            DisplayLog(LVL_EVENT, CHGLOG_TAG,\n                       \"LU-543 is fixed in this version of Lustre.\");\n        }\n\n        /* We now have a CL_RENAME and a CL_EXT. */\n        /* If target fid is not zero: unlink the target.\n         * e.g. 
\"mv a b\" and b exists => rm b.\n         */\n        if (!FID_IS_ZERO(&p_rec->cr_tfid)) {\n            CL_REC_TYPE *unlink;\n            unsigned int insert_flags;\n\n            /* Push an unlink. */\n            unlink = create_fake_unlink_record(p_info, p_rec, &insert_flags);\n\n            if (unlink) {\n                insert_into_hash(p_info, unlink, insert_flags);\n            } else {\n                DisplayLog(LVL_CRIT, CHGLOG_TAG,\n                           \"Could not allocate an UNLINK record.\");\n            }\n        }\n\n        /* Push the rename and the ext.\n         *\n         * TODO: we should be able to push only one RENAME/EXT now.\n         *\n         * This is a little racy if CL_RENAME and CL_EXT were not\n         * consecutive, because we are re-ordering the\n         * CL_RENAME. Clearing one of the record in the middle will\n         * also clear the RENAME with Lustre, however the RENAME\n         * hasn't been processed yet. To hit the race, that\n         * non-contiguous case should also happen while the changelog\n         * is shutting down. The chance of that happening in the real\n         * world should be rather slim to non-existent. 
*/\n\n        /* indicate the target fid as the renamed entry */\n        p_rec->cr_tfid = p_info->cl_rename->cr_tfid;\n\n        insert_into_hash(p_info, p_info->cl_rename, 0);\n        p_info->cl_rename = NULL;\n        insert_into_hash(p_info, p_rec, 0);\n    } else {\n        /* build the record to be processed in the pipeline */\n        insert_into_hash(p_info, p_rec, 0);\n    }\n\n done:\n    return 0;\n}\n\n/* get a changelog line (with retries) */\ntypedef enum { cl_ok, cl_continue, cl_stop } cl_status_e;\n\nstatic cl_status_e cl_get_one(reader_thr_info_t *info, CL_REC_TYPE **pp_rec)\n{\n    int rc;\n\n    /* get next record */\n    rc = llapi_changelog_recv(info->chglog_hdlr, pp_rec);\n\n    if (!EMPTY_STRING(log_config.changelogs_file) && rc != 0 && rc != 1) {\n        DisplayChangelogs(\">>> llapi_changelog_recv returned error %d \"\n                          \"(last record = %\"PRIu64\")\", rc,\n                          info->last_read.rec_id);\n        FlushLogs();\n    }\n\n    switch (rc) {\n    case 0:\n        /* Successfully retrieved a record. Update last read record. */\n        update_rec_stats(&info->last_read, *pp_rec);\n        info->nb_read++;\n        return cl_ok;\n\n    case -EINTR:\n        DisplayLog(LVL_EVENT, CHGLOG_TAG,\n                   \"llapi_changelog_recv() interrupted. Retrying.\");\n        return cl_continue;\n\n    case 1:    /* EOF */\n    case -EINVAL:  /* FS unmounted */\n    case -EPROTO:  /* error in KUC channel */\n    default:\n\n        /* warn if it is an error */\n        if (rc != 1)\n            DisplayLog(LVL_EVENT, CHGLOG_TAG,\n                       \"Error %d in llapi_changelog_recv(): %s. 
\"\n                       \"Trying to reopen it.\", rc, strerror(-rc));\n\n        if (one_shot)\n            return cl_stop;\n\n        /* Close, wait and open the log again (from last_read_record + 1) */\n        log_close(info);\n\n        if (cl_reader_config.force_polling) {\n            DisplayLog(LVL_FULL, CHGLOG_TAG,\n                       \"EOF reached on changelog from %s, reopening in %ld sec\",\n                       info->mdtdevice, cl_reader_config.polling_interval);\n            /* sleep during polling interval */\n            rh_sleep(cl_reader_config.polling_interval);\n        } else {\n            DisplayLog(LVL_EVENT, CHGLOG_TAG,\n                       \"WARNING: EOF reached on ChangeLog whereas FOLLOW flag \"\n                       \"was specified. Re-opening in 1 sec...\");\n            rh_sleep(1);\n        }\n\n        info->nb_reopen++;\n\n        rc = llapi_changelog_start(&info->chglog_hdlr, info->flags,\n                                   info->mdtdevice, info->last_read.rec_id + 1);\n        if (rc) {\n            /* will try to recover from this error */\n            rh_sleep(1);\n            DisplayLog(LVL_EVENT, CHGLOG_TAG,\n                       \"Error reopening changelog \"\n                       \"will try again soon: %d - %s\", rc, strerror(-rc));\n        }\n\n        return cl_continue;\n    }\n    /* Unreachable */\n    return cl_continue;\n}\n\n/** a thread that reads lines from a given changelog */\nstatic void *chglog_reader_thr(void *arg)\n{\n    reader_thr_info_t *info = (reader_thr_info_t *)arg;\n    CL_REC_TYPE *p_rec = NULL;\n    cl_status_e st;\n    /* Next time we will have to push. */\n    time_t next_push_time = time(NULL) + cl_reader_config.queue_check_interval;\n\n    /* loop until a TERM signal is caught */\n    while (!info->force_stop) {\n        /* Is it time to flush? 
*/\n        if (info->op_queue_count >= cl_reader_config.queue_max_size ||\n            next_push_time <= time(NULL)) {\n            process_op_queue(info, false);\n\n            next_push_time = time(NULL) + cl_reader_config.queue_check_interval;\n\n            if (!EMPTY_STRING(log_config.changelogs_file))\n                FlushLogs();\n        }\n\n        st = cl_get_one(info, &p_rec);\n        if (st == cl_continue)\n            continue;\n        else if (st == cl_stop)\n            break;\n\n        /* handle the line and push it to the pipeline */\n        process_log_rec(info, p_rec);\n    }\n\n    if (one_shot) {\n        /* Expected behavior in one-shot mode is to process all pending\n         * changelogs. So flush the internal queue. */\n        process_op_queue(info, true);\n    }\n    /* Else, process what stopped by a signal. Drop pending records and exit\n     * ASAP. */\n\n    DisplayLog(LVL_CRIT, CHGLOG_TAG, \"Changelog reader thread terminating\");\n    FlushLogs();\n    return NULL;\n\n}\n\n#ifdef _LLAPI_FORKS\n/* In early Lustre 2.0 releases, llapi_changelog_start() forks a process\n * that keeps in <defunc> state.\n * So we work around this issue by trapping SIGCHILD signals.\n */\nstatic void action_sigchld(int sig)\n{\n    pid_t child;\n    do {\n        /* wait for all terminated children\n         * and stop on end of list or error.\n         */\n        child = waitpid(-1, NULL, WNOHANG);\n    } while (child > 0);\n\n}\n#endif\n\n/** start ChangeLog Reader module */\nint cl_reader_start(run_flags_t flags, int mdt_index)\n{\n    int i, rc;\n    char mdtdevice[128];\n#ifdef _LLAPI_FORKS\n    struct sigaction act_sigchld;\n#endif\n\n    for (i = 0; i < cl_reader_config.mdt_count; i++) {\n        if (mdt_index == -1 || mdt_index == i)\n            DisplayLog(LVL_FULL, CHGLOG_TAG, \"mdt[%u] = %s\", i,\n                       cl_reader_config.mdt_def[i].mdt_name);\n    }\n\n    /* check parameters */\n    if ((cl_reader_config.mdt_count == 
0)\n        || (cl_reader_config.mdt_def == NULL)) {\n        DisplayLog(LVL_CRIT, CHGLOG_TAG,\n                   \"ERROR: no MDT ChangeLog has been defined in configuration\");\n        return EINVAL;\n    }\n#ifndef HAVE_DNE\n    else if ((cl_reader_config.mdt_count > 1) || (mdt_index > 0)) {\n        DisplayLog(LVL_CRIT, CHGLOG_TAG,\n                   \"ERROR: multiple MDTs are not supported with this version of Lustre\");\n        return ENOTSUP;\n    }\n#endif\n    else if (mdt_index >= (int)cl_reader_config.mdt_count) {\n        DisplayLog(LVL_CRIT, CHGLOG_TAG,\n                   \"The specified mdt_index (%d) exceeds the MDT count in configuration file (%u)\",\n                   mdt_index, cl_reader_config.mdt_count);\n        return EINVAL;\n    }\n\n    if (mdt_index != -1) {\n        /* hack the configuration structure to keep only the specified MDT */\n        if (mdt_index != 0)\n            cl_reader_config.mdt_def[0] = cl_reader_config.mdt_def[mdt_index];\n        cl_reader_config.mdt_count = 1;\n        DisplayLog(LVL_MAJOR, CHGLOG_TAG,\n                   \"Starting changelog reader only for %s, as specified by command line\",\n                   cl_reader_config.mdt_def[0].mdt_name);\n    }\n\n    /* saves the current config and parameter flags */\n    behavior_flags = flags;\n\n    /* create thread params */\n    reader_info = (reader_thr_info_t *)MemCalloc(cl_reader_config.mdt_count,\n                                                  sizeof(reader_thr_info_t));\n\n    if (reader_info == NULL)\n        return ENOMEM;\n\n#ifdef _LLAPI_FORKS\n    /* initialize sigchild handler */\n    memset(&act_sigchld, 0, sizeof(act_sigchld));\n    act_sigchld.sa_flags = 0;\n    act_sigchld.sa_handler = action_sigchld;\n    if (sigaction(SIGCHLD, &act_sigchld, NULL) == -1) {\n        DisplayLog(LVL_CRIT, CHGLOG_TAG,\n                   \"ERROR: Could not initialize SIGCHLD handler: %s\",\n                   strerror(errno));\n        return errno;\n    }\n    
DisplayLog(LVL_DEBUG, CHGLOG_TAG,\n               \"Ready to trap SIGCHLD from liblustreapi child process\");\n#endif\n\n    Alert_StartBatching();\n\n    // need a connection to get last committed record\n    lmgr_t lmgr;\n    int dbget = 1;\n    rc = ListMgr_InitAccess(&lmgr);\n    if (rc)\n        dbget = 0;\n\n    /* create one reader per MDT */\n    for (i = 0; i < cl_reader_config.mdt_count; i++) {\n        reader_thr_info_t *info = &reader_info[i];\n\n        /* retrieve from the first unacknowledged record */\n        unsigned long long last_rec = 0;\n\n        memset(info, 0, sizeof(reader_thr_info_t));\n        info->thr_index = i;\n        rh_list_init(&info->op_queue);\n        info->last_report = time(NULL);\n        info->id_hash = id_hash_init(\n            max_count_to_hash_size(cl_reader_config.queue_max_size), false);\n\n        snprintf(mdtdevice, 128, \"%s-%s\", get_fsname(),\n                 cl_reader_config.mdt_def[i].mdt_name);\n\n        info->mdtdevice = strdup(mdtdevice);\n        info->flags =\n            ((one_shot\n              || cl_reader_config.force_polling) ? 
0 : CHANGELOG_FLAG_FOLLOW)\n            | CHANGELOG_FLAG_BLOCK;\n#ifdef HAVE_FLEX_CL\n        /* more efficient: avoid structure remapping in liblustreapi */\n        info->flags |= CHANGELOG_FLAG_JOBID;\n#endif\n\n        if (dbget) {\n            last_rec = retrieve_last_commit(&lmgr, info);\n            if (last_rec != 0)\n                /* start rec = last rec + 1 */\n                last_rec++;\n        }\n        DisplayLog(LVL_DEBUG, CHGLOG_TAG,\n                   \"Opening chglog for %s (start_rec=%llu)\", mdtdevice,\n                   last_rec);\n\n        /* open the changelog (if we are in one_shot mode,\n         * don't use the CHANGELOG_FLAG_FOLLOW flag)\n         */\n        rc = llapi_changelog_start(&info->chglog_hdlr,\n                                   info->flags, info->mdtdevice, last_rec);\n\n        if (rc) {\n            DisplayLog(LVL_CRIT, CHGLOG_TAG,\n                       \"ERROR %d opening changelog for MDT '%s': %s\",\n                       rc, mdtdevice, strerror(abs(rc)));\n            return abs(rc);\n        }\n\n        /* then create the thread that manages it */\n        if (pthread_create(&info->thr_id, NULL, chglog_reader_thr, info)) {\n            int err = errno;\n            DisplayLog(LVL_CRIT, CHGLOG_TAG,\n                       \"ERROR creating ChangeLog reader thread: %s\",\n                       strerror(err));\n            return err;\n        }\n\n    }\n\n    if (dbget)\n        ListMgr_CloseAccess(&lmgr);\n\n    return 0;\n}\n\n/** terminate ChangeLog Readers */\nint cl_reader_terminate(void)\n{\n    unsigned int i;\n\n    /* ask threads to stop */\n    for (i = 0; i < cl_reader_config.mdt_count; i++) {\n        reader_info[i].force_stop = true;\n    }\n\n    DisplayLog(LVL_EVENT, CHGLOG_TAG,\n               \"Stop request has been sent to all ChangeLog reader threads\");\n\n    cl_reader_wait();\n\n    return 0;\n}\n\n/** wait for ChangeLog Readers termination */\nint cl_reader_wait(void)\n{\n    int i;\n    
void *ret;\n\n    for (i = 0; i < cl_reader_config.mdt_count; i++) {\n        pthread_join(reader_info[i].thr_id, &ret);\n    }\n\n    Alert_EndBatching();\n\n    return 0;\n}\n\n/** Release last changelog records, and dump the final stats. */\nint cl_reader_done(void)\n{\n    lmgr_t lmgr;\n    int rc;\n    int i;\n\n    for (i = 0; i < cl_reader_config.mdt_count; i++) {\n        reader_thr_info_t *info = &reader_info[i];\n\n        /* Clear the records that are still batched for clearing. */\n        clear_changelog_records(info);\n\n        log_close(info);\n    }\n\n    cl_reader_dump_stats();\n\n    /* need DB access to save changelog stats */\n    rc = ListMgr_InitAccess(&lmgr);\n    if (rc != DB_SUCCESS)\n        return 0;\n    cl_reader_store_stats(&lmgr);\n    ListMgr_CloseAccess(&lmgr);\n\n    return 0;\n}\n\n/** Display record stats */\nstatic void show_rec_stats(const char *verb, const char *verb_ed,\n                           struct rec_stats *rs, time_t last_report)\n{\n    char rectime_str[256];\n    char steptime_str[256];\n    struct tm paramtm;\n    time_t now = time(NULL);\n\n    /* nothing processed, nothing to report */\n    if (rs->rec_id == 0)\n        return;\n\n    /* first convert tv_sec */\n    strftime(rectime_str, sizeof(rectime_str), \"%Y/%m/%d %T\",\n             localtime_r(&rs->rec_time.tv_sec, &paramtm));\n    strftime(steptime_str, sizeof(steptime_str), \"%Y/%m/%d %T\",\n             localtime_r(&rs->step_time.tv_sec, &paramtm));\n\n    /* then %06u appends microseconds (strftime only supports struct tm) */\n    DisplayLog(LVL_MAJOR, \"STATS\", \"   last %s: rec_id=%\"PRIu64\", \"\n               \"rec_time=%s.%06lu, %s at %s.%06lu\", verb_ed, rs->rec_id,\n               rectime_str, rs->rec_time.tv_usec, verb_ed, steptime_str,\n               rs->step_time.tv_usec);\n\n    /* compute speeds */\n    if (rs->last_report_rec_id != 0 && now > last_report) {\n        double interval = now - last_report;\n        double speed, 
ratio;\n\n        speed = (double)(rs->rec_id - rs->last_report_rec_id) / interval;\n\n        ratio = (double)((rs->rec_time.tv_sec + rs->rec_time.tv_usec * 0.000001)\n                         - (rs->last_report_rec_time.tv_sec\n                            + rs->last_report_rec_time.tv_usec * 0.000001))\n                / interval;\n        DisplayLog(LVL_MAJOR, \"STATS\", \"       %s speed: %.2f rec/sec, \"\n                   \"log/real time ratio: %.2f\", verb, speed, ratio);\n    }\n\n    rs->last_report_rec_id = rs->rec_id;\n    rs->last_report_rec_time = rs->rec_time;\n}\n\n/** dump changelog processing stats */\nint cl_reader_dump_stats(void)\n{\n    unsigned int i, j;\n    char tmp_buff[256];\n    char *ptr;\n\n    for (i = 0; i < cl_reader_config.mdt_count; i++) {\n        DisplayLog(LVL_MAJOR, \"STATS\", \"ChangeLog reader #%u:\", i);\n\n        DisplayLog(LVL_MAJOR, \"STATS\", \"   fs_name    =   %s\", get_fsname());\n        DisplayLog(LVL_MAJOR, \"STATS\", \"   mdt_name   =   %s\",\n                   cl_reader_config.mdt_def[i].mdt_name);\n        DisplayLog(LVL_MAJOR, \"STATS\", \"   reader_id  =   %s\",\n                   cl_reader_config.mdt_def[i].reader_id);\n        DisplayLog(LVL_MAJOR, \"STATS\", \"   records read        = %llu\",\n                   reader_info[i].nb_read);\n        DisplayLog(LVL_MAJOR, \"STATS\", \"   interesting records = %llu\",\n                   reader_info[i].interesting_records);\n        DisplayLog(LVL_MAJOR, \"STATS\", \"   suppressed records  = %llu\",\n                   reader_info[i].suppressed_records);\n        DisplayLog(LVL_MAJOR, \"STATS\", \"   records pending     = %u\",\n                   reader_info[i].op_queue_count);\n\n        if (reader_info[i].force_stop)\n            DisplayLog(LVL_MAJOR, \"STATS\",\n                       \"   status              = terminating\");\n        else if (reader_info[i].nb_reopen == reader_info[i].last_reopen)\n            /* no reopen: it is busy reading changelogs 
*/\n            DisplayLog(LVL_MAJOR, \"STATS\",\n                       \"   status              = busy\");\n        else if (time(NULL) - reader_info[i].last_report\n                 == (reader_info[i].nb_reopen - reader_info[i].last_reopen)\n                    * cl_reader_config.polling_interval) {\n            /* if the whole interval is the reopen time => it spends it time\n             * polling */\n            /* more than a single record read? */\n            if (reader_info[i].last_read.rec_id -\n                reader_info[i].last_read.last_report_rec_id > 1)\n                DisplayLog(LVL_MAJOR, \"STATS\",\n                           \"   status              = almost idle\");\n            else\n                DisplayLog(LVL_MAJOR, \"STATS\",\n                           \"   status              = idle\");\n        }\n\n        if (reader_info[i].nb_read > 0) {\n            show_rec_stats(\"receive\", \"received\", &reader_info[i].last_read,\n                           reader_info[i].last_report);\n            show_rec_stats(\"push\", \"pushed\", &reader_info[i].last_push,\n                           reader_info[i].last_report);\n            show_rec_stats(\"commit\", \"committed\", &reader_info[i].last_commit,\n                           reader_info[i].last_report);\n            show_rec_stats(\"clear\", \"cleared\", &reader_info[i].last_clear,\n                           reader_info[i].last_report);\n        }\n        /* last_report is updated by cl_reader_store_stats */\n\n        DisplayLog(LVL_MAJOR, \"STATS\", \"   ChangeLog stats:\");\n\n        tmp_buff[0] = '\\0';\n        ptr = tmp_buff;\n        for (j = 0; j < CL_LAST; j++) {\n            /* flush full line */\n            if (ptr - tmp_buff >= 80) {\n                DisplayLog(LVL_MAJOR, \"STATS\", \"   %s\", tmp_buff);\n                tmp_buff[0] = '\\0';\n                ptr = tmp_buff;\n            }\n            if (ptr != tmp_buff)\n                ptr += sprintf(ptr, \", \");\n\n    
        ptr += sprintf(ptr, \"%s: %llu\", changelog_type2str(j),\n                           reader_info[i].cl_counters[j]);\n        }\n        /* last unflushed line */\n        if (ptr != tmp_buff)\n            DisplayLog(LVL_MAJOR, \"STATS\", \"   %s\", tmp_buff);\n    }\n\n    return 0;\n}\n\nstatic void store_thread_info(lmgr_t *lmgr, reader_thr_info_t *info)\n{\n    const char *mdt = cl_reader_config.mdt_def[info->thr_index].mdt_name;\n    char *varname = NULL;\n    char tmp_buff[256];\n    int i;\n\n    store_rec_stats(lmgr, info, CL_LAST_READ_REC, &info->last_read);\n    store_rec_stats(lmgr, info, CL_LAST_PUSHED_REC, &info->last_push);\n    store_rec_stats(lmgr, info, CL_LAST_CLEARED_REC, &info->last_clear);\n    /* CL_LAST_COMMITTED_REC is updated by entry processor callbacks */\n\n    for (i = 0; i < CL_LAST; i++) {\n        char last_val[256];\n        unsigned long long last, current, diff;\n\n        /* CL counters format:  <prefix>_<mdt_name>_<event_name> */\n        if (asprintf(&varname, \"%s_%s_%s\", CL_COUNT_PREFIX, mdt,\n                     changelog_type2str(i)) == -1 || varname == NULL)\n            continue;\n\n        /* get and set (increment) */\n        if (ListMgr_GetVar(lmgr, varname, last_val, sizeof(last_val)) !=\n            DB_SUCCESS)\n            last = 0;\n        else\n            last = str2bigint(last_val);\n\n        /* diff = current - last_reported */\n        current = info->cl_counters[i];\n        diff = current - info->cl_reported[i];\n\n        /* new value = last + diff */\n        snprintf(tmp_buff, sizeof(tmp_buff), \"%llu\", last + diff);\n        if (ListMgr_SetVar(lmgr, varname, tmp_buff) == DB_SUCCESS)\n            /* last_reported is now current */\n            info->cl_reported[i] = current;\n        free(varname);\n\n        /* save diff */\n        if (asprintf(&varname, \"%s_%s_%s\", CL_DIFF_PREFIX, mdt,\n                     changelog_type2str(i)) == -1 || varname == NULL)\n            continue;\n        
snprintf(tmp_buff, sizeof(tmp_buff), \"%llu\", diff);\n        ListMgr_SetVar(lmgr, varname, tmp_buff);\n        free(varname);\n    }\n\n    if (asprintf(&varname, \"%s_%s\", CL_DIFF_INTERVAL, mdt) == -1\n        || varname == NULL)\n        return;\n\n    /* indicate diff interval */\n    snprintf(tmp_buff, sizeof(tmp_buff), \"%lu\", time(NULL) - info->last_report);\n    ListMgr_SetVar(lmgr, varname, tmp_buff);\n    free(varname);\n\n    info->last_report = time(NULL);\n    info->last_reopen = info->nb_reopen;\n}\n\n/** store changelog stats to the database */\nvoid cl_reader_store_stats(lmgr_t *lmgr)\n{\n    int i;\n\n    if (cl_reader_config.mdt_count < 1)\n        /* nothing to be stored */\n        return;\n\n    for (i = 0; i < cl_reader_config.mdt_count; i++)\n        store_thread_info(lmgr, &reader_info[i]);\n}\n"
  },
  {
    "path": "src/chglog_reader/chglog_reader_config.c",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2009, 2010 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n/**\n * \\file    cl_reader_config.c\n * \\author  Th. Leibovici\n * \\brief   Configuration for Lustre MDT Changelog processing module.\n */\n\n#ifdef HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n\n#include \"chglog_reader.h\"\n#include \"rbh_misc.h\"\n#include \"rbh_cfg.h\"\n#include \"rbh_cfg_helpers.h\"\n#include \"rbh_logs.h\"\n#include <pthread.h>\n#include <errno.h>\n#include <unistd.h>\n\n#define TAG \"ChgLogCfg\" /* for logs */\n\n#define CHGLOG_CFG_BLOCK    \"ChangeLog\"\n#define MDT_DEF_BLOCK       \"MDT\"\n\nchglog_reader_config_t cl_reader_config;\n\nstatic mdt_def_t default_mdt_def = {\n    .mdt_name = \"MDT0000\",\n    .reader_id = \"cl1\"\n};\n\n/** Set changelog reader default configuration */\nstatic void cl_reader_set_default_cfg(void *module_config)\n{\n    chglog_reader_config_t *p_config = (chglog_reader_config_t *)module_config;\n\n    p_config->mdt_def = &default_mdt_def;\n    p_config->mdt_count = 1;\n    /* poll until changelog's follow flag is implemented in llapi */\n    p_config->force_polling = true;\n    p_config->polling_interval = 1; /* 1s */\n    p_config->queue_max_size = 1000;\n    p_config->queue_max_age = 5;    /* 5s */\n    p_config->queue_check_interval = 1; /* every second */\n    p_config->commit_update_max_delay = 5;\n    p_config->commit_update_max_delta = 10000;\n\n    p_config->mds_has_lu543 = false;\n    p_config->mds_has_lu1331 = false;\n\n    /* acknowledge 1024 records at once */\n    p_config->batch_ack_count = 1024;\n}\n\n/** Write default parameters 
for changelog readers */\nstatic void cl_reader_write_default(FILE *output)\n{\n    print_begin_block(output, 0, CHGLOG_CFG_BLOCK, NULL);\n    print_begin_block(output, 1, MDT_DEF_BLOCK, NULL);\n    print_line(output, 2, \"mdt_name    :  \\\"%s\\\"\", default_mdt_def.mdt_name);\n    print_line(output, 2, \"reader_id   :  \\\"%s\\\"\", default_mdt_def.reader_id);\n    print_end_block(output, 1);\n\n    print_line(output, 1, \"batch_ack_count  : 1024\");\n    print_line(output, 1, \"force_polling    : yes\");\n    print_line(output, 1, \"polling_interval : 1s\");\n    print_line(output, 1, \"queue_max_size   : 1000\");\n    print_line(output, 1, \"queue_max_age    : 5s\");\n    print_line(output, 1, \"queue_check_interval : 1s\");\n    print_line(output, 1, \"commit_update_max_delay : 5s\");\n    print_line(output, 1, \"commit_update_max_delta : 10k\");\n    print_line(output, 1, \"mds_has_lu543    : no\");\n    print_line(output, 1, \"mds_has_lu1331   : no\");\n\n    print_end_block(output, 0);\n}\n\n/** Write a configuration template for changelog readers */\nstatic void cl_reader_write_template(FILE *output)\n{\n    print_line(output, 0, \"# Parameters for processing MDT changelogs :\");\n    print_begin_block(output, 0, CHGLOG_CFG_BLOCK, NULL);\n\n    print_line(output, 1, \"# 1 MDT block for each MDT :\");\n    print_begin_block(output, 1, MDT_DEF_BLOCK, NULL);\n\n    print_line(output, 2, \"# name of the first MDT\");\n    print_line(output, 2, \"mdt_name  = \\\"MDT0000\\\" ;\");\n    fprintf(output, \"\\n\");\n    print_line(output, 2, \"# id of the persistent changelog reader\");\n    print_line(output, 2,\n               \"# as returned by \\\"lctl changelog_register\\\" command\");\n    print_line(output, 2, \"reader_id = \\\"cl1\\\" ;\");\n\n    print_end_block(output, 1);\n\n#ifdef HAVE_DNE\n    fprintf(output, \"\\n\");\n    print_line(output, 1, \"# another MDT\");\n    print_begin_block(output, 1, MDT_DEF_BLOCK, NULL);\n    print_line(output, 2, 
\"mdt_name  = \\\"MDT0001\\\" ;\");\n    print_line(output, 2, \"reader_id = \\\"cl1\\\" ;\");\n    print_end_block(output, 1);\n\n    fprintf(output, \"\\n\");\n    print_line(output, 1, \"# yet another MDT\");\n    print_begin_block(output, 1, MDT_DEF_BLOCK, NULL);\n    print_line(output, 2, \"mdt_name  = \\\"MDT0002\\\" ;\");\n    print_line(output, 2, \"reader_id = \\\"cl1\\\" ;\");\n    print_end_block(output, 1);\n#endif\n\n    print_line(output, 1, \"# clear changelog every 1024 records:\");\n    print_line(output, 1, \"batch_ack_count = 1024 ;\");\n    fprintf(output, \"\\n\");\n\n    print_line(output, 1, \"force_polling    = yes ;\");\n    print_line(output, 1, \"polling_interval = 1s ;\");\n    print_line(output, 1, \"# changelog batching parameters\");\n    print_line(output, 1, \"queue_max_size   = 1000 ;\");\n    print_line(output, 1, \"queue_max_age    = 5s ;\");\n    print_line(output, 1, \"queue_check_interval = 1s ;\");\n    print_line(output, 1, \"# delays to update last committed record in the DB\");\n    print_line(output, 1, \"commit_update_max_delay = 5s ;\");\n    print_line(output, 1, \"commit_update_max_delta = 10k ;\");\n    fprintf(output, \"\\n\");\n\n    print_line(output, 1,\n               \"# uncomment to dump all changelog records to the file\");\n\n    print_end_block(output, 0);\n}\n\n#define critical_err_check(_ptr_, _blkname_) do { if (!_ptr_) {\\\n            sprintf(msg_out, \"Internal error reading %s block in config file\", \\\n                    _blkname_); \\\n            return EFAULT; \\\n        } \\\n    } while (0)\n\nstatic int parse_mdt_block(config_item_t config_blk, const char *block_name,\n                           mdt_def_t *p_mdt_def, char *msg_out)\n{\n    char *str;\n    bool unique;\n\n    /* 2 variables expected : 'mdt_name' and 'reader_id' */\n    static const char * const expected_vars[] = {\n        \"mdt_name\", \"reader_id\", NULL\n    };\n\n    /* get 'mdt_name' value */\n    unique = true;\n    
str = rh_config_GetKeyValueByName(config_blk, \"mdt_name\", &unique);\n    if (str == NULL) {\n        DisplayLog(LVL_CRIT, \"ChgLog config\",\n                   \"WARNING: no 'mdt_name' provided in %s block: using default value '%s'\",\n                   block_name, default_mdt_def.mdt_name);\n        strcpy(p_mdt_def->mdt_name, default_mdt_def.mdt_name);\n    } else if (!unique) {\n        sprintf(msg_out, \"Found duplicate parameter '%s' in %s.\\n\", \"mdt_name\",\n                block_name);\n        return EEXIST;\n    } else if (strlen(str) >= MDT_NAME_MAX) {\n        sprintf(msg_out, \"MDT name '%s' is too long (max length=%u)\", str,\n                MDT_NAME_MAX);\n        return ENAMETOOLONG;\n    } else if (strncmp(\"MDT\", str, 3) != 0) {\n        sprintf(msg_out, \"Invalid MDT name '%s'. \\\"MDT<index>\\\" expected\", str);\n        return EINVAL;\n    } else {\n        strcpy(p_mdt_def->mdt_name, str);\n    }\n\n    /* get 'reader_id' value */\n    unique = true;\n    str = rh_config_GetKeyValueByName(config_blk, \"reader_id\", &unique);\n    if (str == NULL) {\n        DisplayLog(LVL_CRIT, \"ChgLog config\",\n                   \"WARNING: no 'reader_id' provided in %s block: using default value '%s'\",\n                   block_name, default_mdt_def.reader_id);\n        strcpy(p_mdt_def->reader_id, default_mdt_def.reader_id);\n    } else if (!unique) {\n        sprintf(msg_out, \"Found duplicate parameter '%s' in %s.\\n\", \"reader_id\",\n                block_name);\n        return EEXIST;\n    } else if (strlen(str) >= MDT_NAME_MAX) {\n        sprintf(msg_out, \"Client id '%s' is too long (max length=%u)\", str,\n                READER_ID_MAX);\n        return ENAMETOOLONG;\n    } else {\n        strcpy(p_mdt_def->reader_id, str);\n    }\n\n    /* display warnings for unknown parameters */\n    CheckUnknownParameters(config_blk, block_name, expected_vars);\n\n    return 0;\n}\n\n/** Read configuration for changelog readers */\nstatic int 
cl_reader_read_cfg(config_file_t config, void *module_config,\n                              char *msg_out)\n{\n    chglog_reader_config_t *p_config = (chglog_reader_config_t *)module_config;\n    config_item_t chglog_block;\n    unsigned int blc_index;\n    int rc;\n\n    static const char *cl_cfg_allow[] = {\n        \"force_polling\", \"polling_interval\", \"batch_ack_count\",\n        \"queue_max_size\", \"queue_max_age\", \"queue_check_interval\",\n        \"commit_update_max_delay\", \"commit_update_max_delta\",\n        \"mds_has_lu543\", \"mds_has_lu1331\", MDT_DEF_BLOCK,\n        NULL\n    };\n\n    const cfg_param_t cfg_params[] = {\n        {\"force_polling\", PT_BOOL, 0, &p_config->force_polling, 0},\n        {\"polling_interval\", PT_DURATION, PFLG_NOT_NULL | PFLG_POSITIVE,\n         &p_config->polling_interval, 0},\n        {\"batch_ack_count\", PT_INT, PFLG_NOT_NULL | PFLG_POSITIVE,\n         &p_config->batch_ack_count, 0},\n        {\"queue_max_size\", PT_INT, PFLG_NOT_NULL | PFLG_POSITIVE,\n         &p_config->queue_max_size, 0},\n        {\"queue_max_age\", PT_DURATION, PFLG_NOT_NULL | PFLG_POSITIVE,\n         &p_config->queue_max_age, 0},\n        {\"queue_check_interval\", PT_DURATION, PFLG_NOT_NULL | PFLG_POSITIVE,\n         &p_config->queue_check_interval, 0},\n        {\"commit_update_max_delta\", PT_INT64, PFLG_POSITIVE,\n         &p_config->commit_update_max_delta, 0},\n        {\"commit_update_max_delay\", PT_DURATION, PFLG_POSITIVE,\n         &p_config->commit_update_max_delay, 0},\n        {\"mds_has_lu543\", PT_BOOL, 0, &p_config->mds_has_lu543, 0},\n        {\"mds_has_lu1331\", PT_BOOL, 0, &p_config->mds_has_lu1331, 0},\n        END_OF_PARAMS\n    };\n\n    /* get ChangeLog  block */\n    rc = get_cfg_block(config, CHGLOG_CFG_BLOCK, &chglog_block, msg_out);\n    if (rc)\n        return rc == ENOENT ? 
0 : rc;   /* not mandatory */\n\n    /* get scalar params */\n    rc = read_scalar_params(chglog_block, CHGLOG_CFG_BLOCK, cfg_params,\n                            msg_out);\n    if (rc)\n        return rc;\n\n    /* browse  the list of MDT blocks */\n    for (blc_index = 0; blc_index < rh_config_GetNbItems(chglog_block);\n         blc_index++) {\n        char *block_name;\n        config_item_t curr_item =\n            rh_config_GetItemByIndex(chglog_block, blc_index);\n        critical_err_check(curr_item, CHGLOG_CFG_BLOCK);\n\n        if (rh_config_ItemType(curr_item) != CONFIG_ITEM_BLOCK)\n            continue;\n\n        block_name = rh_config_GetBlockName(curr_item);\n        critical_err_check(curr_item, CHGLOG_CFG_BLOCK);\n\n        if (!strcasecmp(block_name, MDT_DEF_BLOCK)) {\n            /* allocate a new mdt_definition  */\n\n            if ((p_config->mdt_def == NULL)\n                || (p_config->mdt_def == &default_mdt_def)) {\n                p_config->mdt_count = 1;\n\n                /* no MDT definition, or MDT definition was the default */\n                p_config->mdt_def = (mdt_def_t *)malloc(sizeof(mdt_def_t));\n                if (!p_config->mdt_def)\n                    return ENOMEM;\n            } else {\n                p_config->mdt_count++;\n\n                p_config->mdt_def = (mdt_def_t *)realloc(p_config->mdt_def,\n                                                         p_config->mdt_count *\n                                                         sizeof(mdt_def_t));\n                if (!p_config->mdt_def)\n                    return ENOMEM;\n            }\n\n            /* fill the structure */\n            rc = parse_mdt_block(curr_item, MDT_DEF_BLOCK,\n                                 &p_config->mdt_def[p_config->mdt_count - 1],\n                                 msg_out);\n            if (rc)\n                return rc;\n        } else {\n            sprintf(msg_out,\n                    \"Unknown sub-block '%s' in \" 
CHGLOG_CFG_BLOCK\n                    \" block, line %d\", block_name,\n                    rh_config_GetItemLine(curr_item));\n            return EINVAL;\n        }\n    }\n\n    CheckUnknownParameters(chglog_block, CHGLOG_CFG_BLOCK, cl_cfg_allow);\n\n#ifdef _DEBUG_CHGLOG\n    printf(\"%u MDT definitions parsed successfully, ptr = %p\\n\",\n           p_config->mdt_count, p_config->mdt_def);\n#endif\n\n    return 0;\n}\n\n#define NO_PARAM_UPDT_MSG(_blk, _name) DisplayLog(LVL_MAJOR, TAG, \"%s::%s\"     \\\n                \" changed in config file, but cannot be modified dynamically\", \\\n                 _blk, _name)\n#define PARAM_UPDT_MSG(_blk, _name, _format, _v1, _v2) DisplayLog(LVL_EVENT,  \\\n           TAG, \"%s::%s updated: \"_format\"->\"_format, _blk, _name, _v1, _v2)\n\n#define SCALAR_PARAM_UPDT(_cfg, _val_field, _blk, _name, _format, _format_func) \\\n    do { \\\n        if ((_cfg)->_val_field != cl_reader_config._val_field) { \\\n            PARAM_UPDT_MSG(_blk, _name, _format,                 \\\n                _format_func(cl_reader_config._val_field),       \\\n                          _format_func((_cfg)->_val_field));     \\\n            cl_reader_config._val_field = (_cfg)->_val_field;    \\\n        } \\\n} while (0)\n\n/** reload parameters for a single policy */\nstatic int cl_reader_reload_cfg(chglog_reader_config_t *cfg)\n{\n    SCALAR_PARAM_UPDT(cfg, force_polling, CHGLOG_CFG_BLOCK, \"force_polling\",\n                      \"%s\", bool2str);\n    SCALAR_PARAM_UPDT(cfg, polling_interval, CHGLOG_CFG_BLOCK,\n                      \"polling_interval\", \"%ld\",);\n    SCALAR_PARAM_UPDT(cfg, batch_ack_count, CHGLOG_CFG_BLOCK, \"batch_ack_count\",\n                      \"%u\",);\n    SCALAR_PARAM_UPDT(cfg, queue_max_size, CHGLOG_CFG_BLOCK, \"queue_max_size\",\n                      \"%u\",);\n    SCALAR_PARAM_UPDT(cfg, queue_max_age, CHGLOG_CFG_BLOCK, \"queue_max_age\",\n                      \"%ld\",);\n    SCALAR_PARAM_UPDT(cfg, 
queue_check_interval, CHGLOG_CFG_BLOCK,\n                      \"queue_check_interval\", \"%ld\",);\n    SCALAR_PARAM_UPDT(cfg, commit_update_max_delta, CHGLOG_CFG_BLOCK,\n                      \"commit_update_max_delta\", \"%\"PRIu64,);\n    SCALAR_PARAM_UPDT(cfg, commit_update_max_delay, CHGLOG_CFG_BLOCK,\n                      \"commit_update_max_delay\", \"%ld\",);\n\n    if (cfg->mds_has_lu543 != cl_reader_config.mds_has_lu543)\n        NO_PARAM_UPDT_MSG(CHGLOG_CFG_BLOCK, \"mds_has_lu543\");\n    if (cfg->mds_has_lu1331 != cl_reader_config.mds_has_lu1331)\n        NO_PARAM_UPDT_MSG(CHGLOG_CFG_BLOCK, \"mds_has_lu1331\");\n\n    if (cfg->mdt_count != cl_reader_config.mdt_count)\n        NO_PARAM_UPDT_MSG(CHGLOG_CFG_BLOCK, MDT_DEF_BLOCK \" count\");\n    else {\n        int i;\n\n        for (i = 0; i < cfg->mdt_count; i++) {\n            if (strcmp\n                (cfg->mdt_def[i].mdt_name,\n                 cl_reader_config.mdt_def[i].mdt_name))\n                NO_PARAM_UPDT_MSG(CHGLOG_CFG_BLOCK \"::\" MDT_DEF_BLOCK,\n                                  \"mdt_name\");\n            if (strcmp\n                (cfg->mdt_def[i].reader_id,\n                 cl_reader_config.mdt_def[i].reader_id))\n                NO_PARAM_UPDT_MSG(CHGLOG_CFG_BLOCK \"::\" MDT_DEF_BLOCK,\n                                  \"reader_id\");\n        }\n    }\n\n    return 0;\n}\n\nstatic int cl_reader_cfg_set(void *arg, bool reload)\n{\n    chglog_reader_config_t *cfg = (chglog_reader_config_t *)arg;\n\n    if (reload)\n        return cl_reader_reload_cfg(cfg);\n    else\n        cl_reader_config = *cfg;\n    return 0;\n}\n\nstatic void *cl_reader_cfg_new(void)\n{\n    return calloc(1, sizeof(chglog_reader_config_t));\n}\n\nstatic void cl_reader_cfg_free(void *arg)\n{\n    chglog_reader_config_t *cfg = (chglog_reader_config_t *)arg;\n\n    if ((cfg->mdt_def != NULL) && (cfg->mdt_def != &default_mdt_def))\n        free(cfg->mdt_def);\n    free(cfg);\n}\n\n/** config handling functions 
*/\nmod_cfg_funcs_t cl_reader_cfg_hdlr = {\n    .module_name = \"changelog reader\",\n    .new = cl_reader_cfg_new,\n    .free = cl_reader_cfg_free,\n    .set_default = cl_reader_set_default_cfg,\n    .read = cl_reader_read_cfg,\n    .set_config = cl_reader_cfg_set,\n    .write_default = cl_reader_write_default,\n    .write_template = cl_reader_write_template\n};\n"
  },
  {
    "path": "src/common/Makefile.am",
    "content": "AM_CFLAGS= $(CC_OPT) $(DB_CFLAGS) $(PURPOSE_CFLAGS)\nAM_LDFLAGS=-ldl\n\nnoinst_LTLIBRARIES=libcommontools.la\n\nif LUSTRE\nFS_SRC=lustre_tools.c\nendif\nif MNTENTCOMPAT\nCOMPAT_SRC=mntent_compat.c mntent_compat.h\nendif\nif SHOOK\n#PURPOSE_SRC=shook_wrap.c\nendif\n\nlibcommontools_la_SOURCES= RW_Lock.c uidgidcache.c rbh_misc.c rbh_cmd.c \\\n\t\t\t   rbh_params.c param_utils.c  global_config.c \\\n\t\t           update_params.c queue.c rbh_logs.c rbh_modules.c \\\n\t\t\t   basename.c $(FS_SRC) $(PURPOSE_SRC) $(COMPAT_SRC)\n\nindent:\n\t$(top_srcdir)/scripts/indent.sh\n"
  },
  {
    "path": "src/common/RW_Lock.c",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2007, 2008, 2009 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n/*\n * RW_Lock.c\n *\n * This file contains the functions for the RW lock management\n *\n */\n\n#ifdef HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n\n#include <pthread.h>\n#include <stdio.h>\n#include <string.h>\n#include \"RW_Lock.h\"\n\n/*\n * Debugging function\n */\nstatic inline void print_lock(char *s, rw_lock_t *plock)\n{\n#ifdef _DEBUG\n    printf(\"%s: id = %u:  Lock State: nbr_active = %d, nbr_waiting = %d, \"\n           \"nbw_active = %d, nbw_waiting = %d\\n\", s,\n           (unsigned int)pthread_self(), plock->nbr_active,\n           plock->nbr_waiting, plock->nbw_active, plock->nbw_waiting);\n#else\n    return;\n#endif\n}   /* print_lock */\n\n/*\n * Take the lock for reading\n */\nint P_r(rw_lock_t *plock)\n{\n    P(plock->mutexProtect);\n\n    print_lock(\"P_r.1\", plock);\n\n    plock->nbr_waiting++;\n\n    /* no new read lock is granted if writers are waiting or active */\n    if (plock->nbw_active > 0 || plock->nbw_waiting > 0)\n        pthread_cond_wait(&(plock->condRead), &(plock->mutexProtect));\n\n    /* There is no active or waiting writers, readers can go ... 
*/\n    plock->nbr_waiting--;\n    plock->nbr_active++;\n\n    V(plock->mutexProtect);\n\n    print_lock(\"P_r.end\", plock);\n\n    return 0;\n}   /* P_r */\n\n/*\n * Release the lock after reading\n */\nint V_r(rw_lock_t *plock)\n{\n    P(plock->mutexProtect);\n\n    print_lock(\"V_r.1\", plock);\n\n    /* I am a reader that is no longer active */\n    plock->nbr_active--;\n\n    /* I was the last active reader, and there are some waiting writers,\n     * I let one of them go */\n    if (plock->nbr_active == 0 && plock->nbw_waiting > 0) {\n        print_lock(\"V_r.2 lecteur libere un redacteur\", plock);\n        pthread_cond_signal(&plock->condWrite);\n    }\n\n    print_lock(\"V_r.end\", plock);\n\n    V(plock->mutexProtect);\n\n    return 0;\n}   /* V_r */\n\n/*\n * Take the lock for writing\n */\nint P_w(rw_lock_t *plock)\n{\n    P(plock->mutexProtect);\n\n    print_lock(\"P_w.1\", plock);\n\n    plock->nbw_waiting++;\n\n    /* nobody must be active obtain exclusive lock */\n    while (plock->nbr_active > 0 || plock->nbw_active > 0)\n        pthread_cond_wait(&plock->condWrite, &plock->mutexProtect);\n\n    /* I become active and no longer waiting */\n    plock->nbw_waiting--;\n    plock->nbw_active++;\n\n    V(plock->mutexProtect);\n\n    print_lock(\"P_w.end\", plock);\n    return 0;\n}   /* P_w */\n\n/*\n * Release the lock after writing\n */\nint V_w(rw_lock_t *plock)\n{\n    P(plock->mutexProtect);\n\n    print_lock(\"V_w.1\", plock);\n\n    /* I was the active writer, I am not it any more */\n    plock->nbw_active--;\n\n    if (plock->nbw_waiting > 0) {\n\n        print_lock(\"V_w.4 redacteur libere un lecteur\", plock);\n\n        /* There are waiting writers, but no waiting readers,\n         * I let a writter go */\n        pthread_cond_signal(&(plock->condWrite));\n\n        print_lock(\"V_w.5\", plock);\n\n    } else if (plock->nbr_waiting > 0) {\n        /* if readers are waiting, let them go */\n        print_lock(\"V_w.2 redacteur libere les 
lecteurs\", plock);\n        pthread_cond_broadcast(&(plock->condRead));\n\n        print_lock(\"V_w.3\", plock);\n\n    }\n    V(plock->mutexProtect);\n\n    print_lock(\"V_w.end\", plock);\n\n    return 0;\n}   /* V_w */\n\n/* Roughly, downgrading a writer lock is making a V_w atomically followed\n * by a P_r */\nint rw_lock_downgrade(rw_lock_t *plock)\n{\n    P(plock->mutexProtect);\n\n    print_lock(\"downgrade.1\", plock);\n\n    /* I was the active writer, I am not it any more */\n    plock->nbw_active--;\n\n    if (plock->nbr_waiting > 0) {\n\n        /* there are waiting readers, I let all the readers go */\n        print_lock(\"downgrade.2 libere les lecteurs\", plock);\n        pthread_cond_broadcast(&(plock->condRead));\n\n    }\n\n    /* nobody must break caller's read lock, so don't consider\n     * or unlock writers */\n\n    /* caller is also a reader, now */\n    plock->nbr_active++;\n\n    V(plock->mutexProtect);\n\n    print_lock(\"downgrade.end\", plock);\n\n    return 0;\n\n}   /* rw_lock_downgrade */\n\n/*\n * Routine for initializing a lock\n */\nint rw_lock_init(rw_lock_t *plock)\n{\n    int rc = 0;\n    pthread_mutexattr_t mutex_attr;\n    pthread_condattr_t cond_attr;\n\n    if ((rc = pthread_mutexattr_init(&mutex_attr) != 0))\n        return 1;\n    if ((rc = pthread_condattr_init(&cond_attr) != 0))\n        return 1;\n\n    if ((rc = pthread_mutex_init(&(plock->mutexProtect), &mutex_attr)) != 0)\n        return 1;\n\n    if ((rc = pthread_cond_init(&(plock->condRead), &cond_attr)) != 0)\n        return 1;\n    if ((rc = pthread_cond_init(&(plock->condWrite), &cond_attr)) != 0)\n        return 1;\n\n    plock->nbr_waiting = 0;\n    plock->nbr_active = 0;\n\n    plock->nbw_waiting = 0;\n    plock->nbw_active = 0;\n\n    return 0;\n}   /* rw_lock_init */\n\n/*\n * Routine for destroying a lock\n */\nint rw_lock_destroy(rw_lock_t *plock)\n{\n    int rc = 0;\n\n    if ((rc = pthread_mutex_destroy(&(plock->mutexProtect))) != 0)\n        return 
1;\n\n    if ((rc = pthread_cond_destroy(&(plock->condWrite))) != 0)\n        return 1;\n    if ((rc = pthread_cond_destroy(&(plock->condRead))) != 0)\n        return 1;\n\n    memset(plock, 0, sizeof(rw_lock_t));\n\n    return 0;\n}   /* rw_lock_init */\n"
  },
  {
    "path": "src/common/basename.c",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2015 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n/**\n * Ensure we use the GNU version of basename.\n */\n#define _GNU_SOURCE\n#include <string.h>\n\n#include \"rbh_basename.h\"\n\n/* GNU basename never modifies input argument */\nconst char *rh_basename(const char *path)\n{\n    return basename(path);\n}\n"
  },
  {
    "path": "src/common/global_config.c",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2008, 2009 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n#ifdef HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n\n#include \"global_config.h\"\n#include \"rbh_cfg_helpers.h\"\n#include \"rbh_misc.h\"\n#include \"rbh_logs.h\"\n#include <errno.h>\n\n#define GLOBAL_CONFIG_BLOCK \"General\"\n\n/* exported variable available to all modules */\nglobal_config_t global_config;\n\n/* name 2 fskey value */\nstatic inline fs_key_t name2fskey(const char *name)\n{\n    if (!strcasecmp(name, \"fsname\"))\n        return FSKEY_FSNAME;\n    else if (!strcasecmp(name, \"fsid\"))\n        return FSKEY_FSID;\n    else if (!strcasecmp(name, \"devid\"))\n        return FSKEY_DEVID;\n    else\n        return FSKEY_ERROR;\n}\n\nstatic void global_cfg_set_default(void *module_config)\n{\n    global_config_t *conf = (global_config_t *)module_config;\n\n    rh_strncpy(conf->fs_path, \"\", RBH_PATH_MAX);\n#ifdef _LUSTRE\n    rh_strncpy(conf->fs_type, \"lustre\", FILENAME_MAX);\n#else\n    rh_strncpy(conf->fs_type, \"\", FILENAME_MAX);\n#endif\n    conf->stay_in_fs = true;\n    conf->check_mounted = true;\n    conf->last_access_only_atime = false;\n    conf->uid_gid_as_numbers = false;\n    conf->fs_key = FSKEY_FSNAME;\n\n#if defined(_LUSTRE) && defined(_MDS_STAT_SUPPORT)\n    conf->direct_mds_stat = false;\n#endif\n\n#ifdef _LUSTRE\n    conf->lustre_projid = false;\n#endif\n}\n\nstatic void global_cfg_write_default(FILE *output)\n{\n    print_begin_block(output, 0, GLOBAL_CONFIG_BLOCK, NULL);\n    print_line(output, 1, \"fs_path       :  [MANDATORY]\");\n#ifdef _LUSTRE\n    
print_line(output, 1, \"fs_type       :  lustre\");\n#else\n    print_line(output, 1, \"fs_type       :  [MANDATORY]\");\n#endif\n    print_line(output, 1, \"fs_key        :  fsname\");\n    print_line(output, 1, \"stay_in_fs    :  yes\");\n    print_line(output, 1, \"check_mounted :  yes\");\n    print_line(output, 1, \"last_access_only_atime :  no\");\n    print_line(output, 1, \"uid_gid_as_numbers     :  no\");\n\n#if defined(_LUSTRE) && defined(_MDS_STAT_SUPPORT)\n    print_line(output, 1, \"direct_mds_stat :   no\");\n#endif\n#ifdef _LUSTRE\n    print_line(output, 1, \"lustre_projid :  no\");\n#endif\n    print_end_block(output, 0);\n}\n\nstatic int global_cfg_read(config_file_t config, void *module_config,\n                           char *msg_out)\n{\n    global_config_t *conf = (global_config_t *)module_config;\n    config_item_t    general_block;\n    int              rc;\n\n    static const char * const allowed_params[] = {\n        \"fs_path\", \"fs_type\", \"stay_in_fs\", \"check_mounted\",\n        \"direct_mds_stat\", \"fs_key\", \"last_access_only_atime\",\n        \"uid_gid_as_numbers\", \"lustre_projid\", NULL\n    };\n    const cfg_param_t cfg_params[] = {\n        {\"fs_path\", PT_STRING, PFLG_MANDATORY | PFLG_ABSOLUTE_PATH |\n         PFLG_REMOVE_FINAL_SLASH | PFLG_NO_WILDCARDS, conf->fs_path,\n         sizeof(conf->fs_path)}\n        ,\n        {\"fs_type\", PT_STRING,\n#ifndef _LUSTRE\n         PFLG_MANDATORY |\n#endif\n         PFLG_NO_WILDCARDS, conf->fs_type, sizeof(conf->fs_type)}\n        ,\n        {\"stay_in_fs\", PT_BOOL, 0, &conf->stay_in_fs, 0}\n        ,\n        {\"check_mounted\", PT_BOOL, 0, &conf->check_mounted, 0}\n        ,\n        {\"last_access_only_atime\", PT_BOOL, 0, &conf->last_access_only_atime, 0}\n        ,\n        {\"uid_gid_as_numbers\", PT_BOOL, 0, &conf->uid_gid_as_numbers, 0}\n        ,\n#if defined(_LUSTRE) && defined(_MDS_STAT_SUPPORT)\n        {\"direct_mds_stat\", PT_BOOL, 0, &conf->direct_mds_stat, 0}\n    
    ,\n#endif\n#ifdef _LUSTRE\n        {\"lustre_projid\", PT_BOOL, 0, &conf->lustre_projid, 0}\n        ,\n#endif\n        END_OF_PARAMS\n    };\n\n    /* get GENERAL block */\n    rc = get_cfg_block(config, GLOBAL_CONFIG_BLOCK, &general_block, msg_out);\n    if (rc)\n        return rc;\n\n    /* retrieve std parameters */\n    rc = read_scalar_params(general_block, GLOBAL_CONFIG_BLOCK, cfg_params,\n                            msg_out);\n    if (rc)\n        return rc;\n\n#ifdef _LUSTRE\n    if (strcmp(conf->fs_type, \"lustre\")) {\n        strcpy(msg_out,\n               \"This robinhood version has been built for Lustre filesystem support only\");\n        return EINVAL;\n    }\n#endif\n\n    /* fs_key param */\n    char tmpstr[128];\n    rc = GetStringParam(general_block, GLOBAL_CONFIG_BLOCK, \"fs_key\",\n                        PFLG_NO_WILDCARDS, tmpstr, sizeof(tmpstr), NULL, NULL,\n                        msg_out);\n    if ((rc != 0) && (rc != ENOENT))\n        return rc;\n    else if (rc == 0) {\n        conf->fs_key = name2fskey(tmpstr);\n        if (conf->fs_key == FSKEY_ERROR) {\n            sprintf(msg_out,\n                    \"Invalid type for fs_key: '%s' ('fsname', 'devid' or 'fsid' expected)\",\n                    tmpstr);\n            return EINVAL;\n        }\n    }\n\n    /* check unknown parameters */\n    CheckUnknownParameters(general_block, GLOBAL_CONFIG_BLOCK, allowed_params);\n\n    return 0;\n}\n\nstatic int global_cfg_set(void *module_config, bool reload)\n{\n    global_config_t *conf = (global_config_t *) module_config;\n\n    if (!reload) {\n        /* copy the whole structure content */\n        global_config = *conf;\n        return 0;\n    }\n\n    if (strcmp(conf->fs_path, global_config.fs_path))\n        DisplayLog(LVL_MAJOR, \"GlobalConfig\",\n                   GLOBAL_CONFIG_BLOCK\n                   \"::fs_path changed in config file, but cannot be modified dynamically\");\n    if (strcmp(conf->fs_type, 
global_config.fs_type))\n        DisplayLog(LVL_MAJOR, \"GlobalConfig\",\n                   GLOBAL_CONFIG_BLOCK\n                   \"::fs_type changed in config file, but cannot be modified dynamically\");\n\n    if (global_config.stay_in_fs != conf->stay_in_fs) {\n        DisplayLog(LVL_EVENT, \"GlobalConfig\",\n                   GLOBAL_CONFIG_BLOCK \"::stay_in_fs updated: %s->%s\",\n                   bool2str(global_config.stay_in_fs),\n                   bool2str(conf->stay_in_fs));\n        global_config.stay_in_fs = conf->stay_in_fs;\n    }\n\n    if (global_config.check_mounted != conf->check_mounted) {\n        DisplayLog(LVL_EVENT, \"GlobalConfig\",\n                   GLOBAL_CONFIG_BLOCK \"::check_mounted updated: %s->%s\",\n                   bool2str(global_config.check_mounted),\n                   bool2str(conf->check_mounted));\n        global_config.check_mounted = conf->check_mounted;\n    }\n\n    if (global_config.last_access_only_atime != conf->last_access_only_atime) {\n        DisplayLog(LVL_EVENT, \"GlobalConfig\",\n                   GLOBAL_CONFIG_BLOCK\n                   \"::last_access_only_atime updated: %s->%s\",\n                   bool2str(global_config.last_access_only_atime),\n                   bool2str(conf->last_access_only_atime));\n        global_config.last_access_only_atime = conf->last_access_only_atime;\n    }\n\n    if (global_config.uid_gid_as_numbers != conf->uid_gid_as_numbers) {\n        DisplayLog(LVL_MAJOR, \"GlobalConfig\",\n                   GLOBAL_CONFIG_BLOCK\n                   \"::uid_gid_as_numbers changed in config file, but cannot be modified dynamically\");\n    }\n    if (global_config.uid_gid_as_numbers)\n        DisplayLog(LVL_VERB, \"GlobalConfig\", \"UID and GID stored as numbers\");\n\n#if defined(_LUSTRE) && defined(_MDS_STAT_SUPPORT)\n    if (conf->direct_mds_stat != global_config.direct_mds_stat) {\n        DisplayLog(LVL_EVENT, \"FS_Scan_Config\",\n                   GLOBAL_CONFIG_BLOCK 
\"::direct_mds_stat updated: %u->%u\",\n                   global_config.direct_mds_stat, conf->direct_mds_stat);\n        global_config.direct_mds_stat = conf->direct_mds_stat;\n    }\n#endif\n\n#ifdef _LUSTRE\n    if (conf->lustre_projid != global_config.lustre_projid) {\n        DisplayLog(LVL_EVENT, \"FS_Scan_Config\",\n                   GLOBAL_CONFIG_BLOCK \"::lustre_projid updated: %u->%u\",\n                   global_config.lustre_projid, conf->lustre_projid);\n        global_config.lustre_projid = conf->lustre_projid;\n    }\n#endif\n\n    return 0;\n}\n\nstatic void global_cfg_write_template(FILE *output)\n{\n    print_begin_block(output, 0, GLOBAL_CONFIG_BLOCK, NULL);\n\n#ifdef _HAVE_FID\n    print_line(output, 1, \"# filesystem to be monitored\");\n    print_line(output, 1, \"fs_path = \\\"/mnt/lustre\\\" ;\");\n    fprintf(output, \"\\n\");\n#else\n    print_line(output, 1, \"# filesystem to be monitored\");\n    print_line(output, 1, \"fs_path = \\\"/tmp\\\" ;\");\n    fprintf(output, \"\\n\");\n    print_line(output, 1,\n               \"# filesystem type (as returned by 'df' or 'mount' commands)\");\n    print_line(output, 1, \"fs_type = \\\"ext3\\\" ;\");\n    fprintf(output, \"\\n\");\n    print_line(output, 1,\n               \"# filesystem property used as FS key: fsname, devid or fsid (fsid NOT recommended)\");\n    print_line(output, 1, \"fs_key = fsname ;\");\n    fprintf(output, \"\\n\");\n#endif\n    print_line(output, 1,\n               \"# check that objects are in the same device as 'fs_path',\");\n    print_line(output, 1, \"# so it will not traverse mount points\");\n    print_line(output, 1, \"stay_in_fs = yes ;\");\n    fprintf(output, \"\\n\");\n    print_line(output, 1, \"# check that the filesystem is mounted\");\n    print_line(output, 1, \"check_mounted = yes ;\");\n    fprintf(output, \"\\n\");\n    print_line(output, 1,\n               \"# Set the last_access time by only the atime variable, and not MAX(atime,mtime)\");\n    
print_line(output, 1,\n               \"# There are no guarantees that all filesystems will correctly store atime\");\n    print_line(output, 1, \"last_access_only_atime = no ;\");\n    print_line(output, 1, \"uid_gid_as_numbers = no ;\");\n\n#if defined(_LUSTRE) && defined(_MDS_STAT_SUPPORT)\n    fprintf(output, \"\\n\");\n    print_line(output, 1,\n               \"# File info is asked directly to MDS on Lustre filesystems\");\n    print_line(output, 1, \"# (scan faster, but size information is missing)\");\n    print_line(output, 1, \"direct_mds_stat        =    no ;\");\n#endif\n#ifdef _LUSTRE\n    print_line(output, 1, \"lustre_projid          =    no ;\");\n#endif\n    print_end_block(output, 0);\n}\n\nstatic void *global_cfg_new(void)\n{\n    return calloc(1, sizeof(global_config_t));\n}\n\nstatic void global_cfg_free(void *cfg)\n{\n    free(cfg);\n}\n\n/** structure with config handling functions */\nmod_cfg_funcs_t global_cfg_hdlr = {\n    .module_name = \"global\",\n    .new = global_cfg_new,\n    .free = global_cfg_free,\n    .set_default = global_cfg_set_default,\n    .read = global_cfg_read,\n    .set_config = global_cfg_set,\n    .write_default = global_cfg_write_default,\n    .write_template = global_cfg_write_template\n};\n"
  },
  {
    "path": "src/common/lustre_tools.c",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2007, 2008, 2009, 2010 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n#ifdef HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n\n#include \"list_mgr.h\"\n#include \"rbh_logs.h\"\n#include \"xplatform_print.h\"\n#include \"Memory.h\"\n#include \"rbh_misc.h\"\n#include \"rbh_basename.h\"\n\n#include <errno.h>\n#include <dirent.h> /* for DIR */\n#include <sys/ioctl.h>\n#include <pthread.h>\n#include <unistd.h>\n#include <sys/types.h>\n#include <sys/stat.h>\n#include <fcntl.h>\n\n#ifdef _HAVE_FID\n#include <sys/xattr.h> /* needed by Lustre_GetNameParent() */\n#endif\n\n#include \"lustre_extended_types.h\"\n\n/* for logs */\n#define TAG_STRIPE      \"GetStripe\"\n#define TAG_CR_STRIPE   \"CreateStriped\"\n#define TAG_OSTDF       \"OST_df\"\n#define TAG_POOLDF      \"pool_df\"\n#define TAG_MDSSTAT     \"mds_stat\"\n#define TAG_FIDPATH     \"FidPath\"\n#define TAG_LLAPI       \"llapi\"\n#define TAG_PROJID      \"projid\"\n\n#if HAVE_LLAPI_LOG_CALLBACKS\n/**\n * Map LLAPI log levels to robinhood's ones.\n * The reverse mapping is performed by rbh_msg_level_convert() in rbh_logs.c\n */\nstatic inline int lustre_msg_level_convert(enum llapi_message_level level)\n{\n    switch (level & LLAPI_MSG_MASK) {\n    case LLAPI_MSG_FATAL:\n    case LLAPI_MSG_ERROR:\n        return LVL_CRIT;\n\n    case LLAPI_MSG_WARN:\n        return LVL_MAJOR;\n\n    case LLAPI_MSG_NORMAL:\n    case LLAPI_MSG_INFO:\n        return LVL_VERB;\n\n    case LLAPI_MSG_DEBUG:\n    default:\n        return LVL_DEBUG;\n    }\n}\n\nstatic void display_llapi_msg(enum llapi_message_level lvl, int err,\n            
                  const char *fmt, va_list ap)\n{\n    vDisplayLog(lustre_msg_level_convert(lvl), TAG_LLAPI, fmt, ap);\n}\n#endif\n\n/** initialize access to lustre */\nint Lustre_Init(void)\n{\n#if HAVE_LLAPI_MSG_LEVEL\n# if HAVE_LLAPI_LOG_CALLBACKS\n    rbh_adjust_log_level_external();\n    llapi_error_callback_set(display_llapi_msg);\n    llapi_info_callback_set(display_llapi_msg);\n# else\n    llapi_msg_set_level(LLAPI_MSG_OFF);\n# endif\n#endif\n    return 0;\n}\n\nstatic void set_empty_stripe(stripe_info_t *p_stripe_info,\n                             stripe_items_t *p_stripe_items)\n{\n    if (p_stripe_info) {\n        memset(p_stripe_info, 0, sizeof(stripe_info_t));\n#ifdef HAVE_LLAPI_FSWAP_LAYOUTS\n        p_stripe_info->validator = VALID_NOSTRIPE;\n#endif\n    }\n\n    if (p_stripe_items)\n        memset(p_stripe_items, 0, sizeof(stripe_items_t));\n}\n\n\n\n/** copy object info to an allocated array of stripe_items */\nstatic void objects2stripe_items(stripe_items_t *stripe_items,\n                                 const struct lov_user_ost_data_v1 *objects,\n                                 int count)\n{\n    for (int i = 0; i < count; i++) {\n        stripe_items->stripe[i].ost_idx = objects[i].l_ost_idx;\n        stripe_items->stripe[i].ost_gen = objects[i].l_ost_gen;\n#ifdef HAVE_OBJ_ID\n        stripe_items->stripe[i].obj_id = objects[i].l_object_id;\n#ifdef HAVE_OBJ_SEQ\n        stripe_items->stripe[i].obj_seq = objects[i].l_object_seq;\n#else\n        stripe_items->stripe[i].obj_seq = objects[i].l_object_gr;\n#endif\n#else /* new structure (union of fid and id/seq) */\n        stripe_items->stripe[i].obj_id = objects[i].l_ost_oi.oi.oi_id;\n        stripe_items->stripe[i].obj_seq = objects[i].l_ost_oi.oi.oi_seq;\n#endif\n    }\n}\n\n/** Fill stripe info from lumv1 */\nstatic int stripe_info_lumv1(struct lov_user_md *lum,\n                             stripe_info_t *stripe_info,\n                             stripe_items_t *stripe_items)\n{\n    if 
(stripe_info) {\n        stripe_info->stripe_size = lum->lmm_stripe_size;\n\n        /* no stripe for released layouts */\n        if (lum->lmm_pattern & LOV_PATTERN_F_RELEASED)\n            stripe_info->stripe_count = 0;\n        else\n            stripe_info->stripe_count = lum->lmm_stripe_count;\n\n        stripe_info->pool_name[0] = '\\0';\n#ifdef HAVE_LLAPI_FSWAP_LAYOUTS\n        stripe_info->validator = lum->lmm_layout_gen;\n#endif\n    }\n\n    if (!stripe_items)\n        return 0;\n\n    /* no stripes */\n    if (lum->lmm_stripe_count == 0\n        || (lum->lmm_pattern & LOV_PATTERN_F_RELEASED)) {\n        stripe_items->count = 0;\n        stripe_items->stripe = NULL;\n        return 0;\n    }\n\n    stripe_items->count = lum->lmm_stripe_count;\n    stripe_items->stripe = MemCalloc(lum->lmm_stripe_count,\n                                     sizeof(stripe_item_t));\n    if (stripe_items->stripe == NULL)\n        return -ENOMEM;\n\n    objects2stripe_items(stripe_items, lum->lmm_objects, lum->lmm_stripe_count);\n    return 0;\n}\n\n#ifdef LOV_USER_MAGIC_V3\n/** Fill stripe info from lumv3 */\nstatic int stripe_info_lumv3(struct lov_user_md_v3 *lum,\n                             stripe_info_t *stripe_info,\n                             stripe_items_t *stripe_items)\n{\n    if (stripe_info) {\n        stripe_info->stripe_size = lum->lmm_stripe_size;\n        stripe_info->stripe_count = lum->lmm_stripe_count;\n        strncpy(stripe_info->pool_name, lum->lmm_pool_name,\n                LOV_MAXPOOLNAME);\n        stripe_info->pool_name[MAX_POOL_LEN - 1] = '\\0';\n#ifdef HAVE_LLAPI_FSWAP_LAYOUTS\n        stripe_info->validator = lum->lmm_layout_gen;\n#endif\n    }\n\n    if (!stripe_items)\n        return 0;\n\n    /* no stripes */\n    if (lum->lmm_stripe_count == 0\n        || (lum->lmm_pattern & LOV_PATTERN_F_RELEASED)) {\n        stripe_items->count = 0;\n        stripe_items->stripe = NULL;\n        return 0;\n    }\n\n    stripe_items->count = 
lum->lmm_stripe_count;\n    stripe_items->stripe = MemCalloc(lum->lmm_stripe_count,\n                                     sizeof(stripe_item_t));\n\n    if (stripe_items->stripe == NULL)\n        return -ENOMEM;\n\n    objects2stripe_items(stripe_items, lum->lmm_objects, lum->lmm_stripe_count);\n    return 0;\n}\n#endif\n\n#ifdef LOV_USER_MAGIC_COMP_V1\nstatic inline struct lov_user_md *\nlov_comp_entry(struct lov_comp_md_v1 *comp_v1, int ent_idx)\n{\n    return (struct lov_user_md *)((char *)comp_v1 +\n            comp_v1->lcm_entries[ent_idx].lcme_offset);\n}\n\n/**\n * Forward declaration of fill_stripe_info, as i can be used\n * recursively for processing PFL.\n */\nstatic int fill_stripe_info(struct lov_user_md *lum,\n                            stripe_info_t *stripe_info,\n                            stripe_items_t *stripe_items);\n\n/* FIXME This initial implementation is a workaround to provide minimal\n * support of progressive file layouts.\n * It only saves the striping information of the last initialized\n * component of a layout.\n * A better support of PFL will require a DB schema change.\n */\nstatic int stripe_info_compv1(struct lov_comp_md_v1 *lcm,\n                              stripe_info_t *stripe_info,\n                              stripe_items_t *stripe_items)\n{\n    struct lov_user_md *lum;\n    int rc;\n\n    /* use first layout by default */\n    int last_init_index = 0;\n\n    if (lcm->lcm_entry_count == 0)\n        /* no stripe info */\n        set_empty_stripe(stripe_info, stripe_items);\n\n    /* Search the last initialized element of the layout */\n    for (int i = 1; i < lcm->lcm_entry_count; i++) {\n        struct lov_comp_md_entry_v1 *lcme = &lcm->lcm_entries[i];\n\n        if (lcme->lcme_flags & LCME_FL_INIT)\n            last_init_index = i;\n    }\n\n    /* fill stripe information accordingly */\n    lum = lov_comp_entry(lcm, last_init_index);\n\n    rc = fill_stripe_info(lum, stripe_info, stripe_items);\n\n    /* Use composite 
layout generation number instead of\n     * sub-layout generation */\n    if (stripe_info)\n        stripe_info->validator = lcm->lcm_layout_gen;\n\n    return rc;\n}\n#endif\n\nstatic int fill_stripe_info(struct lov_user_md *lum,\n                            stripe_info_t *stripe_info,\n                            stripe_items_t *stripe_items)\n{\n    if (!lum)\n        return -EFAULT;\n\n    /* Check protocol version number */\n    switch (lum->lmm_magic) {\n    case LOV_USER_MAGIC_V1:\n        return stripe_info_lumv1(lum, stripe_info, stripe_items);\n\n#ifdef LOV_USER_MAGIC_V3\n    case LOV_USER_MAGIC_V3:\n        return stripe_info_lumv3((struct lov_user_md_v3 *)lum, stripe_info,\n                                 stripe_items);\n#endif\n#ifdef LOV_USER_MAGIC_COMP_V1\n    case LOV_USER_MAGIC_COMP_V1:\n        return stripe_info_compv1((struct lov_comp_md_v1 *)lum, stripe_info,\n                                 stripe_items);\n#endif\n    default:\n        DisplayLog(LVL_CRIT, TAG_STRIPE,\n                   \"Unsupported Luster magic number from getstripe: %#X\",\n                   lum->lmm_magic);\n        return -EINVAL;\n    }\n}\n\n#define LUM_SIZE_MAX (sizeof(struct lov_user_md_v3) + \\\n                  (LOV_MAX_STRIPE_COUNT * sizeof(struct lov_user_ost_data_v1)))\n\nint File_GetStripeByPath(const char *entry_path, stripe_info_t *p_stripe_info,\n                         stripe_items_t *p_stripe_items)\n{\n    int rc;\n    struct lov_user_md *p_lum;\n\n    if (!entry_path || !entry_path[0])\n        return -EFAULT;\n\n    p_lum = (struct lov_user_md *)MemAlloc(LUM_SIZE_MAX);\n    if (!p_lum)\n        return -ENOMEM;\n\n    memset(p_lum, 0, LUM_SIZE_MAX);\n    rc = llapi_file_get_stripe(entry_path, p_lum);\n\n    if (rc != 0) {\n        if (rc == -ENODATA) {\n            DisplayLog(LVL_DEBUG, TAG_STRIPE,\n                       \"File %s has no stripe information\", entry_path);\n            set_empty_stripe(p_stripe_info, p_stripe_items);\n            rc 
= 0;\n        } else if ((rc != -ENOENT) && (rc != -ESTALE))\n            DisplayLog(LVL_CRIT, TAG_STRIPE,\n                       \"Error %d getting stripe info for %s\", rc, entry_path);\n        goto out_free;\n    }\n\n    rc = fill_stripe_info(p_lum, p_stripe_info, p_stripe_items);\n\n out_free:\n    MemFree(p_lum);\n    return rc;\n}\n\nint File_GetStripeByDirFd(int dirfd, const char *fname,\n                          stripe_info_t *p_stripe_info,\n                          stripe_items_t *p_stripe_items)\n{\n    int rc = 0;\n    struct lov_user_md *p_lum;\n\n    if (!fname || !fname[0])\n        return -EFAULT;\n\n    p_lum = MemAlloc(LUM_SIZE_MAX);\n    if (!p_lum)\n        return -ENOMEM;\n\n    strcpy((char *)p_lum, fname);\n    rc = ioctl(dirfd, IOC_MDC_GETFILESTRIPE, p_lum);\n    if (rc == 0) {\n        rc = fill_stripe_info(p_lum, p_stripe_info, p_stripe_items);\n    } else {\n        rc = -errno;\n\n        if (rc == -ENODATA) {\n            DisplayLog(LVL_DEBUG, TAG_STRIPE,\n                       \"File %s has no stripe information\", fname);\n            set_empty_stripe(p_stripe_info, p_stripe_items);\n            rc = 0;\n        } else if ((rc != -ENOENT) && (rc != -ESTALE)) {\n            DisplayLog(LVL_CRIT, TAG_STRIPE,\n                       \"Error %d getting stripe info for %s\", rc, fname);\n        }\n    }\n\n    MemFree(p_lum);\n\n    return rc;\n}\n\n/**\n * check if a file has data on the given OST.\n */\nbool DataOnOST(size_t fsize, unsigned int ost_index,\n               const stripe_info_t *sinfo, const stripe_items_t *sitems)\n{\n    unsigned int stripe_blocks, i;\n\n    /* if file is empty, the answer is obviously NO */\n    if (fsize == 0)\n        return false;\n    /* If file size is > (stripe_count-1)*stripe_size, it has at least\n     * one byte on the last stripe, and a full stripe block on all previous\n     * OSTs the answer is yes.\n     * Note: this test works if stripe count is 1, and file is > 0.\n     */\n    else 
if (fsize > (sinfo->stripe_count - 1) * sinfo->stripe_size)\n        return true;\n\n    /* insane value, file may not be striped */\n    if (sinfo->stripe_size == 0)\n        return false;\n\n    /* In the remaining cases, we must check stripe_items.\n     * First compute the number of full stripe blocks,\n     * and the remaining piece of data.\n     */\n    stripe_blocks = fsize / sinfo->stripe_size;\n    /* one more block? */\n    if (fsize % sinfo->stripe_size)\n        stripe_blocks++;\n\n    /* check if ost_index is in the first stripe_blocks */\n    for (i = 0; i < sinfo->stripe_count && i < stripe_blocks; i++) {\n        if (sitems->stripe[i].ost_idx == ost_index)\n            return true;\n    }\n    /* no matched OST */\n    return false;\n}\n\n#define div_upper_round(_n, _d) (((_n)/(_d)) + ((_n) % (_d) ? 1 : 0))\n\n/** computes blocks on the given OST */\nblkcnt_t BlocksOnOST(blkcnt_t blocks, unsigned int ost_index,\n                     const stripe_info_t *sinfo, const stripe_items_t *sitems)\n{\n    unsigned long full_stripes, last_stripe_size, match_full, extra_blocks = 0;\n    int i;\n    int stripe_index = -1;\n\n    /* if block=0 the answer is obviously 0 */\n    if (blocks == 0)\n        return 0;\n    /* unsane value, file may not be stripped */\n    if (sinfo->stripe_size == 0)\n        return 0;\n\n    if ((sinfo->stripe_size % DEV_BSIZE) != 0) {\n        DisplayLog(LVL_CRIT, __func__,\n                   \"Unexpected stripe_size value %lu: not a multiple of DEV_BSIZE (%u)\",\n                   sinfo->stripe_size, DEV_BSIZE);\n        return 0;\n    }\n\n    /* what is the stripe index for this OST? 
*/\n    for (i = 0; i < sinfo->stripe_count; i++) {\n        if (sitems->stripe[i].ost_idx == ost_index) {\n            stripe_index = i;\n            break;\n        }\n    }\n    if (stripe_index == -1)\n        /* no data on the given OST */\n        return 0;\n\n    full_stripes = (blocks * DEV_BSIZE) / sinfo->stripe_size;\n    last_stripe_size = (blocks * DEV_BSIZE) % sinfo->stripe_size;\n\n    /* how many full stripes for the given index? */\n    match_full = (full_stripes - stripe_index) / sinfo->stripe_count;\n    /* + an extra stripe? */\n    if (((full_stripes - stripe_index) % sinfo->stripe_count) > 0)\n        match_full++;\n    else\n        /* last full stripe is just before this OST: extra blocks are on it */\n        extra_blocks = div_upper_round(last_stripe_size, DEV_BSIZE);\n\n    /* return value (in blocks):\n     * match_full * stripe_size / DEV_BSIZE + extra_blocks\n     */\n    return match_full * (sinfo->stripe_size / DEV_BSIZE) + extra_blocks;\n}\n\n#ifdef HAVE_LLAPI_GETPOOL_INFO\nint CreateStriped(const char *path, const stripe_info_t *old_stripe,\n                  int overwrite)\n{\n    int rc;\n\n    /* try to restripe using previous pool name */\n    if (!EMPTY_STRING(old_stripe->pool_name))\n        rc = llapi_file_create_pool(path, old_stripe->stripe_size,\n                                    -1, old_stripe->stripe_count, 0,\n                                    (char *)old_stripe->pool_name);\n    else\n        rc = llapi_file_create(path, old_stripe->stripe_size,\n                               -1, old_stripe->stripe_count, 0);\n    if ((rc == -EEXIST) && overwrite) {\n        if (unlink(path)) {\n            rc = -errno;\n            DisplayLog(LVL_MAJOR, TAG_CR_STRIPE,\n                       \"Can't remove previous entry %s: %s\", path,\n                       strerror(-rc));\n            return rc;\n        }\n        return CreateStriped(path, old_stripe,\n                             false /*target not expected to exist */);\n 
   } else if (rc != 0 && rc != -EEXIST) {\n        DisplayLog(LVL_MAJOR, TAG_CR_STRIPE,\n                   \"Error %d creating '%s' with stripe.\", rc, path);\n    }\n    return rc;\n}\n\n/* create a file with no stripe information */\nint CreateWithoutStripe(const char *path, mode_t mode, int overwrite)\n{\n    int rc;\n    int fd = open(path, O_CREAT | O_LOV_DELAY_CREATE, mode);\n    if (fd < 0) {\n        rc = -errno;\n        if (rc == -EEXIST && overwrite) {\n            if (unlink(path)) {\n                rc = -errno;\n                DisplayLog(LVL_MAJOR, TAG_CR_STRIPE,\n                           \"Can't remove previous entry %s: %s\", path,\n                           strerror(-rc));\n                return rc;\n            }\n            return CreateWithoutStripe(path, mode,\n                                       false /*target not expected to exist */\n                                       );\n        }\n        DisplayLog(LVL_MAJOR, TAG_CR_STRIPE,\n                   \"Failed to create %s without striping information: %s\", path,\n                   strerror(-rc));\n        return rc;\n    }\n    close(fd);\n    return 0;\n}\n#endif\n\n#ifdef _HAVE_FID\n\n#define FIDDIR      \".lustre/fid\"\n\n/**\n * Build .lustre/fid path associated to a handle.\n */\nint BuildFidPath(const entry_id_t *p_id,   /* IN */\n                 char *path)\n{   /* OUT */\n    if (!p_id || !path)\n        return EFAULT;\n\n    sprintf(path, \"%s\" DFID, get_fid_dir(), PFID(p_id));\n\n#ifdef _DEBUG\n    DisplayLog(LVL_FULL, TAG_FIDPATH, \"FidPath=%s\", path);\n#endif\n\n    return 0;\n}\n\n/* shift the end of the string by 1 char */\nstatic void str_shift(char *str)\n{\n    int i;\n\n    /* copy up to strlen + 1, to copy the final \\0 too */\n    for (i = 1; i <= strlen(str) + 1; i++)\n        str[i-1] = str[i];\n}\n\n/* Get POSIX path from fid (fid2path wrapper) */\nint Lustre_GetFullPath(const entry_id_t *p_id, char *fullpath,\n                       unsigned int 
len)\n{\n    char *curr = fullpath;\n    int rc;\n    long long recno = -1;\n    int linkno = 0;\n    char fid[256];\n    const char *mpath = NULL;\n    unsigned int mlen = 0;\n\n    mpath = get_mount_point(&mlen);\n\n    /* set mountpoint at the beginning of the path */\n    strcpy(fullpath, mpath);\n    curr += mlen;\n\n/* add the slash only if fid2path doesn't */\n#ifndef _FID2PATH_LEADING_SLASH\n    /* add slash */\n    *curr = '/';\n    curr++;\n#endif\n    /* just in case fid2path returns nothing */\n    *curr = '\\0';\n\n    /* fid string */\n    sprintf(fid, DFID, PFID(p_id));\n\n    /* MDT device */\n\n    /* ask the path to lustre */\n    rc = llapi_fid2path(mpath, fid, curr, len - mlen - 2, &recno, &linkno);\n\n    if ((rc != 0) && (rc != -ENOENT) && (rc != -ESTALE))\n        DisplayLog(LVL_CRIT, \"Fid2Path\",\n                   \"Error %d calling llapi_fid2path(%s,%s,%lld,%d), errno=%d.\"\n                   \" Cannot retrieve full path for %s\",\n                   rc, mpath, fid, recno, linkno, errno, fid);\n    /* curr == fullpath => fullpath is root: '/'\n     * so don't remove final slash */\n    else if (curr != fullpath) {\n        while (FINAL_SLASH(fullpath))\n            REMOVE_FINAL_SLASH(fullpath);\n    }\n\n    /* clean double slashes */\n    for (curr = strstr(fullpath, \"//\"); curr != NULL;\n         curr = strstr(curr, \"//\")) {\n         /* keep first slash */\n         curr++;\n         /* shift the end of the string by 1 char */\n         str_shift(curr);\n    }\n\n    return rc;\n}\n\n/* Get fid from Posix Path (path2fid wrapper) */\nint Lustre_GetFidFromPath(const char *fullpath, entry_id_t *p_id)\n{\n    int rc;\n    rc = llapi_path2fid(fullpath, p_id);\n\n    if ((rc != 0) && (rc != -ENOENT) && (rc != -ESTALE))\n        DisplayLog(LVL_DEBUG, \"Path2Fid\", \"llapi_path2fid(%s)=%d, seq=%llx,\"\n                   \" oid=%x, ver=%x\",\n                   fullpath, rc, p_id->f_seq, p_id->f_oid, p_id->f_ver);\n\n    return 
rc;\n}\n\n/* Get fid from a file descriptor (fd2fid wrapper) */\nint Lustre_GetFidByFd(int fd, entry_id_t *p_id)\n{\n    int rc = 0;\n#ifdef HAVE_FD2FID\n#define FD2FID \"llapi_fd2fid()\"\n    rc = llapi_fd2fid(fd, p_id);\n#else\n#define FD2FID \"ioctl(LL_IOC_PATH2FID)\"\n    if (ioctl(fd, LL_IOC_PATH2FID, p_id) != 0)\n        rc = -errno;\n#endif\n\n    if ((rc != 0) && (rc != -ENOENT) && (rc != -ESTALE))\n        DisplayLog(LVL_DEBUG, \"Fd2Fid\", FD2FID \"=%d\", rc);\n\n    return rc;\n}\n\n/** get (name+parent_id) for an entry\n * \\param linkno hardlink index\n * \\retval -ENODATA after last link\n * \\retval -ERANGE if namelen is too small\n */\nint Lustre_GetNameParent(const char *path, int linkno,\n                         lustre_fid *pfid, char *name, int namelen)\n{\n    int rc, i, len;\n    char buf[4096];\n    struct linkea_data ldata = { 0 };\n    struct lu_buf lb = { 0 };\n\n    rc = lgetxattr(path, XATTR_NAME_LINK, buf, sizeof(buf));\n    if (rc < 0)\n        return -errno;\n\n    lb.lb_buf = buf;\n    lb.lb_len = sizeof(buf);\n    ldata.ld_buf = &lb;\n    ldata.ld_leh = (struct link_ea_header *)buf;\n\n    ldata.ld_lee = LINKEA_FIRST_ENTRY(ldata);\n    ldata.ld_reclen = (ldata.ld_lee->lee_reclen[0] << 8)\n        | ldata.ld_lee->lee_reclen[1];\n\n    if (linkno >= ldata.ld_leh->leh_reccount)\n        /* beyond last link */\n        return -ENODATA;\n\n    for (i = 0; i < linkno; i++) {\n        ldata.ld_lee = LINKEA_NEXT_ENTRY(ldata);\n        ldata.ld_reclen = (ldata.ld_lee->lee_reclen[0] << 8)\n            | ldata.ld_lee->lee_reclen[1];\n    }\n\n    memcpy(pfid, &ldata.ld_lee->lee_parent_fid, sizeof(*pfid));\n    fid_be_to_cpu(pfid, pfid);\n\n    if (!fid_is_sane(pfid)) {\n        DisplayLog(LVL_MAJOR, __func__, \"insane fid: \" DFID, PFID(pfid));\n        return -EPROTO;\n    }\n\n    len = ldata.ld_reclen - sizeof(struct link_ea_entry);\n    if (len >= namelen)\n        return -ERANGE;\n\n    strncpy(name, ldata.ld_lee->lee_name, len);\n    
name[len] = '\\0';\n    return 0;\n}\n#endif\n\n/** Retrieve OST usage info ('ost df')\n *  @return 0 on success\n *          ENODEV if ost_index > ost index max of this FS\n */\nint Get_OST_usage(const char *fs_path, unsigned int ost_index,\n                  struct statfs *ost_statfs)\n{\n    struct obd_statfs stat_buf;\n    struct obd_uuid uuid_buf;\n    int rc;\n\n    /* sanity check */\n    if (!ost_statfs)\n        return EFAULT;\n    memset(&stat_buf, 0, sizeof(struct obd_statfs));\n    memset(&uuid_buf, 0, sizeof(struct obd_uuid));\n\n    /* zero the output */\n    memset(ost_statfs, 0, sizeof(struct statfs));\n\n    /* llapi_obd_statfs does not modify path (checked in code) */\n    rc = llapi_obd_statfs((char *)fs_path, LL_STATFS_LOV, ost_index, &stat_buf,\n                          &uuid_buf);\n\n    if (rc == -ENODEV)\n        /* end of list */\n        return -rc;\n    else if (rc == -EAGAIN) {\n        /* gap in OST indexes? */\n        DisplayLog(LVL_EVENT, TAG_OSTDF,\n                   \"OST #%u does not exist in filesystem %s\", ost_index,\n                   fs_path);\n        return -rc;\n    } else if (rc != 0) {\n        /* other error */\n        DisplayLog(LVL_CRIT, TAG_OSTDF,\n                   \"Error %d in llapi_obd_statfs(). 
Cannot retrieve info\"\n                   \" about OST #%u\", -rc, ost_index);\n        return -rc;\n    }\n\n    /* convert info to struct statfs */\n    ost_statfs->f_bsize = stat_buf.os_bsize;\n    ost_statfs->f_blocks = stat_buf.os_blocks;\n    ost_statfs->f_bfree = stat_buf.os_bfree;\n    ost_statfs->f_bavail = stat_buf.os_bavail;\n    ost_statfs->f_ffree = stat_buf.os_ffree;\n    ost_statfs->f_files = stat_buf.os_files;\n\n    return 0;\n}\n\n#ifdef HAVE_LLAPI_GETPOOL_INFO\n/** Retrieve pool usage info\n *  @return 0 on success\n */\nint Get_pool_usage(const char *poolname, struct statfs *pool_statfs)\n{\n    struct statfs ost_statfs;\n    int rc, i, count;\n    char pool[LOV_MAXPOOLNAME + 10];\n#ifdef FIND_MAX_OSTS\n    char *ostlist[FIND_MAX_OSTS];\n    char buffer[4096];\n#else /* no max OST count since Lustre 2.2 */\n    unsigned int obdcount = 256;\n    char **ostlist = NULL;\n    int bufsize = sizeof(struct obd_uuid) * obdcount;\n    char *buffer = MemAlloc(bufsize + (sizeof(*ostlist) * obdcount));\n    ostlist = (char **)(buffer + bufsize);\n\n    /* sanity check */\n    if (!pool_statfs) {\n        MemFree(buffer);\n        return EFAULT;\n    }\n#endif\n\n    memset(pool_statfs, 0, sizeof(struct statfs));\n\n    /* retrieve list of OSTs in the pool */\n    sprintf(pool, \"%s.%s\", get_fsname(), poolname);\n#ifdef FIND_MAX_OSTS\n    rc = llapi_get_poolmembers(pool, ostlist, FIND_MAX_OSTS, buffer, 4096);\n#else\n    do {\n        rc = llapi_get_poolmembers(pool, ostlist, obdcount, buffer, bufsize);\n        if (rc == -EOVERFLOW) {\n            /* buffer too small, increase obdcount by 2 */\n            obdcount *= 2;\n            bufsize = sizeof(struct obd_uuid) * obdcount;\n            buffer =\n                MemRealloc(buffer, bufsize + (sizeof(*ostlist) * obdcount));\n            if (buffer == NULL)\n                return ENOMEM;\n            ostlist = (char **)(buffer + bufsize);\n        }\n    } while (rc == -EOVERFLOW);\n#endif\n\n    if 
(rc < 0)\n        return -rc;\n\n    count = rc;\n\n    /* get OST info and sum them */\n    for (i = 0; i < count; i++) {\n        char *ost;\n        int index;\n        /* get ost index in <fsname>-OST<index>_UUID */\n        ost = strrchr(ostlist[i], '-');\n        if (!ost) {\n            DisplayLog(LVL_CRIT, TAG_POOLDF, \"Invalid OST format: '%s'\",\n                       ostlist[i]);\n            return EINVAL;\n        }\n\n\n        /* skip '-' */\n        ost++;\n        if (sscanf(ost, \"OST%x\", &index) != 1) {\n            DisplayLog(LVL_CRIT, TAG_POOLDF, \"Could not find OST index in\"\n                       \" string '%s'\", ost);\n            return EINVAL;\n        }\n\n        rc = Get_OST_usage(get_mount_point(NULL), index, &ost_statfs);\n        if (rc)\n            return rc;\n\n        /* sum info to struct statfs */\n        pool_statfs->f_blocks += ost_statfs.f_blocks;\n        pool_statfs->f_bfree += ost_statfs.f_bfree;\n        pool_statfs->f_bavail += ost_statfs.f_bavail;\n        pool_statfs->f_bsize = ost_statfs.f_bsize;\n        pool_statfs->f_ffree += ost_statfs.f_ffree;\n        pool_statfs->f_files += ost_statfs.f_files;\n    }\n\n    return 0;\n}\n#endif\n\n\n/* A new LL_IOC_MDC_GETINFO has been defined since Lustre 2.12.4,\n * but it doesn't return a struct stat.\n * Use the old (compatible) ioctl() instead.\n * In Lustre 2.15, IOC_MDC_GETFILEINFO_V1 is already defined.\n */\n#ifndef IOC_MDC_GETFILEINFO_V1\n#   ifdef IOC_MDC_GETFILEINFO_OLD\n#       define IOC_MDC_GETFILEINFO_V1   IOC_MDC_GETFILEINFO_OLD\n#   else\n#       define IOC_MDC_GETFILEINFO_V1   IOC_MDC_GETFILEINFO\n#   endif\n#endif\n\n\n/* This code is an adaptation of llapi_mds_getfileinfo() in liblustreapi.\n * It is unused for now, but could be useful when SOM will be implemented.\n *\n * @return 0 on success, -1*POSIX error code on failure.\n */\nint lustre_mds_stat(const char *fullpath, int parentfd, struct stat *inode)\n{\n    /* This buffer must be large enough 
for handling a filename + \\0\n     * as well as the output structure (much smaller).\n     */\n    char buffer[MAXNAMLEN + 1];\n    /* always use lov_user_mds_data_v1, as we want a struct stat as output. */\n    struct lov_user_mds_data_v1 *lmd = (struct lov_user_mds_data_v1 *)buffer;\n    const char *filename;\n    int rc;\n\n    /* sanity checks */\n    if ((fullpath == NULL) || (inode == NULL))\n        return -EINVAL;\n\n    filename = rh_basename(fullpath);\n\n    memset(lmd, 0, sizeof(buffer));\n\n    if (snprintf(buffer, sizeof(buffer), \"%s\", filename) > MAXNAMLEN)\n        return -EOVERFLOW;\n\n    rc = ioctl(parentfd, IOC_MDC_GETFILEINFO_V1, (void *)lmd);\n    if (rc < 0)\n        rc = -errno;\n\n    switch (rc) {\n    case 0:\n        *inode = lmd->lmd_st;\n        break;\n\n    case -ENOTTY:\n        /* ioctl is not supported, it is not a lustre fs.\n         * Do the regular lstat(2) instead. */\n        rc = lstat(fullpath, inode);\n        if (rc) {\n            DisplayLog(LVL_CRIT, TAG_MDSSTAT,\n                       \"Error in %s: lstat failed for %s\", __func__,\n                       fullpath);\n            rc = -errno;\n        }\n        break;\n\n    case -ENOENT:\n    case -ESTALE:\n        DisplayLog(LVL_MAJOR, TAG_MDSSTAT, \"Warning: %s: %s does not exist\",\n                   __func__, fullpath);\n        rc = -ENOENT;\n        break;\n\n    default:\n        DisplayLog(LVL_CRIT, TAG_MDSSTAT,\n                   \"Error: %s: IOC_MDC_GETFILEINFO_V1 failed for %s: rc=%d, errno=%d\",\n                   __func__, fullpath, rc, errno);\n    }\n    return rc;\n}\n\n#ifdef _HAVE_FID\nstatic DIR *fid_dir_fd = NULL;\nstatic pthread_mutex_t dir_lock = PTHREAD_MUTEX_INITIALIZER;\n\n/**\n * Call IOC_MDC_GETFILEINFO for a given fid.\n *\n * @return 0 on success, -1*POSIX error code on failure.\n */\nint lustre_mds_stat_by_fid(const entry_id_t *p_id, struct stat *inode)\n{\n    char filename[MAXNAMLEN + 1];\n    /* the buffer must be large enough 
to contain \"<mnt>/.lustre/fid/FID\" path */\n    char buffer[RBH_PATH_MAX];\n    /* always use lov_user_mds_data_v1, as we want a struct stat as output. */\n    struct lov_user_mds_data_v1 *lmd = (struct lov_user_mds_data_v1 *)buffer;\n    int rc;\n\n    /* ensure fid directory is opened */\n    if (fid_dir_fd == NULL) {\n        P(dir_lock);\n        if (fid_dir_fd == NULL) {\n            char path[RBH_PATH_MAX];\n            char *curr = path;\n            unsigned int mlen;\n\n            /* filesystem root */\n            strcpy(path, get_mount_point(&mlen));\n            curr += mlen;\n\n            /* fid directory */\n            strcpy(curr, \"/\" FIDDIR);\n\n            /* open fir directory */\n            fid_dir_fd = opendir(path);\n        }\n        V(dir_lock);\n        if (fid_dir_fd == NULL)\n            return -errno;\n    }\n\n    sprintf(filename, DFID, PFID(p_id));\n    memset(lmd, 0, sizeof(buffer));\n\n    if (snprintf(buffer, sizeof(buffer), \"%s\", filename) > MAXNAMLEN)\n        return -EOVERFLOW;\n\n    rc = ioctl(dirfd(fid_dir_fd), IOC_MDC_GETFILEINFO_V1, (void *)lmd);\n\n    if (rc) {\n        if (errno == ENOTTY) {\n            return -ENOTSUP;\n        } else if ((errno == ENOENT) || (errno == ESTALE)) {\n            DisplayLog(LVL_MAJOR, TAG_MDSSTAT, \"Warning: %s: %s does not exist\",\n                       __func__, filename);\n            return -ENOENT;\n        } else {\n            DisplayLog(LVL_CRIT, TAG_MDSSTAT,\n                       \"Error: %s: IOC_MDC_GETFILEINFO_V1 failed for %s\",\n                       __func__, filename);\n            return -errno;\n        }\n    }\n\n    *inode = lmd->lmd_st;\n    return 0;\n}\n#endif\n\n#define BRIEF_OST_FORMAT \"ost#%u:%u\"\n#define HUMAN_OST_FORMAT \"ost#%u: %u\"\n\nvoid append_stripe_list(GString *str, const stripe_items_t *p_stripe_items,\n                        bool brief)\n{\n    const char *format;\n    int i;\n\n    if (!p_stripe_items || (p_stripe_items->count == 
0)) {\n        g_string_append(str, \"(none)\");\n        return;\n    }\n\n    format = brief ? BRIEF_OST_FORMAT \", \" : HUMAN_OST_FORMAT \", \";\n\n    for (i = 0; i < p_stripe_items->count; i++) {\n        /* no comma after last item */\n        if (i == p_stripe_items->count - 1)\n            format = brief ? BRIEF_OST_FORMAT : HUMAN_OST_FORMAT;\n\n        g_string_append_printf(str, format, p_stripe_items->stripe[i].ost_idx,\n                               p_stripe_items->stripe[i].obj_id);\n    }\n}\n\n#ifndef _MDT_SPECIFIC_LOVEA\n/**\n * build LOVEA buffer from stripe information\n * @return size of significant information in buffer.\n */\nssize_t BuildLovEA(const entry_id_t *p_id, const attr_set_t *p_attrs,\n                   void *buff, size_t buf_sz)\n{\n    int i;\n    size_t len = 0;\n\n    if (!ATTR_MASK_TEST(p_attrs, stripe_info))  /* no stripe info */\n        return 0;\n\n    /* check inconsistent values */\n    if (!ATTR_MASK_TEST(p_attrs, stripe_items) ||\n        (ATTR(p_attrs, stripe_items).count !=\n         ATTR(p_attrs, stripe_info).stripe_count)) {\n        DisplayLog(LVL_MAJOR, \"BuildLovEA\",\n                   \"ERROR: inconsistent stripe info for \" DFID, PFID(p_id));\n        return -1;\n    }\n\n    /* is there a pool? 
*/\n    if (EMPTY_STRING(ATTR(p_attrs, stripe_info).pool_name)) {\n        /* no => build lov_user_md_v1 */\n        struct lov_user_md_v1 *p_lum = (struct lov_user_md_v1 *)buff;\n        len = sizeof(struct lov_user_md_v1) +\n            ATTR(p_attrs,\n                 stripe_info).stripe_count *\n            sizeof(struct lov_user_ost_data_v1);\n\n        /* check buffer size */\n        if (buf_sz < len)\n            return -1;\n\n        p_lum->lmm_magic = LOV_USER_MAGIC_V1;\n        p_lum->lmm_pattern = LOV_PATTERN_RAID0; /* the only supported for now */\n#ifdef _HAVE_FID\n        p_lum->lmm_object_id = p_id->f_oid;\n        p_lum->lmm_object_seq = p_id->f_seq;\n#else /* lmm_object_gr for Lustre 1.x */\n        p_lum->lmm_object_id = p_id->inode;\n        p_lum->lmm_object_gr = 0;\n#endif\n        p_lum->lmm_stripe_size = ATTR(p_attrs, stripe_info).stripe_size;\n        p_lum->lmm_stripe_count = ATTR(p_attrs, stripe_info).stripe_count;\n        p_lum->lmm_stripe_offset = 0;\n\n        /* set stripe items */\n        for (i = 0; i < ATTR(p_attrs, stripe_items).count; i++) {\n            p_lum->lmm_objects[i].l_ost_idx =\n                ATTR(p_attrs, stripe_items).stripe[i].ost_idx;\n            p_lum->lmm_objects[i].l_ost_gen =\n                ATTR(p_attrs, stripe_items).stripe[i].ost_gen;\n#ifdef HAVE_OBJ_ID\n            p_lum->lmm_objects[i].l_object_id =\n                ATTR(p_attrs, stripe_items).stripe[i].obj_id;\n#ifdef HAVE_OBJ_SEQ\n            p_lum->lmm_objects[i].l_object_seq =\n                ATTR(p_attrs, stripe_items).stripe[i].obj_seq;\n#else\n            p_lum->lmm_objects[i].l_object_gr =\n                ATTR(p_attrs, stripe_items).stripe[i].obj_seq;\n#endif\n#else /* new structure (union of fid and id/seq) */\n            p_lum->lmm_objects[i].l_ost_oi.oi.oi_id =\n                ATTR(p_attrs, stripe_items).obj_id;\n            p_lum->lmm_objects[i].l_ost_oi.oi.oi_seq =\n                ATTR(p_attrs, 
stripe_items).stripe[i].obj_seq;\n#endif\n        }\n        return len;\n    } else {\n        /* yes => build lov_user_md_v3 */\n        struct lov_user_md_v3 *p_lum = (struct lov_user_md_v3 *)buff;\n        len = sizeof(struct lov_user_md_v3) +\n            ATTR(p_attrs,\n                 stripe_info).stripe_count *\n            sizeof(struct lov_user_ost_data_v1);\n\n        /* check buffer size */\n        if (buf_sz < len)\n            return (size_t)-1;\n\n        p_lum->lmm_magic = LOV_USER_MAGIC_V3;\n        p_lum->lmm_pattern = LOV_PATTERN_RAID0; /* the only supported for now */\n#ifdef _HAVE_FID\n        p_lum->lmm_object_id = p_id->f_oid;\n        p_lum->lmm_object_seq = p_id->f_seq;\n#else /* lmm_object_gr for Lustre 1.x */\n        p_lum->lmm_object_id = p_id->inode;\n        p_lum->lmm_object_gr = 0;\n#endif\n        p_lum->lmm_stripe_size = ATTR(p_attrs, stripe_info).stripe_size;\n        p_lum->lmm_stripe_count = ATTR(p_attrs, stripe_info).stripe_count;\n        p_lum->lmm_stripe_offset = 0;\n        /* pool name */\n        rh_strncpy(p_lum->lmm_pool_name, ATTR(p_attrs, stripe_info).pool_name,\n                   LOV_MAXPOOLNAME);\n\n        /* set stripe items */\n        for (i = 0; i < ATTR(p_attrs, stripe_items).count; i++) {\n            p_lum->lmm_objects[i].l_ost_idx =\n                ATTR(p_attrs, stripe_items).stripe[i].ost_idx;\n            p_lum->lmm_objects[i].l_ost_gen =\n                ATTR(p_attrs, stripe_items).stripe[i].ost_gen;\n#ifdef HAVE_OBJ_ID\n            p_lum->lmm_objects[i].l_object_id =\n                ATTR(p_attrs, stripe_items).stripe[i].obj_id;\n#ifdef HAVE_OBJ_SEQ\n            p_lum->lmm_objects[i].l_object_seq =\n                ATTR(p_attrs, stripe_items).stripe[i].obj_seq;\n#else\n            p_lum->lmm_objects[i].l_object_gr =\n                ATTR(p_attrs, stripe_items).stripe[i].obj_seq;\n#endif\n#else /* new structure (union of fid and id/seq) */\n            p_lum->lmm_objects[i].l_ost_oi.oi.oi_id =\n      
          ATTR(p_attrs, stripe_items).obj_id;\n            p_lum->lmm_objects[i].l_ost_oi.oi.oi_seq =\n                ATTR(p_attrs, stripe_items).stripe[i].obj_seq;\n#endif\n        }\n        return len;\n    }\n}\n#endif\n\n/**\n * Retrieve fsxattr.\n * From lustre/utils/lfs_project.c\n *\n * \\return 0 on success, -errno on error.\n */\nstatic int project_get_xattr(const char *pathname, struct fsxattr *fsx)\n{\n        int ret;\n        int fd;\n        int rc;\n\n        fd = open(pathname, O_RDONLY | O_NOCTTY | O_NDELAY);\n        if (fd < 0) {\n            rc = -errno;\n            DisplayLog(LVL_CRIT, TAG_PROJID,\n                       \"Error: %s: failed to open '%s': %s\",\n                        __func__, pathname, strerror(-rc));\n            return rc;\n        }\n\n        ret = ioctl(fd, FS_IOC_FSGETXATTR, fsx);\n        if (ret) {\n            rc = -errno;\n            DisplayLog(LVL_CRIT, TAG_PROJID,\n                       \"Error: %s: failed to get xattr for '%s': %s\",\n                        __func__, pathname, strerror(-rc));\n            goto out_close;\n        }\n        rc = 0;\n\nout_close:\n        close(fd);\n        return rc;\n}\n\nint lustre_project_get_id(const char *pathname)\n{\n        struct fsxattr fsx;\n        int rc;\n\n        rc = project_get_xattr(pathname, &fsx);\n        if (rc < 0)\n            return rc;\n\n        return fsx.fsx_projid;\n}\n"
  },
  {
    "path": "src/common/mntent_compat.c",
    "content": "/**\n * This file has been extracted from FreeBSD fam package.\n */\n#include \"mntent_compat.h\"\n#include <sys/param.h>\n#include <sys/mount.h>\n#include <fstab.h>\n\nstruct statfs *getmntent_mntbufp;\nint getmntent_mntcount = 0;\nint getmntent_mntpos = 0;\nchar mntent_global_opts[256];\nstruct mntent mntent_global_mntent;\n\nFILE *setmntent(char *filep, char *type)\n{\n    getmntent_mntpos = 0;\n    getmntent_mntcount = getmntinfo(&getmntent_mntbufp, MNT_WAIT);\n    return (FILE *) 1;  // dummy\n}\n\nvoid getmntent_addopt(char **c, const char *s)\n{\n    int i = strlen(s);\n    *(*c)++ = ',';\n    strcpy(*c, s);\n    *c += i;\n}\n\nstruct mntent *getmntent(FILE *filep)\n{\n    char *c = mntent_global_opts + 2;\n    struct fstab *fst;\n    if (getmntent_mntpos >= getmntent_mntcount)\n        return 0;\n    if (getmntent_mntbufp[getmntent_mntpos].f_flags & MNT_RDONLY)\n        strcpy(mntent_global_opts, \"ro\");\n    else\n        strcpy(mntent_global_opts, \"rw\");\n\n    if (getmntent_mntbufp[getmntent_mntpos].f_flags & MNT_SYNCHRONOUS)\n        getmntent_addopt(&c, \"sync\");\n    if (getmntent_mntbufp[getmntent_mntpos].f_flags & MNT_NOEXEC)\n        getmntent_addopt(&c, \"noexec\");\n    if (getmntent_mntbufp[getmntent_mntpos].f_flags & MNT_NOSUID)\n        getmntent_addopt(&c, \"nosuid\");\n#ifdef MNT_NODEV\n    if (getmntent_mntbufp[getmntent_mntpos].f_flags & MNT_NODEV)\n        getmntent_addopt(&c, \"nodev\");\n#endif\n    if (getmntent_mntbufp[getmntent_mntpos].f_flags & MNT_UNION)\n        getmntent_addopt(&c, \"union\");\n    if (getmntent_mntbufp[getmntent_mntpos].f_flags & MNT_ASYNC)\n        getmntent_addopt(&c, \"async\");\n    if (getmntent_mntbufp[getmntent_mntpos].f_flags & MNT_NOATIME)\n        getmntent_addopt(&c, \"noatime\");\n    if (getmntent_mntbufp[getmntent_mntpos].f_flags & MNT_NOCLUSTERR)\n        getmntent_addopt(&c, \"noclusterr\");\n    if (getmntent_mntbufp[getmntent_mntpos].f_flags & MNT_NOCLUSTERW)\n        
getmntent_addopt(&c, \"noclusterw\");\n    if (getmntent_mntbufp[getmntent_mntpos].f_flags & MNT_NOSYMFOLLOW)\n        getmntent_addopt(&c, \"nosymfollow\");\n    if (getmntent_mntbufp[getmntent_mntpos].f_flags & MNT_SUIDDIR)\n        getmntent_addopt(&c, \"suiddir\");\n\n    mntent_global_mntent.mnt_fsname =\n        getmntent_mntbufp[getmntent_mntpos].f_mntfromname;\n    mntent_global_mntent.mnt_dir =\n        getmntent_mntbufp[getmntent_mntpos].f_mntonname;\n    mntent_global_mntent.mnt_type =\n        getmntent_mntbufp[getmntent_mntpos].f_fstypename;\n    mntent_global_mntent.mnt_opts = mntent_global_opts;\n    if ((fst = getfsspec(getmntent_mntbufp[getmntent_mntpos].f_mntfromname))) {\n        mntent_global_mntent.mnt_freq = fst->fs_freq;\n        mntent_global_mntent.mnt_passno = fst->fs_passno;\n    } else if ((fst =\n             getfsfile(getmntent_mntbufp[getmntent_mntpos].f_mntonname))) {\n        mntent_global_mntent.mnt_freq = fst->fs_freq;\n        mntent_global_mntent.mnt_passno = fst->fs_passno;\n    } else if (strcmp(getmntent_mntbufp[getmntent_mntpos].f_fstypename, \"ufs\")\n               == 0) {\n        if (strcmp(getmntent_mntbufp[getmntent_mntpos].f_mntonname, \"/\") == 0) {\n            mntent_global_mntent.mnt_freq = 1;\n            mntent_global_mntent.mnt_passno = 1;\n        } else {\n            mntent_global_mntent.mnt_freq = 2;\n            mntent_global_mntent.mnt_passno = 2;\n        }\n    } else {\n        mntent_global_mntent.mnt_freq = 0;\n        mntent_global_mntent.mnt_passno = 0;\n    }\n    ++getmntent_mntpos;\n    return &mntent_global_mntent;\n}\n\nint endmntent(FILE *filep)\n{\n    return 0;\n}\n"
  },
  {
    "path": "src/common/mntent_compat.h",
    "content": "/**\n * This file has been extracted from FreeBSD fam package.\n */\n#ifdef HAVE_MNTENT_H\n#include <mntent.h>\n#else\n\n#ifndef mntent_h_\n#define mntent_h_\n\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include <sys/param.h>\n#include <sys/ucred.h>\n#include <sys/mount.h>\n\n#define MOUNTED \"mounted\"\n#define MNTTYPE_NFS \"nfs\"\n\nstruct mntent {\n    char *mnt_fsname; /* file system name */\n    char *mnt_dir;    /* file system path prefix */\n    char *mnt_type;   /* dbg, efs, nfs */\n    char *mnt_opts;   /* ro, hide, etc. */\n    int   mnt_freq;   /* dump frequency, in days */\n    int   mnt_passno; /* pass number on parallel fsck */\n};\n\nFILE *setmntent(char *filep, char *type);\nstruct mntent *getmntent(FILE *filep);\nint endmntent(FILE *filep);\n\n#endif /* mntent_h_ */\n#endif /* not HAVE_MNTENT_H */\n"
  },
  {
    "path": "src/common/param_utils.c",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2015 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n#ifdef HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n\n#include \"rbh_misc.h\"\n#include \"rbh_logs.h\"\n#include \"status_manager.h\"\n\n#include <ctype.h>\n\n#define PARAMS_TAG \"params\" /* tag for logs */\n\n/**\n * Callback function for placeholder processing when\n * parsing a string with placeholders like {xxx}.\n * @param[in]     name  the placeholder name.\n * @param[in]     start_idx placeholder start index in original string.\n * @param[in]     end_idx   placeholder end index in original string.\n * @param[in,out] udata user data (udata parameter from placeholder_foreach()).\n * @return 0 on success, an error code on failure.\n */\ntypedef int (*placeholder_func_t) (const char *name, int start_idx,\n                                   int end_idx, void *udata);\n\n/** check if a string matches a variable name */\nstatic bool match_varname(const char *str, int len)\n{\n    int i;\n\n    if (len == 0)\n        return true;    /* allowed empty var */\n\n    /* letter expected as first char */\n    if (!isalpha(str[0]))\n        return false;\n\n    /* letter, number, underscore or dot expected */\n    for (i = 1; i < len; i++)\n        if (!isalnum(str[i]) && str[i] != '_' && str[i] != '.')\n            return false;\n\n    return true;\n}\n\n/** placeholder iterator flags */\ntypedef enum {\n    PH_ALLOW_EMPTY = (1 << 0),   /**< allow empty variable names */\n    PH_STRICT_BRACES = (1 << 1), /**< strictly check open/close braces */\n} ph_flags_t;\n\n/**\n * Look for placeholders {xxx} in str and call 
ph_func for each\n * placeholder found. Processing stops at first error returned\n * by the callback function ph_func.\n * @param[in] str the string to be parsed.\n * @param[in] str_descr string context description to be displayed in\n *                      error messages (e.g. \"cfg_block::foo_param line 42\").\n * @param[in] ph_func callback function to be called for each placeholder.\n * @param[in,out] udata arbitrary user data passed to ph_func callback.\n * @return 0 on success, an error code on failure.\n */\nstatic int placeholder_foreach(const char *str, const char *str_descr,\n                               placeholder_func_t ph_func, void *udata,\n                               ph_flags_t flags)\n{\n    const char *pass_begin = str;\n\n    do {\n        const char *begin_var;\n        const char *end_var = NULL;\n        char *var_name;\n        int rc;\n\n        /* Look for {var} patterns, allowing the usage of {}'s for other\n         * purposes (like JSON format).\n         * E.g. 
in '{\"entry_id\":\"{fid}\",\"fileclass\":\"{fileclass}\"}'\n         * only {fid} and {fileclass} must be interpreted as placeholders.\n         */\n\n        /* look for a variable */\n        begin_var = strchr(pass_begin, '{');\n\n        if (flags & PH_STRICT_BRACES)\n            /* check for unexpected '}' */\n            end_var = strchr(pass_begin, '}');\n\n        /* no more variables */\n        if (!begin_var) {\n            if ((flags & PH_STRICT_BRACES) && (end_var != NULL)) {\n                DisplayLog(LVL_CRIT, PARAMS_TAG,\n                           \"ERROR: unexpected '}' near '%s' in %s\", pass_begin,\n                           str_descr);\n                return -EINVAL;\n            }\n            /* don't check '}' */\n            break;\n        }\n\n        if (flags & PH_STRICT_BRACES) {\n            if (end_var == NULL) {\n                DisplayLog(LVL_CRIT, PARAMS_TAG, \"ERROR: unmatched '{' in %s\",\n                           str_descr);\n                return -EINVAL;\n            } else if (end_var < begin_var) {\n                DisplayLog(LVL_CRIT, PARAMS_TAG,\n                           \"ERROR: unexpected '}' near '%.*s' in %s\",\n                           (int)(begin_var - pass_begin + 1), pass_begin,\n                           str_descr);\n                return -EINVAL;\n            }\n            /* end_var is already set and is after begin_var */\n        } else {\n            /* get the first matching '}' after '{' */\n            end_var = strchr(begin_var, '}');\n            /* no strict braces control: allow no closing brace */\n            if (!end_var)\n                break;\n        }\n\n        if (!(flags & PH_ALLOW_EMPTY) && (end_var == begin_var + 1)) {\n            DisplayLog(LVL_CRIT, PARAMS_TAG, \"ERROR: empty var name in %s\",\n                       str_descr);\n            return -EINVAL;\n        }\n\n        /* if the section between braces doesn't match a variable name,\n         * skip the opening 
braces to look for a '{var}' section */\n        if (!match_varname(begin_var + 1, end_var - begin_var - 1)) {\n            /* unexpected format */\n            if (flags & PH_STRICT_BRACES) {\n                DisplayLog(LVL_CRIT, PARAMS_TAG,\n                           \"Unexpected variable syntax near '%.*s' in %s\",\n                           (int)(end_var - begin_var + 1), begin_var,\n                           str_descr);\n                return -EINVAL;\n            }\n\n            /* just skip it and continue parsing */\n            pass_begin = begin_var + 1;\n            continue;\n        }\n\n        var_name = strndup(begin_var + 1, end_var - begin_var - 1);\n        if (!var_name)\n            return -ENOMEM;\n\n#ifdef _DEBUG_POLICIES\n        fprintf(stderr, \"processing variable '%s' in %s\\n\", var_name,\n                str_descr);\n#endif\n\n        rc = ph_func(var_name, begin_var - str, end_var - str, udata);\n        free(var_name);\n        if (rc)\n            return rc;\n\n        pass_begin = end_var + 1;\n\n    } while (1);\n\n    return 0;\n}\n\n/**\n * Function to get the value of a placeholder.\n * @param[out] free_str whether the returned value must be freed.\n */\ntypedef char *(*param_value_get_func_t) (const entry_id_t *id,\n                                         const attr_set_t *attrs,\n                                         int attr_index, bool *free_str);\n\n/** information about placeholder values */\nstruct param_descr {\n    const char *name;\n    int attr_index;                    /**< -1 for none */\n    param_value_get_func_t get_func;\n};\n\n/* ========== placeholder value helpers ========== */\n\n/** return a string attribute */\nstatic char *get_str_attr(const entry_id_t *id, const attr_set_t *attrs,\n                          int attr_index, bool *free_str)\n{\n    *free_str = false;\n\n    if (attrs == NULL) {\n        DisplayLog(LVL_MAJOR, PARAMS_TAG,\n                   \"ERROR: entry attributes are not 
available in this context\");\n        return NULL;\n    }\n    if (!attr_mask_test_index(&attrs->attr_mask, attr_index)) {\n        /* for getting field_name in field_info array */\n        assert(attr_index < 32);\n        DisplayLog(LVL_MAJOR, PARAMS_TAG,\n                   \"ERROR: missing attribute '%s' to perform variable substitution\",\n                   field_infos[attr_index].field_name);\n        return NULL;\n    }\n\n    if (attr_index == ATTR_INDEX_stripe_info)\n#ifdef _LUSTRE\n        return (char *)(ATTR(attrs, stripe_info).pool_name);\n#else\n        return NULL;\n#endif\n    else\n        return (char *)&attrs->attr_values + field_infos[attr_index].offset;\n}\n\n/** return a fid string representation */\nstatic char *get_fid_str(const entry_id_t *id, const attr_set_t *attrs,\n                         int attr_index, bool *free_str)\n{\n    char *fid_str;\n\n    if (id == NULL) {\n        DisplayLog(LVL_MAJOR, PARAMS_TAG,\n                   \"ERROR: entry fid is not available in this context\");\n        return NULL;\n    }\n\n    if (attr_index == -1) {\n        if (asprintf(&fid_str, DFID_NOBRACE, PFID(id)) < 0)\n            return NULL;\n    } else {\n        if (asprintf(&fid_str,\n                     DFID_NOBRACE,\n                     PFID((entry_id_t *)&attrs->attr_values +\n                         field_infos[attr_index].offset)) < 0)\n            return NULL;\n    }\n\n    *free_str = true;\n    return fid_str;\n}\n\n/** return FS name */\nstatic char *get_fsname_param(const entry_id_t *id, const attr_set_t *attrs,\n                              int attr_index, bool *free_str)\n{\n    *free_str = false;\n    return (char *)get_fsname();\n}\n\n/** return FS root directory */\nstatic char *get_fsroot_param(const entry_id_t *id, const attr_set_t *attrs,\n                              int attr_index, bool *free_str)\n{\n    *free_str = false;\n    return (char *)global_config.fs_path;\n}\n\n/** return path to robinhood configuration file 
*/\nstatic char *get_cfg_param(const entry_id_t *id, const attr_set_t *attrs,\n                           int attr_index, bool *free_str)\n{\n    *free_str = false;\n    return (char *)config_file_path();\n}\n\n/** standard parameters allowed in placeholders */\nstatic const struct param_descr std_params[] = {\n    /* entry attributes std params */\n    {\"name\", ATTR_INDEX_name, get_str_attr},\n    {\"path\", ATTR_INDEX_fullpath, get_str_attr},\n    {\"fullpath\", ATTR_INDEX_fullpath, get_str_attr},\n    {\"fid\", -1, get_fid_str},\n    {\"parent_fid\", ATTR_INDEX_parent_id, get_fid_str},\n    {\"ost_pool\", ATTR_INDEX_stripe_info, get_str_attr},\n\n    /* global params */\n    {\"fsname\", -1, get_fsname_param},\n    {\"fsroot\", -1, get_fsroot_param},\n    {\"fspath\", -1, get_fsroot_param},\n    {\"cfg\", -1, get_cfg_param},\n\n    /* end of params */\n    {NULL, -1, NULL}\n};\n\n/** get the std parameter descriptor for the given name */\nstatic const struct param_descr *get_stdarg(const char *name)\n{\n    const struct param_descr *c;\n\n    for (c = &std_params[0]; c->name != NULL; c++) {\n        if (!strcasecmp(c->name, name))\n            return c;\n    }\n    return NULL;\n}\n\n/** argument structure for set_param_mask() callback */\nstruct set_param_mask_args {\n    /** description of the string being parsed */\n    const char *str_descr;\n    /** mask being built (to be returned by params_mask()) */\n    attr_mask_t mask;\n};\n\n/** callback function to generate std params mask */\nstatic int set_param_mask(const char *name, int begin_idx, int end_idx,\n                          void *udata)\n{\n    struct set_param_mask_args *args = udata;\n    const struct param_descr *a;\n\n    if (unlikely(args == NULL))\n        return -EINVAL;\n\n    /* only std parameters have a mask */\n    a = get_stdarg(name);\n    if (a != NULL) {\n        if (a->attr_index != -1)\n            attr_mask_set_index(&args->mask, a->attr_index);\n    }\n\n    /* unknown param 
have no mask */\n    return 0;\n}\n\nattr_mask_t params_mask(const char *str, const char *str_descr, bool *err)\n{\n    struct set_param_mask_args args = {\n        .mask = {0},\n        .str_descr = str_descr\n    };\n\n    *err = false;\n\n    if (placeholder_foreach(str, str_descr, set_param_mask, (void *)&args, 0)) {\n        *err = true;\n        return null_mask;\n    }\n\n    return args.mask;\n}\n\n/** argument structure for build_cmd() callback */\nstruct build_cmd_args {\n    bool quote;\n\n    /** entry id, attrs, ... */\n    const entry_id_t *id;\n    const attr_set_t *attrs;\n    /** arbitrary parameters */\n    const struct rbh_params *user_params;\n    /** additional parameters */\n    const char **addl_params;\n    /** status manager instance from context */\n    const sm_instance_t *smi;\n    /** description of the string being parsed */\n    const char *str_descr;\n    /** original string passed to subst_cmd_params() */\n    const char *orig_str;\n    /** index following the last processed placeholder in orig_str */\n    int last_idx;\n    /** String being built (to be returned by subst_cmd_params()).\n     * Initially allocated and empty (\"\"). */\n    GString *out_str;\n};\n\nchar *quote_shell_arg(const char *arg)\n{\n    const char *replace_with = \"'\\\\''\";\n    char *arg_walk, *quoted, *quoted_walk;\n    int count = 0;\n\n    arg_walk = (char *)arg;\n    while (*arg_walk) {\n        if (*arg_walk == '\\'') {\n            ++count;\n            if (count < 0) {\n                /* It's unlikely given our input, but avoid integer overflow. 
*/\n                return NULL;\n            }\n        }\n        ++arg_walk;\n    }\n\n    quoted = (char *)calloc(1, strlen(arg) +\n                            (count * strlen(replace_with)) + 2 + 1);\n    if (!quoted)\n        return NULL;\n\n    quoted_walk = quoted;\n    *quoted_walk = '\\'';\n    ++quoted_walk;\n\n    arg_walk = (char *)arg;\n    while (*arg_walk) {\n        if (*arg_walk == '\\'') {\n            strcat(quoted_walk, replace_with);\n            quoted_walk += strlen(replace_with);\n        } else {\n            *quoted_walk = *arg_walk;\n            ++quoted_walk;\n        }\n        ++arg_walk;\n    }\n\n    *quoted_walk = '\\'';\n    ++quoted_walk;\n    *quoted_walk = '\\0';\n\n    return quoted;\n}\n\n/** callback function to build a command by replacing placeholders. */\nstatic int build_cmd(const char *name, int begin_idx, int end_idx, void *udata)\n{\n    struct build_cmd_args *args = udata;\n    const char *val = NULL;\n    char *quoted_arg = NULL;\n    bool free_val = false;\n    int rc;\n\n    if (unlikely(args == NULL))\n        return -EINVAL;\n\n    /* append from last position to current position as is */\n    if (begin_idx > args->last_idx)\n        g_string_append_len(args->out_str, args->orig_str + args->last_idx,\n                            begin_idx - args->last_idx);\n\n    /* get value for the current parameter */\n\n    /* 1) search in user parameters */\n    if (val == NULL && args->user_params != NULL)\n        val = rbh_param_get(args->user_params, name);\n\n    /* 2) search in std parameters */\n    if (val == NULL) {\n        const struct param_descr *a = get_stdarg(name);\n\n        if (a != NULL) {\n            val = a->get_func(args->id, args->attrs, a->attr_index, &free_val);\n            if (val == NULL)\n                return -ENOENT;\n        }\n    }\n\n    /* 3) search in additional parameters */\n    if (val == NULL && args->addl_params != NULL) {\n        const char **cp;\n\n        for (cp = 
&args->addl_params[0]; *cp != NULL; cp += 2) {\n            if (!strcasecmp(cp[0], name))\n                val = (char *)cp[1];\n        }\n    }\n\n    /* 4) search in policy-specific parameters (status, specific info...) */\n    if (val == NULL) {\n        void *pval;\n        const sm_info_def_t *def;\n        unsigned int idx;\n\n        rc = sm_attr_get(args->smi, args->attrs, name, &pval, &def, &idx);\n        if (rc == 0) {\n            GString *gs = g_string_new(\"\");\n\n            ListMgr_PrintAttrPtr(gs, def->db_type, pval, \"\");\n            free_val = true;\n            val = g_string_free(gs, FALSE);\n        } else if (rc == -ENODATA) {\n            /* parameter exists but is not set.\n             * No previous value was found, use empty string instead.\n             */\n            val = \"\";\n        }\n    }\n\n    if (val == NULL) {\n        /* not found */\n        DisplayLog(LVL_CRIT, PARAMS_TAG,\n                   \"ERROR: unexpected variable '%s' in %s\", name,\n                   args->str_descr);\n        return -EINVAL;\n    }\n\n    if (args->quote) {\n        /* quote the value and append it to command line */\n        quoted_arg = quote_shell_arg(val);\n        if (!quoted_arg) {\n            rc = -ENOMEM;\n            goto out_free;\n        }\n\n        g_string_append(args->out_str, quoted_arg);\n    } else\n        g_string_append(args->out_str, val);\n\n    args->last_idx = end_idx + 1;\n    rc = 0;\n\n out_free:\n    free(quoted_arg);\n    if (free_val)\n        free((char *)val);\n    return rc;\n}\n\nchar *subst_params(const char *str_in,\n                   const char *str_descr,\n                   const entry_id_t *p_id,\n                   const attr_set_t *p_attrs,\n                   const action_params_t *params,\n                   const char **subst_array,\n                   const struct sm_instance *smi,\n                   bool quote, bool strict_braces)\n{\n    struct build_cmd_args args = {\n        .quote = 
quote,\n        .id = p_id,\n        .attrs = p_attrs,\n        .user_params = params,\n        .addl_params = subst_array,\n        .smi = smi,\n        .str_descr = str_descr,\n        .orig_str = str_in,\n        .last_idx = 0,\n        .out_str = NULL,\n    };\n    char *ret;\n\n    args.out_str = g_string_new(\"\");\n\n    if (!args.out_str || !args.str_descr)\n        goto err_free;\n\n    if (placeholder_foreach(str_in, args.str_descr, build_cmd, (void *)&args,\n                            PH_ALLOW_EMPTY | (strict_braces ? PH_STRICT_BRACES :\n                                              0)))\n        goto err_free;\n\n    /* append the end of the string */\n    if (args.last_idx < strlen(str_in))\n        g_string_append(args.out_str, str_in + args.last_idx);\n\n    /* don't release the string itself (freed by the caller) */\n    ret = g_string_free(args.out_str, FALSE);\n\n    DisplayLog(LVL_FULL, PARAMS_TAG, \"'%s'->'%s' in %s\", str_in, ret,\n               str_descr);\n    return ret;\n\n err_free:\n    if (args.out_str)\n        g_string_free(args.out_str, TRUE);\n    return NULL;\n}\n\n/*\n * Ideally would want cmd_in to be char const * const *, but\n * implicit casts don't work well with these in standard C\n */\nint subst_shell_params(char **cmd_in,\n                       const char *str_descr,\n                       const entry_id_t *p_id,\n                       const attr_set_t *p_attrs,\n                       const action_params_t *params,\n                       const char **subst_array,\n                       const struct sm_instance *smi,\n                       bool strict_braces, char ***cmd_out)\n{\n    int i;\n    char **out_av;\n    int ac;\n\n    if (!cmd_in || !cmd_in[0] || !str_descr)\n        return -EINVAL;\n\n    /* count ac once to allocate properly */\n    for (ac = 0; cmd_in[ac]; ac++)\n        ;\n\n    /* allocate out_av, NULL terminated char array */\n    out_av = calloc(sizeof(*out_av), ac + 1);\n    for (i = 0; i < ac; 
i++) {\n        out_av[i] = subst_params(cmd_in[i], str_descr, p_id, p_attrs,\n                                 params, subst_array, smi, false,\n                                 strict_braces);\n        if (out_av[i] == NULL)\n            goto err_free;\n\n        DisplayLog(LVL_FULL, PARAMS_TAG, \"[%d] '%s'->'%s' in %s\", i,\n                   cmd_in[i], out_av[i], str_descr);\n    }\n\n    *cmd_out = out_av;\n\n    /* don't release out_av (freed by the caller) */\n    return 0;\n\n err_free:\n    g_strfreev(out_av);\n    /* only EINVAL for now, might expand that if we explode subst_params */\n    return -EINVAL;\n}\n\nchar *concat_cmd(char **cmd)\n{\n    GString *built_command;\n    char *out_str;\n    int i;\n\n    if (!cmd || !cmd[0])\n        return NULL;\n\n    built_command = g_string_new(cmd[0]);\n    for (i = 1; cmd[i]; i++) {\n        g_string_append_c(built_command, ' ');\n        g_string_append(built_command, cmd[i]);\n    }\n\n    out_str = g_string_free(built_command, FALSE);\n\n    return out_str;\n}\n\n/* use compare_generic ? */\nint compare_cmd(char **c1, char **c2)\n{\n    int rc;\n\n    /* strcmp does not perform argument sanity check */\n    while (c1 && *c1 && c2 && *c2) {\n        rc = strcmp(*c1, *c2);\n        if (rc)\n            return rc;\n        /* got to next element in array */\n        c1++;\n        c2++;\n    }\n\n    /* only c1 left, it's bigger */\n    if (c1 && *c1)\n        return 1;\n\n    if (c2 && *c2)\n        return -1;\n\n    return 0;\n}\n"
  },
  {
    "path": "src/common/queue.c",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2009 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n/**\n * Module for handling queue of items with feedback management.\n * The algorithm is based on a cyclic queue.\n */\n#ifdef HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n\n#include \"queue.h\"\n#include \"rbh_logs.h\"\n#include \"Memory.h\"\n#include \"rbh_misc.h\"\n\n#include <pthread.h>\n\n#define QUEUE_TAG \"Queue\"\n\n/* cyclic queue utilities */\n#define is_empty(pq) ((pq)->first_index == (pq)->last_index)\n#define is_full(pq)  ((pq)->first_index == \\\n                      ((pq)->last_index + 1) % (pq)->array_size)\n#define nb_items(pq) (((pq)->last_index - (pq)->first_index) % (pq)->array_size)\n\n#define lockq(pq) pthread_mutex_lock(&((pq)->queue_lock))\n#define unlockq(pq) pthread_mutex_unlock(&((pq)->queue_lock))\n\n/**\n * Initialize a queue.\n */\nint CreateQueue(entry_queue_t *p_queue, unsigned int queue_size,\n                unsigned int max_status, unsigned int feedback_count)\n{\n    int rc;\n\n    if (!p_queue)\n        return EFAULT;\n\n    /* number of slots that can be used */\n    p_queue->queue_size = queue_size;\n\n    /* array must be 1 slot larger than queue_size because 1 slot is lost\n     * in cyclic queue management */\n    p_queue->array_size = queue_size + 1;\n\n    p_queue->first_index = 0;\n    p_queue->last_index = 0;\n\n    /* allocates array of entries and stats */\n    p_queue->queue = MemCalloc(p_queue->array_size, sizeof(void *));\n    if (p_queue->queue == NULL)\n        return ENOMEM;\n\n    p_queue->status_count = max_status + 1;\n    p_queue->status_array = 
MemCalloc(max_status + 1, sizeof(unsigned int));\n    if (p_queue->status_array == NULL)\n        return ENOMEM;\n\n    p_queue->feedback_count = feedback_count;\n    p_queue->feedback_array =\n        MemCalloc(feedback_count, sizeof(unsigned long long));\n    if (p_queue->feedback_array == NULL)\n        return ENOMEM;\n\n    /* init locks */\n    pthread_mutex_init(&p_queue->queue_lock, NULL);\n\n    rc = sem_init(&p_queue->sem_empty, 0, queue_size);\n    if (rc)\n        return rc;\n\n    rc = sem_init(&p_queue->sem_full, 0, 0);\n    if (rc)\n        return rc;\n\n    /* init stats */\n    p_queue->last_submitted = 0;\n    p_queue->last_unqueued = 0;\n    p_queue->last_ack = 0;\n    p_queue->nb_thr_waiting = 0;\n\n    return 0;\n}\n\n/**\n * Reset status info\n */\nvoid Reset_StatusCount(entry_queue_t *p_queue)\n{\n    unsigned int i;\n\n    lockq(p_queue);\n\n    for (i = 0; i < p_queue->status_count; i++)\n        p_queue->status_array[i] = 0;\n\n    unlockq(p_queue);\n}\n\n/**\n * Reset feedback info at given index\n */\nvoid Reset_Feedback(entry_queue_t *p_queue, unsigned int feedback_index)\n{\n    if (feedback_index >= p_queue->feedback_count) {\n        DisplayLog(LVL_CRIT, QUEUE_TAG,\n                   \"Error: feedback_index overflow (feedback_index=%u, max=%u)\",\n                   feedback_index, p_queue->feedback_count - 1);\n        return;\n    }\n\n    lockq(p_queue);\n\n    p_queue->feedback_array[feedback_index] = 0;\n\n    unlockq(p_queue);\n\n}\n\n/**\n * Insert an entry to the queue.\n * Can be blocking if the queue is full.\n */\nint Queue_Insert(entry_queue_t *p_queue, void *entry)\n{\n\n    if (p_queue == NULL)\n        return EFAULT;\n\n    sem_wait_safe(&p_queue->sem_empty); /* wait for free places */\n\n    lockq(p_queue); /* enter into the critical section */\n\n    /* The queue should not be full */\n    if (is_full(p_queue)) {\n\n        unlockq(p_queue);\n        DisplayLog(LVL_CRIT, QUEUE_TAG,\n                   \"UNEXPECTED 
ERROR: queue should not be full!\");\n        return EFAULT;\n    } else {\n\n        /* Inserts data into the queue */\n        p_queue->queue[p_queue->last_index] = entry;\n        p_queue->last_index = (p_queue->last_index + 1) % p_queue->array_size;\n\n    }\n\n    p_queue->last_submitted = time(NULL);\n\n    unlockq(p_queue);\n\n    sem_post_safe(&p_queue->sem_full);  /* increase filled places */\n\n    return 0;\n\n}\n\n/**\n * Get an entry from the queue.\n * The call is blocking until there is an element available\n * in the queue.\n */\nint Queue_Get(entry_queue_t *p_queue, void **p_ptr)\n{\n    lockq(p_queue);\n    p_queue->nb_thr_waiting++;\n    unlockq(p_queue);\n\n    sem_wait_safe(&p_queue->sem_full);  /* wait for filled places */\n\n    lockq(p_queue); /* enters into the critical section */\n\n    p_queue->nb_thr_waiting--;\n\n    /* The queue should not be empty */\n    if (is_empty(p_queue)) {\n\n        unlockq(p_queue);\n        DisplayLog(LVL_CRIT, QUEUE_TAG,\n                   \"UNEXPECTED ERROR: queue should not be empty!\");\n        return EFAULT;\n    } else {\n        /* retrieves data into the queue */\n        *p_ptr = p_queue->queue[p_queue->first_index];\n        p_queue->first_index = (p_queue->first_index + 1) % p_queue->array_size;\n    }\n\n    p_queue->last_unqueued = time(NULL);\n\n    unlockq(p_queue);\n\n    sem_post_safe(&p_queue->sem_empty); /* increase free places */\n\n    return 0;\n\n}\n\n/**\n * Acknwoledge when an entry has been handled.\n * Indicates the status and optionnal feedback info\n * (as unsigned long long array).\n * (to be called by the worker thread)\n */\nvoid Queue_Acknowledge(entry_queue_t *p_queue, unsigned int status,\n                       unsigned long long *feedback_array,\n                       unsigned int feedback_count)\n{\n    unsigned int i;\n\n    lockq(p_queue);\n\n    if (status >= p_queue->status_count)\n        DisplayLog(LVL_CRIT, QUEUE_TAG,\n                   \"ERROR: status 
overflow (status=%u, max=%u)\", status,\n                   p_queue->status_count - 1);\n    else\n        p_queue->status_array[status]++;\n\n    if (feedback_count > p_queue->feedback_count)\n        DisplayLog(LVL_CRIT, QUEUE_TAG,\n                   \"ERROR: feedback_array overflow (feedback_count=%u, max=%u)\",\n                   feedback_count, p_queue->feedback_count);\n\n    for (i = 0; i < MIN2(feedback_count, p_queue->feedback_count); i++)\n        p_queue->feedback_array[i] += feedback_array[i];\n\n    p_queue->last_ack = time(NULL);\n\n    unlockq(p_queue);\n}\n\nvoid RetrieveQueueStats(entry_queue_t *p_queue, unsigned int *p_nb_thr_wait,\n                        unsigned int *p_nb_items, time_t *p_last_submitted,\n                        time_t *p_last_unqueued, time_t *p_last_ack,\n                        unsigned int *status_array,\n                        unsigned long long *feedback_array)\n{\n    unsigned int i;\n\n    lockq(p_queue);\n\n    if (p_nb_thr_wait)\n        *p_nb_thr_wait = p_queue->nb_thr_waiting;\n    if (p_nb_items)\n        *p_nb_items = nb_items(p_queue);\n    if (p_last_submitted)\n        *p_last_submitted = p_queue->last_submitted;\n    if (p_last_unqueued)\n        *p_last_unqueued = p_queue->last_unqueued;\n    if (p_last_ack)\n        *p_last_ack = p_queue->last_ack;\n\n    if (status_array)\n        for (i = 0; i < p_queue->status_count; i++)\n            status_array[i] = p_queue->status_array[i];\n\n    if (feedback_array)\n        for (i = 0; i < p_queue->feedback_count; i++)\n            feedback_array[i] = p_queue->feedback_array[i];\n\n    unlockq(p_queue);\n}\n"
  },
  {
    "path": "src/common/rbh_cmd.c",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2016 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n/**\n * \\file  rbh_cmd.h\n * \\brief External command execution.\n */\n#ifdef HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n\n#include \"rbh_misc.h\"\n#include \"rbh_logs.h\"\n\n#include <assert.h>\n#include <unistd.h>\n\n#define TAG \"ExecCmd\"\n\n/**\n * When executing an external processes, two I/O channels are open on its\n * stdout / stderr streams.  Every time a line is read from these channels\n * we call a user-provided function back.\n */\nstruct io_chan_arg {\n    int         ident;\n    parse_cb_t  cb;\n    void       *udata;\n    struct exec_ctx *exec_ctx;\n};\n\n/**\n * GMainLoop exposes a refcount but it is not related to running and stopping\n * the loop. Because we can have several users of the loop (child process\n * termination watcher, stdout watcher, stderr watcher), we need to wait for\n * all of them to complete before calling g_main_loop_quit(). 
Use custom\n * reference counting for this purpose.\n */\nstruct exec_ctx {\n    GMainLoop    *loop;\n    GMainContext *gctx;\n    int           ref;\n    int           rc;\n};\n\nstatic inline void ctx_incref(struct exec_ctx *ctx)\n{\n    assert(ctx->ref >= 0);\n    ctx->ref++;\n}\n\nstatic inline void ctx_decref(struct exec_ctx *ctx)\n{\n    assert(ctx->ref > 0);\n    if (--ctx->ref == 0)\n        g_main_loop_quit(ctx->loop);\n}\n\n/** convert process return code to errno-like value */\nstatic int child_status2errno(int status, const char **msg)\n{\n    int rc;\n\n    if (WIFEXITED(status)) {\n        rc = WEXITSTATUS(status);\n        /* handle shell special return values */\n        switch (rc) {\n        case 0:\n            *msg = \"no error\";\n            return 0;\n        case 126:\n            *msg = \"permissions problem or command is not an executable\";\n            return -EPERM;\n        case 127:\n            *msg = \"command not found\";\n            return -ENOENT;\n        case 128:\n            *msg = \"invalid argument to exit\";\n            return -EINVAL;\n        default:\n            *msg = \"non-zero exit status\";\n            /* return code to caller as-is */\n            return rc;\n        }\n    }\n\n    if (WIFSIGNALED(status)) {\n        *msg = \"command terminated by signal\";\n        return -EINTR;\n    }\n\n    *msg = \"unexpected error\";\n    return -EIO;\n}\n\n/**\n * External process termination handler.\n */\nstatic void watch_child_cb(GPid pid, gint status, gpointer data)\n{\n    struct exec_ctx *ctx = data;\n    const char      *err = \"\";\n\n    DisplayLog(LVL_DEBUG, TAG, \"Child %d terminated with %d\", pid, status);\n\n    if (status != 0) {\n        ctx->rc = child_status2errno(status, &err);\n        DisplayLog(LVL_DEBUG, TAG, \"Command failed (%d): %s\", ctx->rc, err);\n    }\n\n    g_spawn_close_pid(pid);\n    ctx_decref(ctx);\n}\n\n/**\n * IO channel watcher.\n * Read one line from the current channel and 
forward it to the user function.\n *\n * Return true as long as the channel has to stay registered, false otherwise.\n */\nstatic gboolean readline_cb(GIOChannel *channel, GIOCondition cond,\n                            gpointer ud)\n{\n    struct io_chan_arg  *args = ud;\n    GError              *error = NULL;\n    gchar               *line;\n    gsize                size;\n    GIOStatus            res;\n\n    /* The channel is closed, no more data to read */\n    if (cond == G_IO_HUP) {\n        g_io_channel_unref(channel);\n        ctx_decref(args->exec_ctx);\n        return false;\n    }\n\n    res = g_io_channel_read_line(channel, &line, &size, NULL, &error);\n    if (res != G_IO_STATUS_NORMAL) {\n        DisplayLog(LVL_MAJOR, TAG, \"Cannot read from child: %s\",\n                   error->message);\n        g_error_free(error);\n        g_io_channel_unref(channel);\n        ctx_decref(args->exec_ctx);\n        return false;\n    }\n\n    if (args->cb != NULL)\n        args->cb(args->udata, line, size, args->ident);\n    g_free(line);\n    return true;\n}\n\n/**\n * Wrapper to set io channel encoding to NULL\n */\nstatic int iochan_null_enc(GIOChannel *chan)\n{\n    GError *err_desc = NULL;\n    int rc = 0;\n\n    if (g_io_channel_set_encoding(chan, NULL, &err_desc)\n            != G_IO_STATUS_NORMAL) {\n/* G_CONVERT_ERROR_NO_MEMORY exists since glib 2.40 */\n#if GLIB_CHECK_VERSION(2,40,0)\n        if (err_desc->code == G_CONVERT_ERROR_NO_MEMORY)\n            rc = -ENOMEM;\n        else\n#endif\n            rc = -EINVAL;\n\n        DisplayLog(LVL_MAJOR, TAG, \"Could not set channel encoding: %s\",\n                   err_desc->message);\n        g_error_free(err_desc);\n    }\n\n    return rc;\n}\n\n/**\n * g_child_watch_add will bind the source to the \"main\" main context,\n * g_main_context_get_default(), which is not what we want\n */\nstatic int g_child_watch_add_tothread(GPid pid,\n                                      GChildWatchFunc function, gpointer 
data)\n{\n    GSource *source;\n    guint id;\n\n    g_return_val_if_fail(function != NULL, 0);\n    g_return_val_if_fail(pid > 0, 0);\n\n    source = g_child_watch_source_new(pid);\n\n    g_source_set_callback(source, (GSourceFunc) function, data, NULL);\n    id = g_source_attach(source, g_main_context_get_thread_default());\n    g_source_unref(source);\n\n    return id;\n}\n\nstatic int g_io_add_watch_tothread(GIOChannel *channel,\n                                   GIOCondition condition,\n                                   GIOFunc func, gpointer user_data)\n{\n    GSource *source;\n    guint id;\n\n    g_return_val_if_fail(channel != NULL, 0);\n\n    source = g_io_create_watch(channel, condition);\n\n    g_source_set_callback(source, (GSourceFunc) func, user_data, NULL);\n\n    id = g_source_attach(source, g_main_context_get_thread_default());\n    g_source_unref(source);\n\n    return id;\n}\n\n/**\n * Execute synchronously an external command, read its output and invoke\n * a user-provided filter function on every line of it.\n */\nint execute_shell_command(char **cmd, parse_cb_t cb_func, void *cb_arg)\n{\n    struct exec_ctx     ctx = { 0 };\n    GPid                pid;\n    GError             *err_desc = NULL;\n    GSpawnFlags         flags = G_SPAWN_SEARCH_PATH | G_SPAWN_DO_NOT_REAP_CHILD;\n    GIOChannel         *out_chan = NULL;\n    GIOChannel         *err_chan = NULL;\n    struct io_chan_arg  out_args;\n    struct io_chan_arg  err_args;\n    char               *log_cmd;\n    int                 p_stdout;\n    int                 p_stderr;\n    bool                success;\n    int                 rc = 0;\n\n    ctx.gctx = g_main_context_new();\n    g_main_context_push_thread_default(ctx.gctx);\n    ctx.loop = g_main_loop_new(ctx.gctx, false);\n    ctx.ref = 0;\n    ctx.rc = 0;\n\n    DisplayLog(LVL_DEBUG, TAG, \"Spawning external command \\\"%s\\\"\", cmd[0]);\n\n    success = g_spawn_async_with_pipes(NULL,    /* Working dir */\n                       
                cmd, /* Parameters */\n                                       NULL,    /* Environment */\n                                       flags,   /* Execution directives */\n                                       NULL,    /* Child setup function */\n                                       NULL,    /* Child setup arg */\n                                       &pid,    /* Child PID */\n                                       NULL,    /* STDIN (unused) */\n                                       cb_func ? &p_stdout : NULL,  /* STDOUT */\n                                       cb_func ? &p_stderr : NULL,  /* STDERR */\n                                       &err_desc);\n    if (!success) {\n        rc = -ECHILD;\n        log_cmd = concat_cmd(cmd);\n        DisplayLog(LVL_MAJOR, TAG, \"Failed to execute \\\"%s\\\": %s\",\n                   log_cmd, err_desc->message);\n        free(log_cmd);\n        goto out_free;\n    }\n\n    /* register a watcher in the loop, thus increase refcount of our exec_ctx */\n    ctx_incref(&ctx);\n    g_child_watch_add_tothread(pid, watch_child_cb, &ctx);\n\n    if (cb_func != NULL) {\n        out_args.ident    = STDOUT_FILENO;\n        out_args.cb       = cb_func;\n        out_args.udata    = cb_arg;\n        out_args.exec_ctx = &ctx;\n        err_args.ident    = STDERR_FILENO;\n        err_args.cb       = cb_func;\n        err_args.udata    = cb_arg;\n        err_args.exec_ctx = &ctx;\n\n        out_chan = g_io_channel_unix_new(p_stdout);\n        err_chan = g_io_channel_unix_new(p_stderr);\n\n        /* instruct the refcount system to close the channels when unused */\n        g_io_channel_set_close_on_unref(out_chan, true);\n        g_io_channel_set_close_on_unref(err_chan, true);\n\n        if ((rc = iochan_null_enc(out_chan)) ||\n            (rc = iochan_null_enc(err_chan)))\n            goto out_free;\n\n        /* update refcount for the two watchers */\n        ctx_incref(&ctx);\n        ctx_incref(&ctx);\n\n        
g_io_add_watch_tothread(out_chan, G_IO_IN | G_IO_HUP,\n                                readline_cb, &out_args);\n        g_io_add_watch_tothread(err_chan, G_IO_IN | G_IO_HUP,\n                                readline_cb, &err_args);\n    }\n\n    g_main_loop_run(ctx.loop);\n\n out_free:\n    g_main_loop_unref(ctx.loop);\n    g_main_context_pop_thread_default(ctx.gctx);\n    g_main_context_unref(ctx.gctx);\n\n    if (err_desc)\n        g_error_free(err_desc);\n\n    return rc ? rc : ctx.rc;\n}\n\n/**\n * Template callback to redirect stderr to robinhood log\n * @param arg (void*)log_level.\n */\nint cb_stderr_to_log(void *arg, char *line, size_t size, int stream)\n{\n    log_level lvl = (log_level) arg;\n    int       len;\n\n    if (line == NULL)\n        return -EINVAL;\n\n    /* only log 'stderr' */\n    if (stream != STDERR_FILENO)\n        return 0;\n\n    if (log_config.debug_level < lvl)\n        return 0;\n\n    len = strnlen(line, size);\n    /* terminate the string */\n    if (len >= size)\n        line[len - 1] = '\\0';\n\n    /* remove '\\n' */\n    if ((len > 0) && (line[len - 1] == '\\n'))\n        line[len - 1] = '\\0';\n\n    DisplayLogFn(lvl, TAG, \"%s\", line);\n    return 0;\n}\n"
  },
  {
    "path": "src/common/rbh_logs.c",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2007, 2008, 2009 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n/**\n *  Robinhood logs management.\n */\n\n#ifdef HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n\n#include \"rbh_cfg_helpers.h\"\n#include \"rbh_logs.h\"\n#include \"rbh_misc.h\"\n#include \"xplatform_print.h\"\n\n#include <stdio.h>\n#include <pthread.h>\n#include <sys/param.h>\n#include <sys/types.h>\n#include <sys/stat.h>\n#include <unistd.h>\n#include <sys/utsname.h>\n#include <string.h>\n#include <errno.h>\n#include <stdlib.h>\n#include <ctype.h>\n#include <glib.h>\n\n#define SYSLOG_NAMES    /* to get the array of syslog facilities */\n#include <syslog.h>\n\n/* test that log file exists every 5min (compliency with log rotation) */\n#define TIME_TEST_FILE     300\n\n/* flush log buffer every 30s */\n#define TIME_FLUSH_LOG      30\n\n/* maximum log line size */\n#define MAX_LINE_LEN      2048\n/* maximum mail content size */\n#define MAX_MAIL_LEN      4096\n\nstatic bool log_initialized = false;\n\nlog_config_t log_config = {\n    .debug_level = LVL_EVENT,   /* used for non-initialized logging */\n    .syslog_facility = LOG_LOCAL1,\n    .syslog_priority = LOG_INFO\n};\n\n/* type for log descriptors */\ntypedef struct _log_stream_ {\n    enum {\n        RBH_LOG_DEFAULT,\n        RBH_LOG_REGFILE,\n        RBH_LOG_STDIO,\n        RBH_LOG_SYSLOG\n    } log_type;\n    pthread_rwlock_t  f_lock;  /* to protect the fields below _and logname_ */\n    FILE             *f_log;   /* for regfile and stdio */\n    ino_t             f_ino;   /* for regfile */\n} log_stream_t;\n\n#define RBH_LOG_INITIALIZER { 
.log_type = RBH_LOG_DEFAULT, \\\n                          .f_lock   = PTHREAD_RWLOCK_INITIALIZER, \\\n                          .f_log    = NULL,        \\\n                          .f_ino    = -1, }\n\n/* log descriptors for each purpose (log, reports, alerts) */\n\nstatic log_stream_t log     = RBH_LOG_INITIALIZER;\nstatic log_stream_t report  = RBH_LOG_INITIALIZER;\nstatic log_stream_t alert   = RBH_LOG_INITIALIZER;\n#ifdef HAVE_CHANGELOGS\nstatic log_stream_t chglogs = RBH_LOG_INITIALIZER;\n#endif\n\n/* syslog info */\nstatic bool syslog_opened = false;\n\n/* Check if the log file has been rotated\n * after a given delay.\n */\nstatic time_t last_time_test = 0;\n\n/* time of last flush */\nstatic time_t last_time_flush_log = 0;\n\n/* mutex for alert list */\nstatic pthread_mutex_t alert_mutex = PTHREAD_MUTEX_INITIALIZER;\n\ntypedef struct alert_type {\n    char           *title;\n    char          **entries;\n    char          **info;\n    unsigned int    count;\n\n    /* estimated size for mail (not perfectly accurate: add margins to be\n     * safe) */\n    unsigned int    estim_size;\n\n    struct alert_type *next;\n} alert_type_t;\n\nalert_type_t *alert_list = NULL;\nbool alert_batching = false;\nunsigned int alert_count = 0;\n\n/* log line headers */\nstatic char prog_name[RBH_PATH_MAX];\nstatic char machine_name[RBH_PATH_MAX];\n\n/* assign an index to each thread (displayed as [pid/thread_nbr] in the log) */\n\n#if !HAVE_PTHREAD_GETSEQUENCE_NP\n/* threads keys */\nstatic pthread_key_t thread_key;\nstatic pthread_once_t once_key = PTHREAD_ONCE_INIT;\nstatic unsigned int next_index = 1;\n\n/* init check */\nstatic inline void log_init_check(void)\n{\n    if (!log_initialized) {\n        fprintf(stderr, \"Log management is not initialized. 
Aborting.\\n\");\n        exit(1);\n    }\n}\n\n/* Thread context management */\nstatic void init_keys(void)\n{\n    pthread_key_create(&thread_key, NULL);\n}   /* init_keys */\n#endif\n\n/* returns thread index */\nstatic unsigned int GetThreadIndex(void)\n{\n\n#if HAVE_PTHREAD_GETSEQUENCE_NP\n    return pthread_getsequence_np(pthread_self());\n#else\n    unsigned int index;\n\n    /* first, we init the keys if this is the first time */\n    pthread_once(&once_key, init_keys);\n\n    index = (unsigned long)pthread_getspecific(thread_key);\n\n    if (index == 0) {\n        index = next_index++;\n        pthread_setspecific(thread_key, (void *)(unsigned long)index);\n    }\n\n    return index;\n\n#endif\n}\n\n#if _LUSTRE && HAVE_LLAPI_LOG_CALLBACKS\nstatic inline enum llapi_message_level rbh_msg_level_convert(int level)\n{\n    switch (level) {\n    case LVL_CRIT:\n        return LLAPI_MSG_ERROR;\n\n    case LVL_MAJOR:\n        return LLAPI_MSG_WARN;\n\n    case LVL_EVENT:\n        return LLAPI_MSG_NORMAL;\n\n    case LVL_VERB:\n        return LLAPI_MSG_INFO;\n\n    case LVL_DEBUG:\n    case LVL_FULL:\n    default:\n        return LLAPI_MSG_DEBUG;\n    }\n}\n#endif\n\nvoid rbh_adjust_log_level_external(void)\n{\n#if _LUSTRE && HAVE_LLAPI_LOG_CALLBACKS\n    llapi_msg_set_level(rbh_msg_level_convert(log_config.debug_level));\n#endif\n}\n\n/* initialize a single log descriptor */\nstatic int init_log_descr(const char *logname, log_stream_t *p_log)\n{\n    struct stat filestat;\n\n    p_log->f_ino = -1;\n\n    if (!strcasecmp(logname, \"stdout\")) {\n        p_log->log_type = RBH_LOG_STDIO;\n        p_log->f_log = stdout;\n    } else if (!strcasecmp(logname, \"stderr\")) {\n        p_log->log_type = RBH_LOG_STDIO;\n        p_log->f_log = stderr;\n    } else if (!strcasecmp(logname, \"syslog\")) {\n        p_log->log_type = RBH_LOG_SYSLOG;\n        p_log->f_log = NULL;\n\n        /* open syslog once */\n        if (!syslog_opened) {\n            openlog(prog_name, 
LOG_PID, log_config.syslog_facility);\n            syslog_opened = true;\n        }\n    } else {    /* log to regular file */\n\n        p_log->log_type = RBH_LOG_REGFILE;\n        p_log->f_log = fopen(logname, \"a\");\n\n        if (p_log->f_log == NULL) {\n            fprintf(stderr,\n                    \"Error opening log file %s: %s. Logging to stderr instead.\\n\",\n                    logname, strerror(errno));\n            p_log->log_type = RBH_LOG_STDIO;\n            p_log->f_log = stderr;\n            return 0;   /* do not propagate error as there is a workaround */\n        }\n\n        if (fstat(fileno(p_log->f_log), &filestat) != -1)\n            p_log->f_ino = filestat.st_ino;\n    }\n    return 0;\n}\n\n/* check syslog facility name.\n * keep p_level unchanged if not specified.\n */\nstatic int check_syslog_facility(const char *descriptor, int *p_fac,\n                                 int *p_level)\n{\n    char  descr_cp[256];\n    char *curr;\n    int   i;\n    bool  match;\n\n    rh_strncpy(descr_cp, descriptor, 256);\n    curr = strchr(descr_cp, '.');\n    if (curr != NULL) {\n        curr[0] = '\\0';\n        curr++; /* location of syslog level */\n    }\n\n    match = false;\n    for (i = 0; facilitynames[i].c_name != NULL; i++) {\n        if (!strcasecmp(facilitynames[i].c_name, descr_cp)) {\n            if (p_fac)\n                *p_fac = facilitynames[i].c_val;\n            match = true;\n            break;\n        }\n    }\n    if (!match)\n        return ENOENT;\n\n    if (curr != NULL) {\n        /* now doing the same for priority */\n        match = false;\n        for (i = 0; prioritynames[i].c_name != NULL; i++) {\n            if (!strcasecmp(prioritynames[i].c_name, curr)) {\n                if (p_level)\n                    *p_level = prioritynames[i].c_val;\n                match = true;\n                break;\n            }\n        }\n        if (!match)\n            return ENOENT;\n    }\n\n    return 0;\n}\n\n/* Open log 
files */\n\nint InitializeLogs(const char *program_name)\n{\n    struct utsname uts;\n    char          *tmp;\n    int            rc;\n\n    /* get node name */\n    if (uname(&uts) == -1)\n        strcpy(machine_name, \"???\");\n    else\n        rh_strncpy(machine_name, uts.nodename, RBH_PATH_MAX);\n\n    /* if the name is the full machine name (node.subnet.domain.ext),\n     * only kief the brief name */\n    if ((tmp = strchr(machine_name, '.')) != NULL)\n        *tmp = '\\0';\n\n    if (program_name == NULL)\n        strcpy(prog_name, \"???\");\n    else\n        rh_strncpy(prog_name, program_name, RBH_PATH_MAX);\n\n    /* open log files */\n    rc = init_log_descr(log_config.log_file, &log);\n    if (rc)\n        return rc;\n\n    rc = init_log_descr(log_config.report_file, &report);\n    if (rc)\n        return rc;\n\n    if (!EMPTY_STRING(log_config.alert_file)) {\n        rc = init_log_descr(log_config.alert_file, &alert);\n        if (rc)\n            return rc;\n    }\n#ifdef HAVE_CHANGELOGS\n    if (!EMPTY_STRING(log_config.changelogs_file)) {\n        rc = init_log_descr(log_config.changelogs_file, &chglogs);\n        if (rc)\n            return rc;\n    }\n#endif\n\n    /* Update log level for external components we get logs from (LLAPI...) 
*/\n    rbh_adjust_log_level_external();\n\n    last_time_test = time(NULL);\n    log_initialized = true;\n\n    return 0;\n\n}   /* InitializeLogs */\n\nint TestDisplayLevel(log_level level)\n{\n    return (log_config.debug_level >= level);\n}\n\n/* flush a single log descriptor */\nstatic void flush_log_descr(log_stream_t *p_log)\n{\n    pthread_rwlock_rdlock(&p_log->f_lock);\n    if ((p_log->log_type == RBH_LOG_STDIO)\n        || (p_log->log_type == RBH_LOG_REGFILE)) {\n        if (p_log->f_log != NULL)\n            fflush(p_log->f_log);\n    }\n    pthread_rwlock_unlock(&p_log->f_lock);\n}\n\n/* Flush logs (for example, at the end of a purge pass or after dumping\n * stats) */\nvoid FlushLogs(void)\n{\n    log_init_check();\n\n    flush_log_descr(&log);\n    flush_log_descr(&report);\n    flush_log_descr(&alert);\n#ifdef HAVE_CHANGELOGS\n    flush_log_descr(&chglogs);\n#endif\n}\n\nstatic void test_log_descr(const char *logname, log_stream_t *p_log)\n{\n    struct stat filestat;\n\n    /* test log rotation only for regular files */\n    if (p_log->log_type != RBH_LOG_REGFILE)\n        return;\n\n    /* If the lock is taken (another thread is doing the check)\n     * just wait for it to be released and safely continue to\n     * log after file may have been closed and switched\n     */\n    if (pthread_rwlock_trywrlock(&p_log->f_lock) != 0) {\n        pthread_rwlock_rdlock(&p_log->f_lock);\n        pthread_rwlock_unlock(&p_log->f_lock);\n        return;\n    }\n\n    if (stat(logname, &filestat) == -1) {\n        if (errno == ENOENT) {\n            /* the file disappeared, or has been renamed: opening a new one */\n            fclose(p_log->f_log);\n            p_log->f_log = fopen(logname, \"a\");\n\n            if (fstat(fileno(p_log->f_log), &filestat) != -1)\n                p_log->f_ino = filestat.st_ino;\n        }\n    } else if (p_log->f_ino != filestat.st_ino) {\n        /* the old log file was renamed, and a new one has been created:\n         * 
opening it.\n         */\n        fclose(p_log->f_log);\n        p_log->f_log = fopen(logname, \"a\");\n        p_log->f_ino = filestat.st_ino;\n    }\n\n    pthread_rwlock_unlock(&p_log->f_lock);\n}\n\n/* check if log file have been renamed */\n\nstatic void test_file_names(void)\n{\n    log_init_check();\n\n    test_log_descr(log_config.log_file, &log);\n    test_log_descr(log_config.report_file, &report);\n\n    if (!EMPTY_STRING(log_config.alert_file))\n        test_log_descr(log_config.alert_file, &alert);\n\n#ifdef HAVE_CHANGELOGS\n    if (!EMPTY_STRING(log_config.changelogs_file))\n        test_log_descr(log_config.changelogs_file, &chglogs);\n#endif\n}\n\n/* Convert log level to  string.\n * \\return -1 on error.\n */\nlog_level str2debuglevel(char *str)\n{\n    if (!strcasecmp(str, \"CRIT\"))\n        return LVL_CRIT;\n    if (!strcasecmp(str, \"MAJOR\"))\n        return LVL_MAJOR;\n    if (!strcasecmp(str, \"EVENT\"))\n        return LVL_EVENT;\n    if (!strcasecmp(str, \"VERB\"))\n        return LVL_VERB;\n    if (!strcasecmp(str, \"DEBUG\"))\n        return LVL_DEBUG;\n    if (!strcasecmp(str, \"FULL\"))\n        return LVL_FULL;\n    return -1;\n}\n\n/** replace 'non-printable chars with '?' 
*/\nstatic void clean_str(char *str)\n{\n    char *c;\n\n    for (c = str; *c != '\\0'; c++) {\n        if ((*c != '\\n') && (*c != '\\t') && (!isprint(*c)))\n            *c = '?';\n    }\n}\n\nstatic void display_line_log(log_stream_t *p_log, const char *tag,\n                             const char *format, va_list arglist)\n{\n    char          line_log[MAX_LINE_LEN];\n    int           written;\n    time_t        now = time(NULL);\n    unsigned int  th = GetThreadIndex();\n    struct tm     date;\n    int           would_print;\n\n    if (log_initialized) {\n        /* periodically check if log files have been renamed */\n        if (now - last_time_test > TIME_TEST_FILE) {\n            test_file_names();\n            last_time_test = now;\n        }\n    }\n\n    pthread_rwlock_rdlock(&p_log->f_lock);\n    /* if logs are not initalized or the log is a NULL FILE*,\n     * default logging to stderr */\n    if ((!log_initialized) ||\n        ((p_log->log_type != RBH_LOG_SYSLOG) && (p_log->f_log == NULL))) {\n        localtime_r(&now, &date);\n        written =\n            snprintf(line_log, MAX_LINE_LEN,\n                     \"%.4d/%.2d/%.2d %.2d:%.2d:%.2d %s[%lu/%u] %s%s\",\n                     1900 + date.tm_year, date.tm_mon + 1, date.tm_mday,\n                     date.tm_hour, date.tm_min, date.tm_sec,\n                     log_config.log_process ? \"robinhood\" : \"\",\n                     (unsigned long)getpid(), th, tag ? tag : \"\",\n                     tag ? \" | \" : \"\");\n\n        would_print =\n            vsnprintf(line_log + written, MAX_LINE_LEN - written, format,\n                      arglist);\n        clean_str(line_log);\n\n        if (would_print >= MAX_LINE_LEN - written)\n            fprintf(stderr, \"%s... <Line truncated. 
Original size=%u>\\n\",\n                    line_log, would_print);\n        else\n            fprintf(stderr, \"%s\\n\", line_log);\n    } else if (p_log->log_type == RBH_LOG_SYSLOG) {\n        /* add tag to syslog line */\n        char new_format[MAX_LINE_LEN];\n        if (tag)\n            snprintf(new_format, MAX_LINE_LEN, \"%s | %s\", tag, format);\n        else\n            rh_strncpy(new_format, format, MAX_LINE_LEN);\n\n        vsyslog(log_config.syslog_priority, new_format, arglist);\n    } else {    /* log to a file */\n\n        localtime_r(&now, &date);\n\n        written =\n            snprintf(line_log, MAX_LINE_LEN,\n                     \"%.4d/%.2d/%.2d %.2d:%.2d:%.2d %s%s%s[%lu/%u] %s%s\",\n                     1900 + date.tm_year, date.tm_mon + 1, date.tm_mday,\n                     date.tm_hour, date.tm_min, date.tm_sec,\n                     log_config.log_process ? prog_name : \"\",\n                     log_config.log_host ? \"@\" : \"\",\n                     log_config.log_host ? machine_name : \"\",\n                     (unsigned long)getpid(), th,\n                     tag ? tag : \"\", tag ? \" | \" : \"\");\n\n        would_print =\n            vsnprintf(line_log + written, MAX_LINE_LEN - written, format,\n                      arglist);\n        clean_str(line_log);\n\n        if (p_log->f_log != NULL) {\n            if (would_print >= MAX_LINE_LEN - written)\n                fprintf(p_log->f_log,\n                        \"%s... <Line truncated. 
Original size=%u>\\n\", line_log,\n                        would_print);\n            else\n                fprintf(p_log->f_log, \"%s\\n\", line_log);\n        }\n    }\n    pthread_rwlock_unlock(&p_log->f_lock);\n}\n\nstatic void display_line_log_(log_stream_t *p_log, const char *tag,\n                              const char *format, ...)\n{\n    va_list args;\n    va_start(args, format);\n    display_line_log(p_log, tag, format, args);\n    va_end(args);\n}\n\n/** Display a message in the log.\n *  If logs are not initialized, write to stderr.\n */\n\nvoid DisplayLogFn(log_level debug_level, const char *tag, const char *format,\n                  ...)\n{\n    va_list args;\n\n    va_start(args, format);\n    vDisplayLogFn(debug_level, tag, format, args);\n    va_end(args);\n}\n\nvoid vDisplayLogFn(log_level debug_level, const char *tag, const char *format,\n                   va_list ap)\n{\n    time_t now = time(NULL);\n\n    if (log_config.debug_level >= debug_level) {\n        display_line_log(&log, tag, format, ap);\n\n        /* test if it's time to flush.\n         * Also flush major errors, to display it immediately. 
*/\n        if ((now - last_time_flush_log) > TIME_FLUSH_LOG\n            || debug_level >= LVL_MAJOR) {\n            flush_log_descr(&log);\n            last_time_flush_log = now;\n        }\n    }\n}\n\n/* Display a message in report file */\n\nvoid DisplayReport(const char *format, ...)\n{\n    va_list args;\n\n    va_start(args, format);\n    display_line_log(&report, NULL, format, args);\n    va_end(args);\n\n    /* always flush reports, because we don't want to lose events */\n    flush_log_descr(&report);\n\n}   /* DisplayReport */\n\n#ifdef HAVE_CHANGELOGS\nvoid DisplayChangelogs(const char *format, ...)\n{\n    va_list args;\n\n    /* Bail out if no file defined */\n    if (EMPTY_STRING(log_config.changelogs_file))\n        return;\n\n    va_start(args, format);\n    display_line_log(&chglogs, NULL, format, args);\n    va_end(args);\n}\n#endif\n\nvoid Alert_StartBatching()\n{\n    /* no batching */\n    if (log_config.batch_alert_max == 1)\n        return;\n\n    P(alert_mutex);\n    alert_batching = true;\n    V(alert_mutex);\n}\n\n/* Flush batched alerts.\n * Must be called under the protection of alert_mutex\n * release mutex ASAP if release_mutex_asap is true,\n * else: don't release it.\n */\nstatic void FlushAlerts(bool release_mutex_asap)\n{\n    alert_type_t   *pcurr;\n    unsigned int    alert_types = 0;\n    unsigned int    mail_size = 0;\n    char           *title;\n    GString        *contents = NULL;\n    time_t          now;\n    struct tm       date;\n\n    /* first list scan, to determine the number of alerts, etc... 
*/\n    for (pcurr = alert_list; pcurr != NULL; pcurr = pcurr->next) {\n        alert_types++;\n        mail_size += pcurr->estim_size;\n    }\n\n    if (alert_count == 0) {\n        if (release_mutex_asap)\n            V(alert_mutex);\n        return;\n    }\n\n    now = time(NULL);\n    localtime_r(&now, &date);\n\n    if (asprintf(&title, \"robinhood alert summary (%s on %s): %u alerts\",\n                 global_config.fs_path, machine_name, alert_count) == -1) {\n        if (release_mutex_asap)\n            V(alert_mutex);\n        DisplayLog(LVL_CRIT, \"LogAlert\",\n                   \"Could not allocate mail title (robinhood alert summary (%s on %s): %u alerts\",\n                   global_config.fs_path, machine_name, alert_count);\n        return;\n    }\n\n    contents = g_string_new(\"\");\n    g_string_printf(contents, \"Date: %.4d/%.2d/%.2d %.2d:%.2d:%.2d\\n\"\n                    \"Program: %s (pid %lu)\\n\"\n                    \"Host: %s\\n\"\n                    \"Filesystem: %s\\n\",\n                    1900 + date.tm_year, date.tm_mon + 1, date.tm_mday,\n                    date.tm_hour, date.tm_min, date.tm_sec, prog_name,\n                    (unsigned long)getpid(), machine_name,\n                    global_config.fs_path);\n\n    g_string_append(contents, \"\\n===== alert summary ====\\n\\n\");\n    g_string_append_printf(contents, \"%u alerts:\\n\", alert_count);\n\n    for (pcurr = alert_list; pcurr != NULL; pcurr = pcurr->next) {\n        g_string_append_printf(contents, \"\\t* %u %s\\n\", pcurr->count,\n                               pcurr->title);\n    }\n\n    for (pcurr = alert_list; pcurr != NULL;) {\n        unsigned int i;\n\n        g_string_append_printf(contents, \"\\n==== alert '%s' ====\\n\\n\",\n                               pcurr->title);\n\n        for (i = 0; i < pcurr->count; i++) {\n            /* print and free */\n            if (pcurr->entries[i]) {\n                g_string_append_printf(contents, \"%s\\n\", 
pcurr->entries[i]);\n                if (log_config.alert_show_attrs)\n                    g_string_append_printf(contents, \"Entry info:\\n%s\\n\",\n                                           pcurr->info[i]);\n\n                free(pcurr->entries[i]);\n                free(pcurr->info[i]);\n            }\n        }\n        /* free the list of entries */\n        free(pcurr->entries);\n        free(pcurr->info);\n        free(pcurr->title);\n\n        /* set the list to the next item */\n        alert_list = pcurr->next;\n        /* free the item */\n        free(pcurr);\n        /* next item */\n        pcurr = alert_list;\n    }\n\n    /* reset alert count */\n    alert_count = 0;\n\n    /* all alerts has been released, we can put the lock */\n    if (release_mutex_asap)\n        V(alert_mutex);\n\n    /* send the mail and/or write the alert in alert file */\n    if (!EMPTY_STRING(log_config.alert_mail))\n        SendMail(log_config.alert_mail, title, contents->str);\n\n    if (!EMPTY_STRING(log_config.alert_file)) {\n        if (alert.log_type == RBH_LOG_SYSLOG) {\n            /* we need to split the content after each '\\n' */\n            char *curr = contents->str;\n            char *next = NULL;\n            display_line_log_(&alert, \"ALERT\", \"=== ALERT REPORT ===\");\n            do {\n                next = strchr(curr, '\\n');\n                if (next != NULL) {\n                    next[0] = '\\0';\n                    next++;\n                }\n                display_line_log_(&alert, \"ALERT\", curr);\n                curr = next;\n            } while (curr != NULL);\n            display_line_log_(&alert, \"ALERT\", \"=== END OF ALERT REPORT ===\");\n        } else {\n            display_line_log_(&alert, \"ALERT\", \"=== ALERT REPORT ===\\n%s\",\n                              contents->str);\n            display_line_log_(&alert, \"ALERT\", \"=== END OF ALERT REPORT ===\");\n        }\n\n        /* always flush alerts, because we don't want to 
lose events */\n        flush_log_descr(&alert);\n    }\n\n    free(title);\n    g_string_free(contents, TRUE);\n    /* mutex already released, can go out now */\n\n}   /*  Flush alerts */\n\nstatic void Alert_Add(const char *title, const char *entry, const char *info)\n{\n    alert_type_t  *pcurr;\n    bool           found = false;\n    unsigned int   entrylen = strlen(entry);\n    unsigned int   infolen = strlen(info);\n\n    /* look for an alert with the same title */\n    P(alert_mutex);\n    for (pcurr = alert_list; pcurr != NULL; pcurr = pcurr->next) {\n        if (!strcmp(pcurr->title, title)) {\n            /* OK, found */\n            found = true;\n            break;\n        }\n    }\n\n    /* if not found: add new alert type */\n    if (!found) {\n        pcurr = (alert_type_t *) malloc(sizeof(alert_type_t));\n        if (!pcurr)\n            goto out_unlock;\n\n        pcurr->title = strdup(title);\n        if (!pcurr->title) {\n            free(pcurr);\n            goto out_unlock;\n        }\n        pcurr->estim_size = strlen(title);\n        pcurr->count = 0;\n        pcurr->entries = NULL;\n        pcurr->info = NULL;\n        pcurr->next = alert_list;\n        alert_list = pcurr;\n    }\n\n    /* pcurr now points to the appropriate alert type */\n    pcurr->count++;\n\n    /* total alert count */\n    alert_count++;\n\n    /* realloc manual (3): if ptr is NULL, the call is equivalent to\n     * malloc(size) */\n    pcurr->entries =\n        (char **)realloc(pcurr->entries, pcurr->count * (sizeof(char *)));\n    if (!pcurr->entries) {\n        pcurr->count = 0;\n        goto out_unlock;\n    }\n    pcurr->entries[pcurr->count - 1] = (char *)malloc(entrylen + 2);\n    strcpy(pcurr->entries[pcurr->count - 1], entry);\n    pcurr->estim_size += entrylen;\n\n    pcurr->info =\n        (char **)realloc(pcurr->info, pcurr->count * (sizeof(char *)));\n    if (!pcurr->info) {\n        pcurr->count = 0;\n        goto out_unlock;\n    }\n    
pcurr->info[pcurr->count - 1] = (char *)malloc(infolen + 2);\n    strcpy(pcurr->info[pcurr->count - 1], info);\n    pcurr->estim_size += infolen;\n\n    if ((log_config.batch_alert_max > 1) &&\n        (alert_count >= log_config.batch_alert_max)) {\n        /* this also unlocks the mutex as soon as it is possible */\n        FlushAlerts(true);\n        return;\n    }\n\n out_unlock:\n    V(alert_mutex);\n}\n\nvoid Alert_EndBatching()\n{\n    if (alert_batching) {\n        P(alert_mutex);\n        alert_batching = false;\n        /* release the mutex too */\n        FlushAlerts(true);\n    }\n}\n\nvoid RaiseEntryAlert(const char *alert_name,    /* alert name (if set) */\n                     const char *alert_string,  /* alert description */\n                     const char *entry_path,    /* entry path */\n                     const char *entry_info)\n{   /* alert related attributes */\n    const char *title;\n    bool free_title = false;\n\n    /* lockless check (not a big problem if some alerts are sent without\n     * being batched).\n     */\n    if (alert_batching) {\n        if (alert_name && !EMPTY_STRING(alert_name))\n            title = alert_name;\n        else {\n            if (asprintf((char **)&title, \"unnamed alert %s\", alert_string)\n                == -1) {\n                DisplayLog(LVL_CRIT, \"LogAlert\",\n                           \"Could not allocate title for unnamed alert %s\",\n                           alert_string);\n                title = alert_string;\n            } else {\n                free_title = true;\n            }\n        }\n\n        Alert_Add(title, entry_path, entry_info);\n    } else {\n        if (asprintf((char **)&title, \"Robinhood alert (%s on %s): %s\",\n                     global_config.fs_path, machine_name,\n                     (alert_name && !EMPTY_STRING(alert_name) ?\n                      alert_name : \"entry matches alert rule\")) == -1) {\n            DisplayLog(LVL_CRIT, \"LogAlert\",\n               
        \"Could not allocate log alert title: Robinhood alert (%s on %s): %s\",\n                       global_config.fs_path, machine_name,\n                       (alert_name && !EMPTY_STRING(alert_name) ?\n                        alert_name : \"entry matches alert rule\"));\n            title = global_config.fs_path;\n        } else {\n            free_title = true;\n        }\n\n        if (log_config.alert_show_attrs)\n            RaiseAlert(title, \"Entry: %s\\nAlert condition: %s\\n\"\n                       \"Entry info:\\n%s\", entry_path, alert_string, entry_info);\n        else\n            RaiseAlert(title, \"Entry: %s\\nAlert condition: %s\\n\",\n                       entry_path, alert_string);\n    }\n\n    if (free_title)\n        free((char *)title);\n}\n\n/* Display a message in alert file */\n\nvoid RaiseAlert(const char *title, const char *format, ...)\n{\n    va_list     args;\n    char        mail[MAX_MAIL_LEN];\n    char       *title2;\n    int         written;\n    time_t      now = time(NULL);\n    struct tm   date;\n\n    log_init_check();\n\n    /* send alert mail, if an address was specified in config file */\n    if (!EMPTY_STRING(log_config.alert_mail)) {\n        localtime_r(&now, &date);\n        written = snprintf(mail, MAX_MAIL_LEN,\n                           \"===== %s =====\\n\"\n                           \"Date: %.4d/%.2d/%.2d %.2d:%.2d:%.2d\\n\"\n                           \"Program: %s (pid %lu)\\n\"\n                           \"Host: %s\\n\"\n                           \"Filesystem: %s\\n\",\n                           title, 1900 + date.tm_year, date.tm_mon + 1,\n                           date.tm_mday, date.tm_hour, date.tm_min, date.tm_sec,\n                           prog_name, (unsigned long)getpid(), machine_name,\n                           global_config.fs_path);\n\n        va_start(args, format);\n        vsnprintf(mail + written, MAX_MAIL_LEN - written, format, args);\n        va_end(args);\n\n        if 
(asprintf(&title2, \"%s (%s on %s)\", title,\n                     global_config.fs_path, machine_name) == -1) {\n            DisplayLog(LVL_CRIT, \"LogAlert\",\n                       \"Could not allocate alert title: %s (%s on %s)\",\n                       title, global_config.fs_path, machine_name);\n            title2 = global_config.fs_path;\n        }\n        SendMail(log_config.alert_mail, title2, mail);\n\n        if (title2 != global_config.fs_path)\n            free(title2);\n    }\n\n    if (!EMPTY_STRING(log_config.alert_file)) {\n        display_line_log_(&alert, \"ALERT\", \"%s\", title);\n        va_start(args, format);\n        display_line_log(&alert, \"ALERT\", format, args);\n        va_end(args);\n\n        /* always flush alerts, because we don't want to lose events */\n        flush_log_descr(&alert);\n    }\n\n}   /* DisplayAlert */\n\n/* Wait for next stat deadline */\nvoid WaitStatsInterval(void)\n{\n    rh_sleep(log_config.stats_interval > 0 ? log_config.stats_interval : 1);\n}\n\n/* ---------------- Config management routines -------------------- */\n\n#define RBH_LOG_CONFIG_BLOCK \"Log\"\n\nstatic void log_cfg_set_default(void *module_config)\n{\n    log_config_t *conf = (log_config_t *) module_config;\n\n    conf->debug_level = LVL_EVENT;\n    rh_strncpy(conf->log_file, \"/var/log/robinhood.log\", RBH_PATH_MAX);\n    rh_strncpy(conf->report_file, \"/var/log/robinhood_actions.log\",\n               RBH_PATH_MAX);\n\n    rh_strncpy(conf->alert_file, \"/var/log/robinhood_alerts.log\",\n               RBH_PATH_MAX);\n    conf->alert_mail[0] = '\\0';\n\n#ifdef HAVE_CHANGELOGS\n    conf->changelogs_file[0] = '\\0';\n#endif\n\n    conf->syslog_facility = LOG_LOCAL1;\n    conf->syslog_priority = LOG_INFO;\n\n    conf->batch_alert_max = 1;  /* no batching */\n    conf->alert_show_attrs = false;\n\n    conf->stats_interval = 900; /* 15min */\n\n    conf->log_process = 0;\n    conf->log_host = 0;\n}\n\nstatic void log_cfg_write_default(FILE 
*output)\n{\n    print_begin_block(output, 0, RBH_LOG_CONFIG_BLOCK, NULL);\n    print_line(output, 1, \"debug_level    :   EVENT\");\n    print_line(output, 1, \"log_file       :   \\\"/var/log/robinhood.log\\\"\");\n    print_line(output, 1,\n               \"report_file    :   \\\"/var/log/robinhood_actions.log\\\"\");\n    print_line(output, 1,\n               \"alert_file     :   \\\"/var/log/robinhood_alerts.log\\\"\");\n    print_line(output, 1, \"syslog_facility:   local1.info\");\n    print_line(output, 1, \"stats_interval :   15min\");\n    print_line(output, 1, \"batch_alert_max:   1 (no batching)\");\n    print_line(output, 1, \"alert_show_attrs: no\");\n    print_line(output, 1, \"log_procname: no\");\n    print_line(output, 1, \"log_hostname: no\");\n    print_end_block(output, 0);\n}\n\nstatic void log_cfg_write_template(FILE *output)\n{\n    print_begin_block(output, 0, RBH_LOG_CONFIG_BLOCK, NULL);\n\n    print_line(output, 1, \"# Log verbosity level\");\n    print_line(output, 1,\n               \"# Possible values are: CRIT, MAJOR, EVENT, VERB, DEBUG, FULL\");\n    print_line(output, 1, \"debug_level = EVENT ;\");\n    fprintf(output, \"\\n\");\n    print_line(output, 1, \"# Log file\");\n    print_line(output, 1, \"log_file = \\\"/var/log/robinhood.log\\\" ;\");\n    fprintf(output, \"\\n\");\n    print_line(output, 1, \"# File for reporting purge events\");\n    print_line(output, 1, \"report_file = \\\"/var/log/robinhood_actions.log\\\" ;\");\n    fprintf(output, \"\\n\");\n    print_line(output, 1,\n               \"# set alert_file, alert_mail or both depending on the alert method you wish\");\n    print_line(output, 1, \"alert_file = \\\"/var/log/robinhood_alerts.log\\\" ;\");\n    print_line(output, 1, \"alert_mail = \\\"root@localhost\\\" ;\");\n    fprintf(output, \"\\n\");\n#ifdef HAVE_CHANGELOGS\n    print_line(output, 1, \"# File to dump changelogs into\");\n    print_line(output, 1, \"changelogs_file = \\\"/var/log/robinhood_cl.log\\\" 
;\");\n#endif\n    fprintf(output, \"\\n\");\n    print_line(output, 1, \"# Interval for dumping stats (to logfile)\");\n    print_line(output, 1, \"stats_interval = 20min ;\");\n    fprintf(output, \"\\n\");\n    print_line(output, 1,\n               \"# Alert batching (to send a digest instead of 1 alert per file)\");\n    print_line(output, 1,\n               \"# 0: unlimited batch size, 1: no batching (1 alert per file),\");\n    print_line(output, 1, \"# N>1: batch N alerts per digest\");\n    print_line(output, 1, \"batch_alert_max = 5000 ;\");\n    print_line(output, 1,\n               \"# Give the detail of entry attributes for each alert?\");\n    print_line(output, 1, \"alert_show_attrs = no ;\");\n    fprintf(output, \"\\n\");\n    print_line(output, 1, \"# whether the process name appears in the log line\");\n    print_line(output, 1, \"log_procname = yes;\");\n    print_line(output, 1, \"# whether the host name appears in the log line\");\n    print_line(output, 1, \"log_hostname = yes;\");\n    print_end_block(output, 0);\n}\n\nstatic int log_cfg_read(config_file_t config, void *module_config,\n                        char *msg_out)\n{\n    int rc, tmpval;\n    char tmpstr[1024];\n    log_config_t *conf = (log_config_t *) module_config;\n    config_item_t log_block;\n\n    /* all allowed parameters names */\n    static const char * const allowed_params[] = {\n        \"debug_level\", \"log_file\", \"report_file\",\n        \"alert_file\", \"alert_mail\", \"stats_interval\", \"batch_alert_max\",\n        \"alert_show_attrs\", \"syslog_facility\", \"log_procname\", \"log_hostname\",\n#ifdef HAVE_CHANGELOGS\n        \"changelogs_file\",\n#endif\n        NULL\n    };\n\n    /* std parameters */\n    const cfg_param_t cfg_params[] = {\n        {\"log_file\", PT_STRING,\n         PFLG_ABSOLUTE_PATH | PFLG_NO_WILDCARDS | PFLG_STDIO_ALLOWED,\n         conf->log_file, sizeof(conf->log_file)}\n        ,\n        {\"report_file\", PT_STRING,\n         
PFLG_ABSOLUTE_PATH | PFLG_NO_WILDCARDS | PFLG_STDIO_ALLOWED,\n         conf->report_file, sizeof(conf->report_file)}\n        ,\n        {\"alert_file\", PT_STRING,\n         PFLG_ABSOLUTE_PATH | PFLG_NO_WILDCARDS | PFLG_STDIO_ALLOWED,\n         conf->alert_file, sizeof(conf->alert_file)}\n        ,\n        {\"alert_mail\", PT_STRING, PFLG_MAIL,\n         conf->alert_mail, sizeof(conf->alert_mail)}\n        ,\n#ifdef HAVE_CHANGELOGS\n        {\"changelogs_file\", PT_STRING,\n         PFLG_ABSOLUTE_PATH | PFLG_NO_WILDCARDS | PFLG_STDIO_ALLOWED,\n         conf->changelogs_file, sizeof(conf->changelogs_file)}\n        ,\n#endif\n        /* TODO add cfg flag: clean if not found */\n        {\"stats_interval\", PT_DURATION, PFLG_POSITIVE | PFLG_NOT_NULL,\n         &conf->stats_interval, 0}\n        ,\n        {\"batch_alert_max\", PT_INT, PFLG_POSITIVE, &conf->batch_alert_max, 0}\n        ,\n        {\"alert_show_attrs\", PT_BOOL, 0, &conf->alert_show_attrs, 0}\n        ,\n        {\"log_procname\", PT_BOOL, 0, &conf->log_process, 0}\n        ,\n        {\"log_hostname\", PT_BOOL, 0, &conf->log_host, 0}\n        ,\n\n        {NULL, 0, 0, NULL, 0}\n    };\n\n    /* get Log block */\n    rc = get_cfg_block(config, RBH_LOG_CONFIG_BLOCK, &log_block, msg_out);\n    if (rc)\n        return rc == ENOENT ? 
0 : rc;   /* not mandatory */\n\n    /* read std parameters */\n    rc = read_scalar_params(log_block, RBH_LOG_CONFIG_BLOCK, cfg_params,\n                            msg_out);\n    if (rc)\n        return rc;\n\n    /* read specific parameters */\n    rc = GetStringParam(log_block, RBH_LOG_CONFIG_BLOCK, \"debug_level\",\n                        PFLG_NO_WILDCARDS, tmpstr, sizeof(tmpstr), NULL,\n                        NULL, msg_out);\n    if ((rc != 0) && (rc != ENOENT))\n        return rc;\n    else if (rc != ENOENT) {\n        tmpval = str2debuglevel(tmpstr);\n\n        if (tmpval < 0) {\n            sprintf(msg_out,\n                    \"Invalid value for \" RBH_LOG_CONFIG_BLOCK\n                    \"::debug_level: '%s'. CRIT, MAJOR, EVENT, VERB, DEBUG or FULL expected\",\n                    tmpstr);\n            return EINVAL;\n        } else\n            conf->debug_level = tmpval;\n    }\n\n    rc = GetStringParam(log_block, RBH_LOG_CONFIG_BLOCK, \"syslog_facility\",\n                        PFLG_NO_WILDCARDS, tmpstr, sizeof(tmpstr), NULL,\n                        NULL, msg_out);\n    if ((rc != 0) && (rc != ENOENT))\n        return rc;\n    else if (rc == 0) {\n        rc = check_syslog_facility(tmpstr, &conf->syslog_facility,\n                                   &conf->syslog_priority);\n        if (rc) {\n            sprintf(msg_out,\n                    \"Invalid syslog channel '%s': expected syntax: <facility>[.<priority>]\",\n                    tmpstr);\n            return rc;\n        }\n    }\n\n    CheckUnknownParameters(log_block, RBH_LOG_CONFIG_BLOCK, allowed_params);\n\n    return 0;\n}\n\nstatic int log_cfg_reload(log_config_t *conf)\n{\n    if (conf->debug_level != log_config.debug_level) {\n        if (!log_config.force_debug_level) {\n            DisplayLog(LVL_MAJOR, \"LogConfig\",\n                       RBH_LOG_CONFIG_BLOCK \"::debug_level modified: '%d'->'%d'\",\n                       log_config.debug_level, conf->debug_level);\n        
    log_config.debug_level = conf->debug_level;\n        } else {\n            DisplayLog(LVL_EVENT, \"LogConfig\", \"Log level is forced by command \"\n                       \"line. Not taking configuration parameter \"\n                       RBH_LOG_CONFIG_BLOCK \"::debug_level into account.\");\n        }\n    }\n\n    /* log files can be changed dynamically: this will just be considered as if\n     * it was renamed */\n    if (strcmp(conf->log_file, log_config.log_file)) {\n        if (!log_config.force_log_file) {\n            DisplayLog(LVL_MAJOR, \"LogConfig\",\n                       RBH_LOG_CONFIG_BLOCK \"::log_file modified: '%s'->'%s'\",\n                       log_config.log_file, conf->log_file);\n\n            /* lock file name to avoid reading inconsistent filenames */\n            pthread_rwlock_wrlock(&log.f_lock);\n            strcpy(log_config.log_file, conf->log_file);\n            pthread_rwlock_unlock(&log.f_lock);\n        } else {\n            DisplayLog(LVL_EVENT, \"LogConfig\", \"Log file is forced by command \"\n                       \"line. 
Not taking configuration parameter \"\n                       RBH_LOG_CONFIG_BLOCK \"::log_file into account.\");\n        }\n    }\n\n    if (strcmp(conf->report_file, log_config.report_file)) {\n        DisplayLog(LVL_MAJOR, \"LogConfig\",\n                   RBH_LOG_CONFIG_BLOCK \"::report_file modified: '%s'->'%s'\",\n                   log_config.report_file, conf->report_file);\n\n        /* lock file name to avoid reading inconsistent filenames */\n        pthread_rwlock_wrlock(&report.f_lock);\n        strcpy(log_config.report_file, conf->report_file);\n        pthread_rwlock_unlock(&report.f_lock);\n    }\n\n    if (strcmp(conf->alert_file, log_config.alert_file)) {\n        DisplayLog(LVL_MAJOR, \"LogConfig\",\n                   RBH_LOG_CONFIG_BLOCK \"::alert_file modified: '%s'->'%s'\",\n                   log_config.alert_file, conf->alert_file);\n\n        /* lock file name to avoid reading inconsistent filenames */\n        pthread_rwlock_wrlock(&alert.f_lock);\n        strcpy(log_config.alert_file, conf->alert_file);\n        pthread_rwlock_unlock(&alert.f_lock);\n    }\n\n    if (strcmp(conf->alert_mail, log_config.alert_mail))\n        DisplayLog(LVL_MAJOR, \"LogConfig\",\n                   RBH_LOG_CONFIG_BLOCK\n                   \"::alert_mail changed in config file, but cannot be modified dynamically\");\n\n#ifdef HAVE_CHANGELOGS\n    if (strcmp(conf->changelogs_file, log_config.changelogs_file)) {\n        DisplayLog(LVL_MAJOR, \"LogConfig\",\n                   RBH_LOG_CONFIG_BLOCK\n                   \"::changelogs_file modified: '%s'->'%s'\",\n                   log_config.changelogs_file, conf->changelogs_file);\n\n        /* lock file name to avoid reading inconsistent filenames */\n        pthread_rwlock_wrlock(&chglogs.f_lock);\n        strcpy(log_config.changelogs_file, conf->changelogs_file);\n        pthread_rwlock_unlock(&chglogs.f_lock);\n    }\n#endif\n\n    if (conf->stats_interval != log_config.stats_interval) {\n        
DisplayLog(LVL_MAJOR, \"LogConfig\",\n                   RBH_LOG_CONFIG_BLOCK \"::stats_interval modified: \"\n                   \"'%\" PRI_TT \"'->'%\" PRI_TT \"'\",\n                   log_config.stats_interval, conf->stats_interval);\n        log_config.stats_interval = conf->stats_interval;\n    }\n\n    if (conf->batch_alert_max != log_config.batch_alert_max) {\n        DisplayLog(LVL_MAJOR, \"LogConfig\",\n                   RBH_LOG_CONFIG_BLOCK\n                   \"::batch_alert_max modified: '%u'->'%u'\",\n                   log_config.batch_alert_max, conf->batch_alert_max);\n\n        /* flush batched alerts first */\n        P(alert_mutex);\n\n        if (alert_batching)\n            /* don't release mutex */\n            FlushAlerts(false);\n\n        log_config.batch_alert_max = conf->batch_alert_max;\n        V(alert_mutex);\n    }\n\n    if (conf->log_process != log_config.log_process) {\n        DisplayLog(LVL_MAJOR, \"LogConfig\",\n                   RBH_LOG_CONFIG_BLOCK \"::log_procname modified: '%s'->'%s'\",\n                   bool2str(log_config.log_process),\n                   bool2str(conf->log_process));\n        log_config.log_process = conf->log_process;\n    }\n\n    if (conf->log_host != log_config.log_host) {\n        DisplayLog(LVL_MAJOR, \"LogConfig\",\n                   RBH_LOG_CONFIG_BLOCK \"::log_hostname modified: '%s'->'%s'\",\n                   bool2str(log_config.log_host), bool2str(conf->log_host));\n        log_config.log_host = conf->log_host;\n    }\n\n    rbh_adjust_log_level_external();\n    return 0;\n}\n\nvoid force_debug_level(log_level level)\n{\n    log_config.debug_level = level;\n    log_config.force_debug_level = true;\n}\n\n/**\n * Force log file.\n * Won't be overridden by configuration.\n */\nvoid force_log_file(const char *file)\n{\n    rh_strncpy(log_config.log_file, file, sizeof(log_config.log_file));\n    log_config.force_log_file = true;\n}\n\n\nstatic int log_cfg_set(void *cfg, bool reload)\n{\n    
log_config_t *config = (log_config_t *) cfg;\n    char old_log_file[RBH_PATH_MAX] = \"\";\n    int old_level = -1;\n\n    if (reload)\n        return log_cfg_reload(config);\n\n    /* keep previous values in case log level is forced */\n    if (log_config.force_debug_level)\n        old_level = log_config.debug_level;\n\n    if (log_config.force_log_file)\n        rh_strncpy(old_log_file, log_config.log_file, sizeof(old_log_file));\n\n    log_config = *config;\n\n    /* restore previous values */\n    if (old_level != -1) {\n        log_config.force_debug_level = true;\n        log_config.debug_level = old_level;\n    }\n    if (!EMPTY_STRING(old_log_file)) {\n        log_config.force_log_file = true;\n        rh_strncpy(log_config.log_file, old_log_file,\n                   sizeof(log_config.log_file));\n    }\n\n    return 0;\n}\n\nstatic void *log_cfg_new(void)\n{\n    return calloc(1, sizeof(log_config_t));\n}\n\nstatic void log_cfg_free(void *cfg)\n{\n    if (cfg != NULL)\n        free(cfg);\n}\n\n/* export config functions */\nmod_cfg_funcs_t log_cfg_hdlr = {\n    .module_name = \"logs\",\n    .new = log_cfg_new,\n    .free = log_cfg_free,\n    .set_default = log_cfg_set_default,\n    .read = log_cfg_read,\n    .set_config = log_cfg_set,\n    .write_default = log_cfg_write_default,\n    .write_template = log_cfg_write_template\n};\n"
  },
  {
    "path": "src/common/rbh_misc.c",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2004-2015 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n#ifdef HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n\n#include \"rbh_misc.h\"\n#include \"rbh_cfg.h\"\n#include \"global_config.h\"\n#include \"rbh_logs.h\"\n#include \"xplatform_print.h\"\n#include \"uidgidcache.h\"\n#include \"status_manager.h\"\n\n#include <stdlib.h>\n#include <unistd.h>\n#include <time.h>\n#include <errno.h>\n#include <stdio.h>\n#include <pthread.h>\n#include <libgen.h>\n#include <stdarg.h>\n#include <fnmatch.h>\n#include <sys/types.h>\n#include <utime.h>\n#include <sys/stat.h>\n#include <fcntl.h>\n#include <ctype.h>\n#include <string.h>\n\n#ifndef HAVE_GETMNTENT_R\n#include \"mntent_compat.h\"\n#else\n#include <mntent.h> /* for handling mntent */\n#endif\n\n/* Exa-scale definitions ;-) */\n#define KILO_BYTE  (1024LL)\n#define MEGA_BYTE  (1024LL * KILO_BYTE)\n#define GIGA_BYTE  (1024LL * MEGA_BYTE)\n#define TERA_BYTE  (1024LL * GIGA_BYTE)\n#define PETA_BYTE  (1024LL * TERA_BYTE)\n#define EXA_BYTE   (1024LL * PETA_BYTE)\n\n/* Durations  */\n#define MINUTE   60\n#define HOUR  (60*MINUTE)\n#define DAY   (24*HOUR)\n#define WEEK  (7*DAY)\n#define YEAR  (365*DAY)\n\nvoid Exit(int error_code)\n{\n    DisplayLog(LVL_MAJOR, \"EXIT\", \"Exiting program with code %d\", error_code);\n\n#if 0\n    /* Remove pid_file, if any */\n    if (pid_file != NULL) {\n        if (unlink(pid_file) != 0) {\n            DisplayLog(LVL_CRIT, \"EXIT\",\n                       \"Could not remove pid file %s: %s\", pid_file,\n                       strerror(errno));\n        }\n    }\n#endif\n\n    FlushLogs();\n    
exit(error_code);\n}\n\n/* global info about the filesystem to be managed */\nstatic char *mount_point;\nstatic char fsname[RBH_PATH_MAX] = \"\";\nstatic dev_t dev_id = 0;\nstatic uint64_t fs_key = 0;\nstatic entry_id_t root_id;\n\n/* to optimize string concatenation */\nstatic unsigned int mount_len = 0;\n\n#ifdef _HAVE_FID\n#define DOTLUSTRE   \".lustre\"\n#define FIDDIR      \"/.lustre/fid/\"\nstatic char *fid_dir;\nstatic char *dot_lustre_dir;\nstatic entry_id_t dot_lustre_fid;\nstatic entry_id_t fid_fid;\n#endif\n\n/* used at initialization time, to avoid several modules\n * that start in parallel to check it several times.\n */\nstatic pthread_mutex_t mount_point_lock = PTHREAD_MUTEX_INITIALIZER;\n\n#define LAST_32PRIME    0xFFFFFFFB\n#define LAST_64PRIME    0xFFFFFFFFFFFFFFC5\nstatic uint64_t hash_name(const char *str)\n{\n    unsigned int i;\n    uint64_t val = 1;\n\n    for (i = 0; i < strlen(str); i++)\n        val = (val << 5) - val + (unsigned int)(str[i]);\n\n    return val % LAST_32PRIME;\n}\n\nstatic uint64_t fsidto64(fsid_t fsid)\n{\n    uint64_t out;\n    if (sizeof(fsid_t) <= sizeof(uint64_t)) {\n        memset(&out, 0, sizeof(out));\n        memcpy((char *)(&out) + (sizeof(out) - sizeof(fsid_t)), &fsid, sizeof(fsid));\n        DisplayLog(LVL_DEBUG, __func__, \"sizeof(fsid)=%lu <= 64bits, \"\n                   \"fsid as 64=%\" PRIX64, sizeof(fsid_t), out);\n        return out;\n    } else {\n        unsigned int i;\n        out = 1;\n        char *str = (char *)(&fsid);\n\n        for (i = 0; i < sizeof(fsid_t); i++)\n            out = (out << 5) - out + (unsigned int)(str[i]);\n\n        out = out % LAST_64PRIME;\n        DisplayLog(LVL_DEBUG, __func__, \"sizeof(fsid)=%lu > 64bits, \"\n                   \"hash64(fsid)=%\" PRIX64, sizeof(fsid_t), out);\n        return out;\n    }\n}\n\n/* this set of functions is for retrieving/checking mount point\n * and fs name (once for all threads):\n */\nstatic int _set_mount_point(char *mntpnt)\n{\n    char 
path[RBH_PATH_MAX + 100];\n\n    /* don't change mount_path while already running */\n    if (mount_len != 0)\n        return 0;\n\n    strcpy(path, mntpnt);\n\n    /* remove final slash, if any */\n    size_t len = strlen(path);\n    if ((len > 1) && (path[len - 1] == '/')) {\n        path[len - 1] = '\\0';\n    }\n\n    mount_point = strdup(path);\n    mount_len = strlen(mount_point);\n\n#ifdef _HAVE_FID\n    int rc;\n\n    /* build the .lustre directory */\n    if (asprintf(&dot_lustre_dir, \"%s/%s\", path, DOTLUSTRE) == -1)\n        RBH_BUG(\"Not enough memory to initialize\");\n\n    /* build the .fid directory */\n    if (asprintf(&fid_dir, \"%s%s\", path, FIDDIR) == -1)\n        RBH_BUG(\"Not enough memory to initialize\");\n\n    /* also get their fid */\n    rc = path2id(dot_lustre_dir, &dot_lustre_fid, NULL);\n    if (rc) {\n        DisplayLog(LVL_MAJOR, __func__,\n                   \"Error: failed to get FID for special directory <%s>: %s.\",\n                   dot_lustre_dir, strerror(abs(rc)));\n        return rc;\n    }\n\n    rc = path2id(fid_dir, &fid_fid, NULL);\n    if (rc) {\n        DisplayLog(LVL_MAJOR, __func__,\n                   \"Error: failed to get FID for special directory <%s>: %s.\",\n                   fid_dir, strerror(abs(rc)));\n        return rc;\n    }\n#endif\n    return 0;\n}\n\nstatic int set_fs_info(char *name, char *mountp, dev_t dev, fsid_t fsid)\n{\n    int rc = 0;\n\n    P(mount_point_lock);\n    rc = _set_mount_point(mountp);\n    if (rc)\n        goto out_unlock;\n\n    strcpy(fsname, name);\n    dev_id = dev;\n\n    switch (global_config.fs_key) {\n    case FSKEY_FSNAME:\n        fs_key = hash_name(name);\n        DisplayLog(LVL_DEBUG, \"FSInfo\", \"fs_key: hash(fsname=%s)=%\" PRIX64,\n                   name, fs_key);\n        break;\n\n    case FSKEY_FSID:\n        fs_key = fsidto64(fsid);\n        DisplayLog(LVL_DEBUG, \"FSInfo\", \"fs_key: fsid as 64=%\" PRIX64, fs_key);\n        break;\n\n    case FSKEY_DEVID:\n        
fs_key = dev_id;\n        DisplayLog(LVL_DEBUG, \"FSInfo\", \"fs_key: devid=%\" PRIX64, fs_key);\n        break;\n\n    default:\n        DisplayLog(LVL_MAJOR, \"FSInfo\", \"Invalid fs_key type %#x\",\n                   global_config.fs_key);\n        fs_key = 0;\n    }\n\n    /* now, path2id can be called */\n    rc = path2id(global_config.fs_path, &root_id, NULL);\n    if (rc)\n        DisplayLog(LVL_CRIT, \"FSInfo\",\n                   \"Failed to get id for root directory %s: %s\", mountp,\n                   strerror(-rc));\n\n out_unlock:\n    V(mount_point_lock);\n    return rc;\n}\n\n/** retrieve the mount point from any module\n * without final slash.\n */\nconst char *get_mount_point(unsigned int *plen)\n{\n    if (plen)\n        (*plen) = mount_len;\n    return mount_point;\n}\n\n#if _HAVE_FID\n/** Retrieve the .fid directory */\nconst char *get_fid_dir(void)\n{\n    return fid_dir;\n}\n\n/** Retrieve the .lustre directory */\nconst char *get_dot_lustre_dir(void)\n{\n    return dot_lustre_dir;\n}\n\n/** retrieve the fid of \"<root>/.lustre\" directory */\nconst entry_id_t *get_dot_lustre_fid(void)\n{\n    return &dot_lustre_fid;\n}\n\n/** retrieve the fid of \"<root>/.lustre/fid\" directory */\nconst entry_id_t *get_fid_fid(void)\n{\n    return &fid_fid;\n}\n#endif\n\n/* retrieve fsname from any module */\nconst char *get_fsname(void)\n{\n    return fsname;\n}\n\n/* return Filesystem device id  */\ndev_t get_fsdev(void)\n{\n    return dev_id;\n}\n\nuint64_t get_fskey(void)\n{\n    return fs_key;\n}\n\nconst entry_id_t *get_root_id(void)\n{\n    return &root_id;\n}\n\n/**\n * send a mail\n */\nint SendMail(const char *recipient, const char *subject, const char *message)\n{\n    char *buffer;\n    FILE *file;\n\n    if (asprintf(&buffer, \"s-nail -s \\\"%s\\\" %s\", subject, recipient) == -1) {\n        DisplayLog(LVL_CRIT, \"SENDMAIL\",\n                   \"Could not allocate title buffer for \\\"%s\\\"\", subject);\n        return -1;\n    }\n\n    if 
((file = popen(buffer, \"w\")) == NULL) {\n        DisplayLog(LVL_CRIT, \"SENDMAIL\",\n                   \"Error %d sending mail with the following command=%s\", errno,\n                   buffer);\n        free(buffer);\n        return -1;\n    }\n\n    fwrite(message, strlen(message), 1, file);\n    free(buffer);\n    pclose(file);\n    return 0;\n}\n\n/**\n * Search for Robinhood config file.\n * search a config file with the given name (+extension)\n * If cfg_in is empty: search any config in config paths\n * /!\\ not thread safe\n */\nint SearchConfig(const char *cfg_in, char *cfg_out, bool *changed,\n                 char *unmatched, size_t max_len)\n{\n    static const char *default_cfg_path = SYSCONFDIR \"/robinhood.d\";\n    DIR *dir;\n    struct dirent *ent;\n    const char *cfg = cfg_in;\n\n    *changed = true;    /* most of the cases */\n\n    if (cfg == NULL || EMPTY_STRING(cfg)) {\n        /* check if a default config file is specified */\n        cfg = getenv(DEFAULT_CFG_VAR);\n    }\n\n    /* set unmatched, for better logging */\n    if (unmatched) {\n        if (cfg)\n            rh_strncpy(unmatched, cfg, max_len);\n        else\n            snprintf(unmatched, max_len, \"%s/*.conf\", default_cfg_path);\n    }\n\n    if (cfg == NULL || EMPTY_STRING(cfg)) {\n        int found = 0;\n\n        /* look for files in default config path */\n        dir = opendir(default_cfg_path);\n        if (dir) {\n            while ((ent = readdir(dir)) != NULL) {\n                /* ignore .xxx files */\n                if (ent->d_name[0] == '.')\n                    continue;\n                if (fnmatch(\"*.conf\", ent->d_name, 0)\n                    && fnmatch(\"*.cfg\", ent->d_name, 0))\n                    /* not a config file */\n                    continue;\n\n                sprintf(cfg_out, \"%s/%s\", default_cfg_path, ent->d_name);\n                if (access(cfg_out, F_OK) == 0) {\n                    /* that file matches. 
*/\n                    found++;\n                    if (found >= 2)\n                        /* No need to continue. */\n                        break;\n                }\n            }\n\n            closedir(dir);\n        }\n\n        if (found == 1) {\n            /* Only one file found. cfg_out is already set. We're\n             * good. */\n            return 0;\n        }\n    } else if (access(cfg, F_OK) == 0) {\n        /* the specified config file exists */\n        if (cfg_out != cfg)\n            rh_strncpy(cfg_out, cfg, max_len);\n        *changed = false;\n        return 0;\n    } else if (strchr(cfg, '/')) {\n        /* the argument is a path (not a single name\n         * and this path was not found) */\n        *changed = false;\n        goto notfound;\n    } else {    /* look for a file in the given paths */\n\n        char cfg_cp[RBH_PATH_MAX] = \"\";\n        int has_ext = (strchr(cfg, '.') != NULL);\n\n        rh_strncpy(cfg_cp, cfg, MIN2(max_len, RBH_PATH_MAX));\n\n        /* if the file already has an extension, try path/name */\n        if (has_ext) {\n            snprintf(cfg_out, max_len, \"%s/%s\", default_cfg_path, cfg_cp);\n            if (access(cfg_out, F_OK) == 0)\n                return 0;\n        }\n\n        /* try path/name.cfg, path/name.conf */\n        snprintf(cfg_out, max_len, \"%s/%s.conf\", default_cfg_path, cfg_cp);\n        if (access(cfg_out, F_OK) == 0)\n            return 0;\n\n        snprintf(cfg_out, max_len, \"%s/%s.cfg\", default_cfg_path, cfg_cp);\n        if (access(cfg_out, F_OK) == 0)\n            return 0;\n    }\n\n notfound:\n    /* no file found, cleaning cfg_out */\n    cfg_out[0] = '\\0';\n    return -ENOENT;\n}\n\nchar *uid2str(uid_t uid, char *username)\n{\n    const struct passwd *p = GetPwUid(uid);\n\n    if (p != NULL)\n        snprintf(username, RBH_LOGIN_MAX, \"%s\", p->pw_name);\n    else\n        snprintf(username, RBH_LOGIN_MAX, \"%d\", (int)uid);\n\n    return username;\n}\n\nchar 
*gid2str(gid_t gid, char *groupname)\n{\n    const struct group *g = GetGrGid(gid);\n\n    if (g != NULL)\n        snprintf(groupname, RBH_LOGIN_MAX, \"%s\", g->gr_name);\n    else\n        snprintf(groupname, RBH_LOGIN_MAX, \"%d\", (int)gid);\n\n    return groupname;\n}\n\nconst char *mode2type(mode_t mode)\n{\n    if (S_ISREG(mode))\n        return STR_TYPE_FILE;\n    else if (S_ISDIR(mode))\n        return STR_TYPE_DIR;\n    else if (S_ISLNK(mode))\n        return STR_TYPE_LINK;\n    else if (S_ISCHR(mode))\n        return STR_TYPE_CHR;\n    else if (S_ISBLK(mode))\n        return STR_TYPE_BLK;\n    else if (S_ISFIFO(mode))\n        return STR_TYPE_FIFO;\n    else if (S_ISSOCK(mode))\n        return STR_TYPE_SOCK;\n    else\n        return NULL;\n}\n\nvoid stat2rbh_attrs(const struct stat *p_inode, attr_set_t *p_attr_set,\n                    bool size_info)\n{\n    ATTR_MASK_SET(p_attr_set, uid);\n    if (global_config.uid_gid_as_numbers)\n        ATTR(p_attr_set, uid).num = p_inode->st_uid;\n    else\n        uid2str(p_inode->st_uid, ATTR(p_attr_set, uid).txt);\n\n    ATTR_MASK_SET(p_attr_set, gid);\n    if (global_config.uid_gid_as_numbers)\n        ATTR(p_attr_set, gid).num = p_inode->st_gid;\n    else\n        gid2str(p_inode->st_gid, ATTR(p_attr_set, gid).txt);\n\n    if (size_info) {\n        ATTR_MASK_SET(p_attr_set, size);\n        ATTR(p_attr_set, size) = p_inode->st_size;\n\n        ATTR_MASK_SET(p_attr_set, blocks);\n        ATTR(p_attr_set, blocks) = p_inode->st_blocks;\n\n        /* times are also wrong when they come from the MDT device */\n        ATTR_MASK_SET(p_attr_set, last_access);\n\n        /* Vary the setting of last_access depending on value of\n         * global_config.last_access_only_atime */\n        if (global_config.last_access_only_atime)\n            ATTR(p_attr_set, last_access) = p_inode->st_atime;\n        else\n            ATTR(p_attr_set, last_access) =\n                MAX(p_inode->st_atime, p_inode->st_mtime);\n\n        
ATTR_MASK_SET(p_attr_set, last_mod);\n        ATTR(p_attr_set, last_mod) = p_inode->st_mtime;\n\n        ATTR_MASK_SET(p_attr_set, last_mdchange);\n        ATTR(p_attr_set, last_mdchange) = p_inode->st_ctime;\n    }\n\n    if (ATTR_MASK_TEST(p_attr_set, creation_time)) {\n        /* creation time is always <= ctime */\n        if (p_inode->st_ctime < ATTR(p_attr_set, creation_time))\n            ATTR(p_attr_set, creation_time) = p_inode->st_ctime;\n    } else {\n        ATTR_MASK_SET(p_attr_set, creation_time);\n        ATTR(p_attr_set, creation_time) = p_inode->st_ctime;\n    }\n\n    const char *type = mode2type(p_inode->st_mode);\n    if (type != NULL) {\n        ATTR_MASK_SET(p_attr_set, type);\n        strcpy(ATTR(p_attr_set, type), type);\n    }\n\n    ATTR_MASK_SET(p_attr_set, nlink);\n    ATTR(p_attr_set, nlink) = p_inode->st_nlink;\n\n    ATTR_MASK_SET(p_attr_set, mode);\n    ATTR(p_attr_set, mode) = p_inode->st_mode & 07777;  /* mode + sticky bits */\n}\n\nvoid rbh_attrs2stat(const attr_set_t *p_attr_set, struct stat *p_inode)\n{\n    char buff[4096];\n\n    if (ATTR_MASK_TEST(p_attr_set, mode))\n        p_inode->st_mode = ATTR(p_attr_set, mode);\n    /* default to 600 for files, 700 for other cases */\n    else if (ATTR_MASK_TEST(p_attr_set, type) &&\n             !strcmp(ATTR(p_attr_set, type), STR_TYPE_FILE))\n        p_inode->st_mode = 0600;\n    else\n        p_inode->st_mode = 0700;\n\n    if (ATTR_MASK_TEST(p_attr_set, nlink))\n        p_inode->st_nlink = ATTR(p_attr_set, nlink);\n    else\n        p_inode->st_nlink = 1;\n\n    /* default to 0 (root) */\n    p_inode->st_uid = 0;\n    if (ATTR_MASK_TEST(p_attr_set, uid)) {\n        struct passwd pw;\n        struct passwd *p_pw;\n\n        if (global_config.uid_gid_as_numbers)\n            p_inode->st_uid = ATTR(p_attr_set, uid).num;\n        else if ((getpwnam_r\n                  (ATTR(p_attr_set, uid).txt, &pw, buff, sizeof(buff),\n                   &p_pw) != 0)\n                 || (p_pw == 
NULL))\n            DisplayLog(LVL_MAJOR, __func__,\n                       \"Warning: couldn't resolve uid for user '%s'\",\n                       ATTR(p_attr_set, uid).txt);\n        else\n            p_inode->st_uid = p_pw->pw_uid;\n    }\n\n    if (ATTR_MASK_TEST(p_attr_set, gid)) {\n        struct group gr;\n        struct group *p_gr;\n\n        if (global_config.uid_gid_as_numbers)\n            p_inode->st_gid = ATTR(p_attr_set, gid).num;\n        else if ((getgrnam_r\n                  (ATTR(p_attr_set, gid).txt, &gr, buff, sizeof(buff),\n                   &p_gr) != 0)\n                 || (p_gr == NULL))\n            DisplayLog(LVL_MAJOR, __func__,\n                       \"Warning: couldn't resolve gid for group '%s'\",\n                       ATTR(p_attr_set, gid).txt);\n        else\n            p_inode->st_gid = p_gr->gr_gid;\n    }\n\n    if (ATTR_MASK_TEST(p_attr_set, size))\n        p_inode->st_size = ATTR(p_attr_set, size);\n    else\n        p_inode->st_size = 0;\n\n    if (ATTR_MASK_TEST(p_attr_set, last_access))\n        p_inode->st_atime = ATTR(p_attr_set, last_access);\n    else    /* default to current time */\n        p_inode->st_atime = time(NULL);\n\n    if (ATTR_MASK_TEST(p_attr_set, last_mod))\n        p_inode->st_mtime = ATTR(p_attr_set, last_mod);\n    else    /* default to current time */\n        p_inode->st_mtime = time(NULL);\n\n    if (ATTR_MASK_TEST(p_attr_set, last_mdchange))\n        p_inode->st_ctime = ATTR(p_attr_set, last_mdchange);\n    else    /* default to current time */\n        p_inode->st_ctime = time(NULL);\n}\n\n#ifndef HAVE_GETMNTENT_R\n\n/* if getmntent_r() does not exists, define it as a wrapper of getmntent().\n * use a lock to ensure thread-safety.\n */\nstatic pthread_mutex_t mntent_lock = PTHREAD_MUTEX_INITIALIZER;\n\n/* copy a mntent structure to caller's buffer */\nstatic int copy_mntent(struct mntent *mntout, char *buf, int buflen,\n                       const struct mntent *mntin)\n{\n    char *curr = 
buf;\n\n    if (!buf || !mntout)\n        return EFAULT;\n\n    if (strlen(mntin->mnt_fsname) + 1\n        + strlen(mntin->mnt_dir) + 1\n        + strlen(mntin->mnt_type) + 1 + strlen(mntin->mnt_opts) + 1 > buflen)\n        return ENOMEM;\n\n    strcpy(curr, mntin->mnt_fsname);\n    mntout->mnt_fsname = curr;\n    curr += strlen(mntin->mnt_fsname) + 1;\n\n    strcpy(curr, mntin->mnt_dir);\n    mntout->mnt_dir = curr;\n    curr += strlen(mntin->mnt_dir) + 1;\n\n    strcpy(curr, mntin->mnt_type);\n    mntout->mnt_type = curr;\n    curr += strlen(mntin->mnt_type) + 1;\n\n    strcpy(curr, mntin->mnt_opts);\n    mntout->mnt_opts = curr;\n    curr += strlen(mntin->mnt_opts) + 1;\n\n    mntout->mnt_freq = mntin->mnt_freq;\n    mntout->mnt_passno = mntin->mnt_passno;\n\n    return 0;\n}\n\nstatic struct mntent *getmntent_r(FILE *fp, struct mntent *mntbuf,\n                                  char *buf, int buflen)\n{\n    struct mntent *pmntent;\n    /* struct mntent *getmntent(FILE *fp); */\n    P(mntent_lock);\n    pmntent = getmntent(fp);\n    /* copy mntent structure to caller buffer */\n    if (pmntent) {\n        if (copy_mntent(mntbuf, buf, buflen, pmntent) != 0)\n            pmntent = NULL; /* causes an error */\n    }\n    V(mntent_lock);\n    return pmntent;\n}\n#endif\n\n/**\n * Check if the given input path matches or in under\n * a mount entry.\n * @param[in] in_path       Path to be matched.\n * @param[in] p_mnt         Mount entry to be matched against.\n * @param[in] allow_root    Allow matching root filesystem.\n */\nstatic bool match_mount_path(const char *in_path, const struct mntent *p_mnt,\n                             bool allow_root)\n{\n    int pathlen = strlen(p_mnt->mnt_dir);\n\n    /* Matching root filesystem. */\n    if (!strcmp(p_mnt->mnt_dir, \"/\")) {\n        DisplayLog(LVL_DEBUG, \"CheckFS\",\n                   \"Root mountpoint is%s allowed for matching %s, type=%s, fs=%s\",\n                   allow_root ? 
\"\" : \" NOT\", in_path, p_mnt->mnt_type,\n                   p_mnt->mnt_fsname);\n        return allow_root;\n    }\n\n    /* In other cases The filesystem must be <mountpoint>/<smthg>\n     * or <mountpoint>\\0\n     */\n    if (!strncmp(in_path, p_mnt->mnt_dir, pathlen) &&\n        ((in_path[pathlen] == '/') || (in_path[pathlen] == '\\0'))) {\n        DisplayLog(LVL_DEBUG, \"CheckFS\",\n                   \"%s is under mountpoint %s, type=%s, fs=%s\",\n                   in_path, p_mnt->mnt_dir, p_mnt->mnt_type, p_mnt->mnt_fsname);\n        return true;\n    }\n    /* don't match */\n    return false;\n}\n\n/* Check mount point and FS type.\n * Also return the associated device number.\n * (for STAY_IN_FS security option).\n */\nint check_fs_info(const char *path, const char *expected_type,\n                  dev_t *p_fs_dev, char *fsname_out,\n                  bool check_mounted, bool save_fs)\n{\n    FILE *fp;\n    struct mntent *p_mnt;\n    struct mntent mnt_ent;\n    char *match_dir = NULL;\n    char *match_type = NULL;\n    char *match_fs = NULL;\n    char mnt_buff[4096];\n    char *rpath = NULL;\n    char *parentmntdir;\n    char *name = NULL;\n    char *tmp_buff;\n    struct stat pathstat;\n    struct stat parentmntstat;\n    int rc = 0;\n\n    if ((expected_type == NULL) || (expected_type[0] == '\\0')) {\n        DisplayLog(LVL_CRIT, \"CheckFS\",\n                   \"/!\\\\ ERROR /!\\\\ No filesystem type specified\");\n        return EINVAL;\n    }\n\n    /* Convert to canonic path. */\n    /* Let realpath determine the output length (NULL argument). */\n    rpath = realpath(path, NULL);\n    if (rpath == NULL) {\n        DisplayLog(LVL_CRIT, \"CheckFS\", \"Error %d in realpath(%s): %s\",\n                   errno, (path ? 
path : \"<null>\"), strerror(errno));\n        return errno;\n    }\n    if (strlen(rpath) >= RBH_PATH_MAX) {\n        rc = -ENAMETOOLONG;\n        DisplayLog(LVL_CRIT, \"CheckFS\", \"Path length is too long!\");\n        goto out_free;\n    }\n\n    /* open mount tab and look for the given path */\n    fp = setmntent(MOUNTED, \"r\");\n    if (fp == NULL) {\n        rc = -errno;\n        DisplayLog(LVL_CRIT, \"CheckFS\", \"Error in setmntent(%s): %s\",\n                   MOUNTED, strerror(-rc));\n        goto out_free;\n    }\n\n    while ((p_mnt = getmntent_r(fp, &mnt_ent, mnt_buff,\n                                sizeof(mnt_buff))) != NULL) {\n        if (p_mnt->mnt_dir == NULL)\n            continue;\n\n        /* allow matching root if 'check_mounted' is disabled */\n        if (match_mount_path(rpath, p_mnt, !check_mounted)) {\n            /* free previous paths */\n            free(match_dir);\n            free(match_type);\n            free(match_fs);\n\n            /* duplicate new values */\n            match_dir = strdup(p_mnt->mnt_dir);\n            match_type = strdup(p_mnt->mnt_type);\n            match_fs = strdup(p_mnt->mnt_fsname);\n        }\n    }\n    endmntent(fp);\n\n    if (match_dir == NULL) {\n        DisplayLog(LVL_CRIT, \"CheckFS\", \"No mount entry matches '%s' in %s\",\n                   rpath, MOUNTED);\n        DisplayLog(LVL_CRIT, \"CheckFS\",\n                   \"Set 'check_mounted = no' in configuration to force using root filesystem\");\n        rc = -ENOENT;\n        goto out_free;\n    }\n\n    /* display the matching entry */\n    DisplayLog(LVL_EVENT, \"CheckFS\",\n               \"'%s' matches mount point '%s', type=%s, fs=%s\", rpath,\n               match_dir, match_type, match_fs);\n\n    /* check filesystem type */\n    if (strcasecmp(match_type, expected_type) != 0) {\n        if (check_mounted) {\n            DisplayLog(LVL_CRIT, \"CheckFS\",\n                       \"/!\\\\ ERROR /!\\\\ The specified type for '%s' 
(%s) does not match actual filesystem type (%s)\",\n                       rpath, expected_type, match_type);\n            rc = -EINVAL;\n            goto out_free;\n        } else {\n            DisplayLog(LVL_MAJOR, \"CheckFS\",\n                       \"/!\\\\ WARNING /!\\\\ The specified type for '%s' (%s) \"\n                       \"does not match actual filesystem type (%s).\",\n                       rpath, expected_type, match_type);\n            DisplayLog(LVL_MAJOR, \"CheckFS\",\n                       \"check_mounted is disabled: continuing.\");\n        }\n    }\n\n    /* stat the given fs_path */\n    if (stat(rpath, &pathstat) != 0) {\n        rc = -errno;\n        DisplayLog(LVL_CRIT, \"CheckFS\",\n                   \"/!\\\\ ERROR /!\\\\ Couldn't stat '%s': %s\", rpath,\n                   strerror(-rc));\n        goto out_free;\n    }\n\n    /* Stat upper level of mount point, to check if\n     * the filesystem is mounted (device must be different).\n     * (dirname modifies string content, so we work on a copy\n     * in tmp_buff).\n     */\n    tmp_buff = strdup(match_dir);\n    parentmntdir = dirname(tmp_buff);\n\n    if (lstat(parentmntdir, &parentmntstat) != 0) {\n        rc = -errno;\n        DisplayLog(LVL_CRIT, \"CheckFS\",\n                   \"/!\\\\ ERROR /!\\\\ Couldn't stat %s: %s\", parentmntdir,\n                   strerror(-rc));\n        goto out_free;\n    }\n\n    /* check that filesystem device is different from root\n     * (except if check_mounted is disabled) */\n    if ((pathstat.st_dev == parentmntstat.st_dev) && check_mounted) {\n        rc = -ENOENT;\n        DisplayLog(LVL_CRIT, \"CheckFS\",\n                   \"/!\\\\ ERROR /!\\\\ Filesystem '%s' is not mounted ! 
dev(%s)=dev(%s)=%#\"\n                   PRIx64, match_dir, parentmntdir, rpath,\n                   (uint64_t)parentmntstat.st_dev);\n        goto out_free;\n    }\n    free(tmp_buff);\n#ifdef _LUSTRE\n    if (!strcmp(match_type, \"lustre\")) {\n        char *ptr = strstr(match_fs, \":/\");\n\n        if (ptr != NULL)\n            name = ptr + 2;\n        else\n            name = match_fs;\n    } else\n#endif\n        name = match_fs;\n\n    /* all checks are OK */\n    if (save_fs) {\n        /* getting filesystem fsid (needed for fskey) */\n        if (global_config.fs_key == FSKEY_FSID) {\n            struct statfs stf;\n\n            if (statfs(match_dir, &stf)) {\n                rc = -errno;\n                DisplayLog(LVL_CRIT, \"CheckFS\", \"ERROR calling statfs(%s): %s\",\n                           match_dir, strerror(-rc));\n                goto out_free;\n            }\n            /* if fsid == 0, it may mean that fsid is not significant on the\n             * current system => DISPLAY A WARNING */\n            if (fsidto64(stf.f_fsid) == 0)\n                DisplayLog(LVL_MAJOR, \"CheckFS\",\n                           \"WARNING: fsid(0) doesn't look significant on this system.\"\n                           \"It should not be used as fs_key!\");\n\n            rc = set_fs_info(name, match_dir, pathstat.st_dev, stf.f_fsid);\n        } else {\n            fsid_t dummy_fsid;\n\n            memset(&dummy_fsid, 0, sizeof(fsid_t));\n            rc = set_fs_info(name, match_dir, pathstat.st_dev, dummy_fsid);\n        }\n        if (rc)\n            goto out_free;\n    }\n\n    if (p_fs_dev != NULL)\n        *p_fs_dev = pathstat.st_dev;\n\n    if (fsname_out != NULL)\n        strcpy(fsname_out, name);\n\n    rc = 0;\n\n out_free:\n    free(match_dir);\n    free(match_type);\n    free(match_fs);\n    free(rpath);\n    return rc;\n}\n\n/**\n * Initialize filesystem access and retrieve current devid/fs_key\n * - global_config must be set\n * - initialize 
mount_point, fsname and dev_id\n */\nint InitFS(void)\n{\n    static bool initialized = false;\n    int rc;\n\n    if (initialized)\n        return 0;\n\n    /* Initialize mount point info */\n#ifdef _LUSTRE\n    if (!strcmp(global_config.fs_type, \"lustre\")) {\n        if ((rc = Lustre_Init())) {\n            DisplayLog(LVL_CRIT, \"InitFS\", \"Error %d initializing liblustreapi\",\n                       rc);\n            return rc;\n        }\n    }\n#endif\n\n    rc = check_fs_info(global_config.fs_path, global_config.fs_type, NULL, NULL,\n                       global_config.check_mounted, true);\n    if (rc) {\n        DisplayLog(LVL_CRIT, \"InitFS\", \"Error %d checking Filesystem\", rc);\n        return rc;\n    }\n\n    /* OK */\n    initialized = true;\n    return 0;\n}\n\n/**\n * This is to be called after a dev_id change was detected\n * return 0 if fskey is unchanged and update mount_point, fsname and dev_id\n * else, return != 0\n */\nint ResetFS(void)\n{\n    char name[RBH_PATH_MAX];\n    dev_t dev;\n    struct statfs stf;\n    int rc;\n    /* check depending on FS key type:\n     * - fsname: check mount tab\n     * - fsid: check statfs\n     * - devid: check dev_id\n     */\n    switch (global_config.fs_key) {\n    case FSKEY_FSNAME:\n        /* get and compare FS name */\n        rc = check_fs_info(global_config.fs_path, global_config.fs_type, NULL,\n                           name, global_config.check_mounted, false);\n        if (rc)\n            return rc;\n        /* did the name changed ? */\n        if (strcmp(name, fsname)) {\n            DisplayLog(LVL_CRIT, \"FSInfo\", \"fsname change detected: %s->%s\",\n                       fsname, name);\n            RaiseAlert(\"Filesystem changed\",\n                       \"fsname of '%s' has changed !!! 
%s->%s => EXITING\",\n                       global_config.fs_path, fsname, name);\n            return -1;\n        }\n        /* update fsid and devid */\n        rc = check_fs_info(global_config.fs_path, global_config.fs_type, NULL,\n                           NULL, global_config.check_mounted, true);\n        return rc;\n\n    case FSKEY_FSID:\n        /* get and compare FS ID */\n        if (statfs(global_config.fs_path, &stf)) {\n            rc = -errno;\n            DisplayLog(LVL_CRIT, \"FSInfo\", \"ERROR calling statfs(%s): %s\",\n                       global_config.fs_path, strerror(-rc));\n            return rc;\n        }\n        if (fsidto64(stf.f_fsid) != fs_key) {\n            DisplayLog(LVL_CRIT, \"FSInfo\",\n                       \"fsid change detected: %\" PRIX64 \"->%\" PRIX64, fs_key,\n                       fsidto64(stf.f_fsid));\n            RaiseAlert(\"Filesystem changed\",\n                       \"fsid of '%s' has changed !!! %\" PRIX64 \"->%\" PRIX64\n                       \" => EXITING\", global_config.fs_path, fs_key,\n                       fsidto64(stf.f_fsid));\n            return -1;\n        }\n        /* update fsname and devid */\n        rc = check_fs_info(global_config.fs_path, global_config.fs_type, NULL,\n                           NULL, global_config.check_mounted, true);\n        return rc;\n\n    case FSKEY_DEVID:\n        /* get and compare dev id */\n        rc = check_fs_info(global_config.fs_path, global_config.fs_type, &dev,\n                           NULL, global_config.check_mounted, false);\n        if (rc)\n            return rc;\n        /* did the device change? */\n        if (dev != dev_id) {\n            DisplayLog(LVL_CRIT, \"FSInfo\",\n                       \"devid change detected: %\" PRI_DT \"->%\" PRI_DT, dev_id,\n                       dev);\n\n            RaiseAlert(\"Filesystem changed\",\n                       \"devid of '%s' has changed !!! 
%\" PRI_DT \"->%\" PRI_DT\n                       \" => EXITING\", global_config.fs_path, dev_id, dev);\n            return -1;\n        }\n        /* update fsname and fsid */\n        rc = check_fs_info(global_config.fs_path, global_config.fs_type, NULL,\n                           NULL, global_config.check_mounted, true);\n        return rc;\n\n    default:\n        DisplayLog(LVL_MAJOR, \"FSInfo\", \"Invalid fs_key type %#x\",\n                   global_config.fs_key);\n        return -1;\n    }\n\n}\n\n/**\n *  Check that FS path is the same as the last time.\n */\nint CheckLastFS(void)\n{\n    int rc;\n    lmgr_t lmgr;\n    char value[1024];\n    char str_id[128];\n\n    rc = ListMgr_InitAccess(&lmgr);\n    if (rc) {\n        DisplayLog(LVL_CRIT, \"CheckFS\", \"Error %d connecting to database\", rc);\n        return rc;\n    }\n    rc = ListMgr_GetVar(&lmgr, FS_PATH_VAR, value, sizeof(value));\n    if (rc == DB_SUCCESS) {\n        if (strcmp(value, global_config.fs_path)) {\n            DisplayLog(LVL_CRIT, \"CheckFS\",\n                       \"Filesystem %s does not correspond to database content (%s)\",\n                       global_config.fs_path, value);\n            DisplayLog(LVL_CRIT, \"CheckFS\",\n                       \"Drop the database and restart the daemon.\");\n            rc = -1;\n        } else {\n            DisplayLog(LVL_DEBUG, \"CheckFS\", \"%s matches database content.\",\n                       global_config.fs_path);\n            rc = 0;\n        }\n    } else if (rc == DB_NOT_EXISTS) {\n        DisplayLog(LVL_FULL, \"CheckFS\", FS_PATH_VAR \"='%s'.\",\n                   global_config.fs_path);\n        rc = ListMgr_SetVar(&lmgr, FS_PATH_VAR, global_config.fs_path);\n        if (rc)\n            DisplayLog(LVL_CRIT, \"CheckFS\",\n                       \"Error %d setting variable 'FS_path'%s\", rc,\n                       rc ==\n                       DB_NOT_EXISTS ?\n                       \" (likely: database schema is not 
created yet, and you have a read-only DB access).\"\n                       : \"\");\n    } else {\n        DisplayLog(LVL_CRIT, \"CheckFS\",\n                   \"Error %d retrieving variable 'FS_path'\", rc);\n    }\n\n    /* can't check root id if not initialized */\n    if (fs_key == 0)\n        goto out;\n\n    snprintf(str_id, sizeof(str_id), DFID_NOBRACE, PFID(&root_id));\n\n    rc = ListMgr_GetVar(&lmgr, ROOT_ID_VAR, value, sizeof(value));\n    if (rc == DB_SUCCESS) {\n        if (strcmp(str_id, value) != 0) {\n            DisplayLog(LVL_CRIT, \"CheckFS\",\n                       \"Root id changed! Previous value: '%s', new value: '%s'\",\n                       value, str_id);\n            DisplayLog(LVL_CRIT, \"CheckFS\",\n                       \"Drop the database and restart the daemon.\");\n            rc = -1;\n        } else {\n            DisplayLog(LVL_DEBUG, \"CheckFS\",\n                       \"Root id '%s' matches previous value.\",\n                       str_id);\n            rc = 0;\n        }\n    } else if (rc == DB_NOT_EXISTS) {\n        DisplayLog(LVL_FULL, \"CheckFS\", ROOT_ID_VAR \"='%s'.\",\n                   str_id);\n        rc = ListMgr_SetVar(&lmgr, ROOT_ID_VAR, str_id);\n        if (rc)\n            DisplayLog(LVL_CRIT, \"CheckFS\",\n                       \"Error %d setting variable '\"ROOT_ID_VAR\"'%s\", rc,\n                       rc == DB_NOT_EXISTS ?\n                       \" (likely: database schema is not created yet, and you have a read-only DB access).\"\n                       : \"\");\n    } else {\n        DisplayLog(LVL_CRIT, \"CheckFS\",\n                   \"Error %d retrieving variable '\"ROOT_ID_VAR\"'\", rc);\n    }\n\nout:\n    ListMgr_CloseAccess(&lmgr);\n    return rc;\n}\n\n/* return 0 if thread has been killed,\n * -1 if thread was already terminated.\n */\nint TerminateThread(pthread_t thread_id)\n{\n    if (pthread_cancel(thread_id) == ESRCH)\n        return -1;\n\n    return 0;\n}\n\n/* Format a size 
in a \"human readable\" format */\n\nchar *FormatFileSize(char *buff, size_t str_sz, uint64_t file_size)\n{\n    if (file_size < KILO_BYTE)\n        snprintf(buff, str_sz, \"%llu\", (unsigned long long)file_size);\n    else if (file_size < MEGA_BYTE)\n        snprintf(buff, str_sz, \"%.2f KB\",\n                 (0.0 + file_size) / (0.0 + KILO_BYTE));\n    else if (file_size < GIGA_BYTE)\n        snprintf(buff, str_sz, \"%.2f MB\",\n                 (0.0 + file_size) / (0.0 + MEGA_BYTE));\n    else if (file_size < TERA_BYTE)\n        snprintf(buff, str_sz, \"%.2f GB\",\n                 (0.0 + file_size) / (0.0 + GIGA_BYTE));\n    else if (file_size < PETA_BYTE)\n        snprintf(buff, str_sz, \"%.2f TB\",\n                 (0.0 + file_size) / (0.0 + TERA_BYTE));\n    else if (file_size < EXA_BYTE)\n        snprintf(buff, str_sz, \"%.2f PB\",\n                 (0.0 + file_size) / (0.0 + PETA_BYTE));\n    else\n        snprintf(buff, str_sz, \"%.2f EB\", (0.0 + file_size) / (0.0 + EXA_BYTE));\n\n    return buff;\n\n}\n\n/**\n * Format a duration (in seconds) to a string with days hours minutes\n * seconds...\n */\nchar *FormatDuration(char *buff, size_t str_sz, time_t duration)\n{\n\n    unsigned int days;\n    unsigned int hours;\n    unsigned int minutes;\n    unsigned int secondes;\n\n    size_t written = 0;\n\n    if (duration < 0)\n        duration = -duration;\n\n    days = (unsigned int)duration / DAY;\n    hours = ((unsigned int)duration % DAY) / HOUR;\n    minutes = ((unsigned int)duration % HOUR) / MINUTE;\n    secondes = ((unsigned int)duration % MINUTE);\n\n    buff[0] = '\\0';\n\n    if (days > 0)\n        written += snprintf(buff, str_sz, \"%ud \", days);\n\n    if (hours > 0)\n        written += snprintf(buff + written, str_sz - written, \"%uh \", hours);\n\n    if (minutes > 0)\n        written +=\n            snprintf(buff + written, str_sz - written, \"%.2umin \", minutes);\n\n    if (secondes > 0 || duration == 0)\n        written +=\n            
snprintf(buff + written, str_sz - written, \"%.2us \", secondes);\n\n    if ((written > 0) && (buff[written - 1] == ' '))\n        buff[written - 1] = '\\0';\n\n    return buff;\n\n}\n\n/**\n * Format a duration (in seconds) to a string with the best fitting unit\n * (float value)\n */\nchar *FormatDurationFloat(char *buff, size_t str_sz, time_t duration)\n{\n    float days;\n    float hours;\n    float minutes;\n    unsigned int secondes;\n\n    if (duration < 0)\n        duration = -duration;\n\n    days = (float)duration / DAY;\n    hours = (float)duration / HOUR;\n    minutes = (float)duration / MINUTE;\n    secondes = (unsigned int)duration;\n\n    buff[0] = '\\0';\n\n    if (days >= 1.0)\n        snprintf(buff, str_sz, \"%.1fd\", days);\n    else if (hours >= 1.0)\n        snprintf(buff, str_sz, \"%.1fh\", hours);\n    else if (minutes >= 1.0)\n        snprintf(buff, str_sz, \"%.1fmin\", minutes);\n    else\n        snprintf(buff, str_sz, \"%.2us\", secondes);\n\n    return buff;\n\n}\n\n/**\n * Convert a string to a long integer\n * @return -1 on error.\n */\nlong long str2bigint(const char *str)\n{\n    char suffix[256];\n    int nb_read;\n    long long value;\n\n    if (str == NULL)\n        return -1;\n\n    nb_read = sscanf(str, \"%lld%s\", &value, suffix);\n\n    if (nb_read <= 0)\n        return -1;  /* invalid format */\n\n    if ((nb_read == 1) || (suffix[0] == '\\0'))\n        return value;   /* no suffix => 0K */\n    else\n        return -1;\n}\n\n/**\n * Convert a string to a boolean\n * @return -1 on error.\n */\nint str2bool(const char *str)\n{\n    if (str == NULL)\n        return -1;\n\n    if (!strcmp(str, \"1\") || !strcasecmp(str, \"true\") ||\n        !strcasecmp(str, \"yes\") || !strcasecmp(str, \"enabled\") ||\n        !strcasecmp(str, \"on\"))\n        return 1;\n\n    if (!strcmp(str, \"0\") || !strcasecmp(str, \"false\") ||\n        !strcasecmp(str, \"no\") || !strcasecmp(str, \"disabled\") ||\n        !strcasecmp(str, \"off\"))\n    
    return 0;\n\n    return -1;\n}\n\n/**\n * Convert a string to a duration in seconds\n * @return -1 on error.\n */\nint str2duration(const char *str)\n{\n    int nb_read, duration;\n    char suffix[256];\n\n    if (str == NULL)\n        return -1;\n\n    nb_read = sscanf(str, \"%d%s\", &duration, suffix);\n\n    if (nb_read <= 0)\n        return -1;  /* invalid format */\n\n    if ((nb_read == 1) || (suffix[0] == '\\0'))\n        return duration;    /* no suffix: duration in seconds */\n\n    if (!strcasecmp(suffix, \"s\") || !strcasecmp(suffix, \"sec\"))\n        return duration;\n    if (!strcasecmp(suffix, \"m\") || !strcasecmp(suffix, \"min\"))\n        return MINUTE * duration;\n    if (!strcasecmp(suffix, \"h\") || !strcasecmp(suffix, \"hour\"))\n        return HOUR * duration;\n    if (!strcasecmp(suffix, \"d\") || !strcasecmp(suffix, \"day\"))\n        return DAY * duration;\n    if (!strcasecmp(suffix, \"w\") || !strcasecmp(suffix, \"week\"))\n        return WEEK * duration;\n    if (!strcasecmp(suffix, \"y\") || !strcasecmp(suffix, \"year\"))\n        return YEAR * duration;\n\n    return -1;\n}\n\n/**\n * Convert a string to a size (in bytes)\n * @return -1 on error.\n */\nuint64_t str2size(const char *str)\n{\n    int nb_read;\n    unsigned long long size;\n    char suffix[256];\n\n    if (str == NULL)\n        return (uint64_t)-1LL;\n\n    nb_read = sscanf(str, \"%llu%s\", &size, suffix);\n\n#ifdef _DEBUG_PARSING\n    printf(\"nb_read = %d, str = %s, size = %llu, suffix = %s\\n\", nb_read, str,\n           size, suffix);\n#endif\n\n    if (nb_read <= 0)\n        return (uint64_t)-1LL;  /* invalid format */\n\n    if ((nb_read == 1) || (suffix[0] == '\\0'))\n        return size;    /* no suffix: size in bytes */\n\n    if (!strcasecmp(suffix, \"B\"))\n        return size;\n    if (!strcasecmp(suffix, \"kB\") || !strcasecmp(suffix, \"K\"))\n        return KILO_BYTE * size;\n    if (!strcasecmp(suffix, \"MB\") || !strcasecmp(suffix, \"M\"))\n        
return MEGA_BYTE * size;\n    if (!strcasecmp(suffix, \"GB\") || !strcasecmp(suffix, \"G\"))\n        return GIGA_BYTE * size;\n    if (!strcasecmp(suffix, \"TB\") || !strcasecmp(suffix, \"T\"))\n        return TERA_BYTE * size;\n    if (!strcasecmp(suffix, \"PB\") || !strcasecmp(suffix, \"P\"))\n        return PETA_BYTE * size;\n    if (!strcasecmp(suffix, \"EB\") || !strcasecmp(suffix, \"E\"))\n        return EXA_BYTE * size;\n\n    return (uint64_t)-1LL;\n}\n\n/**\n * extracts up to count digits from src string,\n * and copy them to dest string. dest is completed\n * with '\\0'.\n * @return the number of digits copied to dest.\n */\nstatic inline int extract_digits(const char *src, char *dest,\n                                 unsigned int count)\n{\n    unsigned int i;\n    unsigned int cpd = 0;\n    for (i = 0; (i < count) && (src[i] != '\\0'); i++) {\n        dest[i] = src[i];\n        cpd++;\n    }\n    dest[cpd] = '\\0';\n    return cpd;\n}\n\n/** parse date/time yyyymmdd[HH[MM[SS]]] */\ntime_t str2date(const char *str)\n{\n    struct tm datetime = {\n        .tm_sec = 0,\n        .tm_min = 0,\n        .tm_hour = 0,\n        .tm_mday = 0,\n        .tm_mon = 0,\n        .tm_year = 0,\n        .tm_wday = 0,\n        .tm_yday = 0,\n        .tm_isdst = -1\n    };\n    char tmpstr[16];\n    int tmpint;\n    const char *curr = str;\n\n    /* extract year */\n    if (extract_digits(curr, tmpstr, 4) < 4)\n        return (time_t)-1;\n    curr += 4;\n    if ((tmpint = str2int(tmpstr)) == -1)\n        return (time_t)-1;\n    datetime.tm_year = tmpint - 1900;   /* 1900 => 0 */\n\n    /* extract month */\n    if (extract_digits(curr, tmpstr, 2) < 2)\n        return (time_t)-1;\n    curr += 2;\n    if ((tmpint = str2int(tmpstr)) <= 0)\n        return (time_t)-1;\n    else if (tmpint > 12)\n        return (time_t)-1;\n    datetime.tm_mon = tmpint - 1;   /* January => 0 */\n\n    /* extract day */\n    if (extract_digits(curr, tmpstr, 2) < 2)\n        return (time_t)-1;\n  
  curr += 2;\n    if ((tmpint = str2int(tmpstr)) <= 0)\n        return (time_t)-1;\n    else if (tmpint > 31)\n        return (time_t)-1;\n    datetime.tm_mday = tmpint;  /* 1st => 1 */\n\n    /* extract hours */\n    tmpint = extract_digits(curr, tmpstr, 2);\n    if (tmpint == 0)    /* not specified */\n        goto convert;\n    else if (tmpint < 2)    /* invalid */\n        return (time_t)-1;\n    curr += 2;\n    if ((tmpint = str2int(tmpstr)) == -1)\n        return (time_t)-1;\n    else if (tmpint > 23)\n        return (time_t)-1;\n    datetime.tm_hour = tmpint;\n\n    /* extract minutes */\n    tmpint = extract_digits(curr, tmpstr, 2);\n    if (tmpint == 0)    /* not specified */\n        goto convert;\n    else if (tmpint < 2)    /* invalid */\n        return (time_t)-1;\n    curr += 2;\n    if ((tmpint = str2int(tmpstr)) == -1)\n        return (time_t)-1;\n    else if (tmpint > 59)\n        return (time_t)-1;\n    datetime.tm_min = tmpint;\n\n    /* extract seconds */\n    tmpint = extract_digits(curr, tmpstr, 2);\n    if (tmpint == 0)    /* not specified */\n        goto convert;\n    else if (tmpint < 2)    /* invalid */\n        return (time_t)-1;\n    curr += 2;\n    if ((tmpint = str2int(tmpstr)) == -1)\n        return (time_t)-1;\n    else if (tmpint > 59)\n        return (time_t)-1;\n    datetime.tm_sec = tmpint;\n\n    if (*curr != '\\0')\n        return (time_t)-1;\n\n convert:\n    return mktime(&datetime);\n}\n\n#define TYPEINDEX(mode) (((mode) >> 12) & 0x0f)\n#define TYPECHAR(mode)  (\"0pcCd?bB-?l?s???\"[TYPEINDEX(mode)])\n\n/* The special bits. 
If set, display SMODE0/1 instead of MODE0/1 */\nstatic const mode_t SBIT[] = {\n    0, 0, S_ISUID,\n    0, 0, S_ISGID,\n    0, 0, S_ISVTX\n};\n\n/* The 9 mode bits to test */\nstatic const mode_t MBIT[] = {\n    S_IRUSR, S_IWUSR, S_IXUSR,\n    S_IRGRP, S_IWGRP, S_IXGRP,\n    S_IROTH, S_IWOTH, S_IXOTH\n};\n\nstatic const char MODE1[] = \"rwxrwxrwx\";\nstatic const char MODE0[] = \"---------\";\nstatic const char SMODE1[] = \"..s..s..t\";\nstatic const char SMODE0[] = \"..S..S..T\";\n\n/*\n * Return the standard ls-like mode string from a file mode.\n * This is static and so is overwritten on each call.\n */\nconst char *mode_string(mode_t mode, char *buf)\n{\n    int i;\n\n    for (i = 0; i < 9; i++) {\n        if (mode & SBIT[i])\n            buf[i] = (mode & MBIT[i]) ? SMODE1[i] : SMODE0[i];\n        else\n            buf[i] = (mode & MBIT[i]) ? MODE1[i] : MODE0[i];\n    }\n    return buf;\n}\n\n/**\n *  Print attributes to a GString.\n *  This is used for alerts and diff display (brief argument).\n */\nvoid print_attrs(GString *str, const attr_set_t *p_attr_set,\n                 attr_mask_t overide_mask, bool brief)\n{\n    attr_mask_t mask = p_attr_set->attr_mask;\n    char tmpbuf[256];\n    const char *format;\n    int i;\n\n    assert(str != NULL);\n\n    if (!attr_mask_is_null(overide_mask))\n        mask = attr_mask_and(&mask, &overide_mask);\n\n    /* initialize to empty string */\n    g_string_assign(str, \"\");\n\n    if (mask.std & ATTR_MASK_fullpath) {\n        if (brief)\n            format = \"path='%s',\";\n        else\n            format = \"Path:     \\\"%s\\\"\\n\";\n\n        g_string_append_printf(str, format, ATTR(p_attr_set, fullpath));\n    }\n    /* this information is redundant with fullpath,\n     * so only display it if path is not known */\n    else if (mask.std & ATTR_MASK_name) {\n        if (brief)\n            format = \"name='%s',\";\n        else\n            format = \"Name:     \\\"%s\\\"\\n\";\n\n        
g_string_append_printf(str, format, ATTR(p_attr_set, name));\n    }\n    if (mask.std & ATTR_MASK_parent_id) {\n        if (brief)\n            format = \"parent=\" DFID \",\";\n        else\n            format = \"Parent:   \" DFID \"\\n\";\n\n        g_string_append_printf(str, format, PFID(&ATTR(p_attr_set, parent_id)));\n    }\n\n    if (mask.std & ATTR_MASK_type) {\n        if (brief)\n            format = \"type=%s,\";\n        else\n            format = \"Type:     %s\\n\";\n\n        g_string_append_printf(str, format, ATTR(p_attr_set, type));\n    }\n\n    if (mask.std & ATTR_MASK_nlink) {\n        if (brief)\n            format = \"nlink=%u,\";\n        else\n            format = \"Nlinks:   %u\\n\";\n\n        g_string_append_printf(str, format, ATTR(p_attr_set, nlink));\n    }\n\n    if (mask.std & ATTR_MASK_mode) {\n        if (brief)\n            format = \"mode=%#o,\";\n        else\n            format = \"Mode:     %#o\\n\";\n\n        g_string_append_printf(str, format, ATTR(p_attr_set, mode));\n    }\n\n    if (mask.std & ATTR_MASK_uid) {\n        if (global_config.uid_gid_as_numbers) {\n            if (brief)\n                format = \"owner=%d,\";\n            else\n                format = \"Owner:    \\\"%d\\\"\\n\";\n\n            g_string_append_printf(str, format, ATTR(p_attr_set, uid).num);\n        } else {\n            if (brief)\n                format = \"owner=%s,\";\n            else\n                format = \"Owner:    \\\"%s\\\"\\n\";\n\n            g_string_append_printf(str, format, ATTR(p_attr_set, uid).txt);\n        }\n    }\n    if (mask.std & ATTR_MASK_gid) {\n        if (global_config.uid_gid_as_numbers) {\n            if (brief)\n                format = \"group=%d,\";\n            else\n                format = \"Group:    \\\"%d\\\"\\n\";\n\n            g_string_append_printf(str, format, ATTR(p_attr_set, gid).num);\n        } else {\n            if (brief)\n                format = \"group=%s,\";\n            else\n   
             format = \"Group:    \\\"%s\\\"\\n\";\n\n            g_string_append_printf(str, format, ATTR(p_attr_set, gid).txt);\n        }\n    }\n    if (mask.std & ATTR_MASK_size) {\n        if (brief) {\n            g_string_append_printf(str, \"size=%\" PRIu64 \",\",\n                                   ATTR(p_attr_set, size));\n        } else {\n            FormatFileSize(tmpbuf, sizeof(tmpbuf), ATTR(p_attr_set, size));\n            g_string_append_printf(str, \"Size:     %s\\n\", tmpbuf);\n        }\n    }\n    if (mask.std & ATTR_MASK_blocks) {\n        if (brief)\n            format = \"blocks=%Lu,\";\n        else\n            format = \"Blocks:   %Lu\\n\";\n\n        g_string_append_printf(str, format, ATTR(p_attr_set, blocks));\n    }\n    if (mask.std & ATTR_MASK_depth) {\n        if (brief)\n            format = \"depth=%u,\";\n        else\n            format = \"Depth:    %u\\n\";\n\n        g_string_append_printf(str, format, ATTR(p_attr_set, depth));\n    }\n\n    if (mask.std & ATTR_MASK_dircount) {\n        if (brief)\n            format = \"dircount=%u,\";\n        else\n            format = \"DirCount: %u\\n\";\n\n        g_string_append_printf(str, format, ATTR(p_attr_set, dircount));\n    }\n\n    if (mask.std & ATTR_MASK_last_access) {\n        if (brief) {\n            g_string_append_printf(str, \"access=%u,\",\n                                   ATTR(p_attr_set, last_access));\n        } else {\n            FormatDurationFloat(tmpbuf, sizeof(tmpbuf),\n                                time(NULL) - ATTR(p_attr_set, last_access));\n\n            g_string_append_printf(str, \"Last Access: %s ago\\n\", tmpbuf);\n        }\n    }\n    if (mask.std & ATTR_MASK_last_mod) {\n        if (brief) {\n            g_string_append_printf(str, \"modif=%u,\",\n                                   ATTR(p_attr_set, last_mod));\n        } else {\n            FormatDurationFloat(tmpbuf, sizeof(tmpbuf),\n                                time(NULL) - 
ATTR(p_attr_set, last_mod));\n            g_string_append_printf(str, \"Last Mod: %s ago\\n\", tmpbuf);\n        }\n    }\n\n    if (mask.std & ATTR_MASK_last_mdchange) {\n        if (brief) {\n            g_string_append_printf(str, \"change=%u,\",\n                                   ATTR(p_attr_set, last_mdchange));\n        } else {\n            FormatDurationFloat(tmpbuf, sizeof(tmpbuf),\n                                time(NULL) - ATTR(p_attr_set, last_mdchange));\n            g_string_append_printf(str, \"Last Change: %s ago\\n\", tmpbuf);\n        }\n    }\n\n    if (mask.std & ATTR_MASK_creation_time) {\n        if (brief) {\n            g_string_append_printf(str, \"creation=%u,\",\n                                   ATTR(p_attr_set, creation_time));\n        } else {\n            FormatDurationFloat(tmpbuf, sizeof(tmpbuf),\n                                time(NULL) - ATTR(p_attr_set, creation_time));\n            g_string_append_printf(str, \"Creation: %s ago\\n\", tmpbuf);\n        }\n    }\n#ifdef _LUSTRE\n    if (mask.std & ATTR_MASK_stripe_items) {\n        if (brief)\n            g_string_append(str, \"stripes={\");\n        else\n            g_string_append(str, \"Stripes: \");\n\n        append_stripe_list(str, &ATTR(p_attr_set, stripe_items), brief);\n\n        if (brief)\n            g_string_append(str, \"},\");\n        else\n            g_string_append_c(str, '\\n');\n    }\n\n    if (mask.std & ATTR_MASK_stripe_info) {\n        if (brief) {\n            g_string_append_printf(str,\n                                   \"stripe_count=%u,stripe_size=%\" PRIu64 \",\",\n                                   ATTR(p_attr_set, stripe_info).stripe_count,\n                                   ATTR(p_attr_set, stripe_info).stripe_size);\n\n            if (!EMPTY_STRING(ATTR(p_attr_set, stripe_info).pool_name)) {\n                g_string_append_printf(str, \"ost_pool=%s,\",\n                                       ATTR(p_attr_set, stripe_info).pool_name);\n  
          }\n        } else {\n            format = \"Stripe count: %u\\n\" \"Stripe size:  %s\\n\";\n\n            FormatFileSize(tmpbuf, sizeof(tmpbuf),\n                           ATTR(p_attr_set, stripe_info).stripe_size);\n\n            g_string_append_printf(str, format,\n                                   ATTR(p_attr_set, stripe_info).stripe_count,\n                                   tmpbuf);\n\n            if (!EMPTY_STRING(ATTR(p_attr_set, stripe_info).pool_name)) {\n                format = \"OST pool:     %s\\n\";\n\n                g_string_append_printf(str, format,\n                                       ATTR(p_attr_set, stripe_info).pool_name);\n            }\n        }\n    }\n#endif\n\n    for (i = 0; i < sm_inst_count; i++) {\n        sm_instance_t *smi = get_sm_instance(i);\n\n        /* print status */\n        if (mask.status & SMI_MASK(i)) {\n            if (brief)\n                format = \"%s=%s,\";\n            else\n                format = \"%s:  %s\\n\";\n\n            g_string_append_printf(str, format, smi->user_name,\n                                   STATUS_ATTR(p_attr_set, i));\n        }\n        /* print specific info for this status manager */\n        if (mask.sm_info & smi_info_bits(smi)) {\n            for (i = 0; i < smi->sm->nb_info; i++) {\n                if (mask.sm_info & smi_info_bit(smi, i)) {\n\n                    g_string_append_printf(str, brief ? \"%s=\" : \"%s:  \",\n                                           sm_attr_info[smi->sm_info_offset +\n                                                        i].user_attr_name);\n\n                    ListMgr_PrintAttrPtr(str,\n                                         sm_attr_info[smi->sm_info_offset +\n                                                      i].def->db_type,\n                                         SMI_INFO(p_attr_set, smi, i),\n                                         brief ? \"'\" : \"\\\"\");\n\n                    g_string_append_c(str, brief ? 
',' : '\\n');\n                }\n            }\n        }\n    }\n\n    if (mask.std & ATTR_MASK_link) {\n        if (brief)\n            format = \"lnk='%s',\";\n        else\n            format = \"link: \\\"%s\\\"\\n\";\n\n        g_string_append_printf(str, format, ATTR(p_attr_set, link));\n    }\n\n    if (brief && str->len != 0) {\n        /* remove final ',' */\n        g_string_truncate(str, str->len - 1);\n    }\n}\n\n/* helpers for attr change */\n#define APPLYTAG \"ChgAttr\"\n#define LOG_ATTR_CHANGE(_nfunc, _arg_fmt, _dr, _rc, ...) do { \\\n            if (_rc)                                          \\\n                DisplayLog(LVL_CRIT, APPLYTAG, \"%s(\"_arg_fmt\") failed: %s\", \\\n                           _nfunc, __VA_ARGS__, strerror(_rc)); \\\n            else                                              \\\n                DisplayReport(\"%s%s(\"_arg_fmt\")\", _dr ? \"(dry-run) \" : \"\", \\\n                              _nfunc, __VA_ARGS__); \\\n        } while (0)\n\n/**\n *  Apply attribute changes\n *  \\param change_mask mask of attributes to be changed\n */\nint ApplyAttrs(const entry_id_t *p_id, const attr_set_t *p_attr_new,\n               const attr_set_t *p_attr_old,\n               attr_mask_t change_mask, bool dry_run)\n{\n    attr_mask_t mask = attr_mask_and(&p_attr_new->attr_mask, &change_mask);\n    int rc, err = 0;\n    const char *chattr_path = NULL;\n#ifdef _HAVE_FID\n    char fid_path[RBH_PATH_MAX];\n#endif\n\n    if (attr_mask_is_null(mask))\n        return 0;\n\n    if (!ATTR_MASK_TEST(p_attr_new, fullpath)) {\n#ifdef _HAVE_FID\n        /* build fid path */\n        BuildFidPath(p_id, fid_path);\n        chattr_path = fid_path;\n#else\n        DisplayLog(LVL_CRIT, APPLYTAG,\n                   \"No path: cannot apply changes to entry\");\n        return -EINVAL;\n#endif\n    } else\n        chattr_path = ATTR(p_attr_new, fullpath);\n\n    if (mask.std & ATTR_MASK_fullpath) {\n        if (!ATTR_MASK_TEST(p_attr_old, 
fullpath)) {\n            DisplayLog(LVL_CRIT, APPLYTAG,\n                       \"Cannot rename: source path is unknown\");\n            err++;\n        } else {\n            if (!dry_run\n                && rename(ATTR(p_attr_old, fullpath),\n                          ATTR(p_attr_new, fullpath)))\n                rc = errno;\n            else\n                rc = 0;\n\n            LOG_ATTR_CHANGE(\"rename\", \"%s, %s\", dry_run, rc,\n                            ATTR(p_attr_old, fullpath), ATTR(p_attr_new,\n                                                             fullpath));\n        }\n    } else if (mask.std & ATTR_MASK_parent_id) {\n        /* can't change parent without changing path!!! */\n    } else if (mask.std & ATTR_MASK_name) {\n        /* just change name */\n    }\n\n    if (mask.std & ATTR_MASK_type) {\n        /* can't change entry type without creating/removing it */\n    }\n\n    if (mask.std & (ATTR_MASK_uid | ATTR_MASK_gid)) {\n        uid_t u = -1;\n        gid_t g = -1;\n\n        if (mask.std & ATTR_MASK_uid) {\n            if (global_config.uid_gid_as_numbers) {\n                u = ATTR(p_attr_new, uid).num;\n            } else {\n                struct passwd p;\n                char buf[4096];\n                struct passwd *res = NULL;\n\n                rc = getpwnam_r(ATTR(p_attr_new, uid).txt, &p, buf, 4096, &res);\n                if (rc == 0 && res != NULL)\n                    u = res->pw_uid;\n            }\n        }\n\n        if (mask.std & ATTR_MASK_gid) {\n            if (global_config.uid_gid_as_numbers) {\n                g = ATTR(p_attr_new, gid).num;\n            } else {\n                struct group gs;\n                char buf[4096];\n                struct group *res = NULL;\n\n                rc = getgrnam_r(ATTR(p_attr_new, gid).txt, &gs, buf, 4096,\n                                &res);\n                if (rc == 0 && res != NULL)\n                    g = res->gr_gid;\n            }\n        }\n\n        if 
(u != -1 || g != -1) {\n\n            if (!dry_run && lchown(chattr_path, u, g))\n                rc = errno;\n            else\n                rc = 0;\n\n            LOG_ATTR_CHANGE(\"lchown\", \"%s, u=%d, g=%d\", dry_run, rc,\n                            chattr_path, u, g);\n        }\n    }\n\n    /* always set mode after chown, as it can be changed by chown */\n    if (mask.std & ATTR_MASK_mode) {\n\n        if (!dry_run && chmod(chattr_path, ATTR(p_attr_new, mode)))\n            rc = errno;\n        else\n            rc = 0;\n\n        LOG_ATTR_CHANGE(\"chmod\", \"%s, %#o\", dry_run, rc,\n                        chattr_path, ATTR(p_attr_new, mode));\n    }\n\n    /* the following changes can't be applied (not supported) */\n    /* stripe_items / stripe_info => restripe the file? */\n    /* status => perform the needed action? */\n\n    if (mask.std & ATTR_MASK_size) {\n        /** @TODO if new size is zero: truncate.\n         * else, we have no idea of what's in the file...\n         */\n    }\n    if (mask.std & (ATTR_MASK_last_access | ATTR_MASK_last_mod |\n                    ATTR_MASK_last_mdchange)) {\n        struct utimbuf t = {\n            .actime = -1,\n            .modtime = -1\n        };\n        int get_stat = 0;\n\n        if (mask.std & ATTR_MASK_last_access)\n            t.actime = ATTR(p_attr_new, last_access);\n        if (mask.std & ATTR_MASK_last_mod)\n            t.modtime = ATTR(p_attr_new, last_mod);\n\n        /* if there is still a value == -1, we must fill it\n         * or utime will set a bad value\n         */\n        if (t.actime == -1) {\n            if (ATTR_MASK_TEST(p_attr_old, last_access))\n                t.actime = ATTR(p_attr_old, last_access);\n            else\n                /* need to get old value of atime */\n                get_stat = 1;\n        }\n        if (t.modtime == -1) {\n            if (ATTR_MASK_TEST(p_attr_old, last_mod))\n                t.modtime = ATTR(p_attr_old, last_mod);\n            else\n  
              /* need to get old value of atime */\n                get_stat = 1;\n        }\n        if (get_stat) {\n            struct stat st;\n            if (lstat(chattr_path, &st) == 0) {\n                if (t.modtime == -1)\n                    t.modtime = st.st_mtime;\n                if (t.actime == -1)\n                    t.actime = st.st_atime;\n            }\n        }\n\n        if (!dry_run && utime(chattr_path, &t))\n            rc = errno;\n        else\n            rc = 0;\n\n        LOG_ATTR_CHANGE(\"utime\", \"%s, a=%ld, m=%ld\", dry_run, rc,\n                        chattr_path, t.actime, t.modtime);\n    }\n\n    return err;\n}\n\n/** Compute greatest common divisor (GCD) of 2 numbers */\nunsigned int gcd(unsigned int x, unsigned int y)\n{\n    unsigned int a = x;\n    unsigned int b = y;\n    while ((a * b) != 0) {\n        if (a > b)\n            a = a - b;\n        if (a < b)\n            b = b - a;\n        if (a == b)\n            b = 0;\n    }\n    return a;\n}\n\n/** Ensure that the thread is suspended for a given amount\n * of time, event if the process gets interrupts.\n */\nvoid rh_sleep(unsigned int seconds)\n{\n    time_t start = time(NULL);\n    int remain = seconds;\n    int spent;\n\n    while (remain > 0) {\n        remain = sleep(remain);\n        if (remain <= 0) {\n            spent = time(NULL) - start;\n            if (spent < seconds)\n                remain = seconds - spent;\n        }\n    }\n}\n\n/** Substitute a pattern in a string with another sub-string\n * \\param str_in_out must be large enough to receive\n *  the resulting string, and cannot exceed 1024.\n */\nint str_subst(char *str_in_out, const char *to_be_replaced,\n              const char *replacement)\n{\n    size_t len_from = strlen(to_be_replaced);\n    size_t len_to = strlen(replacement);\n    char *curr;\n\n    /* - same size: replace inline\n     * - smaller string size: replace inline then shift\n     */\n\n    for (curr = strstr(str_in_out, 
to_be_replaced);\n         curr != NULL; curr = strstr(curr, to_be_replaced)) {\n        unsigned int i;\n        char *curr_src;\n        char tmp_buff[1024];\n\n        /* if replacement is longer, save end of line */\n        if (len_to > len_from)\n            strcpy(tmp_buff, curr + len_from);\n\n        for (i = 0; i < len_to; i++)\n            curr[i] = replacement[i];\n        curr = curr + len_to;\n\n        /* if replacement is smaller, need to shift */\n        if (len_to < len_from) {\n            if (((char *)(curr + len_from - len_to))[0] == '\\0')\n                curr[0] = '\\0';\n\n            /* shift of len_from - len_to */\n            for (curr_src = curr + len_from - len_to, i = 0;\n                 *curr_src != '\\0'; curr_src++, i++) {\n                curr[i] = *curr_src;\n                if (curr_src[1] == '\\0')\n                    curr[i + 1] = '\\0';\n            }\n        } else if (len_to > len_from)\n            /* copy saved data */\n            strcpy(curr, tmp_buff);\n    }\n    return 0;\n}\n\n/** escape every special character in a regex\n *\n * \\param dest      the string to copy the escaped regex to\n * \\param dest_size the size of dest (including the terminating char)\n * \\param src       the null terminated string representing the regex to\n *                  escape\n * \\param charset   a string that contains every char to escape\n *\n * \\return          0 on success, -error_code on error\n */\nint str_escape_charset(char *dest, size_t dest_size, const char *src,\n                       char *charset)\n{\n    size_t last_idx = 0;\n    size_t escape_size = 0;\n\n    for (size_t idx = 0; idx < strlen(src); idx++) {\n        /* Is this a special character ? */\n        char *token = strchr(charset, src[idx]);\n        if (token == NULL)\n            continue;\n\n        /* Is there enough space left to escape the next token? 
*/\n        if (idx + escape_size + 2 > dest_size)\n            return -ENOBUFS;\n\n        /* Copy from last position in src to the current one */\n        strncpy(&dest[last_idx + escape_size], &src[last_idx],\n                idx - last_idx);\n        /* Add an escape char */\n        dest[idx + escape_size] = '\\\\';\n\n        /* Update internals */\n        escape_size++;\n        last_idx = idx;\n    }\n    if (strlen(src) + escape_size + 1 > dest_size)\n        return -ENOBUFS;\n\n    /* Copy the rest of src (including the terminating char) */\n    strcpy(&dest[last_idx + escape_size], &src[last_idx]);\n    return 0;\n}\n\n/**\n * extract relative path from full path\n */\nint relative_path(const char *fullpath, const char *root, char *rel_path)\n{\n    size_t len;\n    char rootcopy[1024];\n\n    if (!strcmp(root, fullpath)) {\n        /* arg is root */\n        rel_path[0] = '\\0';\n        return 0;\n    }\n\n    /* copy root path */\n    strcpy(rootcopy, root);\n    len = strlen(rootcopy);\n\n    /* add '/' if needed */\n    if ((len > 1) && (rootcopy[len - 1] != '/')) {\n        rootcopy[len] = '/';\n        rootcopy[len + 1] = '\\0';\n        len++;\n    }\n\n    /* test if the full path starts with the same dirs */\n    if (strncmp(rootcopy, fullpath, len)) {\n        DisplayLog(LVL_MAJOR, \"RelPath\",\n                   \"ERROR: file path '%s' is not under filesystem root '%s'\",\n                   fullpath, rootcopy);\n        return -EINVAL;\n    }\n\n    strcpy(rel_path, fullpath + len);\n    return 0;\n}\n\nvoid upperstr(char *str)\n{\n    int i = 0;\n\n    for (i = 0; str[i]; i++)\n        str[i] = toupper(str[i]);\n}\n\nvoid lowerstr(char *str)\n{\n    int i = 0;\n\n    for (i = 0; str[i]; i++)\n        str[i] = tolower(str[i]);\n}\n\nint path2id(const char *path, entry_id_t *id, const struct stat *st)\n{\n    int rc;\n\n#ifdef _HAVE_FID\n    rc = Lustre_GetFidFromPath(path, id);\n    if (rc)\n        return rc;\n#else\n    struct stat 
stn;\n\n    if (st == NULL) {\n        if (lstat(path, &stn)) {\n            rc = -errno;\n            DisplayLog(LVL_CRIT, __func__, \"ERROR: cannot stat '%s': %s\",\n                       path, strerror(-rc));\n            return rc;\n        }\n        st = &stn;\n    }\n    /* build id from dev/inode */\n    id->inode = st->st_ino;\n    id->fs_key = get_fskey();\n#endif\n    return 0;\n}\n\n#define MKDIR_TAG \"MkDir\"\nint mkdir_recurse(const char *full_path, mode_t mode, entry_id_t *dir_id)\n{\n    char path_copy[MAXPATHLEN];\n    const char *curr;\n    int rc;\n    int exists = 0;\n\n    if (strncmp(global_config.fs_path, full_path, strlen(global_config.fs_path))\n        != 0) {\n        DisplayLog(LVL_MAJOR, MKDIR_TAG,\n                   \"Error: '%s' in not under filesystem root '%s'\", full_path,\n                   global_config.fs_path);\n        return -EINVAL;\n    }\n    /* skip fs root */\n    curr = full_path + strlen(global_config.fs_path);\n\n    if (*curr == '\\0') {    /* full_path is root dir */\n        exists = 1;\n        goto get_id;\n    } else if (*curr != '/') {  /* slash expected */\n        DisplayLog(LVL_MAJOR, MKDIR_TAG,\n                   \"Error: '%s' in not under filesystem root '%s'\", full_path,\n                   global_config.fs_path);\n        return -EINVAL;\n    }\n\n    /* skip first slash */\n    curr++;\n\n    while ((curr = strchr(curr, '/')) != NULL) {\n        struct stat st;\n\n        /* if fullpath = '/a/b',\n         * curr = &(fullpath[2]);\n         * so, copy 2 chars to get '/a'.\n         * and set fullpath[2] = '\\0'\n         */\n        int path_len = curr - full_path + 1;\n\n        /* extract directory name */\n        rh_strncpy(path_copy, full_path, path_len);\n\n        /* Check if the target location exists before\n         * creating the directory.\n         * If the target is not a directory, the copy\n         * will fail anyhow with the appropriate error. 
*/\n        if (lstat(path_copy, &st) != 0 && errno == ENOENT) {\n            DisplayLog(LVL_FULL, MKDIR_TAG, \"mkdir(%s)\", path_copy);\n            /* Test EEXIST because the directory may have been crated by\n             * another thread in the meantime. */\n            if (mkdir(path_copy, mode) != 0 && errno != EEXIST) {\n                rc = -errno;\n                DisplayLog(LVL_CRIT, MKDIR_TAG, \"mkdir(%s) failed: %s\",\n                           path_copy, strerror(-rc));\n                return rc;\n            }\n        }\n\n        curr++;\n    }\n\n    /* finally create last level of dir */\n    DisplayLog(LVL_FULL, MKDIR_TAG, \"mkdir(%s)\", full_path);\n    if ((mkdir(full_path, mode) != 0) && (errno != EEXIST)) {\n        rc = -errno;\n        DisplayLog(LVL_CRIT, MKDIR_TAG, \"mkdir(%s) failed: %s\", full_path,\n                   strerror(-rc));\n        return rc;\n    } else if (errno == EEXIST)\n        exists = 1;\n\n get_id:\n    /* must return directory id */\n    if (dir_id) {\n        rc = path2id(full_path, dir_id, NULL);\n        if (rc)\n            return rc;\n    }\n\n    if (exists)\n        return -EEXIST;\n    else\n        return 0;\n}\n\n/** create parent directory, and return its id (even if it already exists) */\nint create_parent_of(const char *child_path, entry_id_t *p_parent_id)\n{\n    char tmp[RBH_PATH_MAX];\n    char *destdir;\n\n    /* copy to tmp buffer as dirname modifies its argument */\n    strcpy(tmp, child_path);\n    /* extract parent dir path */\n    destdir = dirname(tmp);\n    if (destdir == NULL) {\n        DisplayLog(LVL_CRIT, MKDIR_TAG,\n                   \"Error extracting directory path of '%s'\", child_path);\n        return -EINVAL;\n    }\n\n    /* create the directory */\n    return mkdir_recurse(destdir, 0750, p_parent_id);\n}\n\n#define CREAT_TAG \"Create\"\n/* create an object with the given attributes */\nint create_from_attrs(const attr_set_t *attrs_in,\n                      attr_set_t 
*attrs_out,\n                      entry_id_t *new_id, bool overwrite, bool setstripe)\n{\n    char link[RBH_PATH_MAX] = \"\";\n    const char *fspath;\n    int rc;\n    struct stat st_dest;\n    int fd;\n    mode_t mode_create = 0;\n    bool set_mode = false;\n\n    if (!ATTR_MASK_TEST(attrs_in, fullpath)\n        || !ATTR_MASK_TEST(attrs_in, type)) {\n        DisplayLog(LVL_MAJOR, CREAT_TAG,\n                   \"Missing mandatory attribute to create entry\");\n        return -EINVAL;\n    }\n    fspath = ATTR(attrs_in, fullpath);\n\n    /* initialize out attrs */\n    ATTR_MASK_INIT(attrs_out);\n\n    /* first create parent and retrieve parent id */\n    rc = create_parent_of(fspath, &ATTR(attrs_out, parent_id));\n    if (rc != 0 && rc != -EEXIST)\n        return rc;\n    else\n        ATTR_MASK_SET(attrs_out, parent_id);\n\n    if (!strcasecmp(ATTR(attrs_in, type), STR_TYPE_DIR)) {\n        /* entry is a directory */\n        if (ATTR_MASK_TEST(attrs_in, mode))\n            mode_create = ATTR(attrs_in, mode);\n        else\n            mode_create = 750;\n\n        /* then create the directory itself */\n        rc = mkdir(fspath, mode_create) ? 
-errno : 0;\n        if (rc != 0 && rc != -EEXIST)\n            return rc;\n        else if (rc == -EEXIST)\n            set_mode = true;\n    } else if (!strcasecmp(ATTR(attrs_in, type), STR_TYPE_LINK)) {\n        /* entry is a symlink */\n\n        if (!ATTR_MASK_TEST(attrs_in, link)) {\n            DisplayLog(LVL_MAJOR, CREAT_TAG,\n                       \"Missing mandatory attribute 'link' to create link\");\n            return -EINVAL;\n        }\n\n        if (symlink(ATTR(attrs_in, link), fspath) != 0) {\n            rc = -errno;\n            DisplayLog(LVL_MAJOR, CREAT_TAG,\n                       \"Error creating symlink %s->\\\"%s\\\" in filesystem: %s\",\n                       fspath, link, strerror(-rc));\n            return rc;\n        }\n        /* can't set mode on a symlink */\n    } else if (!strcasecmp(ATTR(attrs_in, type), STR_TYPE_FILE)) {\n        int created = false;\n\n        if (ATTR_MASK_TEST(attrs_in, mode))\n            mode_create = ATTR(attrs_in, mode);\n        else\n            mode_create = 0640; /* default */\n\n#ifdef _LUSTRE\n        if (setstripe) {\n            /* create the file with the appropriate stripe in Lustre */\n            if (ATTR_MASK_TEST(attrs_in, stripe_info)) {\n                rc = CreateStriped(fspath, &ATTR(attrs_in, stripe_info),\n                                   overwrite);\n                if (rc == 0 || rc == -EEXIST) {\n                    created = true;\n                    set_mode = true;\n                } else\n                    DisplayLog(LVL_MAJOR, CREAT_TAG,\n                               \"setstripe failed: trying to create file with default striping\");\n            }\n        } else {\n            /* create with no stripe */\n            rc = CreateWithoutStripe(fspath, mode_create & 07777, overwrite);\n            if (rc == 0) {\n                created = true;\n                set_mode = false;\n            } else if (rc == -EEXIST) {\n                created = true;\n                
set_mode = true;\n            } else\n                DisplayLog(LVL_MAJOR, CREAT_TAG,\n                           \"create(O_LOV_DELAY_CREATE) failed: trying to create file with default striping\");\n        }\n#endif\n        if (!created) {\n            fd = creat(fspath, mode_create & 07777);\n            if (fd < 0) {\n                rc = -errno;\n                DisplayLog(LVL_CRIT, CREAT_TAG,\n                           \"ERROR: couldn't create '%s': %s\", fspath,\n                           strerror(-rc));\n                return rc;\n            } else\n                close(fd);\n        }\n\n        /* set times */\n        if (ATTR_MASK_TEST(attrs_in, last_mod)) {\n            struct utimbuf utb;\n            utb.modtime = ATTR(attrs_in, last_mod);\n\n            if (ATTR_MASK_TEST(attrs_in, last_access))\n                utb.actime = ATTR(attrs_in, last_access);\n            else\n                utb.actime = utb.modtime;\n\n            /* set the same mtime as in the DB */\n            DisplayLog(LVL_FULL, CREAT_TAG,\n                       \"Restoring times for '%s': atime=%lu, mtime=%lu\", fspath,\n                       utb.actime, utb.modtime);\n            if (utime(fspath, &utb))\n                DisplayLog(LVL_MAJOR, CREAT_TAG,\n                           \"Warning: couldn't restore times for '%s': %s\",\n                           fspath, strerror(errno));\n        }\n    } else {\n        /* type not supported */\n        DisplayLog(LVL_CRIT, CREAT_TAG,\n                   \"Error: cannot restore entries of type '%s' (%s)\",\n                   ATTR(attrs_in, type), fspath);\n        return -ENOTSUP;\n    }\n\n    /* set owner, group */\n    if (ATTR_MASK_TEST(attrs_in, uid) || ATTR_MASK_TEST(attrs_in, gid)) {\n        uid_t uid = -1;\n        gid_t gid = -1;\n        char buff[4096];\n\n        if (ATTR_MASK_TEST(attrs_in, uid)) {\n            if (global_config.uid_gid_as_numbers) {\n                uid = ATTR(attrs_in, uid).num;\n          
  } else {\n                struct passwd pw;\n                struct passwd *p_pw;\n\n                if ((getpwnam_r(ATTR(attrs_in, uid).txt, &pw, buff, 4096, &p_pw)\n                     != 0)\n                    || (p_pw == NULL)) {\n                    DisplayLog(LVL_MAJOR, CREAT_TAG,\n                               \"Warning: couldn't resolve uid for user '%s'\",\n                               ATTR(attrs_in, uid).txt);\n                    uid = -1;\n                } else\n                    uid = p_pw->pw_uid;\n            }\n        }\n\n        if (ATTR_MASK_TEST(attrs_in, gid)) {\n            if (global_config.uid_gid_as_numbers) {\n                gid = ATTR(attrs_in, gid).num;\n            } else {\n                struct group gr;\n                struct group *p_gr;\n\n                if ((getgrnam_r(ATTR(attrs_in, gid).txt, &gr, buff, 4096, &p_gr)\n                     != 0)\n                    || (p_gr == NULL)) {\n                    DisplayLog(LVL_MAJOR, CREAT_TAG,\n                               \"Warning: couldn't resolve gid for group '%s'\",\n                               ATTR(attrs_in, gid).txt);\n                    gid = -1;\n                } else\n                    gid = p_gr->gr_gid;\n            }\n        }\n\n        DisplayLog(LVL_FULL, CREAT_TAG,\n                   \"Restoring owner/group for '%s': uid=%u, gid=%u\", fspath,\n                   uid, gid);\n\n        if (lchown(fspath, uid, gid)) {\n            rc = -errno;\n            DisplayLog(LVL_MAJOR, CREAT_TAG,\n                       \"Warning: cannot set owner/group for '%s': %s\", fspath,\n                       strerror(-rc));\n        } else {\n            /* According to chown(2) manual: chown may clear sticky bits even\n             * if root does it, so, we must set the mode again if it contains\n             * special bits */\n            if (!set_mode && (mode_create & 07000))\n                set_mode = true;\n        }\n    }\n\n    if (set_mode) {\n       
 /* set the same mode as in the backend */\n        DisplayLog(LVL_FULL, CREAT_TAG, \"Restoring mode for '%s': mode=%#o\",\n                   fspath, mode_create & 07777);\n        if (chmod(fspath, mode_create & 07777))\n            DisplayLog(LVL_MAJOR, CREAT_TAG,\n                       \"Warning: couldn't restore mode for '%s': %s\", fspath,\n                       strerror(errno));\n    }\n\n    if (lstat(fspath, &st_dest)) {\n        rc = -errno;\n        DisplayLog(LVL_CRIT, CREAT_TAG,\n                   \"ERROR: lstat() failed on restored entry '%s': %s\", fspath,\n                   strerror(-rc));\n        return rc;\n    }\n\n    rc = path2id(fspath, new_id, &st_dest);\n    if (rc)\n        return rc;\n\n    /* update with the new attributes */\n    stat2rbh_attrs(&st_dest, attrs_out, true);\n\n    /* copy missing info: path, name, link, ... */\n    strcpy(ATTR(attrs_out, fullpath), fspath);\n    ATTR_MASK_SET(attrs_out, fullpath);\n\n    char *name = strrchr(fspath, '/');\n    if (name) {\n        name++;\n        strcpy(ATTR(attrs_out, name), name);\n        ATTR_MASK_SET(attrs_out, name);\n    }\n    ATTR(attrs_out, path_update) = time(NULL);\n    ATTR_MASK_SET(attrs_out, path_update);\n    ATTR(attrs_out, md_update) = time(NULL);\n    ATTR_MASK_SET(attrs_out, md_update);\n\n    if (S_ISLNK(st_dest.st_mode)) {\n        strcpy(ATTR(attrs_out, link), link);\n        ATTR_MASK_SET(attrs_out, link);\n    }\n#ifdef _LUSTRE\n    /* get new stripe */\n    if (S_ISREG(st_dest.st_mode)) {\n        /* get the new stripe info */\n        if (File_GetStripeByPath(fspath,\n                                 &ATTR(attrs_out, stripe_info),\n                                 &ATTR(attrs_out, stripe_items)) == 0) {\n            ATTR_MASK_SET(attrs_out, stripe_info);\n            ATTR_MASK_SET(attrs_out, stripe_items);\n        }\n    }\n#endif\n    return 0;\n}\n\nenum path_check_return path_check_update(const entry_id_t *p_id,\n                                         
const char *fid_path,\n                                         attr_set_t *p_attrs,\n                                         attr_mask_t attr_mask)\n{\n#ifndef _HAVE_FID\n    return PCR_NO_CHANGE;\n#else\n    int rc;\n    bool updated = false;\n\n    if (attr_mask.std & (ATTR_MASK_name | ATTR_MASK_parent_id)) {\n        rc = Lustre_GetNameParent(fid_path, 0, &ATTR(p_attrs, parent_id),\n                                  ATTR(p_attrs, name), RBH_NAME_MAX);\n        if (rc == 0) {\n            ATTR_MASK_SET(p_attrs, name);\n            ATTR_MASK_SET(p_attrs, parent_id);\n            /* update path refresh time */\n            ATTR_MASK_SET(p_attrs, path_update);\n            ATTR(p_attrs, path_update) = time(NULL);\n            updated = true;\n        } else if (rc == -ENODATA) {\n            /* Entry has no path in namespace. It is likely a volatile,\n             * and should be ignored. */\n            DisplayLog(LVL_DEBUG, \"PatchCheck\", \"Entry \"DFID\" has no path. \"\n                       \"It is likely a volatile\", PFID(p_id));\n            return PCR_ORPHAN;\n        } else if (rc != -ENOENT) {\n            DisplayLog(LVL_MAJOR, \"PathCheck\",\n                       \"Failed to get parent+name for \" DFID \": %s\", PFID(p_id),\n                       strerror(-rc));\n        }\n    }\n\n    /* if fullpath is in the policy, get the fullpath */\n    if (attr_mask.std & ATTR_MASK_fullpath) {\n        rc = Lustre_GetFullPath(p_id, ATTR(p_attrs, fullpath), RBH_PATH_MAX);\n        if (rc == 0) {\n            ATTR_MASK_SET(p_attrs, fullpath);\n            updated = true;\n        } else if (rc != -ENOENT) {\n            DisplayLog(LVL_MAJOR, \"PathCheck\",\n                       \"Failed to retrieve fullpath for \" DFID \": %s\",\n                       PFID(p_id), strerror(-rc));\n        }\n    }\n    return updated ? 
PCR_UPDATED : PCR_NO_CHANGE;\n#endif\n}\n\n/* Find the numerical user ID (UID) for a given user name, which is\n * either a real name or a string containing a number.\n * Return 0 on success, and non-zero on error. */\nint set_uid_val(const char *username, db_type_u *val)\n{\n    long uid;\n    char *endptr;\n\n    if (!global_config.uid_gid_as_numbers) {\n        val->val_str = username;\n        return 0;\n    }\n\n    if (WILDCARDS_IN(username)) {\n        DisplayLog(LVL_CRIT, __func__,\n                   \"ERROR: Wilcards not allowed in user name\");\n        return -1;\n    }\n\n    /* The name could be a number already. */\n    errno = 0;\n    uid = strtol(username, &endptr, 0);\n\n    if ((errno == ERANGE && (uid == LONG_MAX || uid == LONG_MIN)) ||\n        (errno != 0 && uid == 0) || endptr == username) {\n        /* Not a number. */\n        struct passwd pw;\n        struct passwd *result;\n        char buff[4096];\n\n        if (getpwnam_r(username, &pw, buff, sizeof(buff), &result) == 0) {\n            val->val_int = pw.pw_uid;\n            return 0;\n        } else {\n            DisplayLog(LVL_CRIT, __func__, \"couldn't resolve uid for user '%s'\",\n                       username);\n            return -1;\n        }\n    }\n\n    if (uid < 0) {\n        DisplayLog(LVL_CRIT, __func__,\n                   \"ERROR: Given UID is negative (%ld)\", uid);\n        return -1;\n    }\n\n    if (uid > UINT_MAX) {\n        DisplayLog(LVL_CRIT, __func__,\n                   \"ERROR: Given UID is too big (%ld)\", uid);\n        return -1;\n    }\n\n    val->val_int = uid;\n    return 0;\n}\n\n/* Find the numerical group ID (GID) for a given group name, which is\n * either a real name or a string containing a number.\n * Return 0 on success, and non-zero on error. 
*/\nint set_gid_val(const char *groupname, db_type_u *val)\n{\n    long gid;\n    char *endptr;\n\n    if (!global_config.uid_gid_as_numbers) {\n        val->val_str = groupname;\n        return 0;\n    }\n\n    if (WILDCARDS_IN(groupname)) {\n        DisplayLog(LVL_CRIT, __func__,\n                   \"ERROR: Wilcards not allowed in group name\");\n        return -1;\n    }\n\n    /* The name could be a number already. */\n    errno = 0;\n    gid = strtol(groupname, &endptr, 0);\n\n    if ((errno == ERANGE && (gid == LONG_MAX || gid == LONG_MIN)) ||\n        (errno != 0 && gid == 0) || endptr == groupname) {\n        /* Not a number. */\n        struct group grp;\n        struct group *result;\n        char buff[4096];\n\n        if (getgrnam_r(groupname, &grp, buff, sizeof(buff), &result) == 0) {\n            val->val_int = grp.gr_gid;\n            return 0;\n        } else {\n            DisplayLog(LVL_CRIT, __func__,\n                       \"couldn't resolve gid for group '%s'\", groupname);\n            return -1;\n        }\n    }\n\n    if (gid < 0) {\n        DisplayLog(LVL_CRIT, __func__,\n                   \"ERROR: Given GID is negative (%ld)\", gid);\n        return -1;\n    }\n\n    if (gid > UINT_MAX) {\n        DisplayLog(LVL_CRIT, __func__,\n                   \"ERROR: Given GID is too big (%ld)\", gid);\n        return -1;\n    }\n\n    val->val_int = gid;\n    return 0;\n}\n\n/* Returns a printable string for a UID or GID, whether it's a number\n * or an actual string. */\nconst char *id_as_str(db_type_u *val)\n{\n    static __thread char buf[20];\n\n    if (!global_config.uid_gid_as_numbers)\n        return val->val_str;\n\n    sprintf(buf, \"%d\", val->val_int);\n    return buf;\n}\n"
  },
  {
    "path": "src/common/rbh_modules.c",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2014-2015 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n/**\n * \\file   rbh_modules.c\n * \\author Henri Doreau\n * \\brief  Dynamic modules management\n */\n\n#ifdef HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n\n#include \"rbh_modules.h\"\n#include \"global_config.h\"\n#include \"rbh_logs.h\"\n#include \"rbh_misc.h\"\n\n#include <ctype.h>\n#include <dlfcn.h>\n#include <errno.h>\n#include <string.h>\n#include <assert.h>\n\n/**\n * Logging domain tag\n */\n#define MODULE_TAG  \"mod_mgt\"\n\n/**\n * Maximum module name, including final null terminator.\n */\n#define MAX_MOD_NAMELEN 128\n\n/**\n * Global module list and associated size.\n * Re-allocated when extended.\n */\nstatic rbh_module_t *mod_list;\nstatic int           mod_count;\n\nstatic const char *module_get_name(const rbh_module_t *mod)\n{\n    if (mod == NULL || mod->mod_ops.mod_get_name == NULL)\n        return NULL;\n\n    return mod->mod_ops.mod_get_name();\n}\n\nstatic int module_get_version(const rbh_module_t *mod)\n{\n    if (mod == NULL || mod->mod_ops.mod_get_version == NULL)\n        return 0;\n\n    return mod->mod_ops.mod_get_version();\n}\n\n/**\n * Load a given symbol from dlopened library.\n *\n * \\param[in]  mod         Module descriptor.\n * \\param[in]  sym_name    Name of symbol to be loaded.\n * \\param[out] sym_addr    Points to an address where the symbol is to be\n *                         stored.\n * \\param[in]  required    Indicate if the symbol definition is required.\n *\n * \\return 0 on success, negative error code on failure\n */\nstatic int 
module_sym_load(rbh_module_t *mod, const char *sym_name,\n                           void **sym_addr, bool required)\n{\n    char *errstr;\n\n    *sym_addr = dlsym(mod->sym_hdl, sym_name);\n\n    errstr = dlerror();\n    if (errstr != NULL) {\n        if (required) {\n            DisplayLog(LVL_CRIT, MODULE_TAG,\n                       \"Cannot load %s from module %s: %s\",\n                       sym_name, mod->name, errstr);\n            return -EINVAL;\n        }\n        /* not mandatory, only display in DEBUG level */\n        DisplayLog(LVL_DEBUG, MODULE_TAG,\n                   \"Module '%s': optional symbol '%s' not found: %s\",\n                   mod->name, sym_name, errstr);\n    }\n\n    return 0;\n}\n\n/**\n * dlopen() library and initialize module descriptor accordingly\n *\n * \\param[in]   libfile Path to dlopen\n * \\param[out]  mod     Robinhood module descriptor to initialize\n *\n * \\return 0 on success, negative error code on failure\n */\nstatic int module_load_from_file(const char *libfile, rbh_module_t *mod)\n{\n    int rc;\n    int i;\n    struct symbol_descr {\n      const char  *sym_name; /**< Exposed symbol name */\n      void        *sym_addr; /**< Destination address */\n      bool         sym_reqd; /**< Whether the symbol is required */\n    } mod_symbols[] = {\n      {\"mod_get_name\",           &mod->mod_ops.mod_get_name,           true},\n      {\"mod_get_version\",        &mod->mod_ops.mod_get_version,        true},\n      {\"mod_get_status_manager\", &mod->mod_ops.mod_get_status_manager, false},\n      {\"mod_get_action\",         &mod->mod_ops.mod_get_action,         false},\n      {\"mod_get_scheduler\",      &mod->mod_ops.mod_get_scheduler,      false},\n    };\n\n    if (libfile == NULL)\n        return -EINVAL;\n\n    memset(mod, 0, sizeof(*mod));\n\n    mod->sym_hdl = dlopen(libfile, RTLD_NOW | RTLD_LOCAL | RTLD_DEEPBIND);\n    if (mod->sym_hdl == NULL) {\n        DisplayLog(LVL_CRIT, MODULE_TAG, \"Cannot dlopen() '%s': 
%s\",\n                   libfile, dlerror());\n        return -EINVAL;\n    }\n\n    /* Use the filename as module name until loading is done and successful */\n    mod->name = libfile;\n\n    for (i = 0; i < ARRAY_SIZE(mod_symbols); i++) {\n        struct symbol_descr *sym = &mod_symbols[i];\n\n        rc = module_sym_load(mod, sym->sym_name, sym->sym_addr, sym->sym_reqd);\n        if (rc)\n            goto err_out;\n\n    }\n\n    /* Get direct reference to the module name for faster accesses */\n    mod->name = module_get_name(mod);\n    mod->version = module_get_version(mod);\n    if (mod->version != RBH_MODULE_VERSION) {\n        DisplayLog(LVL_CRIT, MODULE_TAG, \"Module '%s': incompatible version. \"\n                   \"version %d != expected version %d\", mod->name, mod->version,\n                   RBH_MODULE_VERSION);\n        rc = -EPROTO;\n        goto err_out;\n    }\n\n    DisplayLog(LVL_DEBUG, MODULE_TAG, \"Successfully loaded module '%s'\",\n               mod->name);\n\n    return 0;\n\nerr_out:\n    dlclose(mod->sym_hdl);\n    mod->sym_hdl = NULL;\n    return rc;\n}\n\n/**\n * Build module library name from its reduced form.\n * \"lhsm\" -> \"librbh_mod_lhsm.so\".\n * Directory handling is to be defined by the LD_LIBRARY_PATH if needed.\n *\n * \\param[out]  dst     Destination buffer, of at least MAX_MOD_NAMELEN bytes\n * \\param[in]   name    Module short name\n *\n * \\return 0 on success, negative error code on failure\n */\nstatic int module_fullname_build(char *dst, const char *name)\n{\n    int rc;\n    int i;\n\n    if (name == NULL)\n        return -EINVAL;\n\n    rc = snprintf(dst, MAX_MOD_NAMELEN - 1, \"librbh_mod_%s.so\", name);\n    if (rc >= MAX_MOD_NAMELEN)\n        return -ENAMETOOLONG;\n\n    for (i = 0; i < rc; i++)\n        dst[i] = tolower(dst[i]);\n\n    return 0;\n}\n\n/**\n * Resize module list and invoke module initialization code\n *\n * \\param[in]   name    Module short name\n *\n * \\return 0 on success, negative 
error code on failure\n */\nstatic int module_load(const char *name)\n{\n    char            mod_name[MAX_MOD_NAMELEN];\n    rbh_module_t   *new_objects;\n    int             rc;\n\n    rc = module_fullname_build(mod_name, name);\n    if (rc < 0)\n        return rc;\n\n    assert(mod_count >= 0);\n\n    new_objects = (rbh_module_t *) calloc(mod_count + 1, sizeof(rbh_module_t));\n    if (new_objects == NULL)\n        return -ENOMEM;\n\n    rc = module_load_from_file(mod_name, &new_objects[0]);\n    if (rc < 0)\n        goto err_out;\n\n    memcpy(&new_objects[1], mod_list, mod_count * sizeof(rbh_module_t));\n    mod_count++;\n\n    free(mod_list);\n    mod_list = new_objects;\n\n    return 0;\n\n err_out:\n    free(new_objects);\n    return rc;\n}\n\n/**\n * Release resources associated to a single module. Note that the mod_list\n * is not resized.\n *\n * \\param[in, out]  mod Module descriptor to release\n *\n * \\return 0 on success, negative error code on failure\n */\nstatic int module_unload(rbh_module_t *mod)\n{\n    if (mod->name != NULL)\n        DisplayLog(LVL_DEBUG, MODULE_TAG, \"Unloading module %s\", mod->name);\n\n    if (mod->sym_hdl == NULL) {\n        DisplayLog(LVL_VERB, MODULE_TAG, \"Module already unloaded, ignoring\");\n        return 0;   /* -EALREADY ? 
*/\n    }\n\n    dlclose(mod->sym_hdl);\n    mod->sym_hdl = NULL;\n    return 0;\n}\n\n/**\n * Release all resources associated to dynamic modules\n *\n * \\return 0 on success, negative error code on failure\n */\nint module_unload_all(void)\n{\n    int i;\n    int rc;\n    int rc_save = 0;\n\n    assert(mod_count >= 0);\n\n    for (i = 0; i < mod_count; i++) {\n        rc = module_unload(&mod_list[i]);\n        if (rc != 0 && rc_save == 0)\n            rc_save = rc;\n    }\n\n    free(mod_list);\n    mod_count = 0;\n\n    return rc_save;\n}\n\n/**\n * Get a module descriptor, load the corresponding module if needed.\n *\n * \\param[in]   mod_name    Module short name\n *\n * \\return NULL on error, pointer to an allocated/initialized module\n *         descriptor on success.\n */\nstatic rbh_module_t *module_get(const char *mod_name)\n{\n    int i;\n    int rc;\n\n    assert(mod_count >= 0);\n\n again:\n    for (i = 0; i < mod_count; i++) {\n        if (strcasecmp(mod_list[i].name, mod_name) == 0)\n            return &mod_list[i];\n    }\n\n    rc = module_load(mod_name);\n    if (rc == 0)\n        goto again;\n\n    return NULL;\n}\n\naction_func_t module_get_action(const char *name)\n{\n    char             mod_name[MAX_MOD_NAMELEN];\n    char            *prefix;\n    rbh_module_t    *mod;\n\n    prefix = strchr(name, '.');\n    if (prefix == NULL)\n        return NULL;\n\n    memcpy(mod_name, name, prefix - name);\n    mod_name[prefix - name] = '\\0';\n\n    mod = module_get(mod_name);\n    if (mod == NULL || mod->mod_ops.mod_get_action == NULL)\n        return NULL;\n\n    return mod->mod_ops.mod_get_action(name);\n}\n\nstatus_manager_t *module_get_status_manager(const char *name)\n{\n    rbh_module_t    *mod;\n\n    mod = module_get(name);\n    if (mod == NULL || mod->mod_ops.mod_get_status_manager == NULL)\n        return NULL;\n\n    return mod->mod_ops.mod_get_status_manager();\n}\n\naction_scheduler_t *module_get_scheduler(const char *name)\n{\n    char  
           mod_name[MAX_MOD_NAMELEN];\n    char            *prefix;\n    rbh_module_t    *mod;\n\n    prefix = strchr(name, '.');\n    if (prefix == NULL)\n        return NULL;\n\n    memcpy(mod_name, name, prefix - name);\n    mod_name[prefix - name] = '\\0';\n\n    mod = module_get(mod_name);\n    if (mod == NULL || mod->mod_ops.mod_get_scheduler == NULL)\n        return NULL;\n\n    return mod->mod_ops.mod_get_scheduler(name);\n}\n"
  },
  {
    "path": "src/common/rbh_params.c",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2015 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n/**\n * @file   rbh_params.c\n * @author Thomas Leibovici\n * @author Henri Doreau\n * @brief  Handling a generic list of key/values.\n */\n#ifdef HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n\n#include \"rbh_params.h\"\n#include \"rbh_misc.h\"\n#include \"rbh_logs.h\"\n\n#include <malloc.h>\n#include <string.h>\n#include <errno.h>\n\n#define PARAMS_TAG \"rbh_params\"\n\nvoid rbh_params_free(struct rbh_params *params)\n{\n    if (params->param_set == NULL)\n        return;\n\n    g_hash_table_destroy(params->param_set);\n    params->param_set = NULL;\n}\n\nconst char *rbh_param_get(const struct rbh_params *params, const char *key)\n{\n    if (params == NULL || params->param_set == NULL)\n        return NULL;\n\n    return g_hash_table_lookup(params->param_set, key);\n}\n\nint rbh_param_set(struct rbh_params *params, const char *key,\n                  const char *value, bool override)\n{\n    if (params->param_set == NULL) {\n        params->param_set = g_hash_table_new_full(g_str_hash, g_str_equal, free,\n                                                  free);\n        if (params->param_set == NULL)\n            return -ENOMEM;\n    }\n\n    if (!override && (g_hash_table_lookup(params->param_set, key) != NULL))\n        return -EEXIST;\n\n    /* use ght_replace, so that previous key and values are freed */\n    g_hash_table_replace(params->param_set, strdup(key), strdup(value));\n\n    return 0;\n}\n\nint rbh_list2params(struct rbh_params *params, const char **list,\n                    bool key_values)\n{\n 
   const char **c;\n\n    if (unlikely(params == NULL || list == NULL))\n        return -EINVAL;\n\n    /* allowate the hash table if necessary */\n    if (params->param_set == NULL) {\n        /* don't provide a free function for value as we only push\n         * static const strings to it */\n        params->param_set = g_hash_table_new_full(g_str_hash, g_str_equal, free,\n                                                  key_values ? free : NULL);\n        if (params->param_set == NULL)\n            return -ENOMEM;\n    }\n\n    if (key_values)\n        for (c = list; *c != NULL; c += 2)\n            g_hash_table_replace(params->param_set, strdup(c[0]), strdup(c[1]));\n    else\n        for (c = list; *c != NULL; c++)\n            /* Value must not be NULL */\n            g_hash_table_replace(params->param_set, strdup(*c), \"\");\n\n    return 0;\n}\n\n/** argument type for serialization functions */\nstruct serialize_args {\n    GString                 *out_str;\n    const struct rbh_params *exclude_set;\n    rbh_param_flags_e        flags;\n};\n\n/** escape the given delimiter in source string */\nstatic char *escape_delim(char *str, char delim, bool *free_it)\n{\n    char *c;\n\n    if (strchr(str, delim) == NULL) {\n        *free_it = false;\n        return str;\n    }\n\n    c = malloc(2 * strlen(str) + 1);\n    strcpy(c, str);\n    str = c;\n    *free_it = true;\n\n    while ((c = strchr(c, delim)) != NULL) {\n        /* shift the end of the string (including delimiter) */\n        memmove(c + 1, c, strlen(c) + 1);\n        /* escape it */\n        *c = '\\\\';\n        c += 2;\n    }\n\n    return str;\n}\n\n/** append a parameter to a CSV parameter serialization */\nstatic int param2csv(const char *key, const char *val, void *udata)\n{\n    struct serialize_args *args = (struct serialize_args *)udata;\n    bool free_key = false;\n    bool free_val = false;\n\n    /* skip ignored attrs */\n    if (rbh_param_get(args->exclude_set, key) != NULL)\n        
return 0;\n\n    /* if there is a comma in key or value, escape it */\n    key = escape_delim((char *)key, ',', &free_key);\n    val = escape_delim((char *)val, ',', &free_val);\n\n    /* add comma delimiter if needed */\n    if (!GSTRING_EMPTY(args->out_str))\n        g_string_append(args->out_str,\n                        (args->flags & RBH_PARAM_COMPACT) ? \",\" : \", \");\n\n    /* append key=value */\n    g_string_append_printf(args->out_str, \"%s=%s\", key, val);\n\n    if (free_key)\n        free((char *)key);\n    if (free_val)\n        free((char *)val);\n\n    return 0;\n}\n\nint rbh_params_serialize(const struct rbh_params *params,\n                         GString                 *str,\n                         const struct rbh_params *exclude_set,\n                         rbh_param_flags_e        flags)\n{\n    struct serialize_args args = {\n        .out_str        = str,\n        .exclude_set    = exclude_set,\n        .flags          = flags\n    };\n\n    if (!(flags & RBH_PARAM_CSV))\n        return -ENOTSUP;\n\n    return rbh_params_foreach(params, param2csv, (void *)&args);\n}\n\nint rbh_params_foreach(const struct rbh_params *params, rbh_params_iter_t cb,\n                       void *udata)\n{\n    GHashTableIter  iter;\n    gpointer        key;\n    gpointer        value;\n    int             rc = 0;\n\n    if (params == NULL)\n        return -EINVAL;\n\n    if (params->param_set == NULL)\n        return 0;\n\n    g_hash_table_iter_init(&iter, params->param_set);\n    while (g_hash_table_iter_next(&iter, &key, &value)) {\n        rc = cb((const char *)key, (const char *)value, udata);\n        if (rc != 0)\n            break;\n    }\n\n    return rc;\n}\n\n/** helper callback to duplicate a parameter set */\nstatic int add_cb(const char *key, const char *val, void *udata)\n{\n    return rbh_param_set((action_params_t *)udata, key, val, true);\n}\n\nint rbh_params_copy(struct rbh_params *tgt, const struct rbh_params *src)\n{\n    return 
rbh_params_foreach(src, add_cb, tgt);\n}\n"
  },
  {
    "path": "src/common/uidgidcache.c",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2007, 2008, 2009 CEA/DAM\n * Copyright 2016 Cray Inc. All rights reserved.\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n/**\n *\n * \\file    $RCSfile: uidgidcache.c,v $\n * \\author  $Author: leibovic $\n * \\date    $Date: 2008/02/15 10:37:38 $\n * \\brief   Cache user and groups relatives information.\n *\n * Cache user and groups relative information.\n *\n */\n\n#ifdef HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n\n#include \"uidgidcache.h\"\n#include \"RW_Lock.h\"\n#include \"rbh_logs.h\"\n#include \"Memory.h\"\n\n#if HAVE_STRING_H\n#   include <string.h>\n#endif\n\n#include <stdio.h>\n#include <unistd.h>\n\n#include <glib.h>\n\n/* -------------- parameters ------------ */\n\n/* init buffer size for storing alt groups for a user */\nsize_t alt_groups_sz;\n\n/* init buffer size for group members for a group */\nsize_t group_memb_sz;\n\n#define LOGTAG  \"UidGidCache\"\n\n/* -------------- cache and hashtables management ------------ */\n\ntypedef struct pw_cacheent__ {\n    struct passwd pw;\n} pw_cacheent_t;\n\ntypedef struct gr_cacheent__ {\n    struct group gr;\n} gr_cacheent_t;\n\n/* cache of PW entries */\nstatic struct {\n    rw_lock_t lock;\n    GHashTable *cache;\n} pw_hash;\n\n/* cache of group entries */\nstatic struct {\n    rw_lock_t lock;\n    GHashTable *cache;\n} gr_hash;\n\n/* stats about the cache */\nunsigned int pw_nb_set = 0;\nunsigned int pw_nb_get = 0;\nunsigned int gr_nb_set = 0;\nunsigned int gr_nb_get = 0;\n\n/* ------------ exported functions ------------ */\n\n/* Initialization of pwent and grent caches */\nint 
InitUidGid_Cache(void)\n{\n    long res;\n\n    /* initialize locks on hash table slots */\n    rw_lock_init(&pw_hash.lock);\n    pw_hash.cache = g_hash_table_new(NULL, NULL);\n\n    rw_lock_init(&gr_hash.lock);\n    gr_hash.cache = g_hash_table_new(NULL, NULL);\n\n    /* Try to size the memory needed to get the strings for getpwuid_r\n     * and getgrgid_r. */\n    res = sysconf(_SC_GETPW_R_SIZE_MAX);\n    if (res == -1 || res > 4096)\n        alt_groups_sz = 4096;\n    else\n        alt_groups_sz = res;\n\n    res = sysconf(_SC_GETGR_R_SIZE_MAX);\n    if (res == -1 || res > 4096)\n        group_memb_sz = 4096;\n    else\n        group_memb_sz = res;\n\n    return 0;\n}\n\n/* get user name for the given uid */\nconst struct passwd *GetPwUid(uid_t owner)\n{\n    struct passwd *result;\n    pw_cacheent_t *p_pwcacheent;\n    pw_cacheent_t *entry2;\n    int            rc;\n    char          *buffer;\n    int            buf_size;\n\n    /* is the entry in the cache? */\n    P_r(&pw_hash.lock);\n    p_pwcacheent =\n        g_hash_table_lookup(pw_hash.cache, (void *)(uintptr_t) owner);\n    V_r(&pw_hash.lock);\n\n    if (p_pwcacheent)\n        pw_nb_get++;\n\n    /* if no, allocate a pw cache entry\n     * and ask the system to fill it */\n    if (p_pwcacheent == NULL) {\n        /* entry allocation */\n        p_pwcacheent = calloc(1, sizeof(pw_cacheent_t));\n        if (p_pwcacheent == NULL)\n            return NULL;\n\n        buf_size = alt_groups_sz;\n        buffer = malloc(buf_size);\n        if (buffer == NULL)\n            goto out_free;\n\n retry:\n        rc = getpwuid_r(owner, &p_pwcacheent->pw, buffer, buf_size, &result);\n        if (rc != 0 || result == NULL) {\n            /* try with larger buff */\n            if (rc == ERANGE) {\n                buf_size *= 2;\n                DisplayLog(LVL_FULL, LOGTAG,\n                           \"got ERANGE error from getpwuid_r: trying with buf_size=%u\",\n                           buf_size);\n                
buffer = realloc(buffer, buf_size);\n                if (buffer == NULL)\n                    goto out_free;\n                else\n                    goto retry;\n            }\n            if (rc != 0 && rc != ENOENT && rc != ESRCH &&\n                rc != EBADF && rc != EPERM)\n                DisplayLog(LVL_CRIT, LOGTAG, \"ERROR %d in getpwuid_r: %s\",\n                           rc, strerror(rc));\n            goto out_free;\n        }\n\n        /* We only care about the name */\n        p_pwcacheent->pw.pw_name = strdup(p_pwcacheent->pw.pw_name);\n        if (p_pwcacheent->pw.pw_name == NULL)\n            goto out_free;\n\n        p_pwcacheent->pw.pw_passwd = NULL;\n        p_pwcacheent->pw.pw_gecos = NULL;\n        p_pwcacheent->pw.pw_dir = NULL;\n        p_pwcacheent->pw.pw_shell = NULL;\n\n        free(buffer);\n\n        /* insert it to hash table */\n        P_w(&pw_hash.lock);\n\n        /* Another thread may have inserted it in the meantime. Check\n         * again. */\n        entry2 = g_hash_table_lookup(pw_hash.cache, (void *)(uintptr_t) owner);\n        if (entry2) {\n            free(p_pwcacheent->pw.pw_name);\n            free(p_pwcacheent);\n            p_pwcacheent = entry2;\n            pw_nb_get++;\n        } else {\n            g_hash_table_insert(pw_hash.cache,\n                                (void *)(uintptr_t) p_pwcacheent->pw.pw_uid,\n                                p_pwcacheent);\n            pw_nb_set++;\n        }\n        V_w(&pw_hash.lock);\n    }\n\n    return &p_pwcacheent->pw;\n\n out_free:\n    if (p_pwcacheent != NULL) {\n        free(buffer);\n        free(p_pwcacheent);\n    }\n    return NULL;\n}\n\nconst struct group *GetGrGid(gid_t grid)\n{\n    struct group  *result;\n    gr_cacheent_t *p_grcacheent;\n    gr_cacheent_t *entry2;\n    int            rc;\n    char          *buffer;\n    int            buf_size;\n\n    /* is the entry in the cache? 
*/\n    P_r(&gr_hash.lock);\n    p_grcacheent = g_hash_table_lookup(gr_hash.cache, (void *)(uintptr_t) grid);\n    V_r(&gr_hash.lock);\n\n    if (p_grcacheent)\n        gr_nb_get++;\n\n    /* if no, allocate a gr cache entry\n     * and ask the system to fill it */\n    if (p_grcacheent == NULL) {\n        /* entry allocation */\n        p_grcacheent = calloc(1, sizeof(gr_cacheent_t));\n        if (p_grcacheent == NULL)\n            return NULL;\n\n        buf_size = group_memb_sz;\n        buffer = malloc(buf_size);\n        if (buffer == NULL)\n            goto out_free;\n\n retry:\n        rc = getgrgid_r(grid, &p_grcacheent->gr, buffer, buf_size, &result);\n        if (rc != 0 || result == NULL) {\n            /* try with larger buff */\n            if (rc == ERANGE) {\n                buf_size *= 2;\n                DisplayLog(LVL_FULL, LOGTAG,\n                           \"got ERANGE error from getgrgid_r: trying with buf_size=%u\",\n                           buf_size);\n                buffer = realloc(buffer, buf_size);\n                if (buffer == NULL)\n                    goto out_free;\n                else\n                    goto retry;\n            }\n\n            if (rc != 0 && rc != ENOENT && rc != ESRCH &&\n                rc != EBADF && rc != EPERM)\n                DisplayLog(LVL_CRIT, LOGTAG, \"ERROR %d in getgrgid_r : %s\",\n                           rc, strerror(rc));\n\n            /* gid not found */\n            goto out_free;\n        }\n\n        /* We only care about the name */\n        p_grcacheent->gr.gr_name = strdup(p_grcacheent->gr.gr_name);\n        if (p_grcacheent->gr.gr_name == NULL)\n            goto out_free;\n\n        p_grcacheent->gr.gr_passwd = NULL;\n        p_grcacheent->gr.gr_mem = NULL;\n\n        free(buffer);\n\n        /* insert it to hash table */\n        P_w(&gr_hash.lock);\n\n        /* Another thread may have inserted it in the meantime. Check\n         * again. 
*/\n        entry2 = g_hash_table_lookup(gr_hash.cache, (void *)(uintptr_t) grid);\n        if (entry2) {\n            free(p_grcacheent->gr.gr_name);\n            free(p_grcacheent);\n            p_grcacheent = entry2;\n            gr_nb_get++;\n        } else {\n            g_hash_table_insert(gr_hash.cache,\n                                (void *)(uintptr_t) p_grcacheent->gr.gr_gid,\n                                p_grcacheent);\n            gr_nb_set++;\n        }\n        V_w(&gr_hash.lock);\n    }\n\n    return &p_grcacheent->gr;\n\n out_free:\n    if (p_grcacheent != NULL) {\n        free(buffer);\n        free(p_grcacheent);\n    }\n    return NULL;\n}\n"
  },
  {
    "path": "src/common/update_params.c",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2008, 2009 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n#ifdef HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n\n#include \"update_params.h\"\n#include \"rbh_cfg_helpers.h\"\n#include \"rbh_misc.h\"\n#include \"rbh_logs.h\"\n#include <time.h>\n#include <errno.h>\n\n#define TAG \"UpdtParams\"\n\n#define OLD_UPDT_PARAMS_BLOCK      \"db_update_policy\"\n#define UPDT_PARAMS_BLOCK          \"db_update_params\"\n\n/* exported variable available to all modules */\nupdt_params_t updt_params;\n\nstatic void set_default_update_params(void *module_config)\n{\n    updt_params_t *params = (updt_params_t *)module_config;\n\n    params->md.when = UPDT_ALWAYS;\n#ifdef _HAVE_FID\n    params->path.when = UPDT_ON_EVENT_PERIODIC;\n    params->path.period_min = 0;\n    params->path.period_max = 3600;\n#endif\n    params->fileclass.when = UPDT_ALWAYS;\n}\n\nstatic void write_default_update_params(FILE *output)\n{\n    print_begin_block(output, 0, UPDT_PARAMS_BLOCK, NULL);\n    print_line(output, 1, \"md_update        : always;\");\n#ifdef _HAVE_FID\n    print_line(output, 1, \"path_update      : on_event_periodic(0,1h);\");\n#endif\n    print_line(output, 1, \"fileclass_update : always;\");\n    print_end_block(output, 0);\n}\n\nstatic void write_update_params_template(FILE *output)\n{\n    print_begin_block(output, 0, UPDT_PARAMS_BLOCK, NULL);\n    print_line(output, 1,\n               \"# possible policies for refreshing metadata and path in database:\");\n    print_line(output, 1,\n               \"#   never: get the information once, then never refresh it\");\n    
print_line(output, 1,\n               \"#   always: always update entry info when processing it\");\n    print_line(output, 1, \"#   on_event: only update on related event\");\n    print_line(output, 1, \"#   periodic(interval): only update periodically\");\n    print_line(output, 1,\n               \"#   on_event_periodic(min_interval,max_interval)= on_event + periodic\");\n    fprintf(output, \"\\n\");\n    print_line(output, 1, \"# Updating of file metadata\");\n    print_line(output, 1, \"md_update = always ;\");\n#ifdef _HAVE_FID\n    print_line(output, 1, \"# Updating file path in database\");\n    print_line(output, 1, \"path_update = on_event_periodic(0,1h) ;\");\n#endif\n    print_line(output, 1, \"# File classes matching\");\n    print_line(output, 1, \"fileclass_update = always ;\");\n\n    print_end_block(output, 0);\n}\n\nstatic inline const char *update_param2str(updt_param_item_t *pol,\n                                           char *buffer)\n{\n    char tmpbuf1[100];\n    char tmpbuf2[100];\n\n    switch (pol->when) {\n    case UPDT_NEVER:\n        return \"never\";\n    case UPDT_ALWAYS:\n        return \"always\";\n    case UPDT_ON_EVENT:\n        return \"on_event\";\n    case UPDT_ON_EVENT_PERIODIC:\n        FormatDurationFloat(tmpbuf1, sizeof(tmpbuf1), pol->period_min);\n        FormatDurationFloat(tmpbuf2, sizeof(tmpbuf2), pol->period_max);\n        sprintf(buffer, \"on_event_periodic(%s,%s)\", tmpbuf1, tmpbuf2);\n        return buffer;\n    case UPDT_PERIODIC:\n        FormatDurationFloat(tmpbuf1, sizeof(tmpbuf1), pol->period_max);\n        sprintf(buffer, \"periodic(%s)\", tmpbuf1);\n        return buffer;\n    default:\n        return \"???\";\n    }\n}\n\nstatic int read_update_item(updt_param_item_t *item, const char *str,\n                            char **options, unsigned int nb_options,\n                            char *msg_out)\n{\n    memset(item, 0, sizeof(updt_param_item_t));\n\n    if (!strcasecmp(str, \"never\"))\n        
item->when = UPDT_NEVER;\n    else if (!strcasecmp(str, \"always\"))\n        item->when = UPDT_ALWAYS;\n    else if (!strcasecmp(str, \"on_event\"))\n        item->when = UPDT_ON_EVENT;\n    else if (!strcasecmp(str, \"periodic\")) {\n        time_t timeval;\n\n        item->when = UPDT_PERIODIC;\n        if ((nb_options != 1) || !options || !options[0]) {\n            strcpy(msg_out,\n                   \"1 argument is expected for periodic update parameter. \"\n                   \"E.g. periodic(30s);\");\n            return EINVAL;\n        }\n        /* read argument as a duration */\n        timeval = str2duration(options[0]);\n        if (timeval == -1) {\n            sprintf(msg_out, \"Invalid value for periodic update parameter: \"\n                    \"duration expected. E.g. periodic(5min);\");\n            return EINVAL;\n        }\n        item->period_min = item->period_max = timeval;\n    } else if (!strcasecmp(str, \"on_event_periodic\")) {\n        time_t timeval1, timeval2;\n\n        item->when = UPDT_ON_EVENT_PERIODIC;\n        if ((nb_options != 2) || !options || !options[0] || !options[1]) {\n            strcpy(msg_out,\n                   \"2 arguments are expected for on_event_periodic update parameter. \"\n                   \"E.g. on_event_periodic(1s,30s);\");\n            return EINVAL;\n        }\n        /* read argument as a duration */\n        timeval1 = str2duration(options[0]);\n        timeval2 = str2duration(options[1]);\n        if ((timeval1 == -1) || (timeval2 == -1)) {\n            sprintf(msg_out,\n                    \"Invalid value for on_event_periodic update parameter: \"\n                    \"durations expected. E.g. 
on_event_periodic(1s,5min);\");\n            return EINVAL;\n        }\n        item->period_min = timeval1;\n        item->period_max = timeval2;\n    } else {\n        sprintf(msg_out,\n                \"Invalid update parameter '%s' (expected: never, always, \"\n                \"on_event, periodic(<interval>), on_event_periodic(<intvl1>,<intvl2>)\",\n                str);\n        return EINVAL;\n    }\n\n    return 0;\n}\n\nstatic int read_update_params(config_file_t config, void *module_config,\n                              char *msg_out)\n{\n    updt_params_t   *params = (updt_params_t *) module_config;\n    int              rc;\n    char             tmpstr[1024];\n    char           **options = NULL;\n    unsigned int     nb_options = 0;\n    config_item_t    updt_block;\n\n    static const char *update_allow[] = {\n        \"md_update\",\n#ifdef _HAVE_FID\n        \"path_update\",\n#endif\n        \"fileclass_update\",\n        NULL\n    };\n\n    /* get db_update_params block */\n\n    /* check the new name first */\n    rc = get_cfg_block(config, UPDT_PARAMS_BLOCK, &updt_block, msg_out);\n    if (rc == ENOENT) {\n        /* try with the deprecated name */\n        rc = get_cfg_block(config, OLD_UPDT_PARAMS_BLOCK, &updt_block, msg_out);\n        if (rc == ENOENT)\n            /* not mandatory: no error */\n            return 0;\n        else if (rc != 0)\n            return rc;\n        /* found the old name */\n        DisplayLog(LVL_CRIT, TAG, \"WARNING: block name '\"\n                   OLD_UPDT_PARAMS_BLOCK \"' is deprecated. 
Use '\"\n                   UPDT_PARAMS_BLOCK \"' instead\");\n    }\n\n    /* get parameters from this block */\n    rc = GetStringParam(updt_block, UPDT_PARAMS_BLOCK, \"md_update\",\n                        PFLG_NO_WILDCARDS, tmpstr, sizeof(tmpstr),\n                        &options, &nb_options, msg_out);\n    if ((rc != 0) && (rc != ENOENT))\n        return rc;\n    if (rc != ENOENT) {\n        /* parse the parameter */\n        rc = read_update_item(&params->md, tmpstr, options, nb_options,\n                              msg_out);\n        if (rc)\n            return rc;\n    }\n#ifdef _HAVE_FID\n    rc = GetStringParam(updt_block, UPDT_PARAMS_BLOCK, \"path_update\",\n                        PFLG_NO_WILDCARDS, tmpstr, sizeof(tmpstr),\n                        &options, &nb_options, msg_out);\n    if ((rc != 0) && (rc != ENOENT))\n        return rc;\n    if (rc != ENOENT) {\n        /* parse the parameter */\n        rc = read_update_item(&params->path, tmpstr, options, nb_options,\n                              msg_out);\n        if (rc)\n            return rc;\n    }\n#endif\n\n    /* get parameters from this block */\n    rc = GetStringParam(updt_block, UPDT_PARAMS_BLOCK, \"fileclass_update\",\n                        PFLG_NO_WILDCARDS, tmpstr, sizeof(tmpstr),\n                        &options, &nb_options, msg_out);\n    if ((rc != 0) && (rc != ENOENT))\n        return rc;\n    if (rc != ENOENT) {\n        /* parse the parameter */\n        rc = read_update_item(&params->fileclass, tmpstr, options, nb_options,\n                              msg_out);\n        if (rc)\n            return rc;\n\n        if ((params->fileclass.when == UPDT_ON_EVENT) ||\n            (params->fileclass.when == UPDT_ON_EVENT_PERIODIC)) {\n            sprintf(msg_out, \"Parameter not supported for fileclass update: \"\n                    \"'never', 'always' or 'periodic' expected\");\n            return EINVAL;\n        }\n    }\n\n    CheckUnknownParameters(updt_block, 
UPDT_PARAMS_BLOCK, update_allow);\n    return 0;\n}\n\nstatic int reload_update_params(void *module_config)\n{\n    char buff1[256];\n    char buff2[256];\n    updt_params_t *params = (updt_params_t *) module_config;\n\n    if ((updt_params.md.when != params->md.when)\n        || (updt_params.md.period_min != params->md.period_min)\n        || (updt_params.md.period_max != params->md.period_max)) {\n        DisplayLog(LVL_EVENT, TAG,\n                   UPDT_PARAMS_BLOCK \"::md_update updated: %s->%s\",\n                   update_param2str(&updt_params.md, buff1),\n                   update_param2str(&params->md, buff2));\n        updt_params.md = params->md;\n    }\n#ifdef _HAVE_FID\n    if ((updt_params.path.when != params->path.when)\n        || (updt_params.path.period_min != params->path.period_min)\n        || (updt_params.path.period_max != params->path.period_max)) {\n        DisplayLog(LVL_EVENT, TAG,\n                   UPDT_PARAMS_BLOCK \"::path_update updated: %s->%s\",\n                   update_param2str(&updt_params.path, buff1),\n                   update_param2str(&params->path, buff2));\n        updt_params.path = params->path;\n    }\n#endif\n\n    if ((updt_params.fileclass.when != params->fileclass.when)\n        || (updt_params.fileclass.period_min != params->fileclass.period_min)\n        || (updt_params.fileclass.period_max != params->fileclass.period_max)) {\n        DisplayLog(LVL_EVENT, TAG,\n                   UPDT_PARAMS_BLOCK \"::fileclass_update updated: %s->%s\",\n                   update_param2str(&updt_params.fileclass, buff1),\n                   update_param2str(&params->fileclass, buff2));\n        updt_params.fileclass = params->fileclass;\n    }\n\n    return 0;\n}\n\nstatic int update_params_set(void *module_config, bool reload)\n{\n    updt_params_t *conf = (updt_params_t *) module_config;\n\n    if (reload)\n        return reload_update_params(module_config);\n\n    updt_params = *conf;\n    return 0;\n}\n\nstatic void 
*updt_param_new(void)\n{\n    return calloc(1, sizeof(updt_params_t));\n}\n\nstatic void updt_param_free(void *cfg)\n{\n    if (cfg != NULL)\n        free(cfg);\n}\n\n/* export config functions */\nmod_cfg_funcs_t updt_params_hdlr = {\n    .module_name = \"updt params\",\n    .new = updt_param_new,\n    .free = updt_param_free,\n    .set_default = set_default_update_params,\n    .read = read_update_params,\n    .set_config = update_params_set,\n    .write_default = write_default_update_params,\n    .write_template = write_update_params_template,\n};\n\n/**\n *  Check if the fileclass needs to be updated\n */\nbool need_fileclass_update(const attr_set_t *p_attrs)\n{\n    bool        is_set = false;\n    time_t      last = 0;\n    const char *match = \"\";\n\n    is_set = ATTR_MASK_TEST(p_attrs, class_update)\n        && ATTR_MASK_TEST(p_attrs, fileclass);\n    if (is_set) {\n        last = ATTR(p_attrs, class_update);\n        match = ATTR(p_attrs, fileclass);\n    }\n\n    /* check for periodic fileclass matching */\n    if (!is_set) {\n        DisplayLog(LVL_FULL, TAG, \"Need to update fileclass (not set)\");\n        return true;\n    } else if (updt_params.fileclass.when == UPDT_ALWAYS) {\n        DisplayLog(LVL_FULL, TAG, \"Need to update fileclass \"\n                   \"(policy is 'always update')\");\n        return true;\n    } else if (updt_params.fileclass.when == UPDT_NEVER) {\n        DisplayLog(LVL_FULL, TAG, \"No fileclass update \"\n                   \"(policy is 'never update')\");\n        return false;\n    } else if (updt_params.fileclass.when == UPDT_PERIODIC) {\n        if (time(NULL) - last >= updt_params.fileclass.period_max) {\n            DisplayLog(LVL_FULL, TAG, \"Need to update fileclass \"\n                       \"(out-of-date) (last match=%\" PRI_TT \")\", last);\n            return true;\n        } else {\n            /* retrieve previous fileclass */\n            DisplayLog(LVL_FULL, TAG, \"Previously matched fileclass '%s'\"\n    
                   \" is still valid (last match=%\" PRI_TT \")\", match, last);\n            return false;\n        }\n    }\n    RBH_BUG(\"Unexpected case: 'update_fileclass' cannot be determined\");\n    return -1;\n}\n\n/**\n *  Check if path or md needs to be updated\n *  \\param update_if_event [out] if set to TRUE, the path\n *         must be updated on related event.\n */\nbool need_info_update(const attr_set_t *p_attrs, bool *update_if_event,\n                      type_info_t type_info)\n{\n    bool        do_update = false;\n    bool        is_set = false;\n    time_t      last = 0;\n    const char *why = \"<unexpected>\";\n    const char *what = \"\";\n    updt_param_item_t pol;\n\n    if (update_if_event != NULL)\n        *update_if_event = false;\n\n    if (type_info == UPDT_MD) {\n        pol = updt_params.md;\n        what = \"metadata\";\n        is_set = ATTR_MASK_TEST(p_attrs, md_update);\n        if (is_set)\n            last = ATTR(p_attrs, md_update);\n    }\n#ifdef _HAVE_FID\n    else if (type_info == UPDT_PATH) {\n        what = \"POSIX path\";\n        pol = updt_params.path;\n        is_set = ATTR_MASK_TEST(p_attrs, path_update);\n        if (is_set)\n            last = ATTR(p_attrs, path_update);\n    }\n#endif\n    else {\n        RBH_BUG(\"Unsupported info type\");\n        return -1;\n    }\n\n    if (!is_set) {\n        do_update = true;\n        why = \"not in DB/never updated\";\n    }\n    /* Need to update the path if it is partial */\n    else if (ATTR_MASK_TEST(p_attrs, fullpath) &&\n             ATTR(p_attrs, fullpath)[0] != '/') {\n        do_update = true;\n        why = \"partial path in DB\";\n    } else if (pol.when == UPDT_ALWAYS) {\n        do_update = true;\n        why = \"policy is 'always update'\";\n    } else if (pol.when == UPDT_NEVER) {\n        do_update = false;\n    } else if (pol.when == UPDT_ON_EVENT) {\n        do_update = false;\n        if (update_if_event != NULL)\n            *update_if_event = true;\n   
 } else if (pol.when == UPDT_PERIODIC) {\n        if (time(NULL) - last >= pol.period_max) {\n            do_update = true;\n            why = \"expired\";\n        } else {\n            do_update = false;\n        }\n    } else if (pol.when == UPDT_ON_EVENT_PERIODIC) {\n        /* if the update is too recent, do not update.\n         * if the update is too old, force update.\n         * else, update on path-related event. */\n        if (time(NULL) - last < pol.period_min) {\n            do_update = false;\n        } else if (time(NULL) - last >= pol.period_max) {\n            do_update = true;\n            why = \"expired\";\n        } else {    /* allow update on event */\n\n            do_update = false;\n            if (update_if_event != NULL)\n                *update_if_event = true;\n        }\n    } else {\n        DisplayLog(LVL_CRIT, TAG, \"Unknown update policy %#x\", pol.when);\n        return -1;\n    }\n\n    if (do_update)\n        DisplayLog(LVL_FULL, TAG, \"Update of %s: reason=%s, \"\n                   \"last_update=%\" PRI_TT, what, why, last);\n\n    return do_update;\n}\n"
  },
  {
    "path": "src/entry_processor/Makefile.am",
    "content": "AM_CFLAGS= $(CC_OPT) $(DB_CFLAGS) $(PURPOSE_CFLAGS)\n\nnoinst_LTLIBRARIES=libentryproc.la\n\nlibentryproc_la_SOURCES=entry_proc_impl.c entry_proc_tools.c entry_proc_tools.h \\\n\t\t\tstd_pipeline.c diff_pipeline.c entry_proc_hash.c\n\ncheck_PROGRAMS=test_hash\nTESTS=test_hash\n\n# automake issue workaround\n# see: https://www.gnu.org/software/automake/manual/html_node/Objects-created-both-with-libtool-and-without.html\nnoinst_LTLIBRARIES+=libtesthash.la\nlibtesthash_la_SOURCES=entry_proc_hash.c\n\ntest_hash_SOURCES=test_hash.c\ntest_hash_LDADD=libtesthash.la\ntest_hash_LDFLAGS=-Xlinker \"--allow-shlib-undefined\" -Xlinker \"--unresolved-symbols=ignore-all\"\n\nindent:\n\t$(top_srcdir)/scripts/indent.sh\n"
  },
  {
    "path": "src/entry_processor/diff_pipeline.c",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2009, 2010 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n/**\n * Common pipeline functions\n */\n#ifdef HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n\n#include \"rbh_cfg.h\"\n#include \"rbh_logs.h\"\n#include \"rbh_misc.h\"\n#include \"entry_processor.h\"\n#include \"entry_proc_tools.h\"\n#include \"Memory.h\"\n#include \"status_manager.h\"\n#include <errno.h>\n#include <time.h>\n#include <unistd.h>\n\n/** Indicate if the error code means that the entry is missing */\nstatic inline bool err_missing(int rc)\n{\n    int abs_rc = abs(rc);\n\n    return (abs_rc == ENOENT) || (abs_rc == ESTALE);\n}\n\n\n#define diff_arg ((diff_arg_t *)entry_proc_arg)\n#define diff_mask (diff_arg->diff_mask)\n\n/* forward declaration of EntryProc functions of pipeline */\nstatic int EntryProc_get_fid(struct entry_proc_op_t *, lmgr_t *);\nstatic int EntryProc_get_info_db(struct entry_proc_op_t *, lmgr_t *);\nstatic int EntryProc_get_info_fs(struct entry_proc_op_t *, lmgr_t *);\nstatic int EntryProc_report_diff(struct entry_proc_op_t *, lmgr_t *);\nstatic int EntryProc_apply(struct entry_proc_op_t *, lmgr_t *);\nstatic int EntryProc_batch_apply(struct entry_proc_op_t **, int, lmgr_t *);\nstatic int EntryProc_report_rm(struct entry_proc_op_t *, lmgr_t *);\n\n/* forward declaration to check batchable operations for db_apply stage */\nstatic bool dbop_is_batchable(struct entry_proc_op_t *,\n                              struct entry_proc_op_t *, attr_mask_t *);\n\n/* pipeline stages */\nenum {\n    STAGE_GET_FID = 0,\n    STAGE_GET_INFO_DB,\n    STAGE_GET_INFO_FS,\n    
STAGE_REPORT_DIFF,\n    STAGE_APPLY,\n    STAGE_REPORT_RM,\n\n    PIPELINE_STAGE_COUNT    /* keep it at last */\n};\n\nconst pipeline_descr_t diff_pipeline_descr = {\n    .stage_count = PIPELINE_STAGE_COUNT,\n    .GET_ID = STAGE_GET_FID,\n    .GET_INFO_DB = STAGE_GET_INFO_DB,\n    .GET_INFO_FS = STAGE_GET_INFO_FS,\n    .GC_OLDENT = STAGE_REPORT_RM,\n    .DB_APPLY = STAGE_APPLY\n};\n\n/** pipeline stages definition */\npipeline_stage_t diff_pipeline[] = {\n    {STAGE_GET_FID, \"STAGE_GET_FID\", EntryProc_get_fid, NULL, NULL,\n     STAGE_FLAG_PARALLEL | STAGE_FLAG_SYNC, 0},\n    {STAGE_GET_INFO_DB, \"STAGE_GET_INFO_DB\", EntryProc_get_info_db, NULL, NULL,\n     STAGE_FLAG_PARALLEL | STAGE_FLAG_SYNC | STAGE_FLAG_ID_CONSTRAINT, 0},\n    {STAGE_GET_INFO_FS, \"STAGE_GET_INFO_FS\", EntryProc_get_info_fs, NULL, NULL,\n     STAGE_FLAG_PARALLEL | STAGE_FLAG_SYNC, 0},\n    /* must be sequential to avoid line interlacing */\n    {STAGE_REPORT_DIFF, \"STAGE_REPORT_DIFF\", EntryProc_report_diff, NULL, NULL,\n     STAGE_FLAG_SEQUENTIAL | STAGE_FLAG_SYNC, 1},\n    {STAGE_APPLY, \"STAGE_APPLY\", EntryProc_apply,\n     EntryProc_batch_apply, dbop_is_batchable,  /* batched ops management */\n     STAGE_FLAG_PARALLEL | STAGE_FLAG_SYNC, 0},\n    /* this step is for displaying removed entries when\n     * starting/ending a FS scan. 
*/\n    {STAGE_REPORT_RM, \"STAGE_REPORT_RM\", EntryProc_report_rm, NULL, NULL,\n     STAGE_FLAG_SEQUENTIAL | STAGE_FLAG_SYNC, 1}\n};\n\n/**\n * For entries from FS scan, we must get the associated entry ID.\n */\nint EntryProc_get_fid(struct entry_proc_op_t *p_op, lmgr_t *lmgr)\n{\n#ifdef _HAVE_FID\n    int rc;\n    entry_id_t tmp_id;\n    char buff[RBH_PATH_MAX];\n    char *path;\n\n    /* 2 possible options: get fid using parent_fid/name or from fullpath */\n    if (ATTR_MASK_TEST(&p_op->fs_attrs, parent_id)\n        && ATTR_MASK_TEST(&p_op->fs_attrs, name)) {\n        BuildFidPath(&ATTR(&p_op->fs_attrs, parent_id), buff);\n        long len = strlen(buff);\n        sprintf(buff + len, \"/%s\", ATTR(&p_op->fs_attrs, name));\n        path = buff;\n    } else if (ATTR_MASK_TEST(&p_op->fs_attrs, fullpath)) {\n        path = ATTR(&p_op->fs_attrs, fullpath);\n    } else {\n        DisplayLog(LVL_CRIT, ENTRYPROC_TAG,\n                   \"Error: not enough information to get fid: parent_id/name or fullpath needed\");\n        EntryProcessor_Acknowledge(p_op, -1, true);\n        return EINVAL;\n    }\n\n    /* perform path2fid */\n    rc = Lustre_GetFidFromPath(path, &tmp_id);\n\n    /* Workaround for Lustre 2.3: if parent is root, llapi_path2fid returns\n     * -EINVAL (see LU-3245).\n     * In this case, get fid from full path.\n     */\n    if ((rc == -EINVAL) && ATTR_MASK_TEST(&p_op->fs_attrs, fullpath)) {\n        path = ATTR(&p_op->fs_attrs, fullpath);\n        rc = Lustre_GetFidFromPath(path, &tmp_id);\n    }\n\n    if (rc) {\n        /* remove the operation from pipeline */\n        rc = EntryProcessor_Acknowledge(p_op, -1, true);\n        if (rc)\n            DisplayLog(LVL_CRIT, ENTRYPROC_TAG,\n                       \"Error %d acknowledging stage STAGE_GET_FID.\", rc);\n    } else {\n        EntryProcessor_SetEntryId(p_op, &tmp_id);\n\n        /* go to GET_INFO_DB stage */\n        rc = EntryProcessor_Acknowledge(p_op, STAGE_GET_INFO_DB, false);\n        if 
(rc)\n            DisplayLog(LVL_CRIT, ENTRYPROC_TAG,\n                       \"Error %d acknowledging stage STAGE_GET_FID.\", rc);\n    }\n    return rc;\n#else\n    DisplayLog(LVL_CRIT, ENTRYPROC_TAG,\n               \"Error: unexpected stage in a filesystem with no fid: STAGE_GET_FID.\");\n    EntryProcessor_Acknowledge(p_op, -1, true);\n    return EINVAL;\n#endif\n}\n\n/**\n * check if the entry exists in the database and what info\n * must be retrieved.\n */\nint EntryProc_get_info_db(struct entry_proc_op_t *p_op, lmgr_t *lmgr)\n{\n    int rc = 0;\n    int next_stage = -1;    /* -1 = skip */\n    attr_mask_t attr_allow_cached = null_mask;\n    attr_mask_t attr_need_fresh = null_mask;\n    uint32_t status_scope = 0;  /* status mask only */\n    attr_mask_t tmp;\n\n    const pipeline_stage_t *stage_info =\n        &entry_proc_pipeline[p_op->pipeline_stage];\n\n    /* check if entry is in policies scope */\n    add_matching_scopes_mask(&p_op->entry_id, &p_op->fs_attrs, true,\n                             &status_scope);\n\n    /* XXX also retrieve needed attributes to check the scope? */\n\n    /* get diff attributes from DB and FS (to allow comparison) */\n    p_op->db_attr_need = attr_mask_or(&p_op->db_attr_need, &diff_mask);\n\n    tmp = attr_mask_and_not(&diff_mask, &p_op->fs_attrs.attr_mask);\n    p_op->fs_attr_need = attr_mask_or(&p_op->fs_attr_need, &tmp);\n\n    if (entry_proc_conf.detect_fake_mtime)\n        attr_mask_set_index(&p_op->db_attr_need, ATTR_INDEX_creation_time);\n\n    attr_allow_cached = attrs_for_status_mask(status_scope, false);\n    attr_need_fresh = attrs_for_status_mask(status_scope, true);\n    /* XXX check if entry is in policy scope? 
*/\n\n    /* what must be retrieved from DB: */\n    tmp = attr_mask_and_not(&attr_allow_cached, &p_op->fs_attrs.attr_mask);\n    p_op->db_attr_need = attr_mask_or(&p_op->db_attr_need, &tmp);\n\n    /* no dircount for non-dirs */\n    if (ATTR_MASK_TEST(&p_op->fs_attrs, type) &&\n        !strcmp(ATTR(&p_op->fs_attrs, type), STR_TYPE_DIR)) {\n        attr_mask_unset_index(&p_op->db_attr_need, ATTR_INDEX_dircount);\n    }\n\n    /* don't get stripe for non-files */\n    if (ATTR_MASK_TEST(&p_op->fs_attrs, type)\n        && strcmp(ATTR(&p_op->fs_attrs, type), STR_TYPE_FILE) != 0) {\n        attr_mask_unset_index(&p_op->db_attr_need, ATTR_INDEX_stripe_info);\n        attr_mask_unset_index(&p_op->db_attr_need, ATTR_INDEX_stripe_items);\n        attr_mask_unset_index(&p_op->fs_attr_need, ATTR_INDEX_stripe_info);\n        attr_mask_unset_index(&p_op->fs_attr_need, ATTR_INDEX_stripe_items);\n    }\n\n    /* no readlink for non symlinks */\n    if (ATTR_MASK_TEST(&p_op->fs_attrs, type)) {\n        if (!strcmp(ATTR(&p_op->fs_attrs, type), STR_TYPE_LINK))\n            /* check if symlink's contents is known */\n            attr_mask_set_index(&p_op->db_attr_need, ATTR_INDEX_link);\n        else\n            attr_mask_unset_index(&p_op->db_attr_need, ATTR_INDEX_link);\n    }\n\n    if (!attr_mask_is_null(p_op->db_attr_need)) {\n        p_op->db_attrs.attr_mask = p_op->db_attr_need;\n        rc = ListMgr_Get(lmgr, &p_op->entry_id, &p_op->db_attrs);\n\n        if (rc == DB_SUCCESS) {\n            p_op->db_exists = 1;\n        } else if (rc == DB_NOT_EXISTS) {\n            p_op->db_exists = 0;\n            ATTR_MASK_INIT(&p_op->db_attrs);\n        } else {\n            /* ERROR */\n            DisplayLog(LVL_CRIT, ENTRYPROC_TAG,\n                       \"Error %d retrieving entry \" DFID \" from DB: %s.\", rc,\n                       PFID(&p_op->entry_id), lmgr_err2str(rc));\n            p_op->db_exists = 0;\n            ATTR_MASK_INIT(&p_op->db_attrs);\n        }\n    } else {\n 
       p_op->db_exists = ListMgr_Exists(lmgr, &p_op->entry_id);\n    }\n\n    /* get status for all policies with a matching scope */\n    add_matching_scopes_mask(&p_op->entry_id, &p_op->fs_attrs, true,\n                             &p_op->fs_attr_need.status);\n    tmp = attr_mask_and_not(&attr_need_fresh, &p_op->fs_attrs.attr_mask);\n    p_op->fs_attr_need = attr_mask_or(&p_op->fs_attr_need, &tmp);\n\n    if (!p_op->db_exists) {\n        /* new entry */\n        p_op->db_op_type = OP_TYPE_INSERT;\n\n        /* set creation time if it was not set by scan module */\n        if (!ATTR_MASK_TEST(&p_op->fs_attrs, creation_time)) {\n            ATTR_MASK_SET(&p_op->fs_attrs, creation_time);\n            /* XXX min(atime,mtime,ctime)? */\n            ATTR(&p_op->fs_attrs, creation_time) = time(NULL);\n        }\n#ifdef _LUSTRE\n        if (ATTR_MASK_TEST(&p_op->fs_attrs, type)\n            && !strcmp(ATTR(&p_op->fs_attrs, type), STR_TYPE_FILE)\n            /* only if it was not retrieved during the scan */\n            && !(ATTR_MASK_TEST(&p_op->fs_attrs, stripe_info)\n                 && ATTR_MASK_TEST(&p_op->fs_attrs, stripe_items))) {\n            attr_mask_set_index(&p_op->fs_attr_need, ATTR_INDEX_stripe_info);\n            attr_mask_set_index(&p_op->fs_attr_need, ATTR_INDEX_stripe_items);\n        }\n#endif\n\n        /* readlink for symlinks (if not already known) */\n        if (ATTR_MASK_TEST(&p_op->fs_attrs, type)\n            && !strcmp(ATTR(&p_op->fs_attrs, type), STR_TYPE_LINK)\n            && !ATTR_MASK_TEST(&p_op->fs_attrs, link)) {\n            attr_mask_set_index(&p_op->fs_attr_need, ATTR_INDEX_link);\n        } else {\n            attr_mask_unset_index(&p_op->fs_attr_need, ATTR_INDEX_link);\n        }\n\n#ifdef ATTR_INDEX_status /** @FIXME RBHv3 drop old-style status reference */\n        if (ATTR_MASK_TEST(&p_op->fs_attrs, type)\n#ifdef _LUSTRE_HSM\n            && !strcmp(ATTR(&p_op->fs_attrs, type), STR_TYPE_FILE))\n#elif defined (_HSM_LITE)\n        
    && (strcmp(ATTR(&p_op->fs_attrs, type), STR_TYPE_DIR) != 0)\n            && !p_op->extra_info.not_supp)\n#endif\n        {\n            p_op->fs_attr_need |= ATTR_MASK_status;\n#ifdef _HSM_LITE\n            p_op->fs_attr_need |= (attr_need_fresh & ~p_op->fs_attrs.attr_mask);\n#endif\n        }\n        else\n        {\n            p_op->extra_info.not_supp = 1;\n            p_op->fs_attr_need &= ~ATTR_MASK_status;\n        }\n#endif\n        next_stage = STAGE_GET_INFO_FS;\n    } else {\n        p_op->db_op_type = OP_TYPE_UPDATE;\n\n#ifdef ATTR_INDEX_status /** @FIXME RBHv3 drop old-style status reference */\n        /* only if status is in diff_mask */\n        if (diff_mask & ATTR_MASK_status) {\n            if (ATTR_MASK_TEST(&p_op->fs_attrs, type)\n#ifdef _LUSTRE_HSM\n                && !strcmp(ATTR(&p_op->fs_attrs, type), STR_TYPE_FILE))\n#elif defined (_HSM_LITE)\n                && (strcmp(ATTR(&p_op->fs_attrs, type), STR_TYPE_DIR) != 0)\n                && !p_op->extra_info.not_supp)\n#endif\n            {\n                p_op->fs_attr_need |= ATTR_MASK_status;\n#ifdef _HSM_LITE\n                p_op->fs_attr_need |=\n                    (attr_need_fresh & ~p_op->fs_attrs.attr_mask);\n#endif\n            }\n            else\n            {\n                p_op->extra_info.not_supp = 1;\n                p_op->fs_attr_need &= ~ATTR_MASK_status;\n            }\n        }\n#endif\n\n        if (attr_mask_test_index(&diff_mask, ATTR_INDEX_link)) {\n            if (ATTR_MASK_TEST(&p_op->fs_attrs, type)) {    /* likely set */\n                if (strcmp(ATTR(&p_op->fs_attrs, type), STR_TYPE_LINK))\n                    /* non-link */\n                    attr_mask_unset_index(&p_op->fs_attr_need, ATTR_INDEX_link);\n                else {\n                    /* link */\n#ifdef _LUSTRE\n                    /* already known (in DB or FS) */\n                    if (ATTR_FSorDB_TEST(p_op, link))\n                        
attr_mask_unset_index(&p_op->fs_attr_need,\n                                              ATTR_INDEX_link);\n                    else    /* not known */\n                        attr_mask_set_index(&p_op->fs_attr_need,\n                                            ATTR_INDEX_link);\n#else\n                    /* For non-lustre filesystems, inodes may be recycled,\n                     * so re-read link even if it is in DB */\n                    if (ATTR_MASK_TEST(&p_op->fs_attrs, link))\n                        attr_mask_unset_index(&p_op->fs_attr_need,\n                                              ATTR_INDEX_link);\n                    else\n                        attr_mask_set_index(&p_op->fs_attr_need,\n                                            ATTR_INDEX_link);\n#endif\n                }\n            }\n        }\n\n        /* get parent_id+name, if not set during scan (eg. for root\n         * directory) */\n        if (!ATTR_MASK_TEST(&p_op->fs_attrs, name))\n            attr_mask_set_index(&p_op->fs_attr_need, ATTR_INDEX_name);\n        if (!ATTR_MASK_TEST(&p_op->fs_attrs, parent_id))\n            attr_mask_set_index(&p_op->fs_attr_need, ATTR_INDEX_parent_id);\n\n#ifdef _LUSTRE\n        /* only if stripe is in diff_mask || db_apply */\n        if ((diff_mask.std & (ATTR_MASK_stripe_info | ATTR_MASK_stripe_items))\n            || (diff_arg->apply == APPLY_DB)) {\n            /* get stripe only for files */\n            if (ATTR_MASK_TEST(&p_op->fs_attrs, type)\n                && !strcmp(ATTR(&p_op->fs_attrs, type), STR_TYPE_FILE)\n                && !strcmp(global_config.fs_type, \"lustre\")) {\n                check_stripe_info(p_op, lmgr);\n            }\n        }\n#endif\n        next_stage = STAGE_GET_INFO_FS;\n    }\n\n    if (next_stage == -1)\n        /* drop the entry */\n        rc = EntryProcessor_Acknowledge(p_op, -1, true);\n    else\n        /* go to next pipeline step */\n        rc = EntryProcessor_Acknowledge(p_op, next_stage, 
false);\n\n    if (rc)\n        DisplayLog(LVL_CRIT, ENTRYPROC_TAG,\n                   \"Error %d acknowledging stage %s.\", rc,\n                   stage_info->stage_name);\n    return rc;\n}\n\nint EntryProc_get_info_fs(struct entry_proc_op_t *p_op, lmgr_t *lmgr)\n{\n    int rc;\n    char tmp_buf[RBH_NAME_MAX];\n\n#ifdef _HAVE_FID\n    char path[RBH_PATH_MAX];\n\n    BuildFidPath(&p_op->entry_id, path);\n#else\n    char *path;\n    if (ATTR_FSorDB_TEST(p_op, fullpath)) {\n        path = ATTR_FSorDB(p_op, fullpath);\n    } else {\n        DisplayLog(LVL_CRIT, ENTRYPROC_TAG,\n                   \"Entry path is needed for retrieving file info\");\n        return EINVAL;\n    }\n#endif\n\n    DisplayLog(LVL_FULL, ENTRYPROC_TAG,\n               DFID \": Getattr=%u, Getpath=%u, Readlink=%u\"\n               \", Getstatus(%s), Getstripe=%u\",\n               PFID(&p_op->entry_id), NEED_GETATTR(p_op) ? 1 : 0,\n               NEED_GETPATH(p_op) ? 1 : 0, NEED_READLINK(p_op) ? 1 : 0,\n               name_status_mask(p_op->fs_attr_need.status, tmp_buf,\n                                sizeof(tmp_buf)), NEED_GETSTRIPE(p_op) ? 
1 : 0);\n\n    /* don't retrieve info which is already fresh */\n    p_op->fs_attr_need =\n        attr_mask_and_not(&p_op->fs_attr_need, &p_op->fs_attrs.attr_mask);\n\n    /* scans: never need to get attr (provided in operation) */\n\n#if defined(_LUSTRE) && defined(_HAVE_FID)\n    /* may be needed if parent information is missing */\n    if (NEED_GETPATH(p_op)) {\n        if (path_check_update(&p_op->entry_id, path, &p_op->fs_attrs,\n                              p_op->fs_attr_need) == PCR_ORPHAN) {\n            /* ignore entries not in the namespace */\n            goto skip_record;\n        }\n    }\n#endif\n\n    if (entry_proc_conf.detect_fake_mtime\n        && ATTR_FSorDB_TEST(p_op, creation_time)\n        && ATTR_MASK_TEST(&p_op->fs_attrs, last_mod)) {\n        check_and_warn_fake_mtime(p_op);\n    }\n#ifdef _LUSTRE\n    /* getstripe only for files */\n    if (NEED_GETSTRIPE(p_op)\n        && ATTR_FSorDB_TEST(p_op, type)\n        && strcmp(ATTR_FSorDB(p_op, type), STR_TYPE_FILE) != 0) {\n        attr_mask_unset_index(&p_op->fs_attr_need, ATTR_INDEX_stripe_info);\n        attr_mask_unset_index(&p_op->fs_attr_need, ATTR_INDEX_stripe_items);\n    }\n\n    if (NEED_GETSTRIPE(p_op)) {\n        /* get entry stripe */\n        rc = File_GetStripeByPath(path,\n                                  &ATTR(&p_op->fs_attrs, stripe_info),\n                                  &ATTR(&p_op->fs_attrs, stripe_items));\n        if (rc) {\n            ATTR_MASK_UNSET(&p_op->fs_attrs, stripe_info);\n            ATTR_MASK_UNSET(&p_op->fs_attrs, stripe_items);\n        } else {\n            ATTR_MASK_SET(&p_op->fs_attrs, stripe_info);\n            ATTR_MASK_SET(&p_op->fs_attrs, stripe_items);\n        }\n    }   /* get_stripe needed */\n#endif\n\n    if (NEED_ANYSTATUS(p_op)) {\n        int i;\n        sm_instance_t *smi;\n        /** attrs from FS+DB */\n        attr_set_t merged_attrs = ATTR_SET_INIT;\n        /** attributes + status */\n        attr_set_t new_attrs = 
ATTR_SET_INIT;\n\n        ListMgr_MergeAttrSets(&merged_attrs, &p_op->fs_attrs, 1);\n        ListMgr_MergeAttrSets(&merged_attrs, &p_op->db_attrs, 0);\n\n        /* match policy scopes according to newly set information:\n         * remove needed status from mask and append the updated one. */\n        p_op->fs_attr_need.status &= ~all_status_mask();\n        /* FIXME this fails if scope attributes are missing */\n        add_matching_scopes_mask(&p_op->entry_id, &merged_attrs, false,\n                                 &p_op->fs_attr_need.status);\n\n        i = 0;\n        while ((smi = get_sm_instance(i)) != NULL) {\n            ATTR_MASK_INIT(&new_attrs);\n\n            if (NEED_GETSTATUS(p_op, i)) {\n                if (smi->sm->get_status_func != NULL) {\n                    /* this also check if entry is ignored for this policy */\n                    rc = smi->sm->get_status_func(smi, &p_op->entry_id,\n                                                  &merged_attrs, &new_attrs);\n                    if (err_missing(rc)) {\n                        DisplayLog(LVL_DEBUG, ENTRYPROC_TAG,\n                                   \"Entry %s no longer exists\", path);\n                        /* changelog: an UNLINK event will be raised,\n                         *      so we ignore current record\n                         * scan: entry will be garbage collected at the end of\n                         *      the scan */\n                        goto skip_record;\n                    } else if (rc != 0) {\n                        DisplayLog(LVL_MAJOR, ENTRYPROC_TAG,\n                                   \"Failed to get status for %s (%s status manager): error %d\",\n                                   path, smi->sm->name, rc);\n                    } else {\n                        /* merge/update attributes */\n                        ListMgr_MergeAttrSets(&p_op->fs_attrs, &new_attrs,\n                                              true);\n                    }\n               
     /* free allocated resources, once merged */\n                    ListMgr_FreeAttrs(&new_attrs);\n                }\n            }\n            i++;\n        }\n        /* free allocated structs in merged attributes */\n        ListMgr_FreeAttrs(&merged_attrs);\n    }\n\n    /* readlink only for symlinks */\n    if (NEED_READLINK(p_op) && ATTR_FSorDB_TEST(p_op, type)\n        && strcmp(ATTR_FSorDB(p_op, type), STR_TYPE_LINK) != 0)\n        attr_mask_unset_index(&p_op->fs_attr_need, ATTR_INDEX_link);\n\n    if (NEED_READLINK(p_op)) {\n        ssize_t len = readlink(path, ATTR(&p_op->fs_attrs, link), RBH_PATH_MAX);\n\n        if (len >= 0) {\n            ATTR_MASK_SET(&p_op->fs_attrs, link);\n\n            /* add final '\\0' on success */\n            if (len >= RBH_PATH_MAX)\n                ATTR(&p_op->fs_attrs, link)[len - 1] = '\\0';\n            else\n                ATTR(&p_op->fs_attrs, link)[len] = '\\0';\n        } else\n            DisplayLog(LVL_MAJOR, ENTRYPROC_TAG, \"readlink failed on %s: %s\",\n                       path, strerror(errno));\n    }\n\n    /** FIXME some special files should be ignored i.e. not inserted in DB. 
*/\n\n    /* print diff */\n    rc = EntryProcessor_Acknowledge(p_op, STAGE_REPORT_DIFF, false);\n    if (rc)\n        DisplayLog(LVL_CRIT, ENTRYPROC_TAG, \"Error %d acknowledging stage.\",\n                   rc);\n    return rc;\n\n skip_record:\n    /* remove the operation from pipeline */\n    rc = EntryProcessor_Acknowledge(p_op, -1, true);\n\n    if (rc)\n        DisplayLog(LVL_CRIT, ENTRYPROC_TAG, \"Error %d acknowledging stage.\",\n                   rc);\n    return rc;\n}\n\n/* report diff and clean unchanged attributes */\nint EntryProc_report_diff(struct entry_proc_op_t *p_op, lmgr_t *lmgr)\n{\n    const pipeline_stage_t *stage_info =\n        &entry_proc_pipeline[p_op->pipeline_stage];\n    int rc;\n\n    /* once set, never change creation time */\n    if (p_op->db_op_type != OP_TYPE_INSERT)\n        ATTR_MASK_UNSET(&p_op->fs_attrs, creation_time);\n\n    /* Only keep fields that changed */\n    if (p_op->db_op_type == OP_TYPE_UPDATE) {\n        attr_mask_t tmp;\n        attr_mask_t loc_diff_mask =\n            ListMgr_WhatDiff(&p_op->fs_attrs, &p_op->db_attrs);\n\n        /* In scan mode, always keep md_update and path_update,\n         * to avoid their cleaning at the end of the scan.\n         * Also keep name and parent as they are keys in DNAMES table.\n         */\n        attr_mask_t to_keep = {.std =\n                ATTR_MASK_parent_id | ATTR_MASK_name, 0, 0LL };\n\n        /* the mask to be displayed > diff_mask (include to_keep flags) */\n        attr_mask_t display_mask = attr_mask_and(&diff_mask, &loc_diff_mask);\n\n        /* keep fullpath if parent or name changed (friendly display) */\n        if (loc_diff_mask.std & (ATTR_MASK_parent_id | ATTR_MASK_name)) {\n            to_keep.std |= ATTR_MASK_fullpath;\n            display_mask.std |= ATTR_MASK_fullpath;\n        }\n#ifdef HAVE_CHANGELOGS\n        if (!p_op->extra_info.is_changelog_record)\n#endif\n            to_keep.std |= (ATTR_MASK_md_update | ATTR_MASK_path_update);\n\n        
/* remove other unchanged attrs or attrs not in db mask */\n        tmp = attr_mask_or(&loc_diff_mask, &to_keep);\n        tmp = attr_mask_or_not(&tmp, &p_op->db_attrs.attr_mask);\n        p_op->fs_attrs.attr_mask =\n            attr_mask_and(&p_op->fs_attrs.attr_mask, &tmp);\n\n#ifdef _LUSTRE\n        if (p_op->db_stripe_ok) {\n            ATTR_MASK_UNSET(&p_op->fs_attrs, stripe_info);\n            if (ATTR_MASK_TEST(&p_op->fs_attrs, stripe_items)) {\n                ATTR_MASK_UNSET(&p_op->fs_attrs, stripe_items);\n                free_stripe_items(&ATTR(&p_op->fs_attrs, stripe_items));\n            }\n        }\n#endif\n\n        /* nothing changed => noop */\n        if (attr_mask_is_null(p_op->fs_attrs.attr_mask)) {\n            /* no op */\n            p_op->db_op_type = OP_TYPE_NONE;\n        } else if (!attr_mask_is_null(attr_mask_and(&loc_diff_mask, &diff_mask))\n                   && !attr_mask_is_null(display_mask)) {\n            GString *attrchg = g_string_new(NULL);\n\n            /* revert change: reverse display */\n            if (diff_arg->apply == APPLY_FS) {\n                /* attr from FS */\n                print_attrs(attrchg, &p_op->fs_attrs, display_mask, 1);\n                printf(\"-\" DFID \" %s\\n\", PFID(&p_op->entry_id), attrchg->str);\n\n                /* attr from DB */\n                print_attrs(attrchg, &p_op->db_attrs, display_mask, 1);\n                printf(\"+\" DFID \" %s\\n\", PFID(&p_op->entry_id), attrchg->str);\n            } else {\n                /* attr from DB */\n                print_attrs(attrchg, &p_op->db_attrs, display_mask, 1);\n                printf(\"-\" DFID \" %s\\n\", PFID(&p_op->entry_id), attrchg->str);\n\n                /* attr from FS */\n                print_attrs(attrchg, &p_op->fs_attrs, display_mask, 1);\n                printf(\"+\" DFID \" %s\\n\", PFID(&p_op->entry_id), attrchg->str);\n            }\n            g_string_free(attrchg, TRUE);\n        }\n    } else if 
(!attr_mask_is_null(diff_mask)) {\n        if (p_op->db_op_type == OP_TYPE_INSERT) {\n            if (diff_arg->apply == APPLY_FS) {\n                /* revert change: reverse display */\n                if (ATTR_FSorDB_TEST(p_op, fullpath))\n                    printf(\"--\" DFID \" path=%s\\n\", PFID(&p_op->entry_id),\n                           ATTR_FSorDB(p_op, fullpath));\n                else\n                    printf(\"--\" DFID \"\\n\", PFID(&p_op->entry_id));\n            } else {\n                GString *attrnew = g_string_new(NULL);\n\n                print_attrs(attrnew, &p_op->fs_attrs, p_op->fs_attrs.attr_mask,\n                            1);\n                printf(\"++\" DFID \" %s\\n\", PFID(&p_op->entry_id), attrnew->str);\n\n                g_string_free(attrnew, TRUE);\n            }\n        } else if ((p_op->db_op_type == OP_TYPE_REMOVE_LAST) ||\n                   (p_op->db_op_type == OP_TYPE_REMOVE_ONE)\n                   || (p_op->db_op_type == OP_TYPE_SOFT_REMOVE)) {\n            /* actually: never happens */\n\n            if (diff_arg->apply == APPLY_FS) {\n                GString *attrnew = g_string_new(NULL);\n\n                /* revert change: reverse display */\n                print_attrs(attrnew, &p_op->db_attrs, p_op->db_attrs.attr_mask,\n                            1);\n                printf(\"++\" DFID \" %s\\n\", PFID(&p_op->entry_id), attrnew->str);\n\n                g_string_free(attrnew, TRUE);\n            } else {\n                if (ATTR_FSorDB_TEST(p_op, fullpath))\n                    printf(\"--\" DFID \" path=%s\\n\", PFID(&p_op->entry_id),\n                           ATTR_FSorDB(p_op, fullpath));\n                else\n                    printf(\"--\" DFID \"\\n\", PFID(&p_op->entry_id));\n            }\n        }\n    }\n\n    if (diff_arg->apply == APPLY_DB)\n        attr_mask_unset_readonly(&p_op->fs_attrs.attr_mask);\n\n    /* always go to APPLY step, at least to tag the entry */\n    rc = 
EntryProcessor_Acknowledge(p_op, STAGE_APPLY, false);\n    if (rc)\n        DisplayLog(LVL_CRIT, ENTRYPROC_TAG, \"Error acknowledging stage %s\",\n                   stage_info->stage_name);\n    return rc;\n}\n\n/* forward declaration to check batchable operations for db_apply stage */\nstatic bool dbop_is_batchable(struct entry_proc_op_t *first, struct\n                              entry_proc_op_t *next,\n                              attr_mask_t *full_attr_mask)\n{\n    /* batch nothing if not applying to DB */\n    if ((diff_arg->apply != APPLY_DB) || (pipeline_flags & RUNFLG_DRY_RUN))\n        return false;\n\n    if (first->db_op_type != OP_TYPE_INSERT\n        && first->db_op_type != OP_TYPE_UPDATE\n        && first->db_op_type != OP_TYPE_NONE)\n        return false;\n    else if (first->db_op_type != next->db_op_type)\n        return false;\n    /* starting from here, db_op_type is the same for the 2 operations */\n    /* all NOOP operations can be batched */\n    else if (first->db_op_type == OP_TYPE_NONE)\n        return true;\n    /* different masks can be mixed, as long as attributes for each table are\n     * the same or 0. Ask the list manager about that. 
*/\n    else if (lmgr_batch_compat(*full_attr_mask, next->fs_attrs.attr_mask)) {\n        *full_attr_mask =\n            attr_mask_or(full_attr_mask, &next->fs_attrs.attr_mask);\n        return true;\n    } else\n        return false;\n}\n\n/**\n * Perform an operation on database.\n */\nint EntryProc_apply(struct entry_proc_op_t *p_op, lmgr_t *lmgr)\n{\n    int rc;\n    const pipeline_stage_t *stage_info =\n        &entry_proc_pipeline[p_op->pipeline_stage];\n\n    if ((diff_arg->apply == APPLY_DB) && !(pipeline_flags & RUNFLG_DRY_RUN)) {\n        /* insert to DB */\n        switch (p_op->db_op_type) {\n        case OP_TYPE_NONE:\n            /* noop */\n            DisplayLog(LVL_FULL, ENTRYPROC_TAG, \"NoOp(\" DFID \")\",\n                       PFID(&p_op->entry_id));\n            rc = 0;\n            break;\n\n        case OP_TYPE_INSERT:\n#ifdef _HAVE_FID\n            DisplayLog(LVL_FULL, ENTRYPROC_TAG, \"Insert(\" DFID \")\",\n                       PFID(&p_op->entry_id));\n#endif\n            rc = ListMgr_Insert(lmgr, &p_op->entry_id, &p_op->fs_attrs, false);\n            break;\n\n        case OP_TYPE_UPDATE:\n#ifdef _HAVE_FID\n            DisplayLog(LVL_FULL, ENTRYPROC_TAG, \"Update(\" DFID \")\",\n                       PFID(&p_op->entry_id));\n#endif\n            rc = ListMgr_Update(lmgr, &p_op->entry_id, &p_op->fs_attrs);\n            break;\n\n        case OP_TYPE_REMOVE_ONE:\n#ifdef _HAVE_FID\n            DisplayLog(LVL_FULL, ENTRYPROC_TAG, \"Remove(\" DFID \")\",\n                       PFID(&p_op->entry_id));\n#endif\n            rc = ListMgr_Remove(lmgr, &p_op->entry_id, &p_op->fs_attrs, false);\n            break;\n\n        case OP_TYPE_REMOVE_LAST:\n#ifdef _HAVE_FID\n            DisplayLog(LVL_FULL, ENTRYPROC_TAG, \"Remove(\" DFID \")\",\n                       PFID(&p_op->entry_id));\n#endif\n            rc = ListMgr_Remove(lmgr, &p_op->entry_id, &p_op->fs_attrs, true);\n            break;\n\n        case OP_TYPE_SOFT_REMOVE:\n            if 
(log_config.debug_level >= LVL_DEBUG) {\n                attr_mask_t tmp = null_mask;\n                attr_mask_t tmp2 = null_mask;\n                GString *gs = g_string_new(NULL);\n\n                tmp.std =\n                    ATTR_MASK_fullpath | ATTR_MASK_parent_id | ATTR_MASK_name;\n                tmp2 = sm_softrm_mask();\n                tmp = attr_mask_or(&tmp, &tmp2);\n\n                print_attrs(gs, &p_op->fs_attrs, tmp, true);\n                DisplayLog(LVL_DEBUG, ENTRYPROC_TAG, \"SoftRemove(\" DFID \",%s)\",\n                           PFID(&p_op->entry_id), gs->str);\n\n                g_string_free(gs, TRUE);\n            }\n\n            ATTR_MASK_SET(&p_op->fs_attrs, rm_time);\n            ATTR(&p_op->fs_attrs, rm_time) = time(NULL);\n            rc = ListMgr_SoftRemove(lmgr, &p_op->entry_id, &p_op->fs_attrs);\n            break;\n        default:\n            DisplayLog(LVL_CRIT, ENTRYPROC_TAG,\n                       \"Unhandled DB operation type: %d\", p_op->db_op_type);\n            rc = -1;\n        }\n\n        if (rc)\n            DisplayLog(LVL_CRIT, ENTRYPROC_TAG,\n                       \"Error %d performing database operation: %s.\", rc,\n                       lmgr_err2str(rc));\n    } else if (diff_arg->db_tag) {\n        /* tag the entry in the DB */\n        rc = ListMgr_TagEntry(lmgr, diff_arg->db_tag, &p_op->entry_id);\n        if (rc)\n            DisplayLog(LVL_CRIT, ENTRYPROC_TAG,\n                       \"Error %d performing database operation: %s.\", rc,\n                       lmgr_err2str(rc));\n    }\n\n    if (diff_arg->apply == APPLY_FS) {\n        attr_mask_t tmp;\n\n        /* all changes must be reverted. So, insert=>rm, rm=>create, ... 
*/\n        /* FIXME as this step is parallel, how to manage file creation while\n         * parent directory is not created?\n         * Same issue for unlink & rmdir */\n        switch (p_op->db_op_type) {\n        case OP_TYPE_INSERT:\n\n#ifdef _HAVE_FID\n            /* if fullpath is not set, but parent and name are set,\n             * use parent/name as the fullpath (for fids only) */\n            if (!ATTR_MASK_TEST(&p_op->fs_attrs, fullpath)\n                && ATTR_MASK_TEST(&p_op->fs_attrs, parent_id)\n                && ATTR_MASK_TEST(&p_op->fs_attrs, name)) {\n                char *str = ATTR(&p_op->fs_attrs, fullpath);\n                BuildFidPath(&ATTR(&p_op->fs_attrs, parent_id), str);\n                long len = strlen(str);\n                sprintf(str + len, \"/%s\", ATTR(&p_op->fs_attrs, name));\n                ATTR_MASK_SET(&p_op->fs_attrs, fullpath);\n            }\n#endif\n\n            /* unlink or rmdir */\n            if (ATTR_MASK_TEST(&p_op->fs_attrs, type)\n                && ATTR_MASK_TEST(&p_op->fs_attrs, fullpath)) {\n                if (!strcmp(ATTR(&p_op->fs_attrs, type), STR_TYPE_DIR)) {\n                    /* rmdir */\n                    DisplayReport(\"%srmdir(%s)\",\n                                  (pipeline_flags & RUNFLG_DRY_RUN) ?\n                                  \"(dry-run) \" : \"\", ATTR(&p_op->fs_attrs,\n                                                          fullpath));\n                    if (!(pipeline_flags & RUNFLG_DRY_RUN)) {\n                        if (rmdir(ATTR(&p_op->fs_attrs, fullpath)))\n                            DisplayLog(LVL_CRIT, ENTRYPROC_TAG,\n                                       \"rmdir(%s) failed: %s\",\n                                       ATTR(&p_op->fs_attrs, fullpath),\n                                       strerror(errno));\n                    }\n                } else {\n                    /* unlink */\n                    DisplayReport(\"%sunlink(%s)\",\n                     
             (pipeline_flags & RUNFLG_DRY_RUN) ?\n                                  \"(dry-run) \" : \"\", ATTR(&p_op->fs_attrs,\n                                                          fullpath));\n                    if (!(pipeline_flags & RUNFLG_DRY_RUN)) {\n                        if (unlink(ATTR(&p_op->fs_attrs, fullpath)))\n                            DisplayLog(LVL_CRIT, ENTRYPROC_TAG,\n                                       \"unlink(%s) failed: %s\",\n                                       ATTR(&p_op->fs_attrs, fullpath),\n                                       strerror(errno));\n                    }\n                }\n            } else {\n                DisplayLog(LVL_CRIT, ENTRYPROC_TAG,\n                           \"Cannot remove entry: type or path is unknown\");\n            }\n            break;\n        case OP_TYPE_UPDATE:\n            tmp =\n                attr_mask_and(&p_op->db_attrs.attr_mask,\n                              &p_op->fs_attrs.attr_mask);\n            tmp = attr_mask_and(&tmp, &diff_mask);\n\n            /*attributes to be changed: p_op->db_attrs.attr_mask\n             *                       & p_op->fs_attrs.attr_mask & diff_mask */\n            rc = ApplyAttrs(&p_op->entry_id, &p_op->db_attrs, &p_op->fs_attrs,\n                            tmp, pipeline_flags & RUNFLG_DRY_RUN);\n            break;\n\n        default:\n            /* no attr update: insert or remove */\n            ;\n        }\n    }\n\n    rc = EntryProcessor_Acknowledge(p_op, -1, true);\n    if (rc)\n        DisplayLog(LVL_CRIT, ENTRYPROC_TAG, \"Error %d acknowledging stage %s.\",\n                   rc, stage_info->stage_name);\n    return rc;\n}\n\n/**\n * Perform a batch of operations on the database.\n */\nint EntryProc_batch_apply(struct entry_proc_op_t **ops, int count,\n                          lmgr_t *lmgr)\n{\n    int i, rc = 0;\n    const pipeline_stage_t *stage_info =\n        &entry_proc_pipeline[ops[0]->pipeline_stage];\n    entry_id_t **ids 
= NULL;\n    attr_set_t **attrs = NULL;\n\n    /* allocate arrays of ids and attrs */\n    ids = MemCalloc(count, sizeof(*ids));\n    if (!ids)\n        return -ENOMEM;\n    attrs = MemCalloc(count, sizeof(*attrs));\n    if (!attrs) {\n        rc = -ENOMEM;\n        goto free_ids;\n    }\n    for (i = 0; i < count; i++) {\n        ids[i] = &ops[i]->entry_id;\n        attrs[i] = &ops[i]->fs_attrs;\n    }\n\n    /* insert to DB */\n    switch (ops[0]->db_op_type) {\n    case OP_TYPE_NONE:\n        /* noop */\n        DisplayLog(LVL_FULL, ENTRYPROC_TAG, \"NoOp(%u ops: \" DFID \"...)\", count,\n                   PFID(ids[0]));\n        rc = 0;\n        break;\n\n    case OP_TYPE_INSERT:\n        DisplayLog(LVL_FULL, ENTRYPROC_TAG, \"BatchInsert(%u ops: \" DFID \"...)\",\n                   count, PFID(ids[0]));\n        rc = ListMgr_BatchInsert(lmgr, ids, attrs, count, false);\n        break;\n\n    case OP_TYPE_UPDATE:\n        DisplayLog(LVL_FULL, ENTRYPROC_TAG, \"BatchUpdate(%u ops: \" DFID \"...)\",\n                   count, PFID(ids[0]));\n        rc = ListMgr_BatchInsert(lmgr, ids, attrs, count, true);\n        break;\n\n    default:\n        DisplayLog(LVL_CRIT, ENTRYPROC_TAG,\n                   \"Unexpected operation for batch op: %d\", ops[0]->db_op_type);\n        rc = -1;\n    }\n\n    if (rc)\n        DisplayLog(LVL_CRIT, ENTRYPROC_TAG,\n                   \"Error %d performing batch database operation: %s.\", rc,\n                   lmgr_err2str(rc));\n\n    rc = EntryProcessor_AcknowledgeBatch(ops, count, -1, true);\n    if (rc)\n        DisplayLog(LVL_CRIT, ENTRYPROC_TAG, \"Error %d acknowledging stage %s.\",\n                   rc, stage_info->stage_name);\n\n    MemFree(attrs);\n free_ids:\n    MemFree(ids);\n    return rc;\n}\n\n/* called for each untagged entry */\nstatic void no_tag_cb(const entry_id_t *p_id)\n{\n    if (diff_arg->apply == APPLY_FS)\n        /* XXX no rm callback is supposed to be called for FS apply */\n        printf(\"++\" 
DFID \"\\n\", PFID(p_id));\n    else\n        printf(\"--\" DFID \"\\n\", PFID(p_id));\n}\n\n#ifdef _HSM_LITE\nstatic int hsm_recover(lmgr_t *lmgr, entry_id_t *p_id, attr_set_t *p_oldattr)\n{\n    recov_status_t st;\n    entry_id_t new_id;\n    attr_set_t new_attrs;\n    int rc;\n    const char *status_str;\n\n    /* try to recover from backend */\n\n    /** FIXME use undelete function from a status manager */\n    st = RS_ERROR;\n    //st = rbhext_recover(p_id, p_oldattr, &new_id, &new_attrs, NULL);\n    switch (st) {\n    case RS_FILE_OK:\n    case RS_FILE_EMPTY:\n    case RS_NON_FILE:\n    case RS_FILE_DELTA:\n\n        attr_mask_unset_readonly(&new_attrs.attr_mask);\n        rc = ListMgr_Replace(lmgr, p_id, p_oldattr, &new_id, &new_attrs,\n                             true, true);\n        if (rc) {\n            DisplayLog(LVL_CRIT, ENTRYPROC_TAG, \"Failed to replace entry \"\n                       DFID \" with \" DFID \" (%s) in DB.\",\n                       PFID(p_id), PFID(&new_id), ATTR(&new_attrs, fullpath));\n            goto clean_entry;\n        }\n\n        status_str = \"?\";\n        if (st == RS_FILE_OK)\n            status_str = \"up-to-date file\";\n        else if (st == RS_FILE_EMPTY)\n            status_str = \"empty file\";\n        else if (st == RS_FILE_DELTA)\n            status_str = \"old file data\";\n        else if (st == RS_NON_FILE)\n            status_str = \"non-file\";\n\n        DisplayReport(\"%s successfully recovered (%s)\",\n                      ATTR(&new_attrs, fullpath), status_str);\n        return 0;\n\n    case RS_NOBACKUP:\n        DisplayLog(LVL_CRIT, ENTRYPROC_TAG,\n                   \"No backup available for entry '%s'\", ATTR(p_oldattr,\n                                                              fullpath));\n        goto clean_entry;\n    case RS_ERROR:\n    default:\n        DisplayLog(LVL_CRIT, ENTRYPROC_TAG,\n                   \"Failed to restore entry '%s' (status=%d)\", ATTR(p_oldattr,\n                 
                                                   fullpath),\n                   st);\n        goto clean_entry;\n    }\n\n clean_entry:\n    /* clean new entry (inconsistent) */\n    if (!strcmp(ATTR(p_oldattr, type), STR_TYPE_DIR))\n        rc = rmdir(ATTR(p_oldattr, fullpath));\n    else\n        rc = unlink(ATTR(p_oldattr, fullpath));\n    if (rc)\n        DisplayLog(LVL_EVENT, ENTRYPROC_TAG, \"cleanup: unlink/rmdir failed: %s\",\n                   strerror(errno));\n\n    /* failure */\n    return -1;\n}\n#endif\n\nstatic int std_recover(lmgr_t *lmgr, entry_id_t *p_id, attr_set_t *p_oldattr)\n{\n    entry_id_t new_id;\n    attr_set_t new_attrs;\n    int rc;\n\n    rc = create_from_attrs(p_oldattr, &new_attrs, &new_id, false, false);\n    if (rc) {\n        DisplayLog(LVL_CRIT, ENTRYPROC_TAG,\n                   \"Failed to create entry '%s' (status=%d)\", ATTR(p_oldattr,\n                                                                   fullpath),\n                   rc);\n        goto clean_entry;\n    }\n#ifdef _LUSTRE\n#ifndef _MDT_SPECIFIC_LOVEA\n    if (diff_arg->lovea_file) {\n        if (!ATTR_MASK_TEST(&new_attrs, fullpath)) {\n            DisplayLog(LVL_CRIT, ENTRYPROC_TAG,\n                       \"Fullpath needed to write into lovea_file\");\n        } else {\n            /* associate old stripe objects to new object id */\n            char buff[4096];\n            ssize_t sz = BuildLovEA(&new_id, p_oldattr, buff, 4096);\n            if (sz > 0) {\n                int i;\n                char output[4096];\n                char relpath[RBH_PATH_MAX];\n                char *curr = output;\n\n                if (relative_path(ATTR(&new_attrs, fullpath),\n                                  global_config.fs_path,\n                                  relpath) == 0) {\n\n                    /* write as a single line to avoid mixing them */\n                    curr += sprintf(curr, \"%s \", relpath);\n\n                    /* write output for set_lovea 
tool */\n                    for (i = 0; i < sz; i++)\n                        curr += sprintf(curr, \"%02hhx\", buff[i]);\n                    sprintf(curr, \"\\n\");\n\n                    fprintf(diff_arg->lovea_file, \"%s\", output);\n\n                    /* XXX overwrite stripe info in new attrs? */\n      //            ATTR(&new_attrs, stripe_info) = ATTR(p_oldattr, stripe_info);\n      //            ATTR(&new_attrs, stripe_items) = ATTR(p_oldattr, stripe_items);\n                }\n            }\n        }\n    }\n    if (diff_arg->fid_remap_file) {\n        /* print for each stripe: ost index, stripe_number, object id,\n         * old fid, new fid */\n        if (ATTR_MASK_TEST(p_oldattr, stripe_items)) {\n            int i;\n            stripe_items_t *pstripe = &ATTR(p_oldattr, stripe_items);\n            for (i = 0; i < pstripe->count; i++) {\n                fprintf(diff_arg->fid_remap_file,\n                        \"%u %u %\" PRIu64 \" \" DFID \" \" DFID \"\\n\",\n                        pstripe->stripe[i].ost_idx, i,\n                        pstripe->stripe[i].obj_id, PFID(p_id), PFID(&new_id));\n            }\n        }\n    }\n#endif\n#endif\n\n    /* insert the new entry to the DB */\n    attr_mask_unset_readonly(&new_attrs.attr_mask);\n    rc = ListMgr_Replace(lmgr, p_id, p_oldattr, &new_id, &new_attrs,\n                         true, true);\n    if (rc) {\n        DisplayLog(LVL_CRIT, ENTRYPROC_TAG, \"Failed to replace entry \"\n                   DFID \" with \" DFID \" (%s) in DB.\",\n                   PFID(p_id), PFID(&new_id), ATTR(&new_attrs, fullpath));\n        goto clean_entry;\n    }\n\n    return 0;\n\n clean_entry:\n    /* clean new entry (inconsistent) */\n    if (!strcmp(ATTR(p_oldattr, type), STR_TYPE_DIR))\n        rc = rmdir(ATTR(p_oldattr, fullpath));\n    else\n        rc = unlink(ATTR(p_oldattr, fullpath));\n    if (rc)\n        DisplayLog(LVL_EVENT, ENTRYPROC_TAG, \"cleanup: unlink/rmdir failed: %s\",\n                   
strerror(errno));\n\n    /* failure */\n    return -1;\n}\n\nstatic int EntryProc_report_rm(struct entry_proc_op_t *p_op, lmgr_t *lmgr)\n{\n    int rc;\n    const pipeline_stage_t *stage_info =\n        &entry_proc_pipeline[p_op->pipeline_stage];\n    lmgr_filter_t filter;\n    filter_value_t val;\n    rm_cb_func_t cb = NULL;\n\n    /* callback func for diff display */\n    if (!attr_mask_is_null(diff_mask))\n        cb = no_tag_cb;\n\n    /* If gc_entries or gc_names are not set,\n     * this is just a special op to wait for pipeline flush.\n     * => don't clean old entries */\n    if (p_op->gc_entries || p_op->gc_names) {\n        /* call MassRemove only if APPLY_DB is set */\n        if ((diff_arg->apply == APPLY_DB)\n            && !(pipeline_flags & RUNFLG_DRY_RUN)) {\n            lmgr_simple_filter_init(&filter);\n\n            if (p_op->gc_entries) {\n                val.value.val_uint = ATTR(&p_op->fs_attrs, md_update);\n                lmgr_simple_filter_add(&filter, ATTR_INDEX_md_update,\n                                       LESSTHAN_STRICT, val, 0);\n            }\n\n            if (p_op->gc_names) {\n                /* use the same timestamp for cleaning paths that have not been\n                 * seen during the scan */\n                val.value.val_uint = ATTR(&p_op->fs_attrs, md_update);\n                lmgr_simple_filter_add(&filter, ATTR_INDEX_path_update,\n                                       LESSTHAN_STRICT, val, 0);\n            }\n\n            /* partial scan: remove non-updated entries from a subset of the\n             * namespace */\n            if (ATTR_MASK_TEST(&p_op->fs_attrs, fullpath)) {\n                char tmp[RBH_PATH_MAX];\n                strcpy(tmp, ATTR(&p_op->fs_attrs, fullpath));\n                strcat(tmp, \"/*\");\n                val.value.val_str = tmp;\n                lmgr_simple_filter_add(&filter, ATTR_INDEX_fullpath, LIKE, val,\n                                       0);\n            }\n\n            /* 
force commit after this operation */\n            ListMgr_ForceCommitFlag(lmgr, true);\n\n            /* remove entries listed in previous scans */\n            if (has_deletion_policy())\n                /* @TODO fix for dirs, symlinks, ... */\n                rc = ListMgr_MassSoftRemove(lmgr, &filter, time(NULL), cb);\n            else\n                rc = ListMgr_MassRemove(lmgr, &filter, cb);\n\n            /* /!\\ TODO : entries must be removed from backend too */\n\n            lmgr_simple_filter_free(&filter);\n\n            if (rc)\n                DisplayLog(LVL_CRIT, ENTRYPROC_TAG,\n                           \"Error: ListMgr MassRemove operation failed with code %d.\",\n                           rc);\n        } else if (diff_arg->db_tag) {\n            /* list untagged entries (likely removed from filesystem) */\n            struct lmgr_iterator_t *it;\n            entry_id_t id;\n            attr_set_t attrs;\n\n            it = ListMgr_ListUntagged(lmgr, diff_arg->db_tag, NULL);\n\n            if (it == NULL) {\n                DisplayLog(LVL_CRIT, ENTRYPROC_TAG,\n                           \"Error: ListMgr_ListUntagged operation failed.\");\n            } else {\n                attr_mask_t getattr_mask = { 0 };\n\n                if (diff_arg->apply == APPLY_FS) {\n                    /* all possible info */\n                    getattr_mask.std = ~0;\n                    getattr_mask.status = ~0;\n                    getattr_mask.sm_info = ~0LL;\n                } else\n                    getattr_mask.std = ATTR_MASK_fullpath;\n\n                attrs.attr_mask = getattr_mask;\n                while ((rc = ListMgr_GetNext(it, &id, &attrs)) == DB_SUCCESS) {\n                    if (diff_arg->apply == APPLY_FS) {\n                        GString *attrnew = g_string_new(NULL);\n\n                        /* FS apply: reverse display */\n                        print_attrs(attrnew, &attrs, null_mask, 1);\n                        printf(\"++\" DFID \" 
%s\\n\", PFID(&id), attrnew->str);\n\n                        g_string_free(attrnew, TRUE);\n\n                        /* create or recover it (even without HSM mode) */\n#ifdef _HSM_LITE\n                        if (diff_arg->recov_from_backend) {\n                            /* try to recover the entry from the backend */\n                            DisplayReport(\"%srecover(%s)\",\n                                          (pipeline_flags & RUNFLG_DRY_RUN) ?\n                                          \"(dry-run) \" : \"\", ATTR(&attrs,\n                                                                  fullpath));\n                            /** FIXME use undelete function from status\n                             * manager */\n                            if (!(pipeline_flags & RUNFLG_DRY_RUN))\n                                hsm_recover(lmgr, &id, &attrs);\n                        } else\n#endif\n                        {\n                            /* create the file with no stripe and generate\n                             * lovea information to be set on MDT */\n                            DisplayReport(\"%screate(%s)\",\n                                          (pipeline_flags & RUNFLG_DRY_RUN) ?\n                                          \"(dry-run) \" : \"\", ATTR(&attrs,\n                                                                  fullpath));\n                            if (!(pipeline_flags & RUNFLG_DRY_RUN))\n                                std_recover(lmgr, &id, &attrs);\n                        }\n                    } else {    /* apply=db */\n\n                        if (ATTR_MASK_TEST(&attrs, fullpath))\n                            printf(\"--\" DFID \" path=%s\\n\", PFID(&id),\n                                   ATTR(&attrs, fullpath));\n                        else\n                            printf(\"--\" DFID \"\\n\", PFID(&id));\n                    }\n\n                    ListMgr_FreeAttrs(&attrs);\n\n                    /* 
prepare next call */\n                    attrs.attr_mask = getattr_mask;\n                }\n\n                ListMgr_CloseIterator(it);\n            }\n\n            /* can now destroy the tag */\n            rc = ListMgr_DestroyTag(lmgr, diff_arg->db_tag);\n            if (rc)\n                DisplayLog(LVL_CRIT, ENTRYPROC_TAG,\n                           \"Error: ListMgr_DestroyTag operation failed (rc=%d)\",\n                           rc);\n        }\n    }\n\n    /* must call callback function in any case, to unblock the scan */\n    if (p_op->callback_func) {\n        /* Perform callback to info collector */\n        p_op->callback_func(lmgr, p_op, p_op->callback_param);\n    }\n    // update last scan end time moved to callback\n\n    /* unset force commit flag */\n    ListMgr_ForceCommitFlag(lmgr, false);\n\n    rc = EntryProcessor_Acknowledge(p_op, -1, true);\n\n    if (rc)\n        DisplayLog(LVL_CRIT, ENTRYPROC_TAG, \"Error %d acknowledging stage %s.\",\n                   rc, stage_info->stage_name);\n\n    return rc;\n\n}\n"
  },
  {
    "path": "src/entry_processor/entry_proc_hash.c",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2008, 2009, 2010 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n/* A file ID (or Lustre FID) hash table. The hash table consists of a\n * fixed number of buckets, keyed on the ID, containing a linked list\n * of operation entries.\n */\n\n#ifdef HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n\n#include \"entry_proc_tools.h\"\n#include \"entry_proc_hash.h\"\n#include \"Memory.h\"\n#include \"rbh_logs.h\"\n#include \"rbh_cfg.h\"\n#include \"rbh_misc.h\"\n#include <pthread.h>\n#include <stdlib.h>\n#include <stdio.h>\n\n/* List of prime numbers of different magnitudes, to size the hash table with a\n * suitable value according to max element count */\nstatic const unsigned int primes[] = { 7919, 15187, 32119, 64157, 128021,\n                                       256019, 512009, 999983, 2000003, 4006007,\n                                       8004091, 16000463, 32000251, 0};\n\n/** Return a suitable hash table size for the given entry count */\nunsigned int max_count_to_hash_size(unsigned int max_count)\n{\n    const unsigned int *slot;\n\n    for (slot = primes; slot[1] != 0; slot++) {\n        if (max_count <= *slot)\n            return *slot;\n    }\n    /* last value */\n    return *slot;\n}\n\n/** Creates and return a new hash table */\nstruct id_hash *id_hash_init(const unsigned int hash_size, bool use_lock)\n{\n    unsigned int i;\n    struct id_hash *hash;\n\n    hash =\n        MemAlloc(sizeof(struct id_hash) +\n                 hash_size * sizeof(struct id_hash_slot));\n    if (!hash) {\n        DisplayLog(LVL_MAJOR, \"Entry_Hash\",\n                   
\"Can't allocate new hash table with %d slots\", hash_size);\n    }\n\n    for (i = 0; i < hash_size; i++) {\n        struct id_hash_slot *slot = &hash->slot[i];\n\n        if (use_lock)\n            pthread_mutex_init(&slot->lock, NULL);\n        rh_list_init(&slot->list);\n        slot->count = 0;\n    }\n\n    hash->hash_size = hash_size;\n\n    return hash;\n}\n\nvoid id_hash_stats(struct id_hash *id_hash, const char *log_str)\n{\n    unsigned int i, total, min, max;\n    double avg;\n\n    total = 0;\n    min = max = id_hash->slot[0].count;\n\n    for (i = 0; i < id_hash->hash_size; i++) {\n        const struct id_hash_slot *slot = &id_hash->slot[i];\n\n        total += slot->count;\n\n        if (slot->count < min)\n            min = slot->count;\n        if (slot->count > max)\n            max = slot->count;\n    }\n\n    avg = (double)total / (0.0 + id_hash->hash_size);\n    DisplayLog(LVL_MAJOR, \"STATS\",\n               \"%s: %u (hash min=%u/max=%u/avg=%.1f)\", log_str,\n               total, min, max, avg);\n\n#ifdef _DEBUG_HASH\n    /* more than 50% of difference between hash lists ! Dump all values. 
*/\n    if ((max - min) > ((max + 1) / 2)) {\n        unsigned int nb_min = 0;\n        unsigned int nb_max = 0;\n\n        for (i = 0; i < id_hash->hash_size; i++) {\n            const struct id_hash_slot *slot = &id_hash->slot[i];\n\n            if (slot->count == min)\n                nb_min++;\n            else if (slot->count == max)\n                nb_max++;\n        }\n        DisplayLog(LVL_MAJOR, \"DebugHash\",\n                   \"nb slots with min/max count: %u/%u (total=%u)\", nb_min,\n                   nb_max, id_hash->hash_size);\n    }\n#endif\n\n}\n\nvoid id_hash_dump(struct id_hash *id_hash, bool parent)\n{\n    unsigned int i;\n    entry_proc_op_t *op;\n\n    /* dump all values */\n    printf(\"==\\n\");\n    for (i = 0; i < id_hash->hash_size; i++) {\n        struct id_hash_slot *slot = &id_hash->slot[i];\n\n        P(slot->lock);\n        if (!parent) {\n            rh_list_for_each_entry(op, &slot->list, id_hash_list)\n                printf(\"[%u] \" DFID \"\\n\", i, PFID(&op->entry_id));\n        } else {\n            rh_list_for_each_entry(op, &slot->list, name_hash_list)\n                printf(\"[%u] \" DFID \"/%s:\" DFID \"\\n\", i,\n                       PFID(&ATTR(&op->fs_attrs, parent_id)),\n                       ATTR(&op->fs_attrs, name), PFID(&op->entry_id));\n        }\n        V(slot->lock);\n    }\n}\n"
  },
  {
    "path": "src/entry_processor/entry_proc_impl.c",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2008, 2009 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n/**\n * Implementation of pipeline management\n */\n#ifdef HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n\n#include \"entry_processor.h\"\n#include \"entry_proc_tools.h\"\n#include \"Memory.h\"\n#include \"rbh_logs.h\"\n#include \"rbh_misc.h\"\n#include \"list.h\"\n#include <semaphore.h>\n#include <pthread.h>\n#include <errno.h>\n#include <stdlib.h>\n\nstatic sem_t pipeline_token;\n\n/* each stage of the pipeline consist of the following information: */\ntypedef struct __list_by_stage__ {\n    struct rh_list_head entries;\n    unsigned int nb_threads;    /* number of threads working on this stage */\n    unsigned int nb_unprocessed_entries; /**< number of entries to be processed\n                                          * in this list */\n    unsigned int nb_current_entries;    /**< number of entries being processed\n                                          * in the list */\n    unsigned int nb_processed_entries;  /**< number of entries processed in this\n                                         * list */\n    unsigned long long total_processed; /**< total number of processed entries\n                                         * since start */\n    unsigned long long nb_batches;  /* number of batched steps */\n    unsigned long long total_batched_entries; /**< total number of entries\n                                               * processed as batches */\n    struct timeval total_processing_time;   /**< total amount of time for\n                                             * processing entries at this\n  
                                           * stage */\n    pthread_mutex_t stage_mutex;\n} list_by_stage_t;\n\n/* Note1: nb_current_entries + nb_unprocessed_entries + nb_processed_entries\n *         = nb entries at a given step */\n/* stages mutex must always be taken from lower stage to upper to avoid\n * deadlocks */\n\nstatic list_by_stage_t *pipeline = NULL;\n\n/* EXPORTED VARIABLES: current pipeline in operation */\npipeline_stage_t *entry_proc_pipeline = NULL;\npipeline_descr_t entry_proc_descr = { 0 };\n\nvoid *entry_proc_arg = NULL;\n\nstatic pthread_mutex_t work_avail_lock = PTHREAD_MUTEX_INITIALIZER;\nstatic pthread_cond_t work_avail_cond = PTHREAD_COND_INITIALIZER;\nunsigned int nb_waiting_threads = 0;\n\n/* termination mecanism  */\nstatic pthread_mutex_t terminate_lock = PTHREAD_MUTEX_INITIALIZER;\nstatic pthread_cond_t terminate_cond = PTHREAD_COND_INITIALIZER;\n\nstatic enum { NONE = 0, FLUSH = 1, BREAK = 2 } terminate_flag = NONE;\nstatic int nb_finished_threads = 0;\n\n/* forward declarations */\nstatic entry_proc_op_t **EntryProcessor_GetNextOp(int *count);\nstatic void print_op_stats(entry_proc_op_t *p_op, unsigned int stage,\n                           const char *what);\n\ntypedef struct worker_info__ {\n    unsigned int index;\n    pthread_t thread_id;\n    lmgr_t lmgr;\n} worker_info_t;\n\nstatic worker_info_t *worker_params = NULL;\n\n#ifdef _DEBUG_ENTRYPROC\nstatic void dump_entry_op(entry_proc_op_t *p_op)\n{\n#ifdef _HAVE_FID\n    if (p_op->entry_id_is_set)\n        printf(\"id=\" DFID \"\\n\", PFID(&p_op->entry_id));\n#endif\n    /* mask is always set, even if fs/db_attrs is not set */\n    if (ATTR_FSorDB_TEST(p_op, fullpath))\n        printf(\"path=%s\\n\", ATTR_FSorDB(p_op, fullpath));\n\n    if (p_op->extra_info.is_changelog_record)\n        printf(\"log_rec=%s\\n\",\n               changelog_type2str(p_op->extra_info.log_record.p_log_rec->\n                                  cr_type));\n\n    printf(\"stage=%u, being processed=%u, 
db_exists=%u, \"\n           \"id is referenced=%u, db_op_type=%u\\n\",\n         p_op->pipeline_stage, p_op->being_processed, p_op->db_exists,\n         p_op->id_is_referenced, p_op->db_op_type);\n    printf(\"start proc time=%u.%06u\\n\",\n           (unsigned int)p_op->timestamp.start_processing_time.tv_sec,\n           (unsigned int)p_op->timestamp.start_processing_time.tv_usec);\n    printf(\"next=%p, prev=%p\\n\", p_op->list.next, p_op->list.prev);\n}\n#endif\n\n/* worker thread for pipeline */\nstatic void *entry_proc_worker_thr(void *arg)\n{\n    entry_proc_op_t **list_op;\n    int rc;\n    worker_info_t *myinfo = (worker_info_t *) arg;\n    int count;\n\n    DisplayLog(LVL_FULL, ENTRYPROC_TAG, \"Starting pipeline worker thread #%u\",\n               myinfo->index);\n\n    /* create connection to database */\n    rc = ListMgr_InitAccess(&myinfo->lmgr);\n    if (rc) {\n        DisplayLog(LVL_CRIT, ENTRYPROC_TAG,\n                   \"Pipeline worker #%u could not connect to ListMgr. Exiting.\",\n                   myinfo->index);\n        exit(1);\n    }\n\n    while ((list_op = EntryProcessor_GetNextOp(&count)) != NULL) {\n        const pipeline_stage_t *stage_info =\n            &entry_proc_pipeline[list_op[0]->pipeline_stage];\n        if (count == 1) {\n            /* preferably call single entry function, if it exists */\n            if (stage_info->stage_function)\n                stage_info->stage_function(list_op[0], &myinfo->lmgr);\n            /* else, call batch function if it exists */\n            else if (stage_info->stage_batch_function)\n                stage_info->stage_batch_function(list_op, count, &myinfo->lmgr);\n            else\n                /* no function! 
*/\n                RBH_BUG(\"No function is defined for a pipeline step\");\n        } else if (count > 1) {\n            /* call batch function, if it exists */\n            if (stage_info->stage_batch_function)\n                stage_info->stage_batch_function(list_op, count, &myinfo->lmgr);\n            else\n                /* no batch function! */\n                RBH_BUG(\"Batched returned whereas no batch function is \"\n                        \"defined for this stage\");\n        } else\n            RBH_BUG(\"Empty operation list returned\");\n\n        MemFree(list_op);\n    }\n\n    if (!terminate_flag)\n        DisplayLog(LVL_CRIT, ENTRYPROC_TAG,\n                   \"Error: EntryProcessor_GetNextOp returned NULL but no termination signal has been received!!!\");\n\n    /* All operations have been processed. Now flushing DB operations and\n     * closing connection. */\n    ListMgr_CloseAccess(&myinfo->lmgr);\n\n    /* notify thread's termination */\n    P(terminate_lock);\n    nb_finished_threads++;\n    /* always notify waiting thread, so it writes a log about the number of\n     * waiting threads/ops */\n    pthread_cond_signal(&terminate_cond);\n    V(terminate_lock);\n\n    DisplayLog(LVL_DEBUG, ENTRYPROC_TAG,\n               \"Pipeline worker thread #%u terminated\", myinfo->index);\n\n    pthread_exit(NULL);\n    return NULL;\n\n}\n\n#ifdef _BENCH_PIPELINE\nstatic pipeline_descr_t bench_pipeline_descr = { 0 };   /* to be set */\n\nstatic pipeline_stage_t *bench_pipeline = NULL; /* to be allocated */\n\nstatic int EntryProc_noop(struct entry_proc_op_t *p_op, lmgr_t *lmgr)\n{\n    int rc;\n    /* last stage ? 
*/\n    if (p_op->pipeline_stage < bench_pipeline_descr.stage_count - 1)\n        rc = EntryProcessor_Acknowledge(p_op, p_op->pipeline_stage + 1, false);\n    else {\n        if (p_op->callback_func)\n            p_op->callback_func(lmgr, p_op, p_op->callback_param);\n\n        /* last stage, remove from the pipeline */\n        rc = EntryProcessor_Acknowledge(p_op, -1, true);\n    }\n    return rc;\n}\n\nstatic int mk_bench_pipeline(unsigned int stages)\n{\n    int i;\n    bench_pipeline_descr.stage_count = stages;\n    bench_pipeline = MemCalloc(stages, sizeof(pipeline_stage_t));\n    if (bench_pipeline == NULL)\n        return -ENOMEM;\n    for (i = 0; i < stages; i++) {\n        bench_pipeline[i].stage_index = i;\n        bench_pipeline[i].stage_name = \"stage_bench\";\n        bench_pipeline[i].stage_function = EntryProc_noop;\n        bench_pipeline[i].stage_batch_function = NULL;\n        bench_pipeline[i].test_batchable = NULL;\n        bench_pipeline[i].stage_flags = STAGE_FLAG_PARALLEL | STAGE_FLAG_SYNC;\n        bench_pipeline[i].max_thread_count = 0; /* unlimited */\n    }\n\n    if (stages > 2) {\n        bench_pipeline[1].stage_flags |= STAGE_FLAG_ID_CONSTRAINT;\n        bench_pipeline_descr.DB_APPLY = stages - 1;\n    }\n    return 0;\n}\n#endif\n\n/**\n *  Initialize entry processor pipeline\n */\nint EntryProcessor_Init(pipeline_flavor_e flavor, run_flags_t flags, void *arg)\n{\n    int i;\n\n    pipeline_flags = flags;\n    entry_proc_arg = arg;\n\n#ifdef _BENCH_PIPELINE\n    int rc;\n\n    /* in this case, arg points to stage count */\n    rc = mk_bench_pipeline(*((int *)arg));\n    if (rc)\n        return rc;\n    entry_proc_pipeline = bench_pipeline;   /* pointer */\n    entry_proc_descr = bench_pipeline_descr;    /* full copy */\n#else\n    switch (flavor) {\n    case STD_PIPELINE:\n        entry_proc_pipeline = std_pipeline; /* pointer */\n        entry_proc_descr = std_pipeline_descr;  /* full copy */\n        /* arg is a diff_mask */\n      
  break;\n    case DIFF_PIPELINE:\n        entry_proc_pipeline = diff_pipeline;    /* pointer */\n        entry_proc_descr = diff_pipeline_descr; /* full copy */\n        /* arg is a diff_arg */\n        break;\n    default:\n        DisplayLog(LVL_CRIT, ENTRYPROC_TAG, \"Pipeline flavor not supported\");\n        return EINVAL;\n    }\n#endif\n\n    DisplayLog(LVL_FULL, \"EntryProc_Config\", \"nb_threads=%u\",\n               entry_proc_conf.nb_thread);\n    DisplayLog(LVL_FULL, \"EntryProc_Config\", \"max_batch_size=%u\",\n               entry_proc_conf.max_batch_size);\n    for (i = 0; i < entry_proc_descr.stage_count; i++) {\n        if (entry_proc_pipeline[i].stage_flags & STAGE_FLAG_SEQUENTIAL)\n            DisplayLog(LVL_FULL, \"EntryProc_Config\", \"%s: sequential\",\n                       entry_proc_pipeline[i].stage_name);\n        else if (entry_proc_pipeline[i].stage_flags & STAGE_FLAG_PARALLEL)\n            DisplayLog(LVL_FULL, \"EntryProc_Config\", \"%s: parallel\",\n                       entry_proc_pipeline[i].stage_name);\n        else if (entry_proc_pipeline[i].stage_flags & STAGE_FLAG_MAX_THREADS)\n            DisplayLog(LVL_FULL, \"EntryProc_Config\", \"%s: %u threads max\",\n                       entry_proc_pipeline[i].stage_name,\n                       entry_proc_pipeline[i].max_thread_count);\n    }\n\n    pipeline =\n        (list_by_stage_t *) MemCalloc(entry_proc_descr.stage_count,\n                                      sizeof(list_by_stage_t));\n    if (!pipeline)\n        return ENOMEM;\n\n    if (entry_proc_conf.match_classes && policies.fileset_count == 0) {\n        DisplayLog(LVL_EVENT, ENTRYPROC_TAG,\n                   \"No fileclass defined in configuration, disabling fileclass matching.\");\n        entry_proc_conf.match_classes = false;\n    }\n\n    /* If a limit of pending operations is specified, initialize a token */\n    if (entry_proc_conf.max_pending_operations > 0)\n        sem_init(&pipeline_token, 0, 
entry_proc_conf.max_pending_operations);\n\n    for (i = 0; i < entry_proc_descr.stage_count; i++) {\n        memset(&pipeline[i], 0, sizeof(*pipeline));\n        rh_list_init(&pipeline[i].entries);\n#ifdef _DEBUG_ENTRYPROC\n        printf(\"entry list for stage %u: list=%p, next=%p, prev=%p\\n\",\n               i, &pipeline[i].entries, pipeline[i].entries.next,\n               pipeline[i].entries.prev);\n#endif\n        timerclear(&pipeline[i].total_processing_time);\n        pthread_mutex_init(&pipeline[i].stage_mutex, NULL);\n    }\n\n    /* init id constraint manager */\n    if (id_constraint_init())\n        return -1;\n\n    /* start workers */\n\n    worker_params =\n        (worker_info_t *) MemCalloc(entry_proc_conf.nb_thread,\n                                    sizeof(worker_info_t));\n    if (!worker_params)\n        return ENOMEM;\n\n    for (i = 0; i < entry_proc_conf.nb_thread; i++) {\n        worker_params[i].index = i;\n        if (pthread_create(&worker_params[i].thread_id,\n                           NULL, entry_proc_worker_thr, &worker_params[i]) != 0)\n        {\n            DisplayLog(LVL_CRIT, ENTRYPROC_TAG,\n                       \"Error: Could not start worker thread\");\n            return errno;\n        }\n    }\n\n#ifdef _DEBUG_ENTRYPROC\n    EntryProcessor_DumpCurrentStages();\n#endif\n\n    return 0;\n}\n\n/**\n * This function adds a new operation, allocated through\n * GetNewEntryProc_op(), to the queue. 
All fields have been set to 0\n * or a proper value.\n */\nvoid EntryProcessor_Push(entry_proc_op_t *p_entry)\n{\n    int i;\n    unsigned int insert_stage;\n\n    /* if a limit of pending operations is specified, wait for a token */\n    if (entry_proc_conf.max_pending_operations > 0)\n        sem_wait(&pipeline_token);\n\n    /* We must always insert it in the first stage, to keep\n     * the good ordering of entries.\n     * Except if all stages between stage0 and insert_stage are empty\n     */\n\n    /* by default, insert stage is entry stage\n     * except if there is a non empty stage before\n     */\n    insert_stage = p_entry->pipeline_stage;\n\n    /* take all locks for stage0 to insert_stage or first non empty stage */\n    for (i = 0; i <= p_entry->pipeline_stage; i++) {\n        P(pipeline[i].stage_mutex);\n\n        if (!rh_list_empty(&pipeline[i].entries)) {\n            insert_stage = i;\n            break;\n        }\n    }\n\n#ifdef _DEBUG_ENTRYPROC\n    if (insert_stage != p_entry->pipeline_stage)\n        printf(\"INSERT STAGE (%u) != PUSH STAGE(%u)\\n\", insert_stage,\n               p_entry->pipeline_stage);\n#endif\n\n    /* If the stage has an ID_CONSTRAINT and ID is set, register entry */\n    if ((entry_proc_pipeline[insert_stage].\n         stage_flags & STAGE_FLAG_ID_CONSTRAINT)\n        && p_entry->entry_id_is_set) {\n        id_constraint_register(p_entry, false);\n    }\n#ifdef _DEBUG_ENTRYPROC\n    printf(\"inserting to stage %u: list=%p, next=%p, prev=%p\\n\",\n           insert_stage, &pipeline[insert_stage].entries,\n           pipeline[insert_stage].entries.next,\n           pipeline[insert_stage].entries.prev);\n#endif\n\n    /* insert entry */\n    rh_list_add_tail(&p_entry->list, &pipeline[insert_stage].entries);\n\n    if (insert_stage < p_entry->pipeline_stage)\n        pipeline[insert_stage].nb_processed_entries++;\n    else\n        pipeline[insert_stage].nb_unprocessed_entries++;\n\n    /* release all lists lock */\n    
for (i = 0; i <= insert_stage; i++)\n        V(pipeline[i].stage_mutex);\n\n    /* there is a new entry to be processed ! (signal only if threads\n     * are waiting) */\n    P(work_avail_lock);\n    if (nb_waiting_threads > 0)\n        pthread_cond_signal(&work_avail_cond);\n    V(work_avail_lock);\n\n}   /* EntryProcessor_Push */\n\n/*\n * Move terminated operations to next stage.\n * The source stage is locked.\n */\nstatic int move_stage_entries(const unsigned int source_stage_index)\n{\n    entry_proc_op_t *p_first = NULL;\n    entry_proc_op_t *p_last = NULL;\n    entry_proc_op_t *p_curr = NULL;\n    int count = 0;\n    int i;\n    unsigned int pipeline_stage_min;\n    unsigned int insert_stage;\n    struct rh_list_head rem;\n    list_by_stage_t *pl;\n\n    /* nothing to do if we are already at last step */\n    if (source_stage_index >= entry_proc_descr.stage_count - 1)\n        return 0;\n\n    pl = &pipeline[source_stage_index];\n\n    /* is there at least 1 entry to be moved ? */\n    if (rh_list_empty(&pl->entries))\n        goto out;\n\n    p_first = rh_list_first_entry(&pl->entries, entry_proc_op_t, list);\n    if (p_first->being_processed\n        || (p_first->pipeline_stage <= source_stage_index))\n        goto out;\n    pipeline_stage_min = p_first->pipeline_stage;\n\n    p_last = p_first;\n    count = 1;\n\n    /* check next entries  */\n    for (p_curr = rh_list_entry(p_first->list.next, entry_proc_op_t, list);\n         &p_curr->list != &pl->entries;\n         p_curr = rh_list_entry(p_curr->list.next, entry_proc_op_t, list)) {\n        if (!p_curr->being_processed\n            && (p_curr->pipeline_stage > source_stage_index)) {\n            if (p_curr->pipeline_stage < pipeline_stage_min)\n                pipeline_stage_min = p_curr->pipeline_stage;\n            count++;\n            p_last = p_curr;\n        } else {\n            break;\n        }\n    }\n\n    /* remove entries from current list */\n    rh_list_cut_head(&pl->entries, 
&p_last->list, &rem);\n\n    /* change entry count */\n    pl->nb_processed_entries -= count;\n\n    /* by default, insert stage is pipeline_stage_min\n     * except if there is a non empty stage before\n     */\n    insert_stage = pipeline_stage_min;\n\n    /* take all locks from next stage to insert_stage\n     * or first non-empty stage */\n    for (i = source_stage_index + 1; i <= pipeline_stage_min; i++) {\n        P(pipeline[i].stage_mutex);\n\n        /* make sure this stage has correctly been flushed */\n        if (!rh_list_empty(&pipeline[i].entries))\n            move_stage_entries(i);\n\n        if (!rh_list_empty(&pipeline[i].entries)) {\n            insert_stage = i;\n            break;\n        }\n    }\n\n#ifdef _DEBUG_ENTRYPROC\n    if (insert_stage != pipeline_stage_min) {\n        printf(\"Entries to be moved: %u\\n\", count);\n        printf(\"INSERT STAGE (%u) != NEXT STAGE MIN(%u)\\n\", insert_stage,\n               pipeline_stage_min);\n        printf(\"STAGE[%u].FIRST=%s, stage=%u\\n\", insert_stage,\n               ATTR(&rh_list_first_entry\n                    (&pipeline[insert_stage].entries, entry_proc_op_t,\n                     list)->fs_attrs, fullpath),\n               rh_list_first_entry(&pipeline[insert_stage].entries,\n                                   entry_proc_op_t, list)->pipeline_stage);\n        printf(\"STAGE[%u].LAST=%s, stage=%u\\n\", insert_stage,\n               ATTR(&rh_list_last_entry\n                    (&pipeline[insert_stage].entries, entry_proc_op_t,\n                     list)->fs_attrs, fullpath),\n               rh_list_last_entry(&pipeline[insert_stage].entries,\n                                  entry_proc_op_t, list)->pipeline_stage);\n    }\n#endif\n\n    /* If the stage has an ID_CONSTRAINT, register entries */\n    if (entry_proc_pipeline[insert_stage].\n        stage_flags & STAGE_FLAG_ID_CONSTRAINT) {\n        rh_list_for_each_entry(p_curr, &rem, list) {\n#ifdef _DEBUG_ENTRYPROC\n            if 
(p_curr->id_is_referenced\n                || (!p_curr->entry_id_is_set && p_curr->pipeline_stage != 6)) {\n                printf(\"moving entry %p \" DFID\n                       \" from stage %u to %u, id is ref? %u, id_is_set? %u\\n\",\n                       p_curr, PFID(&p_curr->entry_id), source_stage_index,\n                       insert_stage, p_curr->id_is_referenced ? 0 : 1,\n                       p_curr->entry_id_is_set ? 0 : 1);\n            }\n#endif\n            if (!p_curr->id_is_referenced && p_curr->entry_id_is_set) {\n                id_constraint_register(p_curr, false);\n            }\n        }\n    }\n\n#ifdef _DEBUG_ENTRYPROC\n    DisplayLog(LVL_FULL, ENTRYPROC_TAG, \"move_stage_entries: insert stage %u\",\n               insert_stage);\n#endif\n\n    /* check update info depending on this list */\n    /* TODO: can we merge that with the previous loop ? */\n    rh_list_for_each_entry(p_curr, &rem, list) {\n        /* no need to take a lock to check 'p_curr->pipeline_stage',\n         * because if we moved this entry, this means it is not in use.\n         * And no thread can process it for now because the list is locked.\n         */\n        if (insert_stage < p_curr->pipeline_stage)\n            pipeline[insert_stage].nb_processed_entries++;\n        else\n            pipeline[insert_stage].nb_unprocessed_entries++;\n    }\n\n    /* insert entry list */\n    rh_list_splice_tail(&pipeline[insert_stage].entries, &rem);\n\n    /* release all lists lock (except the source one) */\n    for (i = source_stage_index + 1; i <= insert_stage; i++)\n        V(pipeline[i].stage_mutex);\n\n out:\n    return count;\n}   /* move_stage_entries */\n\n/**\n * Return an entry to be processed.\n * This entry is tagged \"being_processed\" and stage info is updated.\n * @param p_empty Output Boolean. 
In the case no entry is returned,\n *        this indicates if it is because the pipeline is empty.\n */\nstatic entry_proc_op_t **next_work_avail(bool *p_empty, int *op_count)\n{\n    entry_proc_op_t *p_curr;\n    int i;\n    int tot_entries = 0;\n\n    if (terminate_flag == BREAK)\n        return NULL;\n\n    *p_empty = true;\n\n    /* check every stage from the last to the first */\n    for (i = entry_proc_descr.stage_count - 1; i >= 0; i--) {\n        list_by_stage_t *pl = &pipeline[i];\n\n        /* entries have not been processed at this stage. */\n        P(pl->stage_mutex);\n\n        /* Accumulate the number of entries in the upper stages. */\n        tot_entries +=\n            pl->nb_current_entries + pl->nb_unprocessed_entries +\n            pl->nb_processed_entries;\n\n        if (pl->nb_unprocessed_entries == 0) {\n            V(pl->stage_mutex);\n#ifdef _DEBUG_ENTRYPROC\n            printf(\"Stage[%u] - thread %#lx - no waiting entries\\n\", i,\n                   pthread_self());\n#endif\n            continue;\n        }\n\n        if (entry_proc_pipeline[i].stage_flags & STAGE_FLAG_SEQUENTIAL) {\n            /*\n             * If there is already an operation being processed,\n             * nothing can be done at this stage.\n             */\n            if (pl->nb_threads != 0) {\n                *p_empty = false;\n                V(pl->stage_mutex);\n#ifdef _DEBUG_ENTRYPROC\n                printf(\"Stage[%u] - thread %#lx - a thread is already working \"\n                       \"on this sequential stage\\n\", i, pthread_self());\n#endif\n                continue;\n            }\n\n            /* In case of a sequential operation, the only entry that can be\n             * processed is the first entry in this stage.\n             */\n            rh_list_for_each_entry(p_curr, &pl->entries, list) {\n                /* the pipeline is not empty */\n                *p_empty = false;\n\n                if (p_curr->pipeline_stage == i) {\n\n         
           /* satisfies ID_CONSTRAINT ?  */\n\n                    if ((entry_proc_pipeline[i].\n                         stage_flags & STAGE_FLAG_ID_CONSTRAINT)\n                        && p_curr->entry_id_is_set) {\n                        if (!id_constraint_is_first_op(p_curr)) {\n                            DisplayLog(LVL_FULL, ENTRYPROC_TAG,\n                                       \"=============== Not the first unprocessed operation for this id ============\");\n                            /* This is not the first unprocessed operation for\n                             * this id */\n                            V(pl->stage_mutex);\n                            return NULL;\n                        }\n                        /* else: entry can be added */\n                    }\n\n                    /* sanity check */\n                    if (p_curr->being_processed) {\n                        DisplayLog(LVL_MAJOR, ENTRYPROC_TAG,\n                                   \"Error: INCONSISTENCY: nb_threads running this step is 0 whereas an entry is being processed !!!\");\n                        V(pl->stage_mutex);\n                        return NULL;\n                    }\n\n                    /* tag the entry and update stage info */\n                    pl->nb_unprocessed_entries--;\n                    pl->nb_current_entries++;\n                    pl->nb_threads++;\n                    p_curr->being_processed = 1;\n\n                    V(pl->stage_mutex);\n\n                    entry_proc_op_t **listop =\n                        MemAlloc(sizeof(entry_proc_op_t *));\n                    if (listop) {\n                        *listop = p_curr;\n                        *op_count = 1;\n                    }\n                    return listop;\n                }\n            }\n        }\n        /* @TODO check configuration for max threads */\n        else if ((entry_proc_pipeline[i].stage_flags & STAGE_FLAG_MAX_THREADS)\n                 || 
(entry_proc_pipeline[i].stage_flags\n                     & STAGE_FLAG_PARALLEL)) {\n            if ((entry_proc_pipeline[i].max_thread_count != 0)\n                && (pl->nb_threads\n                    >= entry_proc_pipeline[i].max_thread_count)) {\n                *p_empty = false;\n                /* thread quota for this stage is at maximum */\n                V(pl->stage_mutex);\n\n#ifdef _DEBUG_ENTRYPROC\n                printf(\"Stage[%u] - thread %#lx - thread quota reached (%u)\\n\",\n                       i, pthread_self(),\n                       entry_proc_pipeline[i].max_thread_count);\n#endif\n                continue;\n            }\n\n            if (entry_proc_pipeline[i].stage_flags & STAGE_FLAG_FORCE_SEQ) {\n                /* One thread is processing an operation, and that one\n                 * must be the only one in this stage. */\n                V(pl->stage_mutex);\n                continue;\n            }\n\n            /* check entries at this stage */\n            rh_list_for_each_entry(p_curr, &pl->entries, list) {\n                /* the pipeline is not empty */\n                *p_empty = false;\n\n                /* Special case when the op doesn't have an ID, but\n                 * the stage has a constraint. */\n                if (!p_curr->entry_id_is_set &&\n                    entry_proc_pipeline[i].\n                    stage_flags & STAGE_FLAG_ID_CONSTRAINT) {\n                    /* Do not process past this entry, unless it's the\n                     * first in list and the rest of the pipeline is\n                     * empty. */\n                    if (p_curr ==\n                        rh_list_first_entry(&pl->entries, entry_proc_op_t, list)\n                        && tot_entries - pl->nb_unprocessed_entries == 0) {\n                        /* This is the first entry, and there is no\n                         * other entry being processed in this or the\n                         * upper stages. 
So we can process it */\n                        entry_proc_pipeline[i].stage_flags |=\n                            STAGE_FLAG_FORCE_SEQ;\n                    } else {\n                        break;\n                    }\n                }\n\n                /* manage id constraints (except for special operations) */\n                if ((entry_proc_pipeline[i].\n                     stage_flags & STAGE_FLAG_ID_CONSTRAINT)\n                    && p_curr->entry_id_is_set) {\n                    /* skip entries that are already at superior stage\n                     * (or processed) */\n                    if ((p_curr->pipeline_stage > i)\n                        || (p_curr->being_processed)) {\n                        if (!p_curr->id_is_referenced) {\n                            DisplayLog(LVL_MAJOR, ENTRYPROC_TAG,\n                                       \"WARNING: Unregistered operation at higher stage\");\n                            id_constraint_register(p_curr, false);\n                        }\n#ifdef _DEBUG_ENTRYPROC\n                        printf(\"Stage[%u] - thread %#lx - \"\n                               \"entry at higher stage (%u) or is being \"\n                               \"processed (%s) \\n\",\n                                i, pthread_self(), p_curr->pipeline_stage,\n                                bool2str(p_curr->being_processed));\n#endif\n                        continue;\n                    }\n\n                    /* is this the first operation for this id ? 
*/\n                    if (!id_constraint_is_first_op(p_curr))\n                        continue;\n                } else if (p_curr->being_processed\n                           || p_curr->pipeline_stage > i) {\n                    /* check next entry */\n#ifdef _DEBUG_ENTRYPROC\n                    printf(\"Stage[%u] - thread %#lx - \"\n                           \"entry being processed or at higher stage\\n\",\n                           i, pthread_self());\n#endif\n                    continue;\n                }\n\n                /* this entry can be processed */\n                /* tag the entry and update stage info */\n                pl->nb_unprocessed_entries--;\n                pl->nb_current_entries++;\n                pl->nb_threads++;\n                p_curr->being_processed = 1;\n\n                entry_proc_op_t **listop =\n                    MemCalloc(entry_proc_conf.max_batch_size,\n                              sizeof(entry_proc_op_t *));\n                if (!listop)\n                    return NULL;\n                listop[0] = p_curr;\n                *op_count = 1;\n\n                /* check if this stage is batchable */\n                if (entry_proc_conf.max_batch_size > 1\n                    && entry_proc_pipeline[i].test_batchable != NULL\n                    && entry_proc_pipeline[i].stage_batch_function != NULL) {\n                    entry_proc_op_t *p_next;\n                    attr_mask_t batch_mask = p_curr->fs_attrs.attr_mask;\n\n                    rh_list_for_each_entry_after(p_next, &pl->entries, p_curr,\n                                                 list) {\n                        if (*op_count >= entry_proc_conf.max_batch_size)\n                            break;\n                        else if (p_next->being_processed\n                                 || (p_next->pipeline_stage != i))\n                            /* entry is already beeing processed or is at\n                             * a different stage */\n     
                       break;\n\n                        if (entry_proc_pipeline[i].\n                            test_batchable(p_curr, p_next, &batch_mask)) {\n                            pl->nb_unprocessed_entries--;\n                            pl->nb_current_entries++;\n                            p_next->being_processed = 1;\n\n                            listop[*op_count] = p_next;\n                            (*op_count)++;\n                        } else\n                            /* stop at first non-batchable entry */\n                            break;\n                    }\n                }\n\n                V(pl->stage_mutex);\n\n                return listop;\n            }\n\n        } else {\n            /* unspecified stage flag */\n            DisplayLog(LVL_CRIT, ENTRYPROC_TAG,\n                       \"Error: stage flag not specified !!!\");\n            V(pl->stage_mutex);\n            return NULL;\n        }\n\n        /* end of current stage */\n        V(pl->stage_mutex);\n\n    }   /*end for */\n\n#ifdef _DEBUG_ENTRYPROC\n    printf(\"empty=%d\\n\", *p_empty);\n#endif\n\n    /* nothing found */\n    return NULL;\n}\n\n/**\n * This function returns the next operation to be processed\n * according to pipeline stage/ordering constrains.\n */\nstatic entry_proc_op_t **EntryProcessor_GetNextOp(int *count)\n{\n    bool is_empty;\n    entry_proc_op_t **list_op;\n    int i;\n    *count = 0;\n\n    P(work_avail_lock);\n    nb_waiting_threads++;\n\n    while ((list_op = next_work_avail(&is_empty, count)) == NULL) {\n        if ((terminate_flag == BREAK)\n            || ((terminate_flag == FLUSH) && is_empty)) {\n            nb_waiting_threads--;\n\n            /* maybe other threads can also terminate ? 
*/\n            if (nb_waiting_threads > 0)\n                pthread_cond_signal(&work_avail_cond);\n\n            V(work_avail_lock);\n\n            return NULL;\n        }\n#ifdef _DEBUG_ENTRYPROC\n        DisplayLog(LVL_FULL, ENTRYPROC_TAG, \"Thread %#lx: no work available\",\n                   pthread_self());\n#endif\n        pthread_cond_wait(&work_avail_cond, &work_avail_lock);\n    }\n\n    nb_waiting_threads--;\n\n    /* maybe other entries can be processed after this one ? */\n    if (nb_waiting_threads > 0)\n        pthread_cond_signal(&work_avail_cond);\n\n    V(work_avail_lock);\n\n    gettimeofday(&(list_op[0]->timestamp.start_processing_time), NULL);\n    for (i = 1; i < *count; i++)\n        list_op[i]->timestamp.start_processing_time =\n            list_op[0]->timestamp.start_processing_time;\n\n    return list_op;\n}\n\n/**\n * Release an entry op.\n */\nvoid EntryProcessor_Release(entry_proc_op_t *p_op)\n{\n    /* @todo free entry_info */\n\n    /* free specific info */\n\n    if (p_op->extra_info_is_set && (p_op->extra_info_free_func != NULL)) {\n        p_op->extra_info_free_func(&p_op->extra_info);\n    }\n\n    ListMgr_FreeAttrs(&p_op->fs_attrs);\n    ListMgr_FreeAttrs(&p_op->db_attrs);\n\n    /* free the memory */\n    MemFree(p_op);\n}\n\n/**\n * Acknownledge a batch of operations.\n */\nint EntryProcessor_AcknowledgeBatch(entry_proc_op_t **ops, unsigned int count,\n                                    unsigned int next_stage, bool remove)\n{\n    const unsigned int curr_stage = ops[0]->pipeline_stage;\n    list_by_stage_t *pl = &pipeline[curr_stage];\n    int nb_moved;\n    struct timeval now, diff;\n    int i;\n\n    gettimeofday(&now, NULL);\n    timersub(&now, &ops[0]->timestamp.start_processing_time, &diff);\n\n    /* lock current stage */\n    P(pl->stage_mutex);\n\n    /* update stats */\n    pl->nb_processed_entries += count;\n    pl->nb_current_entries -= count;\n    pl->total_processed += count;\n\n    if (count > 1) {\n        
pl->nb_batches++;\n        pl->total_batched_entries += count;\n    }\n    pl->nb_threads--;\n    timeradd(&diff, &pl->total_processing_time, &pl->total_processing_time);\n\n    for (i = 0; i < count; i++) {\n        /* sanity check */\n        if ((!remove) && (ops[i]->pipeline_stage >= next_stage)) {\n            DisplayLog(LVL_CRIT, ENTRYPROC_TAG, \"CRITICAL: entry is already\"\n                       \" in a higher pipeline stage %u >= %u !!!\",\n                       ops[i]->pipeline_stage, next_stage);\n\n            V(pl->stage_mutex);\n            RBH_BUG(\"Entry is already in a higher pipeline stage.\");\n        }\n\n        /* update their status */\n        ops[i]->being_processed = 0;\n        ops[i]->pipeline_stage = next_stage;\n\n        /* remove the entry, if it must be */\n        if (remove) {\n            /* update stage info. */\n            pl->nb_processed_entries--;\n            rh_list_del_init(&ops[i]->list);\n\n            /* remove entry constraints on this id */\n            if (ops[i]->id_is_referenced)\n                id_constraint_unregister(ops[i]);\n        }\n    }\n\n    /* We're done with the entries in that stage. 
*/\n\n    /* check if entries are to be moved from this stage */\n    nb_moved = move_stage_entries(curr_stage);\n\n    /* unlock current stage */\n    V(pl->stage_mutex);\n\n    /* There may have some work to do in any case:\n     * - if current entry has been removed (it may block other operations)\n     * - if entries have moved (they are available for next stage)\n     * - if the current step had a limited number of threads\n     * Note: if operation is sequential, it should be the last of its step\n     * so it must have been moved.\n     */\n    /* @TODO check configuration for max_thread_count */\n    if (remove || (nb_moved > 0)\n        || (entry_proc_pipeline[curr_stage].max_thread_count != 0)) {\n        P(work_avail_lock);\n        if (nb_waiting_threads > 0)\n            pthread_cond_signal(&work_avail_cond);\n        V(work_avail_lock);\n    }\n\n    /* free entry resources if asked */\n    if (remove) {\n        for (i = 0; i < count; i++) {\n            /* If a limit of pending operations is specified, release a token */\n            if (entry_proc_conf.max_pending_operations > 0)\n                sem_post(&pipeline_token);\n\n            EntryProcessor_Release(ops[i]);\n        }\n    }\n\n    return 0;\n}\n\n/**\n * Advise that the entry is ready for next step of the pipeline.\n * @param next_stage The next stage to be performed for this entry\n * @param remove This flag indicates that the entry must be removed\n *        from pipeline (basically after the last step).\n */\nint EntryProcessor_Acknowledge(entry_proc_op_t *p_op, unsigned int next_stage,\n                               bool remove)\n{\n    return EntryProcessor_AcknowledgeBatch(&p_op, 1, next_stage, remove);\n}\n\nstatic const char *entry_status_str(entry_proc_op_t *p_op, unsigned int stage)\n{\n    if (p_op->being_processed)\n        return \"processing\";\n    else if (p_op->pipeline_stage < stage)\n        return \"ERROR: entry at previous stage!!!\";\n    else if 
(p_op->pipeline_stage == stage)\n        return \"waiting\";\n    else if (p_op->pipeline_stage > stage)\n        return \"done\";\n    else\n        return \"ERROR: uncovered case /!\\\\\";\n}\n\nstatic void print_op_stats(entry_proc_op_t *p_op, unsigned int stage,\n                           const char *what)\n{\n#ifdef HAVE_CHANGELOGS\n    if (p_op->extra_info.is_changelog_record) {\n        DisplayLog(LVL_EVENT, \"STATS\",\n                   \"%-14s: %s: changelog record #%llu, fid=\" DFID \", status=%s\",\n                   strchr(entry_proc_pipeline[stage].stage_name, '_') + 1, what,\n                   p_op->extra_info.log_record.p_log_rec->cr_index,\n                   PFID(&p_op->extra_info.log_record.p_log_rec->cr_tfid),\n                   entry_status_str(p_op, stage));\n    } else\n#endif\n    if (ATTR_FSorDB_TEST(p_op, fullpath)) {\n        DisplayLog(LVL_EVENT, \"STATS\", \"%-14s: %s: %s, status=%s\",\n                   strchr(entry_proc_pipeline[stage].stage_name, '_') + 1, what,\n                   ATTR_FSorDB(p_op, fullpath), entry_status_str(p_op, stage));\n    } else if (p_op->entry_id_is_set) {\n        DisplayLog(LVL_EVENT, \"STATS\", \"%-14s: %s: \" DFID \", status=%s\",\n                   strchr(entry_proc_pipeline[stage].stage_name, '_') + 1, what,\n                   PFID(&p_op->entry_id), entry_status_str(p_op, stage));\n    } else\n        DisplayLog(LVL_EVENT, \"STATS\", \"%-14s: %s: special op, status=%s\",\n                   strchr(entry_proc_pipeline[stage].stage_name, '_') + 1, what,\n                   entry_status_str(p_op, stage));\n}\n\nvoid EntryProcessor_DumpCurrentStages(void)\n{\n    unsigned int i;\n    double tpe = 0.0;\n    bool is_pending_op = false;\n    unsigned int nb_get, nb_ins, nb_upd, nb_rm;\n\n    if (!entry_proc_pipeline)\n        return; /* not initialized */\n\n    /* no locks here, because it's just for information */\n\n    if (TestDisplayLevel(LVL_MAJOR)) {\n\n        DisplayLog(LVL_MAJOR, \"STATS\",\n 
                  \"==== EntryProcessor Pipeline Stats ===\");\n        DisplayLog(LVL_MAJOR, \"STATS\", \"Idle threads: %u\", nb_waiting_threads);\n\n        id_constraint_stats();\n\n        DisplayLog(LVL_MAJOR, \"STATS\",\n                   \"%-18s | Wait | Curr | Done |     Total | ms/op |\", \"Stage\");\n\n        for (i = 0; i < entry_proc_descr.stage_count; i++) {\n            P(pipeline[i].stage_mutex);\n\n            if (pipeline[i].total_processed != 0)\n                tpe =\n                    ((1000.0 * pipeline[i].total_processing_time.tv_sec) +\n                     (1E-3 * pipeline[i].total_processing_time.tv_usec)) /\n                    (double)(pipeline[i].total_processed);\n            else\n                tpe = 0.0;\n\n            if (pipeline[i].nb_batches > 0)\n                DisplayLog(LVL_MAJOR, \"STATS\", \"%2u: %-14s |%5u | %4u | %4u | %9llu | %5.2f | %.2f%% batched (avg batch size: %.1f)\",\n                           i, strchr(entry_proc_pipeline[i].stage_name, '_') + 1, /* removes STAGE_ */\n                           pipeline[i].nb_unprocessed_entries,\n                           pipeline[i].nb_current_entries,\n                           pipeline[i].nb_processed_entries,\n                           pipeline[i].total_processed, tpe,\n                           pipeline[i].total_processed ? 
100.0 *\n                           (float)pipeline[i].total_batched_entries /\n                           (float)pipeline[i].total_processed : 0.0,\n                           (float)pipeline[i].total_batched_entries /\n                           (float)pipeline[i].nb_batches);\n            else\n                DisplayLog(LVL_MAJOR, \"STATS\", \"%2u: %-14s |%5u | %4u | %4u | %9llu | %5.2f |\",\n                           i, strchr(entry_proc_pipeline[i].stage_name, '_') + 1, /* removes STAGE_ */\n                           pipeline[i].nb_unprocessed_entries,\n                           pipeline[i].nb_current_entries,\n                           pipeline[i].nb_processed_entries,\n                           pipeline[i].total_processed, tpe);\n\n            /* reset stats so the displayed performance is per period */\n            memset(&pipeline[i].total_processing_time, 0,\n                   sizeof(pipeline[i].total_processing_time));\n            pipeline[i].total_processed = 0;\n            pipeline[i].total_batched_entries = 0;\n            pipeline[i].nb_batches = 0;\n\n            V(pipeline[i].stage_mutex);\n\n            if (!rh_list_empty(&pipeline[i].entries))\n                is_pending_op = true;\n        }\n        nb_get = nb_ins = nb_upd = nb_rm = 0;\n        for (i = 0; i < entry_proc_conf.nb_thread; i++) {\n            if (worker_params) {\n                nb_get += worker_params[i].lmgr.nbop[OPIDX_GET];\n                nb_ins += worker_params[i].lmgr.nbop[OPIDX_INSERT];\n                nb_upd += worker_params[i].lmgr.nbop[OPIDX_UPDATE];\n                nb_rm += worker_params[i].lmgr.nbop[OPIDX_RM];\n            }\n        }\n        DisplayLog(LVL_MAJOR, \"STATS\", \"DB ops: get=%u/ins=%u/upd=%u/rm=%u\",\n                   nb_get, nb_ins, nb_upd, nb_rm);\n    }\n\n    if (TestDisplayLevel(LVL_EVENT)) {\n        if (is_pending_op) {\n            DisplayLog(LVL_EVENT, \"STATS\", \"--- Pipeline stage details ---\");\n            /* pipeline 
stage details */\n            for (i = 0; i < entry_proc_descr.stage_count; i++) {\n                P(pipeline[i].stage_mutex);\n                if (!rh_list_empty(&pipeline[i].entries)) {\n                    entry_proc_op_t *op1, *op2;\n                    op1 =\n                        rh_list_first_entry(&pipeline[i].entries,\n                                            entry_proc_op_t, list);\n                    op2 =\n                        rh_list_last_entry(&pipeline[i].entries,\n                                           entry_proc_op_t, list);\n\n                    if (op1 != op2) {\n                        print_op_stats(op1, i, \"first\");\n                        print_op_stats(op2, i, \"last\");\n                    } else\n                        print_op_stats(op1, i, \"(1 op)\");\n                }\n                V(pipeline[i].stage_mutex);\n\n            }   /* end for */\n        }   /* end if pending op */\n    }\n}\n\nentry_proc_op_t *EntryProcessor_Get(void)\n{\n    /* allocate a new pipeline entry */\n    entry_proc_op_t *p_entry;\n\n    p_entry = (entry_proc_op_t *) MemCalloc(1, sizeof(entry_proc_op_t));\n\n    if (!p_entry)\n        return NULL;\n\n    /* nothing is set */\n    ATTR_MASK_INIT(&p_entry->db_attrs);\n    ATTR_MASK_INIT(&p_entry->fs_attrs);\n\n    extra_info_init(&p_entry->extra_info);\n\n    return p_entry;\n}\n\n/* helper for counting the number of operations in pipeline */\nstatic unsigned int count_nb_ops(void)\n{\n    int i;\n    unsigned int total = 0;\n\n    for (i = 0; i < entry_proc_descr.stage_count; i++) {\n        total += pipeline[i].nb_current_entries\n            + pipeline[i].nb_unprocessed_entries\n            + pipeline[i].nb_processed_entries;\n    }\n\n    return total;\n}\n\n/**\n * Terminate EntryProcessor\n * \\param flush_ops: wait the queue to be flushed\n */\nint EntryProcessor_Terminate(bool flush_ops)\n{\n\n    P(terminate_lock);\n\n    /* set termination flag (if not already set) */\n    if 
(flush_ops) {\n        if (terminate_flag < FLUSH)\n            terminate_flag = FLUSH;\n    } else {\n        if (terminate_flag < BREAK)\n            terminate_flag = BREAK;\n    }\n\n    DisplayLog(LVL_DEBUG, ENTRYPROC_TAG, \"EntryProcessor shutdown mode: %s\",\n               terminate_flag == BREAK ? \"BREAK\" : \"FLUSH\");\n\n    /* force idle thread to wake up */\n    pthread_cond_broadcast(&work_avail_cond);\n\n    /* wait for all workers to process all pipeline entries and terminate */\n    while (nb_finished_threads < entry_proc_conf.nb_thread) {\n        if (terminate_flag == FLUSH)\n            DisplayLog(LVL_VERB, ENTRYPROC_TAG,\n                       \"Waiting for entry processor pipeline flush: still %u operations to be done, %u threads running\",\n                       count_nb_ops(),\n                       entry_proc_conf.nb_thread - nb_finished_threads);\n        else if (terminate_flag == BREAK)\n            DisplayLog(LVL_VERB, ENTRYPROC_TAG,\n                       \"Waiting for current operations to end: still %u threads running\",\n                       entry_proc_conf.nb_thread - nb_finished_threads);\n\n        pthread_cond_wait(&terminate_cond, &terminate_lock);\n    }\n\n    V(terminate_lock);\n\n    DisplayLog(LVL_EVENT, ENTRYPROC_TAG, \"Pipeline successfully flushed\");\n\n    EntryProcessor_DumpCurrentStages();\n\n    return 0;\n}\n\n/*\n * A stage was blocked waiting for an operation to get its FID. This\n * is now done, so unblock the stage.\n */\nvoid EntryProcessor_Unblock(int stage)\n{\n    P(pipeline[stage].stage_mutex);\n\n    /* and unset the block. */\n    entry_proc_pipeline[stage].stage_flags &= ~STAGE_FLAG_FORCE_SEQ;\n\n    V(pipeline[stage].stage_mutex);\n}\n"
  },
  {
    "path": "src/entry_processor/entry_proc_tools.c",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2008-2015 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n/**\n * Misc tools for managing entry processor pipeline\n */\n\n#ifdef HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n\n#include \"entry_proc_tools.h\"\n#include \"entry_proc_hash.h\"\n#include \"Memory.h\"\n#include \"rbh_logs.h\"\n#include \"rbh_cfg_helpers.h\"\n#include \"rbh_misc.h\"\n#include <pthread.h>\n#include <stdlib.h>\n#include <stdio.h>\n\n/* configuration for this module */\nentry_proc_config_t entry_proc_conf;\nint pipeline_flags = 0;\n\n/* hash table for storing references to ids */\nstatic struct id_hash *id_hash;\n/* hash table for storing references to parent_id/name */\nstatic struct id_hash *name_hash;\n\n/** initialize id constraint manager */\nint id_constraint_init(void)\n{\n    /* get the suggested hash size for the max record count */\n    size_t size = max_count_to_hash_size(\n        entry_proc_conf.max_pending_operations);\n\n    id_hash = id_hash_init(size, true);\n    name_hash = id_hash_init(size, true);\n    /* exiting the process releases hash resources */\n    return (id_hash == NULL || name_hash == NULL) ? 
-1 : 0;\n}\n\n/**\n * This is called to register the operation (with the ordering of pipeline)\n * Normal operation is to register at the tail.\n * @return ID_OK if the entry can be processed.\n *         ID_MISSING if the ID is not set in p_op structure\n *         ID_ALREADY if the op_structure has already been registered\n */\nint id_constraint_register(entry_proc_op_t *p_op, int at_head)\n{\n    struct id_hash_slot *slot;\n\n    if (!p_op->entry_id_is_set)\n        return ID_MISSING;\n\n    /* compute id hash value */\n    slot = get_hash_slot(id_hash, &p_op->entry_id);\n\n    P(slot->lock);\n\n    if (at_head)\n        rh_list_add(&p_op->id_hash_list, &slot->list);\n    else\n        rh_list_add_tail(&p_op->id_hash_list, &slot->list);\n\n    slot->count++;\n    p_op->id_is_referenced = 1;\n\n    V(slot->lock);\n\n    /* also lock parent_id/name */\n    if (ATTR_MASK_TEST(&p_op->fs_attrs, parent_id) &&\n        ATTR_MASK_TEST(&p_op->fs_attrs, name)) {\n        slot = get_name_hash_slot(name_hash, &ATTR(&p_op->fs_attrs, parent_id),\n                                  ATTR(&p_op->fs_attrs, name));\n        P(slot->lock);\n\n        if (at_head)\n            rh_list_add(&p_op->name_hash_list, &slot->list);\n        else\n            rh_list_add_tail(&p_op->name_hash_list, &slot->list);\n\n        slot->count++;\n        p_op->name_is_referenced = 1;\n\n        V(slot->lock);\n    }\n\n    return ID_OK;\n}\n\n#ifdef HAVE_CHANGELOGS\n#define op_name(_op)    ((_op)->extra_info.is_changelog_record ? 
\\\n                              changelog_type2str((_op)\\\n                              ->extra_info.log_record.p_log_rec->cr_type) : \\\n                              \"scan_op\")\n#else\n#define op_name(_op)    \"scan_op\"\n#endif\n\n/**\n * Test if a given operation is the first to be processed.\n */\nbool id_constraint_is_first_op(entry_proc_op_t *p_op_in)\n{\n    entry_proc_op_t *op;\n    struct id_hash_slot *slot;\n    int is_first = -1;  /* not set */\n\n    /* compute id hash value */\n    slot = get_hash_slot(id_hash, &p_op_in->entry_id);\n\n    P(slot->lock);\n    rh_list_for_each_entry(op, &slot->list, id_hash_list) {\n        if (entry_id_equal(&p_op_in->entry_id, &op->entry_id)) {\n            /* found an operation with the given id */\n            if (op == p_op_in)\n                is_first = 1;\n            else {\n                is_first = 0;\n                DisplayLog(LVL_FULL, \"IdConstraint\",\n                           \"Pending operation with the same id: \" DFID\n                           \" (%s). next op: %s\", PFID(&op->entry_id),\n                           op_name(op), op_name(p_op_in));\n            }\n            break;\n        }\n    }\n    V(slot->lock);\n\n    if (is_first == 0)\n        /* for sure, there is another operation on the same id before\n         * this one */\n        return false;\n\n    /* sanity check: registered operation was not found??? 
*/\n    if ((is_first == -1) && (p_op_in->id_is_referenced))\n        RBH_BUG(\"Registered operation was not found in id_constraint hash\");\n\n    /* Entry may be the first (or is not registered).\n     * Additional check of parent/name constraint: */\n    if (ATTR_MASK_TEST(&p_op_in->fs_attrs, parent_id) &&\n        ATTR_MASK_TEST(&p_op_in->fs_attrs, name)) {\n        slot =\n            get_name_hash_slot(name_hash, &ATTR(&p_op_in->fs_attrs, parent_id),\n                               ATTR(&p_op_in->fs_attrs, name));\n        P(slot->lock);\n        rh_list_for_each_entry(op, &slot->list, name_hash_list) {\n            if (entry_id_equal\n                (&ATTR(&p_op_in->fs_attrs, parent_id),\n                 &ATTR(&op->fs_attrs, parent_id))\n                && !strcmp(ATTR(&p_op_in->fs_attrs, name),\n                           ATTR(&op->fs_attrs, name))) {\n                if (op == p_op_in)\n                    is_first = 1;\n                else {\n                    is_first = 0;\n                    DisplayLog(LVL_FULL, \"IdConstraint\",\n                               \"Pending operation with the same parent/name: \"\n                               DFID \"/%s (%s). 
next op: %s\",\n                               PFID(&ATTR(&p_op_in->fs_attrs, parent_id)),\n                               ATTR(&p_op_in->fs_attrs, name), op_name(op),\n                               op_name(p_op_in));\n                }\n                break;\n            }\n        }\n        V(slot->lock);\n    }\n\n    /* if is_first = 0 => not first */\n    /* if is_first = -1: not found => not first */\n    /* just return TRUE if is_first = 1 */\n    return (is_first == 1);\n}\n\n/**\n * This removes the current reference to an id when the operation is removed.\n */\nint id_constraint_unregister(entry_proc_op_t *p_op)\n{\n    struct id_hash_slot *slot;\n\n    if (!p_op->entry_id_is_set)\n        return ID_MISSING;\n\n    if (!p_op->id_is_referenced)\n        return ID_NOT_EXISTS;\n\n    slot = get_hash_slot(id_hash, &p_op->entry_id);\n\n    /* Remove the entry */\n    P(slot->lock);\n\n    rh_list_del(&p_op->id_hash_list);\n    p_op->id_is_referenced = 0;\n    slot->count--;\n\n    V(slot->lock);\n\n    if (p_op->name_is_referenced) {\n        if (ATTR_MASK_TEST(&p_op->fs_attrs, parent_id) &&\n            ATTR_MASK_TEST(&p_op->fs_attrs, name)) {\n            slot =\n                get_name_hash_slot(name_hash, &ATTR(&p_op->fs_attrs, parent_id),\n                                   ATTR(&p_op->fs_attrs, name));\n            /* Remove the entry */\n            P(slot->lock);\n\n            rh_list_del(&p_op->name_hash_list);\n            p_op->name_is_referenced = 0;\n            slot->count--;\n\n            V(slot->lock);\n        } else {\n            DisplayLog(LVL_MAJOR, \"IdConstraint\", \"WARNING: cannot unregister \"\n                       \"entry with no parent/name but with a registered name!\");\n        }\n    }\n\n    return ID_OK;\n}\n\nvoid id_constraint_stats(void)\n{\n    id_hash_stats(id_hash, \"Id constraints count\");\n    id_hash_stats(name_hash, \"Name constraints count\");\n}\n\nvoid id_constraint_dump(void)\n{\n    
id_hash_dump(id_hash, false);\n    id_hash_dump(name_hash, true);\n}\n\n/* ------------ Config management functions --------------- */\n\n#define ENTRYPROC_CONFIG_BLOCK  \"EntryProcessor\"\n#define ALERT_BLOCK \"Alert\"\n\nstatic void entry_proc_cfg_set_default(void *module_config)\n{\n    entry_proc_config_t *conf = (entry_proc_config_t *) module_config;\n\n    if (lmgr_parallel_batches())\n        conf->nb_thread = 16;\n    else\n        conf->nb_thread = 10;\n\n    conf->max_pending_operations = 100;\n    conf->max_batch_size = 100;\n    conf->match_classes = true;\n\n    conf->detect_fake_mtime = false;\n}\n\nstatic void entry_proc_cfg_write_default(FILE *output)\n{\n    print_begin_block(output, 0, ENTRYPROC_CONFIG_BLOCK, NULL);\n\n    if (lmgr_parallel_batches())\n        print_line(output, 1, \"nb_threads             :  16\");\n    else\n        print_line(output, 1, \"nb_threads             :  10\");\n\n    print_line(output, 1, \"max_pending_operations :  100\");\n    print_line(output, 1, \"max_batch_size         :  100\");\n    print_line(output, 1, \"match_classes          :  yes\");\n    print_line(output, 1, \"detect_fake_mtime      :  no\");\n    print_end_block(output, 0);\n}\n\n#define CRITICAL_ERR_CHECK(_ptr_, _blkname_) do { if (!_ptr_) {\\\n            sprintf(msg_out, \"Internal error reading %s block in config file\", \\\n                    _blkname_); \\\n            return EFAULT; \\\n         }\\\n    } while (0)\n\n/** set expected values for the std pipeline\n * \\return the number of variables added to array\n */\nstatic int std_pipeline_arg_names(char **list, char *buffer)\n{\n    int i, c;\n    char *curr_buf = buffer;\n    unsigned int w;\n    c = 0;\n    for (i = 0; i < std_pipeline_descr.stage_count; i++) {\n        w = sprintf(curr_buf, \"%s_threads_max\", std_pipeline[i].stage_name);\n        list[i] = curr_buf;\n        curr_buf += w + 1;  /* written bytes + final null char */\n        c++;\n    }\n    return c;\n}\n\nstatic int 
load_pipeline_config(const pipeline_descr_t *descr,\n                                pipeline_stage_t *p,\n                                const entry_proc_config_t *conf,\n                                config_item_t entryproc_block, char *msg_out)\n{\n    int i, rc, tmpval;\n\n    for (i = 0; i < descr->stage_count; i++) {\n        char varname[256];\n\n        snprintf(varname, 256, \"%s_threads_max\", p[i].stage_name);\n\n        rc = GetIntParam(entryproc_block, ENTRYPROC_CONFIG_BLOCK, varname,\n                         PFLG_POSITIVE, &tmpval, NULL, NULL, msg_out);\n\n        if ((rc != 0) && (rc != ENOENT))\n            return rc;\n        else if ((rc != ENOENT) && (tmpval > 0)) {  /* 0: keep default */\n\n            if (p[i].stage_flags & STAGE_FLAG_PARALLEL) {\n                /* the stage is no longer parallel, it has a limited number of\n                 * threads */\n                p[i].stage_flags &= ~STAGE_FLAG_PARALLEL;\n                p[i].stage_flags |= STAGE_FLAG_MAX_THREADS;\n                p[i].max_thread_count = conf->nb_thread;\n            }\n            if (p[i].stage_flags & STAGE_FLAG_MAX_THREADS) {\n                /* if batching is enabled and simultaneous batches are not\n                 * allowed: ERROR */\n                if (!lmgr_parallel_batches() && (i == descr->DB_APPLY)\n                    && (conf->max_batch_size != 1) && (tmpval > 1)) {\n                    sprintf(msg_out,\n                            \"Wrong value for '%s': Parallelizing batched DB operations \"\n                            \"is not allowed when accounting is ON.\\n\"\n                            \"Remove this tuning, disable accounting (accounting = no)\"\n                            \" or disable batching (max_batch_size=1) to parallelize this stage.\",\n                            varname);\n                    return EINVAL;\n                }\n\n                if ((i == descr->DB_APPLY) && (conf->nb_thread > 1))\n                    /* don't 
starve other steps: max is nb_thread-1\n                     * (except if nb_thread = 1) */\n                    p[i].max_thread_count = MIN2(conf->nb_thread - 1, tmpval);\n                else\n                    /* nb_thread at most */\n                    p[i].max_thread_count = MIN2(conf->nb_thread, tmpval);\n            } else if ((p[i].stage_flags & STAGE_FLAG_SEQUENTIAL)\n                       && (tmpval != 1)) {\n                sprintf(msg_out,\n                        \"%s is sequential. Cannot use %u threads at this stage.\",\n                        p[i].stage_name, tmpval);\n                return EINVAL;\n            }\n        }\n    }\n\n    return 0;\n}\n\nstatic void set_default_pipeline_config(const pipeline_descr_t *descr,\n                                        pipeline_stage_t *p,\n                                        const entry_proc_config_t *conf)\n{\n    int i = descr->DB_APPLY;\n\n    if (p[i].stage_flags & STAGE_FLAG_PARALLEL) {\n        p[i].stage_flags &= ~STAGE_FLAG_PARALLEL;\n        p[i].stage_flags |= STAGE_FLAG_MAX_THREADS;\n\n        /* mode batching + parallel: 50% of threads at most for DB apply */\n        if (lmgr_parallel_batches() && conf->max_batch_size > 1) {\n            /* 10 => 4, 20 => 8... 
=> remove 1/5, then /2 => *2/5 */\n            /* if nb thread < 4 => 1 */\n            if (conf->nb_thread < 4)\n                p[i].max_thread_count = 1;\n            else\n                p[i].max_thread_count = 2 * conf->nb_thread / 5;\n        } else {    /* not // + batching */\n\n            /* if nb thread = 1 or 2 => set the limit to 1\n             *                3      =>                  2\n             *                4-7    =>                  n-2\n             *                7+     =>                  80%\n             */\n            if (conf->nb_thread < 4)\n                p[i].max_thread_count = MAX2(conf->nb_thread - 1, 1);\n            else if (conf->nb_thread < 8)\n                p[i].max_thread_count = conf->nb_thread - 2;\n            else\n                p[i].max_thread_count = (8 * conf->nb_thread) / 10;\n        }\n    } else if (p[i].stage_flags & STAGE_FLAG_MAX_THREADS) {\n        /* ensure DB_APPLY threads <= nbthread - 1 */\n        if (p[i].max_thread_count > conf->nb_thread - 1)\n            p[i].max_thread_count = conf->nb_thread - 1;\n    }\n\n    /* if batching is enabled, DB_APPLY_THREAD_MAX = 1 */\n    if (!lmgr_parallel_batches() && conf->max_batch_size != 1) {\n        if (p[i].stage_flags & STAGE_FLAG_PARALLEL)\n            RBH_BUG(\"step should no big tagged as 'PARALLEL' at this point\");\n        else if (p[i].stage_flags & STAGE_FLAG_MAX_THREADS)\n            p[i].max_thread_count = 1;\n    }\n}\n\nstatic int entry_proc_cfg_read(config_file_t config, void *module_config,\n                               char *msg_out)\n{\n    int rc, blc_index;\n    entry_proc_config_t *conf = (entry_proc_config_t *) module_config;\n    unsigned int next_idx = 0;\n    config_item_t entryproc_block;\n\n    /* buffer to store arg names */\n    char *pipeline_names = NULL;\n    /* max size is max pipeline steps (<10) + other args (<6) */\n#define MAX_ENTRYPROC_ARGS 16\n    char *entry_proc_allowed[MAX_ENTRYPROC_ARGS] = { 0 };\n\n    
const cfg_param_t cfg_params[] = {\n        {\"nb_threads\", PT_INT, PFLG_POSITIVE | PFLG_NOT_NULL, &conf->nb_thread,\n         0},\n        {\"max_pending_operations\", PT_INT, PFLG_POSITIVE | PFLG_NOT_NULL,\n         &conf->max_pending_operations, 0},\n        {\"max_batch_size\", PT_INT, PFLG_POSITIVE | PFLG_NOT_NULL,\n         &conf->max_batch_size, 0},\n        {\"match_classes\", PT_BOOL, 0, &conf->match_classes, 0},\n        {\"detect_fake_mtime\", PT_BOOL, 0, &conf->detect_fake_mtime, 0},\n\n        END_OF_PARAMS\n    };\n\n    /* get EntryProcessor block */\n    rc = get_cfg_block(config, ENTRYPROC_CONFIG_BLOCK, &entryproc_block,\n                       msg_out);\n    if (rc == ENOENT) {\n        /* set default pipeline config */\n        set_default_pipeline_config(&std_pipeline_descr, std_pipeline, conf);\n        /* No error because no parameter is mandatory */\n        return 0;\n    }\n    if (rc)\n        return rc;\n\n    /* read std params */\n    rc = read_scalar_params(entryproc_block, ENTRYPROC_CONFIG_BLOCK, cfg_params,\n                            msg_out);\n    if (rc)\n        return rc;\n\n    /* should have at least 2 threads! 
*/\n    if (conf->nb_thread == 1)\n        DisplayLog(LVL_MAJOR, \"EntryProc_Config\", \"WARNING: \"\n                   ENTRYPROC_CONFIG_BLOCK \" should have at least 2 threads to \"\n                   \"avoid pipeline step starvation!\");\n\n    /* look for '<stage>_thread_max' parameters (for all pipelines) */\n\n    /* Set default pipeline config according to EntryProc config\n     * FIXME this modifies the global config variable, even when reloading!\n     */\n    set_default_pipeline_config(&std_pipeline_descr, std_pipeline, conf);\n\n    rc = load_pipeline_config(&std_pipeline_descr, std_pipeline, conf,\n                              entryproc_block, msg_out);\n    if (rc)\n        return rc;\n\n    // TODO load_pipeline_config(&diff_pipeline_descr, &diff_pipeline);\n\n    /* TODO Check consistency of performance strategy:\n     * batching vs. multithread DB operations */\n\n    /* Warn about deprecated \"Alert\" blocks */\n    for (blc_index = 0; blc_index < rh_config_GetNbItems(entryproc_block);\n         blc_index++) {\n        char *block_name;\n        config_item_t curr_item;\n\n        curr_item = rh_config_GetItemByIndex(entryproc_block, blc_index);\n        CRITICAL_ERR_CHECK(curr_item, ENTRYPROC_CONFIG_BLOCK);\n\n        if (rh_config_ItemType(curr_item) != CONFIG_ITEM_BLOCK)\n            continue;\n\n        block_name = rh_config_GetBlockName(curr_item);\n        CRITICAL_ERR_CHECK(curr_item, ENTRYPROC_CONFIG_BLOCK);\n\n        if (!strcasecmp(block_name, ALERT_BLOCK)) {\n            DisplayLog(LVL_MAJOR, \"EntryProc_Config\",\n                       \"WARNING: %s blocks are deprecated. 
\"\n                       \"Configure an alert policy instead (include 'alerts.inc').\",\n                       ALERT_BLOCK);\n        }\n    }\n\n    next_idx = 0;\n    entry_proc_allowed[next_idx++] = \"nb_threads\";\n    entry_proc_allowed[next_idx++] = \"max_pending_operations\";\n    entry_proc_allowed[next_idx++] = \"max_batch_size\";\n    entry_proc_allowed[next_idx++] = \"match_classes\";\n    entry_proc_allowed[next_idx++] = \"detect_fake_mtime\";\n\n    pipeline_names = malloc(16 * 256);  /* max 16 strings of 256 (oversized) */\n    if (!pipeline_names)\n        return ENOMEM;\n\n    /* fill arg list with pipeline step names */\n    next_idx +=\n        std_pipeline_arg_names(entry_proc_allowed + next_idx, pipeline_names);\n    //TODO\n    //next_idx += diff_pipeline_arg_names(entry_proc_allowed + next_idx,\n    //                                    pipeline_names + XXX?);\n\n    CheckUnknownParameters(entryproc_block, ENTRYPROC_CONFIG_BLOCK,\n                           (const char **)entry_proc_allowed);\n    free(pipeline_names);\n\n    return 0;\n}\n\nstatic int entry_proc_cfg_reload(entry_proc_config_t *conf)\n{\n    if (conf->nb_thread != entry_proc_conf.nb_thread)\n        DisplayLog(LVL_MAJOR, \"EntryProc_Config\",\n                   ENTRYPROC_CONFIG_BLOCK\n                   \"::nb_threads changed in config file, but cannot be modified dynamically\");\n\n    if (conf->max_pending_operations != entry_proc_conf.max_pending_operations)\n        DisplayLog(LVL_MAJOR, \"EntryProc_Config\",\n                   ENTRYPROC_CONFIG_BLOCK\n                   \"::max_pending_operations changed in config file, but cannot be modified dynamically\");\n\n    if (conf->max_batch_size != entry_proc_conf.max_batch_size) {\n        DisplayLog(LVL_MAJOR, \"EntryProc_Config\",\n                   ENTRYPROC_CONFIG_BLOCK\n                   \"::max_batch_size updated: '%u'->'%u'\",\n                   entry_proc_conf.max_batch_size, conf->max_batch_size);\n        
entry_proc_conf.max_batch_size = conf->max_batch_size;\n    }\n\n    if (conf->match_classes != entry_proc_conf.match_classes) {\n        DisplayLog(LVL_MAJOR, \"EntryProc_Config\",\n                   ENTRYPROC_CONFIG_BLOCK \"::match_classes updated: '%s'->'%s'\",\n                   bool2str(entry_proc_conf.match_classes),\n                   bool2str(conf->match_classes));\n        entry_proc_conf.match_classes = conf->match_classes;\n    }\n\n    if (conf->detect_fake_mtime != entry_proc_conf.detect_fake_mtime) {\n        DisplayLog(LVL_MAJOR, \"EntryProc_Config\",\n                   ENTRYPROC_CONFIG_BLOCK\n                   \"::detect_fake_mtime updated: '%s'->'%s'\",\n                   bool2str(entry_proc_conf.detect_fake_mtime),\n                   bool2str(conf->detect_fake_mtime));\n        entry_proc_conf.detect_fake_mtime = conf->detect_fake_mtime;\n    }\n\n    if (entry_proc_conf.match_classes && (policies.fileset_count == 0)) {\n        DisplayLog(LVL_EVENT, \"EntryProc_Config\",\n                   \"No fileclass defined in configuration, disabling fileclass matching.\");\n        entry_proc_conf.match_classes = false;\n    }\n\n    return 0;\n}\n\nstatic int entry_proc_cfg_set(void *cfg, bool reload)\n{\n    entry_proc_config_t *config = cfg;\n\n    if (reload)\n        return entry_proc_cfg_reload(config);\n\n    entry_proc_conf = *config;\n    return 0;\n}\n\nstatic void entry_proc_cfg_write_template(FILE *output)\n{\n    int i;\n\n    print_begin_block(output, 0, ENTRYPROC_CONFIG_BLOCK, NULL);\n\n    print_line(output, 1,\n               \"# nbr of worker threads for processing pipeline tasks\");\n    print_line(output, 1, \"nb_threads = 16 ;\");\n    fprintf(output, \"\\n\");\n    print_line(output, 1,\n               \"# Max number of operations in the Entry Processor pipeline.\");\n    print_line(output, 1,\n               \"# If the number of pending operations exceeds this limit, \");\n    print_line(output, 1,\n               \"# info 
collectors are suspended until this count decreases\");\n    print_line(output, 1, \"max_pending_operations = 100 ;\");\n    fprintf(output, \"\\n\");\n    print_line(output, 1, \"# max batched DB operations (1=no batching)\");\n    print_line(output, 1, \"max_batch_size = 100;\");\n    fprintf(output, \"\\n\");\n\n    print_line(output, 1,\n               \"# Optionnaly specify a maximum thread count for each stage of the pipeline:\");\n    print_line(output, 1, \"# <stagename>_threads_max = <n> (0: use default)\");\n    for (i = 0; i < std_pipeline_descr.stage_count; i++) {\n        if (i == std_pipeline_descr.DB_APPLY) {\n            print_line(output, 1,\n                       \"# Disable batching (max_batch_size=1) or accounting (accounting=no)\");\n            print_line(output, 1,\n                       \"# to allow parallelizing the following step:\");\n        }\n\n        if (std_pipeline[i].stage_flags & STAGE_FLAG_PARALLEL)\n            print_line(output, 1, \"# %s_threads_max\\t= 4 ;\",\n                       std_pipeline[i].stage_name);\n        else if (std_pipeline[i].stage_flags & STAGE_FLAG_MAX_THREADS)\n            print_line(output, 1, \"%s_threads_max\\t= %u ;\",\n                       std_pipeline[i].stage_name,\n                       std_pipeline[i].max_thread_count);\n    }\n    fprintf(output, \"\\n\");\n\n    print_line(output, 1, \"# if set to 'no', classes will only be matched\");\n    print_line(output, 1,\n               \"# at policy application time (not during a scan or reading changelog)\");\n    print_line(output, 1, \"match_classes = yes;\");\n\n    fprintf(output, \"\\n\");\n    print_line(output, 1,\n               \"# Faking mtime to an old time causes the file to be migrated\");\n    print_line(output, 1,\n               \"# with top priority. 
Enabling this parameter detect this behavior\");\n    print_line(output, 1, \"# and doesn't allow  mtime < creation_time\");\n    print_line(output, 1, \"detect_fake_mtime = no;\");\n\n    print_end_block(output, 0);\n}\n\n/** try to convert a time_t to a human readable form */\nvoid time2human_helper(time_t t, const char *attr_name, char *str,\n                       size_t size, const struct entry_proc_op_t *p_op)\n{\n    struct tm res;\n\n    /* initialize as 'out of range' */\n    strncpy(str, \"<out of range>\", size);\n\n    if (localtime_r(&t, &res) != NULL)\n        strftime(str, size, \"%Y/%m/%d %T\", &res);\n    else if (ATTR_FSorDB_TEST(p_op, fullpath))\n        DisplayLog(LVL_MAJOR, ENTRYPROC_TAG,\n                   \"Invalid or corrupted %s detected for %s: %lu\", attr_name,\n                   ATTR_FSorDB(p_op, fullpath), t);\n    else\n        DisplayLog(LVL_MAJOR, ENTRYPROC_TAG,\n                   \"Invalid or corrupted %s detected for \" DFID \": %lu\",\n                   attr_name, PFID(&p_op->entry_id), t);\n}\n\nvoid check_and_warn_fake_mtime(const struct entry_proc_op_t *p_op)\n{\n    char mt[128];\n    char ct[128];\n\n    /* check if mtime is before estimated creation time */\n    if (ATTR(&p_op->fs_attrs, last_mod) < ATTR_FSorDB(p_op, creation_time)) {\n        time2human_helper(ATTR(&p_op->fs_attrs, last_mod), \"mtime\", mt,\n                          sizeof(mt), p_op);\n\n        time2human_helper(ATTR(&p_op->fs_attrs, creation_time), \"crtime\", ct,\n                          sizeof(ct), p_op);\n\n        if (ATTR_FSorDB_TEST(p_op, fullpath))\n            DisplayLog(LVL_VERB, ENTRYPROC_TAG,\n                       \"Fake mtime detected for '%s': mtime=%s, creation=%s\",\n                       ATTR_FSorDB(p_op, fullpath), mt, ct);\n        else\n            DisplayLog(LVL_VERB, ENTRYPROC_TAG,\n                       \"Fake mtime detected for \" DFID\n                       \": mtime=%s, creation=%s\", PFID(&p_op->entry_id), mt,\n       
                ct);\n    }\n    /* a 24h delay can be explained by different timezones */\n    else if (ATTR(&p_op->fs_attrs, last_mod) > time(NULL) + 86400) {\n        time2human_helper(ATTR(&p_op->fs_attrs, last_mod), \"mtime\", mt,\n                          sizeof(mt), p_op);\n\n        if (ATTR_FSorDB_TEST(p_op, fullpath))\n            DisplayLog(LVL_EVENT, ENTRYPROC_TAG,\n                       \"Fake mtime detected for '%s': mtime=%s is in the future\",\n                       ATTR_FSorDB(p_op, fullpath), mt);\n        else\n            DisplayLog(LVL_EVENT, ENTRYPROC_TAG,\n                       \"Fake mtime detected for \" DFID\n                       \": mtime=%s is in the future\", PFID(&p_op->entry_id),\n                       mt);\n    }\n}\n\n#ifdef _LUSTRE\nvoid check_stripe_info(struct entry_proc_op_t *p_op, lmgr_t *lmgr)\n{\n#ifdef HAVE_LLAPI_FSWAP_LAYOUTS\n    /* Since Lustre2.4, entry striping can change (lfs swap_layouts,\n     * lfs hsm_release...) so scanning must update file stripe information. 
*/\n    /* Possible cases:\n     * - File striping is not set in fs_attrs: check it exists in DB\n     *      If not, get stripe info from filesystem\n     * - File striping is set in fs_attrs:\n     *      - Check stripe validator in DB: if OK, don't update DB info\n     *      - if an error is reported, update with the new values.\n     */\n    if (!ATTR_MASK_TEST(&p_op->fs_attrs, stripe_info)) {\n#endif\n        /* check it exists in DB */\n        if (ListMgr_CheckStripe(lmgr, &p_op->entry_id, VALID_EXISTS) !=\n            DB_SUCCESS) {\n            DisplayLog(LVL_DEBUG, ENTRYPROC_TAG,\n                       DFID \": stripe information is missing/invalid in DB\",\n                       PFID(&p_op->entry_id));\n\n            /* don't need to get stripe if we already have fresh stripe info\n             * from FS */\n            if (!(ATTR_MASK_TEST(&p_op->fs_attrs, stripe_info)\n                  && ATTR_MASK_TEST(&p_op->fs_attrs, stripe_items))) {\n                attr_mask_set_index(&p_op->fs_attr_need,\n                                    ATTR_INDEX_stripe_info);\n                attr_mask_set_index(&p_op->fs_attr_need,\n                                    ATTR_INDEX_stripe_items);\n            }\n        } else {\n            /* Keep any stripe info in fs_attrs structure, so it is available\n             * for matching.\n             * However, flag it so they are not updated in DB. */\n             p_op->db_stripe_ok = true;\n        }\n\n#ifdef HAVE_LLAPI_FSWAP_LAYOUTS\n    } else if (ListMgr_CheckStripe(lmgr, &p_op->entry_id,\n                                   ATTR(&p_op->fs_attrs, stripe_info).validator)\n               == DB_SUCCESS) {\n        /* Keep the stripe info in fs_attrs structure, so it is available\n         * for matching.\n         * However, flag it so they are not updated in DB. 
*/\n         p_op->db_stripe_ok = true;\n    } else  /* keep stripe info in fs_attrs, as it must be updated */\n        DisplayLog(LVL_DEBUG, ENTRYPROC_TAG,\n                   DFID \": stripe information has changed\",\n                   PFID(&p_op->entry_id));\n#endif\n}\n#endif\n\nstatic void *entry_proc_cfg_new(void)\n{\n    return calloc(1, sizeof(entry_proc_config_t));\n}\n\nstatic void entry_proc_cfg_free(void *cfg)\n{\n    free(cfg);\n}\n\n/* export config functions */\nmod_cfg_funcs_t entry_proc_cfg_hdlr = {\n    .module_name = \"entry processor\",\n    .new = entry_proc_cfg_new,\n    .free = entry_proc_cfg_free,\n    .set_default = entry_proc_cfg_set_default,\n    .read = entry_proc_cfg_read,\n    .set_config = entry_proc_cfg_set,\n    .write_default = entry_proc_cfg_write_default,\n    .write_template = entry_proc_cfg_write_template\n};\n"
  },
  {
    "path": "src/entry_processor/entry_proc_tools.h",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2008, 2009 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n/**\n * Misc tools for managing entry processor pipeline\n */\n#ifndef _ENTRY_PROC_TOOLS_H\n#define _ENTRY_PROC_TOOLS_H\n\n#include \"entry_processor.h\"\n\ntypedef struct entry_proc_config_t {\n    unsigned int nb_thread;\n    unsigned int max_pending_operations;\n    unsigned int max_batch_size;\n\n    bool match_classes;\n\n    /* fake mtime in the past causes higher\n     * migration priority */\n    bool detect_fake_mtime;\n\n} entry_proc_config_t;\n\nextern entry_proc_config_t entry_proc_conf;\nextern int pipeline_flags;\n\n/** initialize id constraint manager */\nint id_constraint_init(void);\n\n#define ID_OK                   0\n#define ID_CONSTRAINT_VIOLATION 1\n#define ID_MISSING              2\n#define ID_NOT_EXISTS           3\n\n/**\n * This is called to register the operation (with the ordering of pipeline)\n * @return ID_OK if the entry can be processed.\n *         ID_MISSING if the ID is not set in p_op structure\n */\nint id_constraint_register(entry_proc_op_t *p_op, int at_head);\n\n/**\n * Indicate if a given operation is the first for a given id and parent/name.\n */\nbool id_constraint_is_first_op(entry_proc_op_t *p_op);\n\n/**\n * This removes the current reference to an id when the operation is removed.\n */\nint id_constraint_unregister(entry_proc_op_t *p_op);\n\n/* display info about id constraints management */\nvoid id_constraint_stats(void);\n/* dump all values */\nvoid id_constraint_dump(void);\n\nvoid time2human_helper(time_t t, const char *attr_name, char *str,\n             
          size_t size, const struct entry_proc_op_t *p_op);\n\nvoid check_and_warn_fake_mtime(const struct entry_proc_op_t *p_op);\n\n#ifdef _LUSTRE\nvoid check_stripe_info(struct entry_proc_op_t *p_op, lmgr_t *lmgr);\n#endif\n\n#endif\n"
  },
  {
    "path": "src/entry_processor/std_pipeline.c",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2009, 2010 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n/**\n * Common pipeline functions\n */\n#ifdef HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n\n#include \"rbh_cfg.h\"\n#include \"rbh_logs.h\"\n#include \"rbh_misc.h\"\n#include \"entry_processor.h\"\n#include \"entry_proc_tools.h\"\n#include \"Memory.h\"\n#include \"policy_rules.h\"\n#include \"update_params.h\"\n#include \"status_manager.h\"\n#include <errno.h>\n#include <time.h>\n#include <unistd.h>\n\n/** Indicate if the error code means that the entry is missing */\nstatic inline bool err_missing(int rc)\n{\n    int abs_rc = abs(rc);\n\n    return (abs_rc == ENOENT) || (abs_rc == ESTALE);\n}\n\n#define diff_mask (*((attr_mask_t *)entry_proc_arg))\n\n/* forward declaration of EntryProc functions of pipeline */\nstatic int EntryProc_get_fid(struct entry_proc_op_t *, lmgr_t *);\nstatic int EntryProc_get_info_db(struct entry_proc_op_t *, lmgr_t *);\nstatic int EntryProc_get_info_fs(struct entry_proc_op_t *, lmgr_t *);\nstatic int EntryProc_pre_apply(struct entry_proc_op_t *, lmgr_t *);\nstatic int EntryProc_db_apply(struct entry_proc_op_t *, lmgr_t *);\nstatic int EntryProc_db_batch_apply(struct entry_proc_op_t **, int, lmgr_t *);\n#ifdef HAVE_CHANGELOGS\nstatic int EntryProc_chglog_clr(struct entry_proc_op_t *, lmgr_t *);\n#endif\nstatic int EntryProc_rm_old_entries(struct entry_proc_op_t *, lmgr_t *);\n\n/* forward declaration to check batchable operations for db_apply stage */\nstatic bool dbop_is_batchable(struct entry_proc_op_t *,\n                              struct entry_proc_op_t *, attr_mask_t 
*);\n\n/** pipeline stages */\nenum {\n    STAGE_GET_FID = 0,\n    STAGE_GET_INFO_DB,\n    STAGE_GET_INFO_FS,\n    STAGE_PRE_APPLY,\n    STAGE_DB_APPLY,\n#ifdef HAVE_CHANGELOGS\n    STAGE_CHGLOG_CLR,\n#endif\n    STAGE_RM_OLD_ENTRIES,   /* special stage at the end of FS scan */\n\n    PIPELINE_STAGE_COUNT    /* keep last */\n};\n\n/** std_pipeline entry points */\nconst pipeline_descr_t std_pipeline_descr = {\n    .stage_count = PIPELINE_STAGE_COUNT,\n    .GET_ID = STAGE_GET_FID,\n    .GET_INFO_DB = STAGE_GET_INFO_DB,\n    .GET_INFO_FS = STAGE_GET_INFO_FS,\n    .GC_OLDENT = STAGE_RM_OLD_ENTRIES,\n    .DB_APPLY = STAGE_DB_APPLY,\n};\n\n/** pipeline stages definition and parameters */\npipeline_stage_t std_pipeline[] = {\n    {STAGE_GET_FID, \"STAGE_GET_FID\", EntryProc_get_fid, NULL, NULL,\n     STAGE_FLAG_PARALLEL | STAGE_FLAG_SYNC, 0},\n    {STAGE_GET_INFO_DB, \"STAGE_GET_INFO_DB\", EntryProc_get_info_db, NULL, NULL,\n     STAGE_FLAG_PARALLEL | STAGE_FLAG_SYNC | STAGE_FLAG_ID_CONSTRAINT, 0},\n    {STAGE_GET_INFO_FS, \"STAGE_GET_INFO_FS\", EntryProc_get_info_fs, NULL, NULL,\n     STAGE_FLAG_PARALLEL | STAGE_FLAG_SYNC, 0},\n    {STAGE_PRE_APPLY, \"STAGE_PRE_APPLY\", EntryProc_pre_apply, NULL, NULL,\n     STAGE_FLAG_PARALLEL | STAGE_FLAG_SYNC, 0},\n    {STAGE_DB_APPLY, \"STAGE_DB_APPLY\", EntryProc_db_apply,\n     EntryProc_db_batch_apply, dbop_is_batchable,   /* batched ops management */\n#if defined(_SQLITE)\n     /* SQLite locks the whole file for modifications...\n      * So, 1 single threads is enough at this step.\n      */\n     STAGE_FLAG_MAX_THREADS | STAGE_FLAG_SYNC, 1},\n#else\n     STAGE_FLAG_PARALLEL | STAGE_FLAG_SYNC, 0},\n#endif\n\n#ifdef HAVE_CHANGELOGS\n    /* only 1 thread here because committing records must be sequential\n     * (in the same order as changelog) */\n    /* XXX could this stage be batched, like DB_APPLY? 
*/\n    {STAGE_CHGLOG_CLR, \"STAGE_CHGLOG_CLR\", EntryProc_chglog_clr, NULL, NULL,\n     STAGE_FLAG_SEQUENTIAL | STAGE_FLAG_SYNC, 1},\n\n    /* acknowledging records must be sequential,\n     * in the order of record ids\n     * @TODO change this depending on the mode the program is started.\n     */\n#endif\n    /* this step is for mass update / mass remove operations when\n     * starting/ending a FS scan. */\n    {STAGE_RM_OLD_ENTRIES, \"STAGE_RM_OLD_ENTRIES\", EntryProc_rm_old_entries,\n     NULL, NULL,\n     STAGE_FLAG_SEQUENTIAL | STAGE_FLAG_SYNC, 0}\n};\n\n/**\n * For entries from FS scan, we must get the associated entry ID.\n */\nint EntryProc_get_fid(struct entry_proc_op_t *p_op, lmgr_t *lmgr)\n{\n#ifdef _HAVE_FID\n    int rc;\n    entry_id_t tmp_id;\n    char buff[RBH_PATH_MAX];\n    char *path;\n\n    /* 2 possible options: get fid using parent_fid/name or from fullpath */\n    if (ATTR_MASK_TEST(&p_op->fs_attrs, parent_id)\n        && ATTR_MASK_TEST(&p_op->fs_attrs, name)) {\n        BuildFidPath(&ATTR(&p_op->fs_attrs, parent_id), buff);\n        long len = strlen(buff);\n        sprintf(buff + len, \"/%s\", ATTR(&p_op->fs_attrs, name));\n        path = buff;\n    } else if (ATTR_MASK_TEST(&p_op->fs_attrs, fullpath)) {\n        path = ATTR(&p_op->fs_attrs, fullpath);\n    } else {\n        DisplayLog(LVL_CRIT, ENTRYPROC_TAG,\n                   \"Error: not enough information to get fid: \"\n                   \"parent_id/name or fullpath needed\");\n        EntryProcessor_Acknowledge(p_op, -1, true);\n        return EINVAL;\n    }\n\n    /* perform path2fid */\n    rc = Lustre_GetFidFromPath(path, &tmp_id);\n\n    /* Workaround for Lustre 2.3: if parent is root, llapi_path2fid returns\n     * -EINVAL (see LU-3245). 
In this case, get fid from full path.\n     */\n    if ((rc == -EINVAL) && ATTR_MASK_TEST(&p_op->fs_attrs, fullpath)) {\n        path = ATTR(&p_op->fs_attrs, fullpath);\n        rc = Lustre_GetFidFromPath(path, &tmp_id);\n    }\n\n    if (rc) {\n        /* remove the operation from pipeline */\n        rc = EntryProcessor_Acknowledge(p_op, -1, true);\n        if (rc)\n            DisplayLog(LVL_CRIT, ENTRYPROC_TAG,\n                       \"Error %d acknowledging stage STAGE_GET_FID.\", rc);\n    } else {\n        EntryProcessor_SetEntryId(p_op, &tmp_id);\n\n        /* go to GET_INFO_DB stage */\n        rc = EntryProcessor_Acknowledge(p_op, STAGE_GET_INFO_DB, false);\n        if (rc)\n            DisplayLog(LVL_CRIT, ENTRYPROC_TAG,\n                       \"Error %d acknowledging stage STAGE_GET_FID.\", rc);\n    }\n    return rc;\n#else\n    DisplayLog(LVL_CRIT, ENTRYPROC_TAG,\n               \"Error: unexpected stage in a filesystem with no fid: STAGE_GET_FID.\");\n    EntryProcessor_Acknowledge(p_op, -1, true);\n    return EINVAL;\n#endif\n}\n\n#ifdef HAVE_CHANGELOGS\n/* does the CL record gives a clue about object type? 
*/\nstatic obj_type_t cl2type_clue(CL_REC_TYPE *logrec)\n{\n    switch (logrec->cr_type) {\n    case CL_CREATE:\n    case CL_CLOSE:\n    case CL_TRUNC:\n#ifdef HAVE_CL_LAYOUT\n    case CL_LAYOUT:\n#endif\n    case CL_HSM:\n        return TYPE_FILE;\n    case CL_MKDIR:\n    case CL_RMDIR:\n        return TYPE_DIR;\n    case CL_SOFTLINK:\n        return TYPE_LINK;\n    case CL_MKNOD:\n        return TYPE_CHR;    /* or other special type */\n    default:\n        return TYPE_NONE;\n    }\n}\n\n/** displays a warning if parent/name is missing whereas it should not */\nstatic inline void check_path_info(struct entry_proc_op_t *p_op,\n                                   const char *recname)\n{\n    /* name and parent should have been provided by the CREATE record */\n    if (!ATTR_MASK_TEST(&p_op->fs_attrs, parent_id)\n        || !ATTR_MASK_TEST(&p_op->fs_attrs, name)) {\n        DisplayLog(LVL_MAJOR, ENTRYPROC_TAG,\n                   \"WARNING: name and parent should be set by %s record\",\n                   recname);\n        attr_mask_set_index(&p_op->fs_attr_need, ATTR_INDEX_name);\n        attr_mask_set_index(&p_op->fs_attr_need, ATTR_INDEX_parent_id);\n    }\n}\n\n/**\n * Infer information from the changelog record (status, ...).\n * \\return next pipeline step to be perfomed.\n */\nstatic int EntryProc_FillFromLogRec(struct entry_proc_op_t *p_op,\n                                    bool allow_md_updt)\n{\n    /* status mask to call changelog callbacks */\n    uint32_t cl_cb_status_mask = 0;\n    bool status_mask_set = false;\n\n    /* alias to the log record */\n    CL_REC_TYPE *logrec = p_op->extra_info.log_record.p_log_rec;\n    attr_mask_t status_mask_need = null_mask;\n    proc_action_e rec_action = PROC_ACT_NONE;\n\n    /* if this is a CREATE record, we know that its status is NEW. 
*/\n    if (logrec->cr_type == CL_CREATE) {\n        /* not a symlink */\n        attr_mask_unset_index(&p_op->fs_attr_need, ATTR_INDEX_link);\n\n        /* name and parent should have been provided by the CREATE record */\n        check_path_info(p_op, \"CREATE\");\n\n        /* Sanity check: if the entry already exists in DB,\n         * it could come from a previous filesystem that has been reformatted.\n         * In this case, force a full update of the entry.\n         */\n        if (p_op->db_exists) {\n            DisplayLog(LVL_EVENT, ENTRYPROC_TAG,\n                       \"CREATE record on already existing entry \" DFID \"%s%s.\"\n                       \" This is normal if you scanned it previously.\",\n                       PFID(&p_op->entry_id),\n                       ATTR_MASK_TEST(&p_op->db_attrs, fullpath) ? \", path=\" :\n                       (ATTR_MASK_TEST(&p_op->db_attrs, name) ? \", name=\" : \"\"),\n                       ATTR_MASK_TEST(&p_op->db_attrs, fullpath) ?\n                       ATTR(&p_op->db_attrs, fullpath) :\n                       (ATTR_MASK_TEST(&p_op->db_attrs, name) ?\n                        ATTR(&p_op->db_attrs, name) : \"\"));\n\n            /* set insertion time, like for a new entry */\n            ATTR_MASK_SET(&p_op->fs_attrs, creation_time);\n            ATTR(&p_op->fs_attrs, creation_time)\n                = cltime2sec(logrec->cr_time);\n\n            /* force updating attributes */\n            p_op->fs_attr_need.std |= POSIX_ATTR_MASK | ATTR_MASK_stripe_info\n                                      | ATTR_MASK_projid;\n            /* get status for all policies with a matching scope */\n            add_matching_scopes_mask(&p_op->entry_id, &p_op->fs_attrs, true,\n                                     &p_op->fs_attr_need.status);\n\n            /* will use the same mask for calling changelog callbacks */\n            cl_cb_status_mask = p_op->fs_attr_need.status;\n            status_mask_set = true;\n        }\n   
 } else if (logrec->cr_type == CL_HARDLINK) {\n        /* The entry exists but not the name. We only have to\n         * create the name. */\n\n        /* name and parent should have been provided by the HARDLINK record */\n        check_path_info(p_op, \"HARDLINK\");\n    } else if ((logrec->cr_type == CL_MKDIR) || (logrec->cr_type == CL_RMDIR)) {\n        /* entry is a directory */\n        ATTR_MASK_SET(&p_op->fs_attrs, type);\n        strcpy(ATTR(&p_op->fs_attrs, type), STR_TYPE_DIR);\n\n        /* not a link */\n        attr_mask_unset_index(&p_op->fs_attr_need, ATTR_INDEX_link);\n\n        /* no stripe info for dirs */\n        attr_mask_unset_index(&p_op->fs_attr_need, ATTR_INDEX_stripe_info);\n        attr_mask_unset_index(&p_op->fs_attr_need, ATTR_INDEX_stripe_items);\n\n        /* when a directory is created or deleted, directory is empty */\n        ATTR_MASK_SET(&p_op->fs_attrs, dircount);\n        ATTR(&p_op->fs_attrs, dircount) = 0;\n\n        /* path info should be set */\n        check_path_info(p_op, changelog_type2str(logrec->cr_type));\n    } else if (logrec->cr_type == CL_SOFTLINK) {\n        /* entry is a symlink */\n        ATTR_MASK_SET(&p_op->fs_attrs, type);\n        strcpy(ATTR(&p_op->fs_attrs, type), STR_TYPE_LINK);\n\n        /* need to get symlink content */\n        attr_mask_set_index(&p_op->fs_attr_need, ATTR_INDEX_link);\n\n        /* no stripe info for symlinks */\n        attr_mask_unset_index(&p_op->fs_attr_need, ATTR_INDEX_stripe_info);\n        attr_mask_unset_index(&p_op->fs_attr_need, ATTR_INDEX_stripe_items);\n    }\n#ifdef _LUSTRE_HSM\n    else if (logrec->cr_type == CL_HSM) {\n        /* not a link */\n        attr_mask_unset_index(&p_op->fs_attr_need, ATTR_INDEX_link);\n    }\n#endif\n    else if (logrec->cr_type == CL_UNLINK) {\n        /* name and parent should have been provided by the UNLINK record */\n        check_path_info(p_op, \"UNLINK\");\n    }\n#ifdef HAVE_CL_LAYOUT\n    else if (logrec->cr_type == CL_LAYOUT) 
{\n        attr_mask_set_index(&p_op->fs_attr_need, ATTR_INDEX_stripe_info);\n        attr_mask_set_index(&p_op->fs_attr_need, ATTR_INDEX_stripe_items);\n    }\n#endif\n#ifdef _LUSTRE\n    else if (logrec->cr_type == CL_SETATTR) {\n        attr_mask_set_index(&p_op->fs_attr_need, ATTR_INDEX_projid);\n    }\n#endif\n\n    /* if the entry is already in DB, try to determine if something changed */\n    if (p_op->db_exists) {\n        if (logrec->cr_type == CL_EXT) {\n            /* in case of a rename, the path info must be set */\n            check_path_info(p_op, \"RENAME\");\n        }\n\n        /* get the new attributes, in case of a SATTR, HSM... */\n        if (allow_md_updt && ((logrec->cr_type == CL_MTIME)\n                              || (logrec->cr_type == CL_CTIME)\n                              || (logrec->cr_type == CL_CLOSE)\n                              || (logrec->cr_type == CL_TRUNC)\n                              || (logrec->cr_type == CL_HSM)\n                              || (logrec->cr_type == CL_SETATTR))) {\n            DisplayLog(LVL_DEBUG, ENTRYPROC_TAG,\n                       \"Getattr needed because this is a %s event, and \"\n                       \"metadata has not been recently updated.\",\n                       changelog_type2str(logrec->cr_type));\n\n            p_op->fs_attr_need.std |= POSIX_ATTR_MASK;\n        }\n    }\n\n    if (!status_mask_set) {\n        /* status mask has not been already computed */\n        /* Warning: do not use cached DB information to check the scope\n         * because some information may have changed (e.g. entry status)\n         * so the entry may now match the scope, and using an outdated status\n         * may result in an invalid matching. 
*/\n        add_matching_scopes_mask(&p_op->entry_id, &p_op->fs_attrs, true,\n                                 &cl_cb_status_mask);\n    } else {\n        cl_cb_status_mask = p_op->fs_attr_need.status;\n    }\n    /* call changelog callback for policies with a matching scope */\n    run_all_cl_cb(logrec, &p_op->entry_id, &p_op->db_attrs, &p_op->fs_attrs,\n                  &status_mask_need, cl_cb_status_mask, &rec_action);\n    p_op->fs_attr_need = attr_mask_or(&p_op->fs_attr_need, &status_mask_need);\n\n    /* process the value of rec_action */\n    switch (rec_action) {\n    case PROC_ACT_NONE:    /* nothing particular */\n        DisplayLog(LVL_DEBUG, ENTRYPROC_TAG,\n                   DFID \": run_all_cl_cb=none\", PFID(&p_op->entry_id));\n        break;\n    case PROC_ACT_RM_ALL:  /* remove the entry if it exists */\n        DisplayLog(LVL_DEBUG, ENTRYPROC_TAG,\n                   DFID \": run_all_cl_cb=rm from DB\", PFID(&p_op->entry_id));\n        if (p_op->db_exists) {\n            p_op->db_op_type = OP_TYPE_REMOVE_LAST;\n            return STAGE_PRE_APPLY;\n        } else  /* ignore the record */\n            return STAGE_CHGLOG_CLR;\n\n    case PROC_ACT_SOFTRM_IF_EXISTS:\n        DisplayLog(LVL_DEBUG, ENTRYPROC_TAG,\n                   DFID \": run_all_cl_cb=softrm if exists in DB\",\n                   PFID(&p_op->entry_id));\n        /* if no policy manages deleted entries, don't SOFTRM */\n        if (!has_deletion_policy()) {\n            p_op->db_op_type = OP_TYPE_REMOVE_LAST;\n            return STAGE_PRE_APPLY;\n        } else if (p_op->db_exists) {\n            /* soft remove when it exists */\n            p_op->db_op_type = OP_TYPE_SOFT_REMOVE;\n            return STAGE_PRE_APPLY;\n        } else  /* ignore the record */\n            return STAGE_CHGLOG_CLR;\n\n    case PROC_ACT_SOFTRM_ALWAYS:\n        DisplayLog(LVL_DEBUG, ENTRYPROC_TAG,\n                   DFID \": run_all_cl_cb=always softrm\", PFID(&p_op->entry_id));\n        /* if no policy 
manages deleted entries, don't SOFTRM */\n        if (!has_deletion_policy())\n            p_op->db_op_type = OP_TYPE_REMOVE_LAST;\n        else\n            /* always soft remove */\n            p_op->db_op_type = OP_TYPE_SOFT_REMOVE;\n\n        return STAGE_PRE_APPLY;\n    }\n\n    return STAGE_GET_INFO_FS;\n}\n\n/**\n *  Infer information and determine needed information from a changelog record.\n *  \\return the next pipeline step to be performed,\n *          -1 if entry must be dropped.\n */\nstatic int EntryProc_ProcessLogRec(struct entry_proc_op_t *p_op)\n{\n    /* short alias */\n    CL_REC_TYPE *logrec = p_op->extra_info.log_record.p_log_rec;\n\n    /* allow event-driven update */\n    bool md_allow_event_updt = true;\n\n    if (logrec->cr_type == CL_UNLINK) {\n#ifdef _LUSTRE_HSM\n        DisplayLog(LVL_DEBUG, ENTRYPROC_TAG,\n                   \"UNLINK on %s entry \" DFID \": last=%s, archived=%s\",\n                   p_op->db_exists ? \"known\" : \"unknown\", PFID(&p_op->entry_id),\n                   bool2str(logrec->cr_flags & CLF_UNLINK_LAST),\n                   bool2str(logrec->cr_flags & CLF_UNLINK_HSM_EXISTS));\n#else\n        DisplayLog(LVL_DEBUG, ENTRYPROC_TAG,\n                   \"UNLINK on %s entry \" DFID \": last=%s\",\n                   p_op->db_exists ? \"known\" : \"unknown\", PFID(&p_op->entry_id),\n                   bool2str(logrec->cr_flags & CLF_UNLINK_LAST));\n#endif\n\n        if (!has_deletion_policy() && p_op->check_if_last_entry) {\n            /* When inserting that entry, we didn't know whether the\n             * entry was the last one or not, so use the nlink\n             * attribute we requested earlier to determine. 
*/\n            if (ATTR_MASK_TEST(&p_op->db_attrs, nlink)\n                && (ATTR(&p_op->db_attrs, nlink) <= 1)) {\n                DisplayLog(LVL_DEBUG, ENTRYPROC_TAG,\n                           \"UNLINK record for entry with nlink=%u in DB => removing it\",\n                           ATTR(&p_op->db_attrs, nlink));\n                logrec->cr_flags |= CLF_UNLINK_LAST;\n            }\n        }\n        /* else: too dangerous : we risk to clean an entry in backend\n         * that we should not */\n\n        /* is it the last reference to this file? */\n        if (logrec->cr_flags & CLF_UNLINK_LAST) {\n            if (!has_deletion_policy()) {\n                /* no policy manages deleted entries, just drop removed entries\n                 * from DB */\n                if (p_op->db_exists) {\n                    p_op->db_op_type = OP_TYPE_REMOVE_LAST;\n                    return STAGE_PRE_APPLY;\n                } else\n                    /* ignore the record */\n                    return STAGE_CHGLOG_CLR;\n            }\n            /* policy-specific cases are managed in changelog callbacks */\n        } else if (p_op->db_exists) {\n            /* Entry still exists and is known in the DB. */\n            /* Remove the name only. Keep the inode information since\n             * there is more file names referring to it. */\n            p_op->db_op_type = OP_TYPE_REMOVE_ONE;\n            return STAGE_PRE_APPLY;\n        } else {\n            /* UNLINK with unknown file in database -> ignore the\n             * record. This case can happen on systems without LU-543\n             * when we insert a fake UNLINK (with an unknow FID at the\n             * time), but an application has already issued an UNLINK\n             * before the rename operation. 
*/\n            return STAGE_CHGLOG_CLR;\n        }\n        /* end if UNLINK */\n    } else if (logrec->cr_type == CL_RENAME) {\n        /* this is a source name event */\n        /* remove only the old name */\n        /* TODO: could be OP_TYPE_REMOVE_ONE or OP_TYPE_REMOVE_LAST. */\n        p_op->db_op_type = OP_TYPE_REMOVE_ONE;\n        return STAGE_PRE_APPLY;\n    } else if (logrec->cr_type == CL_RMDIR) {\n        /* XXX this piece of code suppose policies do not perform\n         * softrm on directories. This must be modified if\n         * some status managers care about directory removal.\n         */\n        if (p_op->db_exists) {\n            p_op->db_op_type = OP_TYPE_REMOVE_LAST;\n            return STAGE_PRE_APPLY;\n        } else {\n            /* ignore the record */\n            return STAGE_CHGLOG_CLR;\n        }\n    }\n\n    /* end if RMDIR */\n    /* not a removal */\n    if (logrec->cr_type != CL_UNLINK && logrec->cr_type != CL_RMDIR) {\n        if (!p_op->db_exists) {\n            attr_mask_t tmp;\n\n            DisplayLog(LVL_FULL, ENTRYPROC_TAG, DFID \"not in DB: INSERT\",\n                       PFID(&p_op->entry_id));\n\n            /* non-unlink (or non-destructive unlink) record on unknown entry:\n             * insert entry to the DB */\n            p_op->db_op_type = OP_TYPE_INSERT;\n\n            /* new entry, set insertion time */\n            ATTR_MASK_SET(&p_op->fs_attrs, creation_time);\n            ATTR(&p_op->fs_attrs, creation_time) = cltime2sec(logrec->cr_time);\n\n            /* we must get info that is not provided by the chglog:\n             * fs_attr_need |=  <needed attributes> AND NOT in fs_attrs.\n             */\n            tmp.std = (POSIX_ATTR_MASK | ATTR_MASK_name | ATTR_MASK_parent_id\n                       | ATTR_MASK_stripe_info | ATTR_MASK_stripe_items\n                       | ATTR_MASK_link | ATTR_MASK_projid);\n            tmp = attr_mask_and_not(&tmp, &p_op->fs_attrs.attr_mask);\n            
p_op->fs_attr_need = attr_mask_or(&p_op->fs_attr_need, &tmp);\n\n            /* if we needed fullpath (e.g. for policies), set it */\n            if (attr_mask_test_index(&p_op->db_attr_need, ATTR_INDEX_fullpath)\n                && !ATTR_MASK_TEST(&p_op->fs_attrs, fullpath))\n                attr_mask_set_index(&p_op->fs_attr_need, ATTR_INDEX_fullpath);\n\n            /* EntryProc_FillFromLogRec() will determine\n             * what status are to be retrieved.\n             */\n        } else {    /* non-unlink record on known entry */\n\n            attr_mask_t db_missing;\n\n            p_op->db_op_type = OP_TYPE_UPDATE;\n\n            /* check what information must be updated.\n             * missing info = DB query - retrieved */\n            db_missing = attr_mask_and_not(&p_op->db_attr_need,\n                                           &p_op->db_attrs.attr_mask);\n\n            /* get attrs if some is missing (all std attrs) */\n            if ((db_missing.std & POSIX_ATTR_MASK) &&\n                ((p_op->fs_attrs.attr_mask.std & POSIX_ATTR_MASK) !=\n                 POSIX_ATTR_MASK))\n                p_op->fs_attr_need.std |= POSIX_ATTR_MASK;\n\n            /* get projid if missing (file and dir only) */\n            if ((db_missing.std & ATTR_MASK_projid)\n                && !ATTR_MASK_TEST(&p_op->fs_attrs, projid)\n                && (!ATTR_FSorDB_TEST(p_op, type)\n                    || (!strcmp(ATTR_FSorDB(p_op, type), STR_TYPE_FILE)\n                         && !strcmp(ATTR_FSorDB(p_op, type), STR_TYPE_DIR)))) {\n                p_op->fs_attr_need.std |= ATTR_MASK_projid;\n            }\n\n            /* get stripe info if missing (file only) */\n            if ((db_missing.std & ATTR_MASK_stripe_info)\n                && !ATTR_MASK_TEST(&p_op->fs_attrs, stripe_info)\n                && (!ATTR_FSorDB_TEST(p_op, type)\n                    || !strcmp(ATTR_FSorDB(p_op, type), STR_TYPE_FILE))) {\n                p_op->fs_attr_need.std |=\n                  
  ATTR_MASK_stripe_info | ATTR_MASK_stripe_items;\n            }\n\n            /* get link content if missing (symlink only) */\n            if ((db_missing.std & ATTR_MASK_link)\n                && !ATTR_MASK_TEST(&p_op->fs_attrs, link)\n                && (!ATTR_FSorDB_TEST(p_op, type)\n                    || !strcmp(ATTR_FSorDB(p_op, type), STR_TYPE_LINK)))\n                p_op->fs_attr_need.std |= ATTR_MASK_link;\n\n            /* EntryProc_FillFromLogRec() will determine\n             * what status are to be retrieved. */\n\n            /* Check md_update policy */\n            if (need_md_update(&p_op->db_attrs, &md_allow_event_updt))\n                p_op->fs_attr_need.std |= POSIX_ATTR_MASK;\n\n            /* check if path update is needed (only if it was not just\n             * updated) */\n            if ((!ATTR_MASK_TEST(&p_op->fs_attrs, parent_id)\n                 || !ATTR_MASK_TEST(&p_op->fs_attrs, name))\n                && (need_path_update(&p_op->db_attrs, NULL)\n                    || (db_missing.\n                        std & (ATTR_MASK_fullpath | ATTR_MASK_name |\n                               ATTR_MASK_parent_id))))\n                p_op->fs_attr_need.std |= ATTR_MASK_name | ATTR_MASK_parent_id;\n        }\n    }\n\n    /* infer info from changelog record, then continue to next step */\n    return EntryProc_FillFromLogRec(p_op, md_allow_event_updt);\n}\n#endif /* CHANGELOG support */\n\n/* Ensure the fullpath from DB is consistent.\n * Set updt_mask according to the missing info.\n */\nstatic void check_fullpath(attr_set_t *attrs, const entry_id_t *id,\n                           attr_mask_t *updt_mask)\n{\n#ifdef _HAVE_FID\n    /* If the parent id from the changelog refers to a directory\n     * that no longer exists, the path built from the DB may be partial.\n     * If the current entry is the direct child of such a directory,\n     * we must update the parent information.\n     * Else, we should do it for every parent up to the unknown 
dir.\n     */\n    if (ATTR_MASK_TEST(attrs, fullpath) && ATTR(attrs, fullpath)[0] != '/') {\n        char parent[RBH_NAME_MAX];\n        char *next = strchr(ATTR(attrs, fullpath), '/');\n        if (next != NULL) {\n            entry_id_t parent_id;\n\n            memset(parent, 0, sizeof(parent));\n            strncpy(parent, ATTR(attrs, fullpath),\n                    (ptrdiff_t) (next - ATTR(attrs, fullpath)));\n\n            /* fid consists of 3 numbers */\n            if (sscanf(parent, SFID, RFID(&parent_id)) != FID_SCAN_CNT) {\n                DisplayLog(LVL_MAJOR, ENTRYPROC_TAG,\n                           \"Entry \" DFID\n                           \" has an inconsistent relative path: %s\", PFID(id),\n                           ATTR(attrs, fullpath));\n                /* fullpath is not consistent (should be <id>/name) */\n                ATTR_MASK_UNSET(attrs, fullpath);\n                /* update all path information */\n                updt_mask->std |=\n                    ATTR_MASK_parent_id | ATTR_MASK_name | ATTR_MASK_fullpath;\n            } else if (strchr(next + 1, '/') == NULL) {\n                /* the entry is the direct child of the unknown directory */\n                DisplayLog(LVL_EVENT, ENTRYPROC_TAG,\n                           \"Parent dir for entry \" DFID \" is unknown (parent: \"\n                           DFID \", child name: '%s'): updating entry path info\",\n                           PFID(id), PFID(&parent_id), next + 1);\n                ATTR_MASK_UNSET(attrs, fullpath);\n                updt_mask->std |=\n                    ATTR_MASK_parent_id | ATTR_MASK_name | ATTR_MASK_fullpath;\n            } else {\n                /* FIXME: We should update parent info of an upper entry. 
*/\n                ATTR_MASK_UNSET(attrs, fullpath);\n                /* update path info anyhow, to try fixing the issue */\n                updt_mask->std |=\n                    ATTR_MASK_parent_id | ATTR_MASK_name | ATTR_MASK_fullpath;\n            }\n        } else {\n            /* fullpath is not consistent (should be <pid>/name) */\n            ATTR_MASK_UNSET(attrs, fullpath);\n            /* update path info, to try fixing the issue */\n            updt_mask->std |=\n                ATTR_MASK_parent_id | ATTR_MASK_name | ATTR_MASK_fullpath;\n        }\n    }\n#else\n    if (ATTR_MASK_TEST(attrs, fullpath)\n        && ATTR(attrs, fullpath)[0] != '/') {\n        /* fullpath is not pertinent */\n        ATTR_MASK_UNSET(attrs, fullpath);\n        /* update path info, to try fixing the issue */\n        updt_mask->std |=\n            ATTR_MASK_parent_id | ATTR_MASK_name | ATTR_MASK_fullpath;\n    }\n#endif\n}\n\nstatic bool is_lustre_special(const struct entry_proc_op_t *p_op)\n{\n#ifdef _HAVE_FID\n    if (p_op->entry_id_is_set) {\n        /* check if id is the same as '<root>/.lustre'\n         * or '<root>/.lustre/fid' */\n        if (entry_id_equal(&p_op->entry_id, get_dot_lustre_fid())\n            || entry_id_equal(&p_op->entry_id, get_fid_fid())) {\n            DisplayLog(LVL_DEBUG, ENTRYPROC_TAG,\n                       \"Ignoring special lustre directory \" DFID,\n                       PFID(&p_op->entry_id));\n            return true;\n        }\n    } else {    /* id is not known, check other criteria (name, path, ...) 
*/\n\n        const char *path;\n        const entry_id_t *root_id;\n\n        root_id = get_root_id();\n\n        /* check if parent_id is root dir and name is '.lustre' */\n        if (root_id != NULL && ATTR_MASK_TEST(&p_op->fs_attrs, parent_id)\n            && ATTR_MASK_TEST(&p_op->fs_attrs, name)\n            && entry_id_equal(root_id, &ATTR(&p_op->fs_attrs, parent_id))\n            && strcmp(ATTR(&p_op->fs_attrs, name), dot_lustre_name) == 0) {\n            DisplayLog(LVL_DEBUG, ENTRYPROC_TAG,\n                       \"Ignoring special lustre directory \" DFID \"/%s\",\n                       PFID(root_id), ATTR(&p_op->fs_attrs, name));\n            return true;\n        }\n\n        path = get_dot_lustre_dir();\n\n        /* check the whole '<mnt_dir>/.lustre' path */\n        if (path != NULL && ATTR_FSorDB_TEST(p_op, fullpath) &&\n            strncmp(ATTR_FSorDB(p_op, fullpath), path, strlen(path) + 1) == 0) {\n            DisplayLog(LVL_DEBUG, ENTRYPROC_TAG,\n                       \"Ignoring special lustre directory <%s>\",\n                       ATTR_FSorDB(p_op, fullpath));\n            return true;\n        }\n    }\n#endif\n    return false;\n}\n\n#ifdef HAVE_CHANGELOGS\n/**\n * Infer what are the needed attributes from DB, depending on changelog\n * record content.\n */\nstatic void logrec2dbneed(struct entry_proc_op_t *p_op)\n{\n    CL_REC_TYPE *logrec = p_op->extra_info.log_record.p_log_rec;\n    obj_type_t type_clue = TYPE_NONE;\n    uint32_t status_scope = 0;  /* status mask */\n    attr_mask_t tmp;\n\n    /* does the log record gives a clue about entry_type */\n    type_clue = cl2type_clue(logrec);\n\n    p_op->db_attr_need = null_mask;\n\n    if (type_clue == TYPE_NONE) {\n        /* type is a useful information to make decisions (about getstripe,\n         * readlink, ...) 
*/\n        attr_mask_set_index(&p_op->db_attr_need, ATTR_INDEX_type);\n    } else {\n        ATTR_MASK_SET(&p_op->fs_attrs, type);\n        strcpy(ATTR(&p_op->fs_attrs, type), type2db(type_clue));\n    }\n\n    /* add diff mask for diff mode */\n    p_op->db_attr_need = attr_mask_or(&p_op->db_attr_need, &diff_mask);\n\n    /* If this is an unlink and we don't know whether it is the\n     * last entry, use nlink. */\n    if (logrec->cr_type == CL_UNLINK && p_op->check_if_last_entry)\n        attr_mask_set_index(&p_op->db_attr_need, ATTR_INDEX_nlink);\n    /* If it's a hard link, we will need the hlink so we can\n     * increment it. Will override the fs value. */\n    else if (logrec->cr_type == CL_HARDLINK)\n        attr_mask_set_index(&p_op->db_attr_need, ATTR_INDEX_nlink);\n\n    /* in case of (last) unlink, we need softrm filter masks.\n     * don't retrieve other information */\n    if (p_op->extra_info.log_record.p_log_rec->cr_type == CL_UNLINK\n        && logrec->cr_flags & CLF_UNLINK_LAST) {\n        tmp = sm_softrm_mask();\n        p_op->db_attr_need = attr_mask_or(&p_op->db_attr_need, &tmp);\n        /* Entry was deleted. Don't try to get more info from the DB. 
*/\n        return;\n    }\n\n    /* Only need to get md_update if the update policy != always */\n    if (updt_params.md.when != UPDT_ALWAYS)\n        attr_mask_set_index(&p_op->db_attr_need, ATTR_INDEX_md_update);\n\n    /* Only need to get path_update if the update policy != always\n     * and if it is not provided in logrec\n     */\n    if ((updt_params.path.when != UPDT_ALWAYS)\n        && (logrec->cr_namelen == 0))\n        attr_mask_set_index(&p_op->db_attr_need, ATTR_INDEX_path_update);\n\n    if (entry_proc_conf.detect_fake_mtime)\n        attr_mask_set_index(&p_op->db_attr_need, ATTR_INDEX_creation_time);\n\n    if (type_clue == TYPE_NONE || type_clue == TYPE_LINK)\n        /* check if link content is set for this entry */\n        attr_mask_set_index(&p_op->db_attr_need, ATTR_INDEX_link);\n\n    if (entry_proc_conf.match_classes) {\n        if (updt_params.fileclass.when != UPDT_ALWAYS)\n            attr_mask_set_index(&p_op->db_attr_need,\n                                ATTR_INDEX_class_update);\n\n        tmp = attr_mask_and_not(&policies.global_fileset_mask,\n                                &p_op->fs_attrs.attr_mask);\n        p_op->db_attr_need = attr_mask_or(&p_op->db_attr_need, &tmp);\n    }\n\n    /* check if entry is in policies scope */\n    add_matching_scopes_mask(&p_op->entry_id, &p_op->fs_attrs, true,\n                             &status_scope);\n\n    /* get missing attributes to check the scopes:\n     * db_attr_need |= <attrs_for_status> and not <fs_attrs>\n     */\n    tmp = attrs_for_status_mask(status_scope, false);\n    tmp = attr_mask_and_not(&tmp, &p_op->fs_attrs.attr_mask);\n    p_op->db_attr_need = attr_mask_or(&p_op->db_attr_need, &tmp);\n\n    /* In case of a RENAME, match the new name (not the one from the DB). 
*/\n    if ((logrec->cr_type == CL_EXT)\n        && (p_op->db_attr_need.std & ATTR_MASK_fullpath)) {\n        int rc;\n\n        rc = Lustre_GetFullPath(&p_op->entry_id,\n                                ATTR(&p_op->fs_attrs, fullpath),\n                                sizeof(ATTR(&p_op->fs_attrs, fullpath)));\n        if (rc == 0) {\n            ATTR_MASK_SET(&p_op->fs_attrs, fullpath);\n            p_op->db_attr_need.std &= ~ATTR_MASK_fullpath;\n        }\n    }\n}\n#endif\n\n/**\n * Determine needed DB attributes to process a scanned entry.\n */\nstatic void scan2dbneed(struct entry_proc_op_t *p_op)\n{\n    attr_mask_t attr_allow_cached;\n    uint32_t status_scope = 0;  /* status mask */\n    attr_mask_t tmp;\n\n    /* check if entry is in policies scope */\n    add_matching_scopes_mask(&p_op->entry_id, &p_op->fs_attrs, true,\n                             &status_scope);\n\n    p_op->db_attr_need = attr_mask_or(&p_op->db_attr_need, &diff_mask);\n    /* retrieve missing attributes for diff */\n    tmp = attr_mask_and_not(&diff_mask, &p_op->fs_attrs.attr_mask);\n    p_op->fs_attr_need = attr_mask_or(&p_op->fs_attr_need, &tmp);\n\n    if (entry_proc_conf.detect_fake_mtime)\n        attr_mask_set_index(&p_op->db_attr_need, ATTR_INDEX_creation_time);\n\n    /* get all needed attributes for status */\n    attr_allow_cached = attrs_for_status_mask(status_scope, false);\n\n    /* what must be retrieved from DB: */\n    tmp = attr_mask_and_not(&attr_allow_cached, &p_op->fs_attrs.attr_mask);\n    p_op->db_attr_need = attr_mask_or(&p_op->db_attr_need, &tmp);\n\n    /* no dircount for non-dirs */\n    if (ATTR_MASK_TEST(&p_op->fs_attrs, type) &&\n        strcmp(ATTR(&p_op->fs_attrs, type), STR_TYPE_DIR))\n        attr_mask_unset_index(&p_op->db_attr_need, ATTR_INDEX_dircount);\n\n    /* don't get stripe for non-files */\n    if (ATTR_MASK_TEST(&p_op->fs_attrs, type)\n        && strcmp(ATTR(&p_op->fs_attrs, type), STR_TYPE_FILE) != 0) {\n        
attr_mask_unset_index(&p_op->db_attr_need, ATTR_INDEX_stripe_info);\n        attr_mask_unset_index(&p_op->db_attr_need, ATTR_INDEX_stripe_items);\n        attr_mask_unset_index(&p_op->fs_attr_need, ATTR_INDEX_stripe_info);\n        attr_mask_unset_index(&p_op->fs_attr_need, ATTR_INDEX_stripe_items);\n    }\n\n    /* no readlink for non symlinks */\n    if (ATTR_MASK_TEST(&p_op->fs_attrs, type)) { /* likely */\n        if (!strcmp(ATTR(&p_op->fs_attrs, type), STR_TYPE_LINK))\n            /* check if symlink's contents is known */\n            attr_mask_set_index(&p_op->db_attr_need, ATTR_INDEX_link);\n        else\n            attr_mask_unset_index(&p_op->db_attr_need, ATTR_INDEX_link);\n    }\n\n    if (entry_proc_conf.match_classes) {\n        if (updt_params.fileclass.when != UPDT_ALWAYS)\n            attr_mask_set_index(&p_op->db_attr_need,\n                                ATTR_INDEX_class_update);\n\n        tmp = attr_mask_and_not(&policies.global_fileset_mask,\n                                &p_op->fs_attrs.attr_mask);\n        p_op->db_attr_need = attr_mask_or(&p_op->db_attr_need, &tmp);\n    }\n}\n\n\n/**\n * check if the entry exists in the database and what info\n * must be retrieved.\n */\nint EntryProc_get_info_db(struct entry_proc_op_t *p_op, lmgr_t *lmgr)\n{\n    int rc = 0;\n    int next_stage = -1;    /* -1 = skip */\n    attr_mask_t tmp;\n\n    const pipeline_stage_t *stage_info =\n        &entry_proc_pipeline[p_op->pipeline_stage];\n\n    /* always ignore root */\n    if (p_op->entry_id_is_set\n        && entry_id_equal(&p_op->entry_id, get_root_id())) {\n        DisplayLog(LVL_DEBUG, ENTRYPROC_TAG,\n                   \"Ignoring record for root directory\");\n        /* drop the entry */\n        next_stage = -1;\n        goto next_step;\n    }\n\n    /* ignore special files */\n    if (is_lustre_special(p_op)) {\n        /* drop the entry */\n        next_stage = -1;\n        goto next_step;\n    }\n#ifdef HAVE_CHANGELOGS\n    /* is this a 
changelog record? */\n    if (p_op->extra_info.is_changelog_record) {\n        CL_REC_TYPE *logrec = p_op->extra_info.log_record.p_log_rec;\n\n        /* chglog_reader_config.mds_has_lu543 has already been tested\n         * in changelog reader before pushing the entry. */\n        if (logrec->cr_type == CL_UNLINK && p_op->get_fid_from_db) {\n            /* It is possible this unlink was inserted by the changelog\n             * reader. Some Lustre server don't give the FID, so retrieve\n             * it now from the NAMES table, given the parent FID and the\n             * filename. */\n            p_op->get_fid_from_db = 0;\n\n            if (ATTR_MASK_TEST(&p_op->fs_attrs, name)) {\n                /* name was previously copied to the fs attributes */\n                rc = ListMgr_Get_FID_from_Path(lmgr, &logrec->cr_pfid,\n                                               ATTR(&p_op->fs_attrs, name),\n                                               &p_op->entry_id);\n            } else {\n                /* Use the name from changelog. It may not be null-terminated\n                 * so copy it to a temporary buffer. */\n                char *tmp_name;\n\n                if (asprintf(&tmp_name, \"%.*s\", logrec->cr_namelen,\n                             rh_get_cl_cr_name(logrec)) == -1) {\n                    rc = -1;\n                } else {\n                    rc = ListMgr_Get_FID_from_Path(lmgr, &logrec->cr_pfid,\n                                                   tmp_name, &p_op->entry_id);\n\n                    free(tmp_name);\n                }\n            }\n\n            if (!rc) {\n                if (!fid_is_sane(&logrec->cr_pfid))\n                    DisplayLog(LVL_MAJOR, ENTRYPROC_TAG,\n                               \"Error: insane parent fid \" DFID \" from DB\",\n                               PFID(&logrec->cr_pfid));\n                /* The FID is now set, so we can register it with the\n                 * constraint engine. 
Since this operation is at the\n                 * very top of the queue, we register it at the head\n                 * of the constraint list, not at the tail. */\n                p_op->entry_id_is_set = 1;\n                id_constraint_register(p_op, true);\n            }\n\n            /* Unblock the pipeline stage. */\n            EntryProcessor_Unblock(STAGE_GET_INFO_DB);\n\n            if (rc) {\n                /* Not found. Skip the entry */\n                DisplayLog(LVL_FULL, ENTRYPROC_TAG,\n                           \"Warning: parent/filename for UNLINK not found\");\n                next_stage = -1;\n                goto next_step;\n            }\n        }\n\n        /* determine needed attributes from DB */\n        logrec2dbneed(p_op);\n\n        /* attributes to be retrieved */\n        p_op->db_attrs.attr_mask = p_op->db_attr_need;\n\n        rc = ListMgr_Get(lmgr, &p_op->entry_id, &p_op->db_attrs);\n\n        if (rc == DB_SUCCESS) {\n            p_op->db_exists = 1;\n            /* attr mask has been set by ListMgr_Get */\n        } else if (rc == DB_NOT_EXISTS) {\n            p_op->db_exists = 0;\n            /* no attrs from DB */\n            ATTR_MASK_INIT(&p_op->db_attrs);\n        } else {\n            /* ERROR */\n            DisplayLog(LVL_CRIT, ENTRYPROC_TAG,\n                       \"Error %d retrieving entry \" DFID \" from DB: %s.\", rc,\n                       PFID(&p_op->entry_id), lmgr_err2str(rc));\n            p_op->db_exists = 0;\n            /* no attrs from DB */\n            ATTR_MASK_INIT(&p_op->db_attrs);\n        }\n\n        /* Retrieve info from the log record, and decide what info must be\n         * retrieved from filesystem. 
*/\n        next_stage = EntryProc_ProcessLogRec(p_op);\n\n        /* Note: this check must be done after processing log record,\n         * because it can determine if status is needed */\n        tmp = attrs_for_status_mask(p_op->fs_attr_need.status, true);\n        p_op->fs_attr_need = attr_mask_or(&p_op->fs_attr_need, &tmp);\n\n        char tmp_buf[RBH_NAME_MAX];\n        DisplayLog(LVL_DEBUG, ENTRYPROC_TAG, \"RECORD: %s \" DFID \" %#x %.*s => \"\n                   \"getstripe=%u, getattr=%u, getpath=%u, readlink=%u\"\n                   \", getstatus(%s)\",\n                   changelog_type2str(logrec->cr_type), PFID(&p_op->entry_id),\n                   logrec->cr_flags & CLF_FLAGMASK,\n                   logrec->cr_namelen ? logrec->cr_namelen : 6,\n                   logrec->cr_namelen ? rh_get_cl_cr_name(logrec) : \"<null>\",\n                   NEED_GETSTRIPE(p_op) ? 1 : 0, NEED_GETATTR(p_op) ? 1 : 0,\n                   NEED_GETPATH(p_op) ? 1 : 0, NEED_READLINK(p_op) ? 1 : 0,\n                   name_status_mask(p_op->fs_attr_need.status, tmp_buf,\n                                    sizeof(tmp_buf)));\n    } else {    /* entry from FS scan */\n#endif\n        attr_mask_t attr_need_fresh = {0};\n\n        /* scan is expected to provide full path and attributes. 
*/\n        if (!ATTR_MASK_TEST(&p_op->fs_attrs, fullpath)) {\n            DisplayLog(LVL_CRIT, ENTRYPROC_TAG,\n                       \"Error: missing info from FS scan\");\n            /* skip the entry */\n            next_stage = -1;\n            goto next_step;\n        }\n\n        /* determined needed attributes from DB */\n        scan2dbneed(p_op);\n\n        if (!attr_mask_is_null(p_op->db_attr_need)) {\n            p_op->db_attrs.attr_mask = p_op->db_attr_need;\n            rc = ListMgr_Get(lmgr, &p_op->entry_id, &p_op->db_attrs);\n\n            if (rc == DB_SUCCESS) {\n                p_op->db_exists = 1;\n                tmp = attr_mask_and_not(&p_op->db_attr_need,\n                                        &p_op->db_attrs.attr_mask);\n                p_op->fs_attr_need = attr_mask_or(&p_op->fs_attr_need, &tmp);\n            } else if (rc == DB_NOT_EXISTS) {\n                p_op->db_exists = 0;\n                ATTR_MASK_INIT(&p_op->db_attrs);\n            } else {\n                /* ERROR */\n                DisplayLog(LVL_CRIT, ENTRYPROC_TAG,\n                           \"Error %d retrieving entry \" DFID \" from DB: %s.\",\n                           rc, PFID(&p_op->entry_id), lmgr_err2str(rc));\n                p_op->db_exists = 0;\n                ATTR_MASK_INIT(&p_op->db_attrs);\n            }\n        } else {\n            p_op->db_exists = ListMgr_Exists(lmgr, &p_op->entry_id);\n        }\n\n        /* get status for all policies */\n        p_op->fs_attr_need.status |= all_status_mask();\n        tmp = attr_mask_and_not(&attr_need_fresh, &p_op->fs_attrs.attr_mask);\n        p_op->fs_attr_need = attr_mask_or(&p_op->fs_attr_need, &tmp);\n\n        if (!p_op->db_exists) {\n            /* new entry */\n            p_op->db_op_type = OP_TYPE_INSERT;\n\n            /* set creation time if it was not set by scan module */\n            if (!ATTR_MASK_TEST(&p_op->fs_attrs, creation_time)) {\n                ATTR_MASK_SET(&p_op->fs_attrs, 
creation_time);\n                /* FIXME min(atime,mtime,ctime)? */\n                ATTR(&p_op->fs_attrs, creation_time) = time(NULL);\n            }\n#ifdef _LUSTRE\n            /* get stripe for files */\n            if (ATTR_MASK_TEST(&p_op->fs_attrs, type)\n                && !strcmp(ATTR(&p_op->fs_attrs, type), STR_TYPE_FILE)\n                /* only if it was not retrieved during the scan */\n                && !(ATTR_MASK_TEST(&p_op->fs_attrs, stripe_info)\n                     && ATTR_MASK_TEST(&p_op->fs_attrs, stripe_items))) {\n                attr_mask_set_index(&p_op->fs_attr_need,\n                                    ATTR_INDEX_stripe_info);\n                attr_mask_set_index(&p_op->fs_attr_need,\n                                    ATTR_INDEX_stripe_items);\n            }\n#endif\n\n            /* readlink for symlinks (if not already known) */\n            if (ATTR_MASK_TEST(&p_op->fs_attrs, type)\n                && !strcmp(ATTR(&p_op->fs_attrs, type), STR_TYPE_LINK)\n                && !ATTR_MASK_TEST(&p_op->fs_attrs, link)) {\n                attr_mask_set_index(&p_op->fs_attr_need, ATTR_INDEX_link);\n            } else {\n                attr_mask_unset_index(&p_op->fs_attr_need, ATTR_INDEX_link);\n            }\n\n            next_stage = STAGE_GET_INFO_FS;\n        } else {\n            p_op->db_op_type = OP_TYPE_UPDATE;\n\n            if (ATTR_MASK_TEST(&p_op->fs_attrs, type)) {    /* likely set */\n                if (strcmp(ATTR(&p_op->fs_attrs, type), STR_TYPE_LINK))\n                    /* non-link */\n                    attr_mask_unset_index(&p_op->fs_attr_need, ATTR_INDEX_link);\n                else {\n                    /* link */\n#ifdef _LUSTRE\n                    /* already known (in DB or FS) */\n                    if (ATTR_FSorDB_TEST(p_op, link))\n                        attr_mask_unset_index(&p_op->fs_attr_need,\n                                              ATTR_INDEX_link);\n                    else    /* not known 
*/\n                        attr_mask_set_index(&p_op->fs_attr_need,\n                                            ATTR_INDEX_link);\n#else\n                    /* For non-lustre filesystems, inodes may be recycled,\n                     * so re-read link even if it is is DB */\n                    if (ATTR_MASK_TEST(&p_op->fs_attrs, link))\n                        attr_mask_unset_index(&p_op->fs_attr_need,\n                                              ATTR_INDEX_link);\n                    else\n                        attr_mask_set_index(&p_op->fs_attr_need,\n                                            ATTR_INDEX_link);\n#endif\n                }\n            }\n\n            /* get parent_id+name, if not set during scan\n             * (eg. for root directory) */\n            if (!ATTR_MASK_TEST(&p_op->fs_attrs, name))\n                attr_mask_set_index(&p_op->fs_attr_need, ATTR_INDEX_name);\n            if (!ATTR_MASK_TEST(&p_op->fs_attrs, parent_id))\n                attr_mask_set_index(&p_op->fs_attr_need, ATTR_INDEX_parent_id);\n\n#ifdef _LUSTRE\n            /* check stripe only for files */\n            if (ATTR_MASK_TEST(&p_op->fs_attrs, type)\n                && !strcmp(ATTR(&p_op->fs_attrs, type), STR_TYPE_FILE)\n                && !strcmp(global_config.fs_type, \"lustre\")) {\n                check_stripe_info(p_op, lmgr);\n            }\n#endif\n            next_stage = STAGE_GET_INFO_FS;\n        }\n\n#ifdef HAVE_CHANGELOGS\n    }   /* end if entry from FS scan */\n#endif\n\n    check_fullpath(&p_op->db_attrs, &p_op->entry_id, &p_op->fs_attr_need);\n\n#ifdef _BENCH_DB\n    /* don't get info from filesystem */\n    next_stage = STAGE_PRE_APPLY;\n#endif\n\n next_step:\n    if (next_stage == -1)\n        /* drop the entry */\n        rc = EntryProcessor_Acknowledge(p_op, -1, true);\n    else\n        /* go to next pipeline step */\n        rc = EntryProcessor_Acknowledge(p_op, next_stage, false);\n\n    if (rc)\n        DisplayLog(LVL_CRIT, 
ENTRYPROC_TAG,\n                   \"Error %d acknowledging stage %s.\", rc,\n                   stage_info->stage_name);\n    return rc;\n}\n\n/** skip_record a record by acknowledging current operation */\nstatic int skip_record(struct entry_proc_op_t *p_op)\n{\n    int rc;\n\n#ifdef HAVE_CHANGELOGS\n    if (p_op->extra_info.is_changelog_record)\n        /* do nothing on DB but ack the record */\n        rc = EntryProcessor_Acknowledge(p_op, STAGE_CHGLOG_CLR, false);\n    else\n#endif\n        /* remove the operation from processing pipeline */\n        rc = EntryProcessor_Acknowledge(p_op, -1, true);\n\n    if (rc)\n        DisplayLog(LVL_CRIT, ENTRYPROC_TAG, \"Error %d acknowledging stage.\",\n                   rc);\n    return rc;\n}\n\n#ifdef HAVE_CHANGELOGS\n/** take DB removal decision when an entry no longer exists in the filesystem */\nstatic int rm_record(struct entry_proc_op_t *p_op)\n{\n    attr_set_t merged_attrs = ATTR_SET_INIT;\n    proc_action_e pa;\n    int rc;\n\n    /* if the entry is no in DB, an no deletion policy is defined,\n     * just drop the log record */\n    if (!p_op->db_exists && !has_deletion_policy())\n        return skip_record(p_op);\n\n    if (p_op->extra_info.is_changelog_record\n        && (p_op->extra_info.log_record.p_log_rec->cr_type != CL_UNLINK\n            && p_op->extra_info.log_record.p_log_rec->cr_type != CL_RMDIR)) {\n        /* Lustre 2 with changelog: we are here because lstat (by fid)\n         * on the entry failed, which ensure the entry no longer\n         * exists. Skip it. The entry will be removed by a subsequent\n         * UNLINK record.\n         *\n         * On other posix filesystems, the entry disappeared between\n         * its scanning and its processing... skip it so it will be\n         * cleaned at the end of the scan. 
*/\n        return skip_record(p_op);\n    }\n\n    ATTR_MASK_INIT(&merged_attrs);\n\n    ListMgr_MergeAttrSets(&merged_attrs, &p_op->fs_attrs, 1);\n    ListMgr_MergeAttrSets(&merged_attrs, &p_op->db_attrs, 0);\n\n    pa = match_all_softrm_filters(&p_op->entry_id, &merged_attrs);\n\n    /* free allocated structs in merged attributes */\n    ListMgr_FreeAttrs(&merged_attrs);\n\n    switch (pa) {\n    case PROC_ACT_NONE:\n        DisplayLog(LVL_DEBUG, ENTRYPROC_TAG,\n                   DFID \": match_all_softrm_filters=none\",\n                   PFID(&p_op->entry_id));\n        /* keep the current db_op_type */\n        break;\n\n    case PROC_ACT_RM_ALL:\n        DisplayLog(LVL_DEBUG, ENTRYPROC_TAG,\n                   DFID \": match_all_softrm_filters=rm from DB\",\n                   PFID(&p_op->entry_id));\n        return rm_record(p_op);\n\n    case PROC_ACT_SOFTRM_IF_EXISTS:\n        DisplayLog(LVL_DEBUG, ENTRYPROC_TAG,\n                   DFID\n                   \": match_all_softrm_filters=softrm if exists in DB (exist=%d)\",\n                   PFID(&p_op->entry_id), p_op->db_exists ? 
1 : 0);\n        if (p_op->db_exists)\n            p_op->db_op_type = OP_TYPE_SOFT_REMOVE;\n        else\n            /* drop the record */\n            return skip_record(p_op);\n        break;\n\n    case PROC_ACT_SOFTRM_ALWAYS:\n        DisplayLog(LVL_DEBUG, ENTRYPROC_TAG,\n                   DFID \": match_all_softrm_filters=always softrm\",\n                   PFID(&p_op->entry_id));\n        p_op->db_op_type = OP_TYPE_SOFT_REMOVE;\n        break;\n    }\n\n    rc = EntryProcessor_Acknowledge(p_op, STAGE_PRE_APPLY, false);\n    if (rc)\n        DisplayLog(LVL_CRIT, ENTRYPROC_TAG, \"Error %d acknowledging stage.\",\n                   rc);\n    return rc;\n}\n#endif\n\nint EntryProc_get_info_fs(struct entry_proc_op_t *p_op, lmgr_t *lmgr)\n{\n    int rc;\n    char tmp_buf[RBH_NAME_MAX];\n#ifdef _HAVE_FID\n    char path[RBH_PATH_MAX];\n    BuildFidPath(&p_op->entry_id, path);\n#else\n    char *path;\n    if (ATTR_FSorDB_TEST(p_op, fullpath)) {\n        path = ATTR_FSorDB(p_op, fullpath);\n    } else {\n        DisplayLog(LVL_CRIT, ENTRYPROC_TAG,\n                   \"Entry path is needed for retrieving file info\");\n        return EINVAL;\n    }\n#endif\n\n    DisplayLog(LVL_FULL, ENTRYPROC_TAG,\n               DFID \": Getattr=%u, Getpath=%u, Readlink=%u\"\n               \", Getstatus(%s), Getstripe=%u\"\n#ifdef _LUSTRE\n               \", Getprojid=%u\"\n#endif\n               , PFID(&p_op->entry_id), NEED_GETATTR(p_op) ? 1 : 0,\n               NEED_GETPATH(p_op) ? 1 : 0, NEED_READLINK(p_op) ? 1 : 0,\n               name_status_mask(p_op->fs_attr_need.status, tmp_buf,\n                                sizeof(tmp_buf)), NEED_GETSTRIPE(p_op) ? 1 : 0\n#ifdef _LUSTRE\n                                , NEED_GETPROJID(p_op) ? 
1 : 0\n#endif\n);\n\n    /* don't retrieve info which is already fresh */\n    p_op->fs_attr_need =\n        attr_mask_and_not(&p_op->fs_attr_need, &p_op->fs_attrs.attr_mask);\n\n#ifdef HAVE_CHANGELOGS  /* never needed for scans */\n    if (NEED_GETATTR(p_op) && (p_op->extra_info.is_changelog_record)) {\n        struct stat entry_md;\n\n        rc = errno = 0;\n#if defined(_LUSTRE) && defined(_HAVE_FID) && defined(_MDS_STAT_SUPPORT)\n        if (global_config.direct_mds_stat)\n            rc = lustre_mds_stat_by_fid(&p_op->entry_id, &entry_md);\n        else\n#endif\n        if (lstat(path, &entry_md) != 0)\n            rc = -errno;\n\n        /* get entry attributes */\n        if (rc != 0) {\n            if (err_missing(rc)) {\n                DisplayLog(LVL_DEBUG, ENTRYPROC_TAG,\n                           \"Entry %s no longer exists\", path);\n                return rm_record(p_op);\n            } else\n                DisplayLog(LVL_DEBUG, ENTRYPROC_TAG, \"lstat() failed on %s: %s\",\n                           path, strerror(-rc));\n\n            /* If lstat returns an error, drop the log record */\n            return skip_record(p_op);\n        } else if (entry_md.st_nlink == 0) {\n            /* remove pending */\n            DisplayLog(LVL_DEBUG, ENTRYPROC_TAG,\n                       \"Entry %s has nlink=0: remove pending\", path);\n            return rm_record(p_op);\n        }\n\n        /* convert them to internal structure */\n#if defined(_LUSTRE) && defined(_HAVE_FID) && defined(_MDS_STAT_SUPPORT)\n        stat2rbh_attrs(&entry_md, &p_op->fs_attrs,\n                       !global_config.direct_mds_stat);\n#else\n        stat2rbh_attrs(&entry_md, &p_op->fs_attrs, true);\n#endif\n        ATTR_MASK_SET(&p_op->fs_attrs, md_update);\n        ATTR(&p_op->fs_attrs, md_update) = time(NULL);\n\n    }\n    /* getattr needed */\n    if (NEED_GETPATH(p_op)) {\n        if (path_check_update(&p_op->entry_id, path, &p_op->fs_attrs,\n                              
p_op->fs_attr_need) == PCR_ORPHAN) {\n            /* ignore entries not in the namespace */\n            return skip_record(p_op);\n        }\n    }\n#endif\n\n    if (entry_proc_conf.detect_fake_mtime\n        && ATTR_FSorDB_TEST(p_op, creation_time)\n        && ATTR_MASK_TEST(&p_op->fs_attrs, last_mod)) {\n        check_and_warn_fake_mtime(p_op);\n    }\n#ifdef _LUSTRE\n    /* getstripe only for files */\n    if (NEED_GETSTRIPE(p_op)\n        && ATTR_FSorDB_TEST(p_op, type)\n        && strcmp(ATTR_FSorDB(p_op, type), STR_TYPE_FILE) != 0) {\n        attr_mask_unset_index(&p_op->fs_attr_need, ATTR_INDEX_stripe_info);\n        attr_mask_unset_index(&p_op->fs_attr_need, ATTR_INDEX_stripe_items);\n    }\n\n    if (NEED_GETSTRIPE(p_op)) {\n        /* get entry stripe */\n        rc = File_GetStripeByPath(path,\n                                  &ATTR(&p_op->fs_attrs, stripe_info),\n                                  &ATTR(&p_op->fs_attrs, stripe_items));\n        if (rc) {\n            ATTR_MASK_UNSET(&p_op->fs_attrs, stripe_info);\n            ATTR_MASK_UNSET(&p_op->fs_attrs, stripe_items);\n        } else {\n            ATTR_MASK_SET(&p_op->fs_attrs, stripe_info);\n            ATTR_MASK_SET(&p_op->fs_attrs, stripe_items);\n        }\n    }   /* get_stripe needed */\n\n    /* projid: currently, only file and dir supported */\n    if (global_config.lustre_projid && NEED_GETPROJID(p_op)\n        && ATTR_FSorDB_TEST(p_op, type)\n        && (!strcmp(ATTR_FSorDB(p_op, type), STR_TYPE_FILE)\n            || !strcmp(ATTR_FSorDB(p_op, type), STR_TYPE_DIR))) {\n        /* file or dir, get project id */\n        rc = lustre_project_get_id(path);\n        if (rc < 0)  {\n            DisplayLog(LVL_MAJOR, ENTRYPROC_TAG,\n                       \"Failed to get lustre projid for %s (%s): error %d\",\n                       path, ATTR_FSorDB(p_op, type), rc);\n        } else {\n            DisplayLog(LVL_FULL, ENTRYPROC_TAG, DFID \": projid=%u\",\n                       
PFID(&p_op->entry_id), rc);\n            ATTR_MASK_SET(&p_op->fs_attrs, projid);\n            ATTR(&p_op->fs_attrs, projid) = rc;\n        }\n    }\n#endif\n\n    if (NEED_ANYSTATUS(p_op)) {\n        int i;\n        sm_instance_t *smi;\n        attr_set_t merged_attrs = ATTR_SET_INIT;    /* attrs from FS+DB */\n        attr_set_t new_attrs = ATTR_SET_INIT;   /* attributes + status */\n\n        ATTR_MASK_INIT(&merged_attrs);\n\n        ListMgr_MergeAttrSets(&merged_attrs, &p_op->fs_attrs, 1);\n        ListMgr_MergeAttrSets(&merged_attrs, &p_op->db_attrs, 0);\n\n        /* match policy scopes according to newly set information:\n         * remove needed status from mask and append the updated one. */\n        p_op->fs_attr_need.status &= ~all_status_mask();\n        /* XXX this fails if scope attributes are missing */\n        add_matching_scopes_mask(&p_op->entry_id, &merged_attrs, true,\n                                 &p_op->fs_attr_need.status);\n\n        i = 0;\n        while ((smi = get_sm_instance(i)) != NULL) {\n            /* clean the mask without freeing sm_status */\n            ATTR_MASK_INIT(&new_attrs);\n\n            if (NEED_GETSTATUS(p_op, i)) {\n                if (smi->sm->get_status_func != NULL) {\n                    DisplayLog(LVL_FULL, ENTRYPROC_TAG,\n                               DFID \": retrieving status for policy '%s'\",\n                               PFID(&p_op->entry_id), smi->sm->name);\n                    /* this also check if entry is ignored for this policy */\n                    rc = smi->sm->get_status_func(smi, &p_op->entry_id,\n                                                  &merged_attrs, &new_attrs);\n                    if (err_missing(rc)) {\n                        DisplayLog(LVL_DEBUG, ENTRYPROC_TAG,\n                                   \"Entry %s no longer exists\", path);\n                        /* changelog: an UNLINK event will be raised,\n                         *       so we ignore current record.\n        
                 * scan: entry will be garbage collected at the end of\n                         *       the scan */\n                        return skip_record(p_op);\n                    } else if (rc != 0) {\n                        DisplayLog(LVL_MAJOR, ENTRYPROC_TAG,\n                                   \"Failed to get status for %s (%s status manager): error %d\",\n                                   path, smi->sm->name, rc);\n                    } else {\n                        /* merge/update attributes */\n                        ListMgr_MergeAttrSets(&p_op->fs_attrs, &new_attrs,\n                                              true);\n                    }\n                    /* free allocated resources, once merged */\n                    ListMgr_FreeAttrs(&new_attrs);\n                }\n            }\n            i++;\n        }\n        /* free allocated structs in merged attributes */\n        ListMgr_FreeAttrs(&merged_attrs);\n    }\n\n    /* readlink only for symlinks */\n    if (NEED_READLINK(p_op) && ATTR_FSorDB_TEST(p_op, type)\n        && strcmp(ATTR_FSorDB(p_op, type), STR_TYPE_LINK) != 0)\n        attr_mask_unset_index(&p_op->fs_attr_need, ATTR_INDEX_link);\n\n    if (NEED_READLINK(p_op)) {\n        ssize_t len = readlink(path, ATTR(&p_op->fs_attrs, link), RBH_PATH_MAX);\n        if (len >= 0) {\n            ATTR_MASK_SET(&p_op->fs_attrs, link);\n\n            /* add final '\\0' on success */\n            if (len >= RBH_PATH_MAX)\n                ATTR(&p_op->fs_attrs, link)[len - 1] = '\\0';\n            else\n                ATTR(&p_op->fs_attrs, link)[len] = '\\0';\n        } else\n            DisplayLog(LVL_MAJOR, ENTRYPROC_TAG, \"readlink failed on %s: %s\",\n                       path, strerror(errno));\n    }\n\n    /** FIXME some special files should be ignored i.e. not inserted in DB. 
*/\n    if (is_lustre_special(p_op))\n        /* drop the entry */\n        return skip_record(p_op);\n\n    /* match fileclasses if specified in config */\n    /* FIXME: check fileclass update parameters */\n    if (entry_proc_conf.match_classes)\n        match_classes(&p_op->entry_id, &p_op->fs_attrs, &p_op->db_attrs);\n\n    /* go to next step */\n    rc = EntryProcessor_Acknowledge(p_op, STAGE_PRE_APPLY, false);\n    if (rc)\n        DisplayLog(LVL_CRIT, ENTRYPROC_TAG, \"Error %d acknowledging stage.\",\n                   rc);\n    return rc;\n}\n\nstatic bool dbop_is_batchable(struct entry_proc_op_t *first,\n                              struct entry_proc_op_t *next,\n                              attr_mask_t *full_attr_mask)\n{\n    if (first->db_op_type != OP_TYPE_INSERT\n        && first->db_op_type != OP_TYPE_UPDATE\n        && first->db_op_type != OP_TYPE_NONE)\n        return false;\n    else if (first->db_op_type != next->db_op_type)\n        return false;\n    /* starting from here, db_op_type is the same for the 2 operations */\n    /* all NOOP operations can be batched */\n    else if (first->db_op_type == OP_TYPE_NONE)\n        return true;\n    /* different masks can be mixed, as long as attributes for each table are\n     * the same or 0. Ask the list manager about that. */\n    else if (lmgr_batch_compat(*full_attr_mask, next->fs_attrs.attr_mask)) {\n        *full_attr_mask =\n            attr_mask_or(full_attr_mask, &next->fs_attrs.attr_mask);\n        return true;\n    } else\n        return false;\n}\n\n/** operation cleaning before the db_apply step */\nint EntryProc_pre_apply(struct entry_proc_op_t *p_op, lmgr_t *lmgr)\n{\n    int rc;\n    const pipeline_stage_t *stage_info =\n        &entry_proc_pipeline[p_op->pipeline_stage];\n\n    /* once set, never change creation time */\n    if (p_op->db_op_type != OP_TYPE_INSERT)\n        ATTR_MASK_UNSET(&p_op->fs_attrs, creation_time);\n\n#ifdef HAVE_CHANGELOGS\n    /* handle nlink. 
We don't want the values from the filesystem if\n     * we're not doing a scan. */\n    if (p_op->extra_info.is_changelog_record) {\n        CL_REC_TYPE *logrec = p_op->extra_info.log_record.p_log_rec;\n\n        if (logrec->cr_type == CL_CREATE) {\n            /* New file. Hardlink is always 1. */\n            ATTR_MASK_SET(&p_op->fs_attrs, nlink);\n            ATTR(&p_op->fs_attrs, nlink) = 1;\n        } else if ((logrec->cr_type == CL_HARDLINK) &&\n                   (ATTR_MASK_TEST(&p_op->db_attrs, nlink))) {\n            /* New hardlink. Add 1 to existing value. Ignore what came\n             * from the FS, since it can be out of sync by now. */\n            ATTR_MASK_SET(&p_op->fs_attrs, nlink);\n            ATTR(&p_op->fs_attrs, nlink) = ATTR(&p_op->db_attrs, nlink) + 1;\n        }\n    }\n#endif\n\n    /* Only update fields that changed */\n    if (p_op->db_op_type == OP_TYPE_UPDATE) {\n        attr_mask_t tmp;\n        attr_mask_t loc_diff_mask =\n            ListMgr_WhatDiff(&p_op->fs_attrs, &p_op->db_attrs);\n\n        /* In scan mode, always keep md_update and path_update,\n         * to avoid their cleaning at the end of the scan.\n         * Also keep name and parent as they are keys in DNAMES table.\n         */\n        attr_mask_t to_keep = {.std =\n                ATTR_MASK_parent_id | ATTR_MASK_name, 0, 0LL };\n\n        /* the mask to be displayed > diff_mask (include to_keep flags) */\n        attr_mask_t display_mask = attr_mask_and(&diff_mask, &loc_diff_mask);\n\n        /* keep fullpath if parent or name changed (friendly display) */\n        if (loc_diff_mask.std & (ATTR_MASK_parent_id | ATTR_MASK_name)) {\n            to_keep.std |= ATTR_MASK_fullpath;\n            display_mask.std |= ATTR_MASK_fullpath;\n        }\n#ifdef HAVE_CHANGELOGS\n        if (!p_op->extra_info.is_changelog_record)\n#endif\n            to_keep.std |= (ATTR_MASK_md_update | ATTR_MASK_path_update);\n\n        /* remove other unchanged attrs + attrs not in db mask 
*/\n        tmp = attr_mask_or(&loc_diff_mask, &to_keep);\n        tmp = attr_mask_or_not(&tmp, &p_op->db_attrs.attr_mask);\n        p_op->fs_attrs.attr_mask =\n            attr_mask_and(&p_op->fs_attrs.attr_mask, &tmp);\n\n#ifdef _LUSTRE\n        if (p_op->db_stripe_ok) {\n            ATTR_MASK_UNSET(&p_op->fs_attrs, stripe_info);\n            if (ATTR_MASK_TEST(&p_op->fs_attrs, stripe_items)) {\n                ATTR_MASK_UNSET(&p_op->fs_attrs, stripe_items);\n                free_stripe_items(&ATTR(&p_op->fs_attrs, stripe_items));\n            }\n        }\n#endif\n\n        /* FIXME: free cleared attributes */\n\n        /* SQL req optimizations:\n         * if update policy == always and fileclass is not changed,\n         * don't set update timestamp.\n         */\n        if ((updt_params.fileclass.when == UPDT_ALWAYS)\n            && !ATTR_MASK_TEST(&p_op->fs_attrs, fileclass))\n            ATTR_MASK_UNSET(&p_op->fs_attrs, class_update);\n\n        /* nothing changed => noop */\n        if (attr_mask_is_null(p_op->fs_attrs.attr_mask)) {\n            /* no op */\n            p_op->db_op_type = OP_TYPE_NONE;\n        } else if (!attr_mask_is_null(attr_mask_and(&loc_diff_mask,\n                                                    &diff_mask))) {\n            /* something changed in diffmask */\n            GString *attrchg = g_string_new(\"\");\n\n            /* attr from DB */\n            if (!attr_mask_is_null(display_mask))\n                print_attrs(attrchg, &p_op->db_attrs, display_mask, 1);\n\n            printf(\"-\" DFID \" %s\\n\", PFID(&p_op->entry_id), attrchg->str);\n\n            /* attr from FS */\n            print_attrs(attrchg, &p_op->fs_attrs, display_mask, 1);\n            printf(\"+\" DFID \" %s\\n\", PFID(&p_op->entry_id), attrchg->str);\n\n            g_string_free(attrchg, TRUE);\n        }\n    } else if (!attr_mask_is_null(diff_mask)) {\n        if (p_op->db_op_type == OP_TYPE_INSERT) {\n            GString *attrnew = 
g_string_new(NULL);\n\n            print_attrs(attrnew, &p_op->fs_attrs,\n                        attr_mask_and(&p_op->fs_attrs.attr_mask, &diff_mask),\n                        1);\n\n            printf(\"++\" DFID \" %s\\n\", PFID(&p_op->entry_id), attrnew->str);\n            g_string_free(attrnew, TRUE);\n        } else if ((p_op->db_op_type == OP_TYPE_REMOVE_LAST)\n                   || (p_op->db_op_type == OP_TYPE_REMOVE_ONE)\n                   || (p_op->db_op_type == OP_TYPE_SOFT_REMOVE)) {\n            if (ATTR_FSorDB_TEST(p_op, fullpath))\n                printf(\"--\" DFID \" path=%s\\n\", PFID(&p_op->entry_id),\n                       ATTR_FSorDB(p_op, fullpath));\n            else\n                printf(\"--\" DFID \"\\n\", PFID(&p_op->entry_id));\n        }\n    }\n    attr_mask_unset_readonly(&p_op->fs_attrs.attr_mask);\n\n    rc = EntryProcessor_Acknowledge(p_op, STAGE_DB_APPLY, false);\n    if (rc)\n        DisplayLog(LVL_CRIT, ENTRYPROC_TAG, \"Error acknowledging stage %s\",\n                   stage_info->stage_name);\n    return rc;\n}\n\n/**\n * Perform a single operation on the database.\n */\nint EntryProc_db_apply(struct entry_proc_op_t *p_op, lmgr_t *lmgr)\n{\n    int rc;\n    const pipeline_stage_t *stage_info =\n        &entry_proc_pipeline[p_op->pipeline_stage];\n\n    /* insert to DB */\n    switch (p_op->db_op_type) {\n    case OP_TYPE_NONE:\n        /* noop */\n        DisplayLog(LVL_FULL, ENTRYPROC_TAG, \"NoOp(\" DFID \")\",\n                   PFID(&p_op->entry_id));\n        rc = 0;\n        break;\n\n    case OP_TYPE_INSERT:\n        DisplayLog(LVL_FULL, ENTRYPROC_TAG, \"Insert(\" DFID \")\",\n                   PFID(&p_op->entry_id));\n        rc = ListMgr_Insert(lmgr, &p_op->entry_id, &p_op->fs_attrs, false);\n        break;\n\n    case OP_TYPE_UPDATE:\n        DisplayLog(LVL_FULL, ENTRYPROC_TAG, \"Update(\" DFID \")\",\n                   PFID(&p_op->entry_id));\n        rc = ListMgr_Update(lmgr, &p_op->entry_id, 
&p_op->fs_attrs);\n        break;\n\n    case OP_TYPE_REMOVE_ONE:\n        DisplayLog(LVL_FULL, ENTRYPROC_TAG, \"RemoveOne(\" DFID \")\",\n                   PFID(&p_op->entry_id));\n        rc = ListMgr_Remove(lmgr, &p_op->entry_id, &p_op->fs_attrs, false);\n        break;\n\n    case OP_TYPE_REMOVE_LAST:\n        DisplayLog(LVL_FULL, ENTRYPROC_TAG, \"RemoveLast(\" DFID \")\",\n                   PFID(&p_op->entry_id));\n        rc = ListMgr_Remove(lmgr, &p_op->entry_id, &p_op->fs_attrs, true);\n        break;\n\n    case OP_TYPE_SOFT_REMOVE:\n\n        if (log_config.debug_level >= LVL_DEBUG) {\n            attr_mask_t tmp = null_mask;\n            attr_mask_t tmp2 = null_mask;\n            GString *gs = g_string_new(NULL);\n\n            tmp.std = ATTR_MASK_fullpath | ATTR_MASK_parent_id | ATTR_MASK_name;\n            tmp2 = sm_softrm_mask();\n            tmp = attr_mask_or(&tmp, &tmp2);\n\n            print_attrs(gs, &p_op->fs_attrs, tmp, true);\n            DisplayLog(LVL_DEBUG, ENTRYPROC_TAG, \"SoftRemove(\" DFID \",%s)\",\n                       PFID(&p_op->entry_id), gs->str);\n            g_string_free(gs, TRUE);\n        }\n\n        /* FIXME get remove time from changelog */\n        ATTR_MASK_SET(&p_op->fs_attrs, rm_time);\n        ATTR(&p_op->fs_attrs, rm_time) = time(NULL);\n        rc = ListMgr_SoftRemove(lmgr, &p_op->entry_id, &p_op->fs_attrs);\n        break;\n\n    default:\n        DisplayLog(LVL_CRIT, ENTRYPROC_TAG, \"Unhandled DB operation type: %d\",\n                   p_op->db_op_type);\n        rc = -1;\n    }\n\n    if (rc)\n        DisplayLog(LVL_CRIT, ENTRYPROC_TAG,\n                   \"Error %d performing database operation: %s.\", rc,\n                   lmgr_err2str(rc));\n\n    /* Acknowledge the operation if there is a callback */\n#ifdef HAVE_CHANGELOGS\n    if (p_op->callback_func != NULL)\n        rc = EntryProcessor_Acknowledge(p_op, STAGE_CHGLOG_CLR, false);\n    else\n#endif\n        rc = EntryProcessor_Acknowledge(p_op, -1, 
true);\n\n    if (rc)\n        DisplayLog(LVL_CRIT, ENTRYPROC_TAG, \"Error %d acknowledging stage %s.\",\n                   rc, stage_info->stage_name);\n\n    return rc;\n}\n\n/**\n * Perform a batch of operations on the database.\n */\nint EntryProc_db_batch_apply(struct entry_proc_op_t **ops, int count,\n                             lmgr_t *lmgr)\n{\n    int i, rc = 0;\n    const pipeline_stage_t *stage_info =\n        &entry_proc_pipeline[ops[0]->pipeline_stage];\n    entry_id_t **ids = NULL;\n    attr_set_t **attrs = NULL;\n\n    /* allocate arrays of ids and attrs */\n    ids = MemCalloc(count, sizeof(*ids));\n    if (!ids)\n        return -ENOMEM;\n    attrs = MemCalloc(count, sizeof(*attrs));\n    if (!attrs) {\n        rc = -ENOMEM;\n        goto free_ids;\n    }\n    for (i = 0; i < count; i++) {\n        ids[i] = &ops[i]->entry_id;\n        attrs[i] = &ops[i]->fs_attrs;\n    }\n\n    /* insert to DB */\n    switch (ops[0]->db_op_type) {\n    case OP_TYPE_NONE:\n        /* noop */\n        DisplayLog(LVL_FULL, ENTRYPROC_TAG, \"NoOp(%u ops: \" DFID \"...)\", count,\n                   PFID(ids[0]));\n        rc = 0;\n        break;\n    case OP_TYPE_INSERT:\n        DisplayLog(LVL_FULL, ENTRYPROC_TAG, \"BatchInsert(%u ops: \" DFID \"...)\",\n                   count, PFID(ids[0]));\n        rc = ListMgr_BatchInsert(lmgr, ids, attrs, count, false);\n        break;\n    case OP_TYPE_UPDATE:\n        DisplayLog(LVL_FULL, ENTRYPROC_TAG, \"BatchUpdate(%u ops: \" DFID \"...)\",\n                   count, PFID(ids[0]));\n        rc = ListMgr_BatchInsert(lmgr, ids, attrs, count, true);\n        break;\n    default:\n        DisplayLog(LVL_CRIT, ENTRYPROC_TAG,\n                   \"Unexpected operation for batch op: %d\", ops[0]->db_op_type);\n        rc = -1;\n    }\n\n    if (rc)\n        DisplayLog(LVL_CRIT, ENTRYPROC_TAG,\n                   \"Error %d performing batch database operation: %s.\", rc,\n                   lmgr_err2str(rc));\n\n    /* Acknowledge 
the operation if there is a callback */\n#ifdef HAVE_CHANGELOGS\n    if (ops[0]->callback_func != NULL)\n        rc = EntryProcessor_AcknowledgeBatch(ops, count, STAGE_CHGLOG_CLR,\n                                             false);\n    else\n#endif\n        rc = EntryProcessor_AcknowledgeBatch(ops, count, -1, true);\n\n    if (rc)\n        DisplayLog(LVL_CRIT, ENTRYPROC_TAG, \"Error %d acknowledging stage %s.\",\n                   rc, stage_info->stage_name);\n\n    MemFree(attrs);\n free_ids:\n    MemFree(ids);\n    return rc;\n}\n\n#ifdef HAVE_CHANGELOGS\nint EntryProc_chglog_clr(struct entry_proc_op_t *p_op, lmgr_t *lmgr)\n{\n    int rc;\n    const pipeline_stage_t *stage_info =\n        &entry_proc_pipeline[p_op->pipeline_stage];\n    CL_REC_TYPE *logrec = p_op->extra_info.log_record.p_log_rec;\n\n    if (p_op->extra_info.is_changelog_record)\n        DisplayLog(LVL_FULL, ENTRYPROC_TAG,\n                   \"stage %s - record #%llu - id=\" DFID, stage_info->stage_name,\n                   logrec->cr_index, PFID(&p_op->entry_id));\n\n    if (p_op->callback_func) {\n        /* if operation was committed, Perform callback to info collector */\n        rc = p_op->callback_func(lmgr, p_op, p_op->callback_param);\n\n        if (rc)\n            DisplayLog(LVL_CRIT, ENTRYPROC_TAG,\n                       \"Error %d performing callback at stage %s.\", rc,\n                       stage_info->stage_name);\n    }\n\n    /* Acknowledge the operation and remove it from pipeline */\n    rc = EntryProcessor_Acknowledge(p_op, -1, true);\n    if (rc)\n        DisplayLog(LVL_CRIT, ENTRYPROC_TAG, \"Error %d acknowledging stage %s.\",\n                   rc, stage_info->stage_name);\n\n    return rc;\n}\n#endif\n\nstatic void mass_rm_cb(const entry_id_t *p_id)\n{\n    printf(\"--\" DFID \"\\n\", PFID(p_id));\n}\n\nint EntryProc_rm_old_entries(struct entry_proc_op_t *p_op, lmgr_t *lmgr)\n{\n    int rc;\n    const pipeline_stage_t *stage_info =\n        
&entry_proc_pipeline[p_op->pipeline_stage];\n    lmgr_filter_t filter;\n    filter_value_t val;\n    rm_cb_func_t cb = NULL;\n\n    /* callback func for diff display */\n    if (!attr_mask_is_null(diff_mask))\n        cb = mass_rm_cb;\n\n    /* If gc_entries or gc_names are not set,\n     * this is just a special op to wait for pipeline flush.\n     * => don't clean old entries */\n    if (p_op->gc_entries || p_op->gc_names) {\n        lmgr_simple_filter_init(&filter);\n\n        if (p_op->gc_entries) {\n            /* remove entries from all tables that have not been seen during\n             * the scan */\n            val.value.val_uint = ATTR(&p_op->fs_attrs, md_update);\n            lmgr_simple_filter_add(&filter, ATTR_INDEX_md_update,\n                                   LESSTHAN_STRICT, val, 0);\n        }\n\n        if (p_op->gc_names) {\n            /* use the same timestamp for cleaning paths that have not been\n             * seen during the scan */\n            val.value.val_uint = ATTR(&p_op->fs_attrs, md_update);\n            lmgr_simple_filter_add(&filter, ATTR_INDEX_path_update,\n                                   LESSTHAN_STRICT, val, 0);\n        }\n\n        /* partial scan: remove non-updated entries from a subset of the\n         * namespace */\n        if (ATTR_MASK_TEST(&p_op->fs_attrs, fullpath)) {\n            char tmp[RBH_PATH_MAX];\n            strcpy(tmp, ATTR(&p_op->fs_attrs, fullpath));\n            strcat(tmp, \"/*\");\n            val.value.val_str = tmp;\n            lmgr_simple_filter_add(&filter, ATTR_INDEX_fullpath, LIKE, val, 0);\n        }\n\n        /* force commit after this operation */\n        ListMgr_ForceCommitFlag(lmgr, true);\n\n        /* remove entries listed in previous scans */\n        if (has_deletion_policy())\n            /* @TODO fix for dirs */\n            rc = ListMgr_MassSoftRemove(lmgr, &filter, time(NULL), cb);\n        else\n            rc = ListMgr_MassRemove(lmgr, &filter, cb);\n\n        
lmgr_simple_filter_free(&filter);\n\n        if (rc)\n            DisplayLog(LVL_CRIT, ENTRYPROC_TAG,\n                       \"Error: ListMgr MassRemove operation failed with code %d: %s\",\n                       rc, lmgr_err2str(rc));\n    }\n\n    /* must call callback function in any case, to unblock the scan */\n    if (p_op->callback_func) {\n        /* Perform callback to info collector */\n        p_op->callback_func(lmgr, p_op, p_op->callback_param);\n    }\n    // update last scan end time moved to callback\n\n    /* unset force commit flag */\n    ListMgr_ForceCommitFlag(lmgr, false);\n\n    rc = EntryProcessor_Acknowledge(p_op, -1, true);\n\n    if (rc)\n        DisplayLog(LVL_CRIT, ENTRYPROC_TAG, \"Error %d acknowledging stage %s.\",\n                   rc, stage_info->stage_name);\n\n    return rc;\n\n}\n"
  },
  {
    "path": "src/entry_processor/test_hash.c",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n *\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2018 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n#include <stdio.h>\n#include <assert.h>\n\nunsigned int max_count_to_hash_size(unsigned int max_count);\nvoid *log_config;\n\nint main(int argc, char **argv)\n{\n    int i;\n\n    for (i = 1; i < 1024 * 1024 * 1024; i <<= 1) {\n        unsigned int s = max_count_to_hash_size(i);\n        fprintf(stderr, \"count2size(%d) = %u\\n\", i, s);\n        assert(s >= i || s == 32000251);\n    }\n    return 0;\n}\n"
  },
  {
    "path": "src/fs_scan/Makefile.am",
    "content": "AM_CFLAGS= $(CC_OPT) $(DB_CFLAGS) $(PURPOSE_CFLAGS)\n\nnoinst_LTLIBRARIES=libfsscan.la\n\nlibfsscan_la_SOURCES= fs_scan.c  fs_scan_main.c task_stack_mngmt.c task_tree_mngmt.c \\\n\t\t      fs_scan.h  fs_scan_types.h  task_stack_mngmt.h  task_tree_mngmt.h\n\nindent:\n\t$(top_srcdir)/scripts/indent.sh\n"
  },
  {
    "path": "src/fs_scan/fs_scan.c",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2007, 2008, 2009, 2010 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n/**\n *  Filesystem scan module.\n */\n#ifdef HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n\n#include \"global_config.h\"\n#include \"entry_processor.h\"\n#include \"fs_scan.h\"\n#include \"rbh_logs.h\"\n#include \"RW_Lock.h\"\n#include \"rbh_misc.h\"\n#include \"list_mgr.h\"\n\n#include \"task_stack_mngmt.h\"\n#include \"task_tree_mngmt.h\"\n#include \"xplatform_print.h\"\n#include \"rbh_basename.h\"\n\n#include <sys/types.h>\n#include <sys/stat.h>\n#include <sys/time.h>   /* for gettimeofday */\n#include <sys/utsname.h>\n\n#include <unistd.h>\n#include <linux/types.h>\n#include <linux/unistd.h>\n#include <errno.h>\n#include <syscall.h>\n\n#include <string.h>\n#include <fcntl.h>\n\nfs_scan_config_t fs_scan_config;\nrun_flags_t fsscan_flags = 0;\nconst char *partial_scan_root = NULL;\n\n#define fsscan_once (fsscan_flags & RUNFLG_ONCE)\n#define fsscan_nogc (fsscan_flags & RUNFLG_NO_GC)\n\nstatic bool is_lustre_fs = false;\nstatic bool is_first_scan = false;\n\n/* information about scanning thread */\n\ntypedef struct thread_scan_info__ {\n    unsigned int index;\n    pthread_t thread_scan;\n    time_t last_action;\n\n    /* NULL if no task is running */\n    robinhood_task_t *current_task;\n\n    /* flag for forcing thread scan to stop */\n    bool force_stop;\n\n    /* entries handled since scan started */\n    unsigned int entries_handled;\n    unsigned int entries_errors;\n\n    /* time consumed for handling entries */\n    struct timeval time_consumed;\n    struct timeval 
last_processing_time;\n\n} thread_scan_info_t;\n\n/**\n * internal variables\n */\n\nstatic thread_scan_info_t *thread_list = NULL;\n/* nb_threads, fs_path, fs_type: from configuration */\n\nstatic dev_t fsdev; /* for STAY_IN_FS mode */\n\n/* stack of scan tasks */\nstatic task_stack_t tasks_stack;\n\n/* pointer to mother task (NULL if no scan is running) */\nrobinhood_task_t *root_task = NULL;\n\n/* statistics */\nstatic time_t last_scan_time = 0;\nstatic unsigned int last_duration = 0;\nstatic bool last_scan_complete = false;\nstatic time_t scan_start_time = 0;\n\nstatic struct timeval accurate_start_time = { 0, 0 };\n\nstatic unsigned int nb_hang_total = 0;\n\n/* used for adaptive scan interval */\nstatic double usage_max = 50.0; /* default: 50% */\nstatic time_t scan_interval = 0;\n\n/* lock on scan stats and other information.\n * This lock must always be taken AFTER the list lock\n * at the end of a scan.\n */\nstatic pthread_mutex_t lock_scan;\n\n/* threads behavior */\nstatic pthread_attr_t thread_attrs;\n\n/* condition about DB special operations when starting/terminating FS scan */\nstatic pthread_cond_t special_db_op_cond = PTHREAD_COND_INITIALIZER;\nstatic pthread_mutex_t special_db_op_lock = PTHREAD_MUTEX_INITIALIZER;\nstatic bool waiting_db_op = false;\n\nstatic inline void set_db_wait_flag(void)\n{\n    P(special_db_op_lock);\n    waiting_db_op = true;\n    V(special_db_op_lock);\n}\n\nstatic void wait_for_db_callback(void)\n{\n    P(special_db_op_lock);\n    while (waiting_db_op)\n        pthread_cond_wait(&special_db_op_cond, &special_db_op_lock);\n    V(special_db_op_lock);\n}\n\nstatic int db_special_op_callback(lmgr_t *lmgr, struct entry_proc_op_t *p_op,\n                                  void *arg)\n{\n    char timestamp[128];\n\n    DisplayLog(LVL_VERB, FSSCAN_TAG,\n               \"Callback from database for operation '%s'\", (char *)arg);\n\n    /* Update end time for pipeline processing */\n    if (lmgr) {\n        sprintf(timestamp, 
\"%lu\", (unsigned long)time(NULL));\n        ListMgr_SetVar(lmgr, LAST_SCAN_PROCESSING_END_TIME, timestamp);\n    }\n\n    P(special_db_op_lock);\n    waiting_db_op = false;\n    pthread_cond_signal(&special_db_op_cond);\n    V(special_db_op_lock);\n    return 0;\n}\n\n/* condition about end of 'one-shot' FS_Scan */\nstatic bool scan_finished = false;\nstatic pthread_cond_t one_shot_cond = PTHREAD_COND_INITIALIZER;\nstatic pthread_mutex_t one_shot_lock = PTHREAD_MUTEX_INITIALIZER;\n\nstatic inline void signal_scan_finished(void)\n{\n    P(one_shot_lock);\n    scan_finished = true;\n    pthread_cond_broadcast(&one_shot_cond);\n    V(one_shot_lock);\n}\n\nstatic inline bool all_threads_idle(void)\n{\n    unsigned int i;\n    for (i = 0; i < fs_scan_config.nb_threads_scan; i++)\n        if (thread_list[i].current_task)\n            return false;\n\n    return true;\n}\n\nvoid wait_scan_finished(void)\n{\n    P(one_shot_lock);\n    while (!scan_finished)\n        pthread_cond_wait(&one_shot_cond, &one_shot_lock);\n    V(one_shot_lock);\n}\n\n/**\n * Reset Scan thread statistics (before and after a scan)\n */\nstatic void ResetScanStats(bool do_lock)\n{\n    int i;\n\n    if (do_lock)\n        P(lock_scan);\n\n    for (i = 0; i < fs_scan_config.nb_threads_scan; i++) {\n        thread_list[i].entries_handled = 0;\n        thread_list[i].entries_errors = 0;\n        timerclear(&thread_list[i].time_consumed);\n        timerclear(&thread_list[i].last_processing_time);\n    }\n\n    if (do_lock)\n        V(lock_scan);\n}\n\nstatic bool ignore_entry(char *fullpath, char *name, unsigned int depth,\n                         struct stat *p_stat)\n{\n    entry_id_t tmpid;\n    attr_set_t tmpattr;\n    unsigned int i;\n    policy_match_t rc = POLICY_NO_MATCH;\n\n#ifdef _HAVE_FID\n    const char *dot_lu = get_dot_lustre_dir();\n\n    if (strncmp(fullpath, dot_lu, strlen(dot_lu) + 1) == 0) {\n        DisplayLog(LVL_DEBUG, FSSCAN_TAG, \"Ignoring '%s'\", fullpath);\n        return 
true;\n    }\n\n    /* We are not supposed to scan \".lustre\" directory,\n     * but check just in case... */\n    const char *fid_dir = get_fid_dir();\n\n    if (strncmp(fullpath, fid_dir, strlen(fid_dir) + 1) == 0) {\n        DisplayLog(LVL_DEBUG, FSSCAN_TAG, \"Ignoring '%s'\", fullpath);\n        return true;\n    }\n#endif\n\n    /* build temporary attr set for testing ignore condition */\n    ATTR_MASK_INIT(&tmpattr);\n\n    ATTR_MASK_SET(&tmpattr, name);\n    strcpy(ATTR(&tmpattr, name), name);\n\n    ATTR_MASK_SET(&tmpattr, fullpath);\n    strcpy(ATTR(&tmpattr, fullpath), fullpath);\n\n    ATTR_MASK_SET(&tmpattr, depth);\n    ATTR(&tmpattr, depth) = depth;\n\n#if defined(_LUSTRE) && defined(_MDS_STAT_SUPPORT)\n    stat2rbh_attrs(p_stat, &tmpattr,\n                   !(is_lustre_fs && global_config.direct_mds_stat));\n#else\n    stat2rbh_attrs(p_stat, &tmpattr, true);\n#endif\n\n    /* Set entry id */\n#ifndef _HAVE_FID\n    tmpid.inode = p_stat->st_ino;\n    tmpid.fs_key = get_fskey();\n#endif\n\n    rc = POLICY_NO_MATCH;\n    for (i = 0; i < fs_scan_config.ignore_count; i++) {\n        switch (entry_matches\n                (&tmpid, &tmpattr, &fs_scan_config.ignore_list[i].bool_expr,\n                 NULL, NULL)) {\n        case POLICY_MATCH:\n            return true;\n\n        case POLICY_MISSING_ATTR:\n            DisplayLog(LVL_MAJOR, FSSCAN_TAG,\n                       \"Attribute is missing for checking ignore rule\");\n            if (rc != POLICY_ERR)\n                rc = POLICY_MISSING_ATTR;\n            break;\n\n        case POLICY_ERR:\n            DisplayLog(LVL_CRIT, FSSCAN_TAG,\n                       \"An error occurred when checking ignore rule\");\n            rc = POLICY_ERR;\n            break;\n\n        case POLICY_NO_MATCH:\n            /* continue testing other ignore rules */\n            break;\n        }\n    }\n\n    return (rc != POLICY_NO_MATCH);\n}\n\n/* Terminate a filesystem scan (called by the thread\n * that terminates 
the last task of scan, and merge\n * itself to the mother task).\n * This function invalidates all entries that have not\n * been updated during the scan.\n * It also updates scan dates and root task.\n */\nstatic int TerminateScan(int scan_complete, time_t end)\n{\n    char timestamp[128];\n    char tmp[1024];\n    lmgr_t lmgr;\n    bool no_db = false;\n\n    if (ListMgr_InitAccess(&lmgr) != DB_SUCCESS) {\n        no_db = true;\n        DisplayLog(LVL_MAJOR, FSSCAN_TAG,\n                   \"WARNING: won't be able to update scan stats\");\n    }\n\n    /* store the last scan end date */\n    if (!no_db) {\n        sprintf(timestamp, \"%lu\", (unsigned long)end);\n        ListMgr_SetVar(&lmgr, LAST_SCAN_END_TIME, timestamp);\n    }\n\n    if (!no_db) {\n        /* invoke FSScan_StoreStats, so stats are updated at least once during\n         * the scan */\n        FSScan_StoreStats(&lmgr);\n        /* and update the scan status */\n        if (partial_scan_root) {\n            snprintf(tmp, sizeof(tmp), \"%s (%s)\", SCAN_STATUS_PARTIAL,\n                     partial_scan_root);\n            ListMgr_SetVar(&lmgr, LAST_SCAN_STATUS, tmp);\n        } else\n            ListMgr_SetVar(&lmgr, LAST_SCAN_STATUS,\n                           scan_complete ? SCAN_STATUS_DONE :\n                           SCAN_STATUS_INCOMPLETE);\n\n        /* no other DB actions, close the connection */\n        ListMgr_CloseAccess(&lmgr);\n    }\n\n    /* if scan is incomplete (aborted or failed), don't remove old entries\n     * in DB. 
*/\n    if (scan_complete) {\n        entry_proc_op_t *op;\n\n        /* final DB operation: remove entries with\n         * md_update < scan_start_time */\n        op = EntryProcessor_Get();\n        if (!op) {\n            DisplayLog(LVL_CRIT, FSSCAN_TAG,\n                       \"CRITICAL ERROR: Failed to allocate a new op\");\n            return -ENOMEM;\n        }\n\n        op->pipeline_stage = entry_proc_descr.GC_OLDENT;\n\n        /* set callback */\n        op->callback_func = db_special_op_callback;\n        op->callback_param = (void *)\"Remove obsolete entries\";\n\n        ATTR_MASK_INIT(&op->fs_attrs);\n\n        /* if this is an initial scan, don't rm old entries\n         * (but flush pipeline still) */\n        if (fsscan_nogc || (is_first_scan && !partial_scan_root)) {\n            op->gc_entries = 0;\n            op->gc_names = 0;\n            op->callback_param = (void *)\"End of flush\";\n        } else {\n            /* clean names not seen during the scan */\n            op->gc_names = 1;\n\n            /* If we care about deleted entries and the scan was partial,\n             * it is dangerous to clean entries because files may have been\n             * moved from one part of the namespace to another.\n             */\n            if (partial_scan_root && has_deletion_policy())\n                op->gc_entries = 0;\n            else\n                op->gc_entries = 1;\n\n            /* set the timestamp of scan in (md_update attribute) */\n            ATTR_MASK_SET(&op->fs_attrs, md_update);\n            ATTR(&op->fs_attrs, md_update) = scan_start_time;\n        }\n\n        /* set root (if partial scan) */\n        if (partial_scan_root) {\n            ATTR_MASK_SET(&op->fs_attrs, fullpath);\n            strcpy(ATTR(&op->fs_attrs, fullpath), partial_scan_root);\n        }\n\n        /* set wait db flag */\n        set_db_wait_flag();\n\n#ifndef _BENCH_SCAN\n        /* Push directory to the pipeline */\n        EntryProcessor_Push(op);\n    
    wait_for_db_callback();\n#else\n        EntryProcessor_Release(op);\n#endif\n    }\n\n    /* take a lock on scan info */\n    P(lock_scan);\n\n    /* reset threads stats */\n    ResetScanStats(false);\n\n    /* reinitialize scan status */\n    last_scan_complete = scan_complete;\n    last_scan_time = end;\n    last_duration = end - scan_start_time;\n    scan_start_time = 0;\n\n    timerclear(&accurate_start_time);\n\n    root_task = NULL;\n\n    /* release the lock */\n    V(lock_scan);\n\n    if (partial_scan_root)\n        DisplayLog(LVL_EVENT, FSSCAN_TAG, \"File list of %s has been updated\",\n                   partial_scan_root);\n    else\n        DisplayLog(LVL_EVENT, FSSCAN_TAG, \"File list of %s has been updated\",\n                   global_config.fs_path);\n\n    /* sending batched alerts */\n    DisplayLog(LVL_VERB, FSSCAN_TAG, \"Sending batched alerts, if any\");\n    Alert_EndBatching();\n\n    if (scan_complete && fs_scan_config.completion_command != NULL) {\n        char *descr = NULL;\n        char **cmd;\n        char *log_cmd;\n        int rc;\n\n        /* substitute special args in completion command.\n         * only use global std parameters (no entry attrs, nor action params,\n         * nor additional specific parameters).\n         */\n        if (asprintf(&descr, \"scan completion command '%s'\",\n                     fs_scan_config.completion_command[0]) < 0) {\n            DisplayLog(LVL_CRIT, FSSCAN_TAG,\n                       \"CRITICAL ERROR: Failed to allocate scan completion command string\");\n            return -ENOMEM;\n        }\n\n        rc = subst_shell_params(fs_scan_config.completion_command, descr,\n                                NULL, NULL, NULL, NULL, NULL, true, &cmd);\n        free(descr);\n        if (rc) {\n            log_cmd = concat_cmd(fs_scan_config.completion_command);\n            DisplayLog(LVL_MAJOR, FSSCAN_TAG,\n                       \"Invalid scan completion command: %s\", log_cmd);\n            
free(log_cmd);\n            /* return rc? */\n        } else {\n            log_cmd = concat_cmd(cmd);\n            DisplayLog(LVL_MAJOR, FSSCAN_TAG,\n                       \"Executing scan completion command: %s\", log_cmd);\n            free(log_cmd);\n\n            execute_shell_command(cmd, cb_stderr_to_log, (void *)LVL_EVENT);\n            g_strfreev(cmd);\n        }\n    }\n\n    if (fsscan_once)\n        signal_scan_finished();\n\n    FlushLogs();\n\n    return 0;\n\n}\n\n/**\n * Function for terminating a task\n * and merging recursively with parent terminated tasks.\n */\nstatic int RecursiveTaskTermination(thread_scan_info_t *p_info,\n                                    robinhood_task_t *p_task,\n                                    bool bool_scan_complete)\n{\n    int st;\n    bool bool_termine;\n    robinhood_task_t *current_task = p_task;\n\n    /* notify of current action (for watchdog) */\n    p_info->last_action = time(NULL);\n\n    /* tag itself as terminated */\n    bool_termine = FlagTaskAsFinished(current_task);\n\n    if (bool_termine) {\n        robinhood_task_t *maman;\n        bool bool_termine_mere;\n\n        do {\n            DisplayLog(LVL_FULL, FSSCAN_TAG,\n                       \"%s is finished and has no child left => merging to the parent task\",\n                       current_task->path);\n\n            /* No chance that another thread has a lock on the current task,\n             * because all the children tasks are terminated.\n             * We are the last thread to handle it.\n             */\n            maman = current_task->parent_task;\n\n            if (maman != NULL) {\n                /* removes this task from parent's sub-task list */\n                bool_termine_mere = RemoveChildTask(maman, current_task);\n            } else {    /* manage parent task */\n\n                struct timeval fin_precise;\n                struct timeval duree_precise;\n                unsigned int i, count, err_count;\n\n                
gettimeofday(&fin_precise, NULL);\n\n                timersub(&fin_precise, &accurate_start_time, &duree_precise);\n\n                /* End of mother task, compute and display summary */\n                bool_termine_mere = true;\n                count = 0;\n                err_count = 0;\n\n                for (i = 0; i < fs_scan_config.nb_threads_scan; i++) {\n                    count += thread_list[i].entries_handled;\n                    err_count += thread_list[i].entries_errors;\n                }\n\n                DisplayLog(LVL_MAJOR, FSSCAN_TAG,\n                           \"%s of %s %s, %u entries found (%u errors). \"\n                           \"Duration = %ld.%02lds\",\n                           bool_scan_complete ? \"Full scan\" : \"Scan\",\n                           partial_scan_root ? partial_scan_root :\n                           global_config.fs_path,\n                           bool_scan_complete ? \"completed\" : \"aborted\", count,\n                           err_count, duree_precise.tv_sec,\n                           duree_precise.tv_usec / 10000);\n\n                DisplayLog(LVL_EVENT, FSSCAN_TAG, \"Flushing pipeline...\");\n\n                /* merge global scan information */\n                /** @TODO better completion boolean management:\n                 * also check child tasks completion */\n                st = TerminateScan(bool_scan_complete, time(NULL));\n\n                if (st) {\n                    DisplayLog(LVL_CRIT, FSSCAN_TAG,\n                               \"CRITICAL ERROR: TerminateScan returned %d\", st);\n                    return st;\n                }\n\n            }\n\n            /* this thread now manages parent task */\n            p_info->current_task = maman;\n\n            /* notify of current activity (for watchdog) */\n            p_info->last_action = time(NULL);\n\n            /* free the task */\n            DisplayLog(LVL_FULL, FSSCAN_TAG, \"Freeing task %s fd %d\",\n                       
current_task->path, current_task->fd);\n            FreeTask(current_task);\n\n            current_task = maman;\n\n        }\n        while (bool_termine_mere && (current_task != NULL));\n\n        /* mission complete! */\n        p_info->current_task = NULL;\n\n    } else {\n        /* The thread leave this task running,\n         * because there are still sub-tasks.\n         * Go and manage another task from taskpool\n         */\n        p_info->current_task = NULL;\n        DisplayLog(LVL_FULL, FSSCAN_TAG,\n                   \"%s finished, but there are still child tasks\",\n                   current_task->path);\n    }\n\n    /* notify of current activity (for watchdog) */\n    p_info->last_action = time(NULL);\n\n    return 0;\n\n}   /* RecursiveTaskTermination */\n\nstatic inline int check_entry_dev(dev_t entry_dev, dev_t *root_dev,\n                                  const char *path, bool is_root)\n{\n    /* Check that the entry is on the same device as the filesystem we manage.\n     * (prevent from mountpoint traversal).\n     */\n    if (entry_dev != *root_dev) {\n        struct stat root_md;\n        /* is the FS root changed: file system may have been remounted.\n         * else: the entry is not in the same filesystem\n         */\n        /* 1) check fs root dev_id (use stat as FS mount point maybe a\n         * symlink) */\n        if (stat(global_config.fs_path, &root_md) == -1) {\n            int rc = -errno;\n            DisplayLog(LVL_CRIT, FSSCAN_TAG,\n                       \"stat failed on %s: %s\", global_config.fs_path,\n                       strerror(-rc));\n            DisplayLog(LVL_CRIT, FSSCAN_TAG,\n                       \"ERROR accessing FileSystem: EXITING.\");\n            Exit(rc);\n        }\n        if (root_md.st_dev != *root_dev) {\n            /* manage dev id change after umount/mount */\n            DisplayLog(LVL_MAJOR, FSSCAN_TAG,\n                       \"WARNING: Filesystem device id changed (old=%\" PRI_DT\n       
                \", new=%\" PRI_DT \"): \"\n                       \"checking if it has been remounted\", *root_dev,\n                       root_md.st_dev);\n            if (ResetFS()) {\n                DisplayLog(LVL_CRIT, FSSCAN_TAG,\n                           \"Filesystem was unmounted!!! EXITING!\");\n                Exit(1);\n            }\n            /* update current root_dev */\n            *root_dev = get_fsdev();\n        }\n        /* else: root is still the same */\n\n        /* entry_dev == *root_dev => OK: the entry is in the root filesystem */\n        if (entry_dev != *root_dev) {\n            /* if new root dev != just retrieved root dev\n             * a remount occurred while we were checking.\n             * Return error so the caller update its dev.\n             */\n            if (is_root)\n                return -1;\n\n            if (global_config.stay_in_fs) {\n                DisplayLog(LVL_CRIT, FSSCAN_TAG,\n                           \"%s (0x%.8\" PRI_DT\n                           \") is in a filesystem different from root (0x%.8\"\n                           PRI_DT \"), entry ignored\", path, entry_dev,\n                           *root_dev);\n                return -1;\n            } else {\n                /* TODO: what fs_key for this entry??? */\n                DisplayLog(LVL_DEBUG, FSSCAN_TAG,\n                           \"%s (0x%.8\" PRI_DT\n                           \") is in a filesystem different from root (0x%.8\"\n                           PRI_DT\n                           \"), but 'stay_in_fs' parameter is disabled: processing entry anyhow\",\n                           path, entry_dev, *root_dev);\n            }\n        }\n    }\n    return 0;\n}\n\n#ifndef _NO_AT_FUNC\n\nstatic bool noatime_permitted = true;\n\nstatic int openat_noatime(int pfd, const char *name, int rddir)\n{\n    int fd = -1;\n    int flags = 0;\n    bool had_eperm = false;\n\n    /* is it for readdir? 
*/\n    if (rddir)\n        flags = O_RDONLY | O_DIRECTORY;\n    else\n        flags = O_RDONLY | O_NONBLOCK | O_NOFOLLOW;\n\n    if (noatime_permitted) {\n        /* try to open with NOATIME flag */\n        fd = openat(pfd, name, flags | O_NOATIME);\n        if ((fd < 0) && (errno == EPERM))\n            had_eperm = true;\n    }\n    if (fd < 0)\n        fd = openat(pfd, name, flags);\n\n    /* openat successful but not with NOATIME => no longer use this flag */\n    if (had_eperm && (fd >= 0)) {\n        DisplayLog(LVL_DEBUG, FSSCAN_TAG,\n                   \"openat failed with O_NOATIME, but was successful without it: disabling NOATIME.\");\n        noatime_permitted = false;\n    }\n\n    return fd;\n}\n\nstatic int open_noatime(const char *path, int rddir)\n{\n    int fd = -1;\n    int flags = 0;\n    bool had_eperm = false;\n\n    /* is it for readdir? */\n    if (rddir)\n        flags = O_RDONLY | O_DIRECTORY;\n    else\n        flags = O_RDONLY | O_NONBLOCK | O_NOFOLLOW;\n\n    if (noatime_permitted) {\n        /* try to open with NOATIME flag */\n        fd = open(path, flags | O_NOATIME);\n        if ((fd < 0) && (errno == EPERM))\n            had_eperm = true;\n    }\n\n    if (fd < 0)\n        fd = open(path, flags);\n\n    /* open successful but not with NOATIME => no longer use this flag */\n    if (had_eperm && (fd >= 0)) {\n        DisplayLog(LVL_DEBUG, FSSCAN_TAG,\n                   \"open failed with O_NOATIME, but was successful without it: disabling NOATIME.\");\n        noatime_permitted = false;\n    }\n\n    return fd;\n}\n#endif\n\n/** disable GC if a transient directory error occurred */\nstatic void check_dir_error(int rc)\n{\n    if (rc != 0 && abs(rc) != ENOENT && abs(rc) != ESTALE) {\n        /* If we cannot read the directory, we must avoid dropping all\n         * its entries from the DB => Switch to NO_GC mode. 
*/\n        fsscan_flags |= RUNFLG_NO_GC;\n        DisplayLog(LVL_CRIT, FSSCAN_TAG,\n                   \"Disabling GC because the namespace can't be fully scanned\");\n    }\n}\n\nstatic int create_child_task(const char *childpath, struct stat *inode,\n                             robinhood_task_t *parent,\n                             const char *scan_root,\n                             const char *entryname)\n{\n    robinhood_task_t *p_task;\n    int rc = 0;\n\n    p_task = CreateTask();\n\n    if (p_task == NULL) {\n        DisplayLog(LVL_CRIT, FSSCAN_TAG,\n                   \"CRITICAL ERROR: task creation failed\");\n        return -1;\n    }\n\n    p_task->parent_task = parent;\n    /* propagate partial_scan_root, unless it is specified */\n    p_task->partial_scan_root = scan_root ? scan_root :\n                                    parent->partial_scan_root;\n    rh_strncpy(p_task->path, childpath, sizeof(p_task->path));\n    if (entryname)\n        rh_strncpy(p_task->relpath, entryname, sizeof(p_task->relpath));\n    else\n        assert(p_task->parent_task->fd == -1);\n\n    /* set parent id */\n    if ((rc = path2id(childpath, &p_task->dir_id, inode)) != 0)\n        goto out_free;\n\n    p_task->dir_md = *inode;\n    p_task->depth = parent->depth + 1;\n    p_task->task_finished = false;\n\n    /* add the task to the parent's subtask list */\n    AddChildTask(parent, p_task);\n\n    /* insert task to the stack */\n    InsertTask_to_Stack(&tasks_stack, p_task);\n    return 0;\n\n out_free:\n    FreeTask(p_task);\n    return rc;\n}\n\nstatic int stat_entry(const char *path, const char *name, int parentfd,\n                      struct stat *inode)\n{\n#ifndef _NO_AT_FUNC\n    /* if called for a directory between root and partial_scan_root */\n    if (parentfd != -1) {\n        if (fstatat(parentfd, name, inode, AT_SYMLINK_NOFOLLOW) == -1)\n            return -errno;\n    } else\n#endif\n#if defined(_LUSTRE) && defined(_MDS_STAT_SUPPORT)\n    if (is_lustre_fs 
&& global_config.direct_mds_stat) {\n        int rc;\n        rc = lustre_mds_stat(path, parentfd, inode);\n        if (!rc)\n            /* device id is not the one seen by client: change it */\n            inode->st_dev = fsdev;\n        return rc;\n    } else\n#endif\n    if (lstat(path, inode) == -1)\n        return -errno;\n\n    return 0;\n}\n\n/* process a filesystem entry */\nstatic int process_one_entry(thread_scan_info_t *p_info,\n                             robinhood_task_t *p_task,\n                             char *entry_name, int parentfd)\n{\n    char entry_path[RBH_PATH_MAX];\n    struct stat inode;\n    int rc = 0;\n    int no_md = 0;\n\n    /* build absolute path */\n    rc = snprintf(entry_path, RBH_PATH_MAX, \"%s/%s\", p_task->path, entry_name);\n    if (rc >= RBH_PATH_MAX) {\n        DisplayLog(LVL_EVENT, FSSCAN_TAG,\n                   \"Path too long: %s/%s, skipping entry\",\n                   p_task->path, entry_name);\n        return -ENAMETOOLONG;\n    }\n\n    /* retrieve information about the entry (to know if it's a directory\n     * or something else) */\n    rc = stat_entry(entry_path, entry_name, parentfd, &inode);\n    if (rc) {\n#ifdef _LUSTRE\n        if (is_lustre_fs && (rc == -ESHUTDOWN)) {\n            /* File can't be stat because it is on a disconnected OST.\n             * Still push it to the pipeline, to avoid losing valid info\n             * in the DB.\n             */\n            DisplayLog(LVL_EVENT, FSSCAN_TAG,\n                       \"Entry %s is on inactive OST or MDT. 
\"\n                       \"Cannot get its attributes.\", entry_path);\n            no_md = 1;\n            goto push;\n        }\n#endif\n        DisplayLog(LVL_MAJOR, FSSCAN_TAG,\n                   \"failed to stat %s (%s): entry ignored\",\n                   entry_path, strerror(-rc));\n        return rc;\n    }\n\n    /* Test if entry or directory is ignored */\n    if (ignore_entry(entry_path, entry_name, p_task->depth, &inode)) {\n        DisplayLog(LVL_DEBUG, FSSCAN_TAG,\n                   \"%s matches an 'ignore' rule. Skipped.\", entry_path);\n        return 0;\n    }\n\n    if (check_entry_dev(inode.st_dev, &fsdev, entry_path, false))\n        return 0;   /* not considered as an error */\n\n    /* Push all entries except dirs to the pipeline.\n     * Note: directories are pushed in Thr_scan(), after the closedir() call.\n     */\n    if (S_ISDIR(inode.st_mode)) {\n        rc = create_child_task(entry_path, &inode, p_task, NULL, entry_name);\n        if (rc)\n            return rc;\n    } else {\n        entry_proc_op_t *op;\n\n#ifdef _LUSTRE\n push:\n#endif\n\n        op = EntryProcessor_Get();\n        if (!op) {\n            DisplayLog(LVL_CRIT, FSSCAN_TAG,\n                       \"CRITICAL ERROR: Failed to allocate a new op\");\n            return -ENOMEM;\n        }\n#ifdef _HAVE_FID\n        op->pipeline_stage = entry_proc_descr.GET_ID;\n#else\n        op->pipeline_stage = entry_proc_descr.GET_INFO_DB;\n#endif\n        ATTR_MASK_INIT(&op->fs_attrs);\n\n        ATTR_MASK_SET(&op->fs_attrs, parent_id);\n        ATTR(&op->fs_attrs, parent_id) = p_task->dir_id;\n\n        ATTR_MASK_SET(&op->fs_attrs, name);\n        strcpy(ATTR(&op->fs_attrs, name), entry_name);\n\n        ATTR_MASK_SET(&op->fs_attrs, fullpath);\n        strcpy(ATTR(&op->fs_attrs, fullpath), entry_path);\n\n#ifdef ATTR_INDEX_invalid\n        ATTR_MASK_SET(&op->fs_attrs, invalid);\n        ATTR(&op->fs_attrs, invalid) = false;\n#endif\n\n        ATTR_MASK_SET(&op->fs_attrs, depth);\n 
       /* depth(/<mntpoint>/toto) = 0 */\n        ATTR(&op->fs_attrs, depth) = p_task->depth;\n\n        if (!no_md) {\n#if defined(_LUSTRE) && defined(_MDS_STAT_SUPPORT)\n            stat2rbh_attrs(&inode, &op->fs_attrs,\n                           !(is_lustre_fs && global_config.direct_mds_stat));\n#else\n            stat2rbh_attrs(&inode, &op->fs_attrs, true);\n#endif\n            /* set update time  */\n            ATTR_MASK_SET(&op->fs_attrs, md_update);\n            ATTR(&op->fs_attrs, md_update) = time(NULL);\n        } else {\n            /* must still set it to avoid the entry to be impacted by\n             * scan final GC */\n            ATTR_MASK_SET(&op->fs_attrs, md_update);\n            ATTR(&op->fs_attrs, md_update) = time(NULL);\n        }\n        ATTR_MASK_SET(&op->fs_attrs, path_update);\n        ATTR(&op->fs_attrs, path_update) = time(NULL);\n\n        /* Set entry id */\n#ifndef _HAVE_FID\n        if (!no_md) {\n            op->entry_id.inode = inode.st_ino;\n            op->entry_id.fs_key = get_fskey();\n            op->entry_id_is_set = 1;\n        } else\n            op->entry_id_is_set = 0;\n#else\n        op->entry_id_is_set = 0;\n#ifndef _NO_AT_FUNC\n        /* get fid from fd, using openat on parent fd */\n        int fd = openat_noatime(parentfd, entry_name, false);\n        if (fd < 0)\n            DisplayLog(LVL_DEBUG, FSSCAN_TAG,\n                       \"openat failed on <parent_fd=%d>/%s: %s\", parentfd,\n                       entry_name, strerror(errno));\n        else {\n            rc = Lustre_GetFidByFd(fd, &op->entry_id);\n            if (rc)\n                DisplayLog(LVL_DEBUG, FSSCAN_TAG,\n                           \"fd2fid failed on <parent_fd=%d>/%s: %s\", parentfd,\n                           entry_name, strerror(errno));\n            else {\n                op->entry_id_is_set = 1;\n                op->pipeline_stage = entry_proc_descr.GET_INFO_DB;\n            }\n            close(fd);\n        
}\n#endif\n#endif\n\n        op->extra_info_is_set = 0;\n\n#ifdef _LUSTRE\n#ifdef HAVE_LLAPI_FSWAP_LAYOUTS\n        /** Since Lustre2.4 release, entry striping can change\n         * (have_llapi_fswap_layouts) so scanning must update file stripe\n         * information.\n         */\n        if (no_md || S_ISREG(inode.st_mode))\n#else\n        if ((no_md || S_ISREG(inode.st_mode)) && is_first_scan)\n#endif\n        {\n            /* Fetch the stripes information now. This is faster than\n             * doing it later in the pipeline. However if that fails now,\n             * the pipeline will retry.\n             * Do it only for initial scan, as most of the stripes information\n             * is already known for next scans.\n             */\n#ifndef _NO_AT_FUNC\n            /* have a dir fd */\n            rc = File_GetStripeByDirFd(parentfd, entry_name,\n                                       &ATTR(&op->fs_attrs, stripe_info),\n                                       &ATTR(&op->fs_attrs, stripe_items));\n#else\n            rc = File_GetStripeByPath(entry_path,\n                                      &ATTR(&op->fs_attrs, stripe_info),\n                                      &ATTR(&op->fs_attrs, stripe_items));\n#endif\n            if (rc) {\n                ATTR_MASK_UNSET(&op->fs_attrs, stripe_info);\n                ATTR_MASK_UNSET(&op->fs_attrs, stripe_items);\n            } else {\n                ATTR_MASK_SET(&op->fs_attrs, stripe_info);\n                ATTR_MASK_SET(&op->fs_attrs, stripe_items);\n            }\n        }\n\n        if (global_config.lustre_projid && S_ISREG(inode.st_mode)) {\n            rc = lustre_project_get_id(entry_path);\n            if (rc < 0)  {\n                DisplayLog(LVL_MAJOR, FSSCAN_TAG,\n                           \"Failed to get lustre projid for %s: error %d\",\n                           entry_path, rc);\n            } else {\n                DisplayLog(LVL_FULL, FSSCAN_TAG, DFID \": projid=%u for %s\",\n                
           PFID(&op->entry_id), rc, entry_path);\n                ATTR_MASK_SET(&op->fs_attrs, projid);\n                ATTR(&op->fs_attrs, projid) = rc;\n            }\n        }\n#endif\n\n#ifndef _BENCH_SCAN\n        /* Push entry to the pipeline */\n        EntryProcessor_Push(op);\n#else\n        EntryProcessor_Release(op);\n#endif\n\n    }\n\n    return 0;\n}\n\n/* directory specific types and accessors */\n#ifndef _NO_AT_FUNC\n#define GETDENTS_BUF_SZ 4096\n#define DIR_T int\n#define DIR_FD(_d) (_d)\n#define DIR_ERR(_d) ((_d) < 0)\n#define OPENDIR_STR \"open\"\n#else\n#define DIR_T DIR*\n#define DIR_FD(_d) dirfd(_d)\n#define DIR_ERR(_d) ((_d) == NULL)\n#define OPENDIR_STR \"opendir\"\n#endif\n\nstatic inline DIR_T dir_open(const char *path, int pfd, const char *relpath)\n{\n#ifndef _NO_AT_FUNC\n    if (pfd != -1)\n        return openat_noatime(pfd, relpath, true);\n    else\n        return open_noatime(path, true);\n#else\n    return opendir(path);\n#endif\n}\n\nstatic int process_one_dir(robinhood_task_t *p_task,\n                           thread_scan_info_t *p_info,\n                           unsigned int *nb_entries, unsigned int *nb_errors)\n{\n    DIR_T dirp;\n#ifndef _NO_AT_FUNC\n    char dirent_buf[GETDENTS_BUF_SZ];\n    struct dirent64 *direntry = NULL;\n#else\n    struct dirent direntry;\n    struct dirent *cookie_rep;\n#endif\n    int rc = 0;\n\n    (*nb_entries) = 0;\n\n    /* hearbeat before opendir */\n    p_info->last_action = time(NULL);\n\n    dirp = dir_open(p_task->path,\n                    p_task->parent_task ? 
p_task->parent_task->fd : -1,\n                    p_task->relpath);\n    if (DIR_ERR(dirp)) {\n        rc = -errno;\n        DisplayLog(LVL_CRIT, FSSCAN_TAG,\n                   OPENDIR_STR \" failed on %s (%s)\",\n                   p_task->path, strerror(-rc));\n        (*nb_errors)++;\n        check_dir_error(rc);\n\n        return rc;\n    }\n    DisplayLog(LVL_FULL, FSSCAN_TAG, \"Setting task %s fd %d\", p_task->path,\n               dirp);\n    p_task->fd = dirp;\n\n    /* hearbeat before first readdir */\n    p_info->last_action = time(NULL);\n\n#ifndef _NO_AT_FUNC\n    /* scan directory entries by chunk of 4k */\n    direntry = (struct dirent64 *)dirent_buf;\n    while ((rc = syscall(SYS_getdents64, dirp, direntry, GETDENTS_BUF_SZ))\n                    > 0) {\n        off_t bytepos;\n        struct dirent64 *dp;\n\n        /* notify current activity */\n        p_info->last_action = time(NULL);\n\n        for (bytepos = 0; bytepos < rc;) {\n            dp = (struct dirent64 *)(dirent_buf + bytepos);\n            bytepos += dp->d_reclen;\n\n            /* break ASAP if requested */\n            if (p_info->force_stop) {\n                DisplayLog(LVL_EVENT, FSSCAN_TAG, \"Stop requested: \"\n                           \"cancelling directory scan operation \"\n                           \"(in '%s')\", p_task->path);\n                return -ECANCELED;\n            }\n\n            if (!strcmp(dp->d_name, \".\") || !strcmp(dp->d_name, \"..\"))\n                continue;\n\n            (*nb_entries)++;\n\n            /* Handle filesystem entry. 
*/\n            if (process_one_entry(p_info, p_task, dp->d_name, DIR_FD(dirp)))\n                (*nb_errors)++;\n        }\n    }\n    /* rc == 0 => end of dir */\n    if (rc < 0) {\n        rc = errno;\n        DisplayLog(LVL_CRIT, FSSCAN_TAG, \"ERROR reading directory %s (%s)\",\n                   p_task->path, strerror(rc));\n        (*nb_errors)++;\n    }\n#else\n    /* read entries one by one */\n    while (1) {\n        rc = readdir_r(dirp, &direntry, &cookie_rep);\n\n        /* notify current activity (for watchdog) */\n        p_info->last_action = time(NULL);\n\n        if ((rc == 0) && (cookie_rep == NULL))\n            /* end of directory */\n            break;\n        else if (p_info->force_stop) {\n            DisplayLog(LVL_EVENT, FSSCAN_TAG, \"Stop requested: \"\n                       \"cancelling directory scan operation (in '%s')\",\n                       p_task->path);\n            return -ECANCELED;\n        } else if (rc != 0) {\n            DisplayLog(LVL_CRIT, FSSCAN_TAG, \"ERROR reading directory %s (%s)\",\n                       p_task->path, strerror(rc));\n            (*nb_errors)++;\n            break;\n        }\n\n        if (!strcmp(direntry.d_name, \".\") || !strcmp(direntry.d_name, \"..\"))\n            continue;\n\n        (*nb_entries)++;\n\n#ifdef SIMUL_HANGS\n        /* simulate a hang */\n        sleep(20 * p_task->depth);\n#endif\n\n        /* Handle filesystem entry. 
*/\n        if (process_one_entry(p_info, p_task, direntry.d_name, dirfd(dirp)))\n            (*nb_errors)++;\n\n    }   /* end of dir */\n#endif\n    return rc;\n}\n\n/**\n * If scan is restricted to a list of subdirectories, create 1 task\n * per subdirectory.\n */\nstatic int push_dir_list(robinhood_task_t *parent_task)\n{\n    int i, rc;\n\n    for (i = 0; i < fs_scan_config.dir_count; i++) {\n        const char *dir = fs_scan_config.dir_list[i];\n        char *new_task_path;\n        const char *next_name;\n        const char *next_slash;\n        struct stat inode;\n        char name[RBH_NAME_MAX];\n\n        /* check path */\n        if (strncmp(parent_task->path, dir, strlen(parent_task->path))) {\n            DisplayLog(LVL_CRIT, FSSCAN_TAG,\n                       \"ERROR: %s is supposed to be under %s\",\n                       dir, parent_task->path);\n            return -EINVAL;\n        }\n\n        /* push the first level of subdirectory */\n        next_name = dir + strlen(parent_task->path);\n        while (*next_name == '/')\n            next_name++;\n        next_slash = strchr(next_name, '/');\n        if (next_slash) {\n            /* length without final '\\0' */\n            ptrdiff_t len = next_slash - next_name;\n\n            strncpy(name, next_name, len);\n            name[len] = '\\0';\n        } else\n            strcpy(name, next_name);\n\n        if (asprintf(&new_task_path, \"%s/%s\", parent_task->path, name) < 0)\n            return -ENOMEM;\n\n        if (lstat(new_task_path, &inode) == -1) {\n            rc = -errno;\n            DisplayLog(LVL_MAJOR, FSSCAN_TAG, \"Failed to stat directory '%s'\",\n                       new_task_path);\n            return rc;\n        }\n\n        DisplayLog(LVL_FULL, FSSCAN_TAG, \"Pushing dir '%s' to reach \"\n                   \"sub-tree '%s'\", new_task_path, fs_scan_config.dir_list[i]);\n\n        rc = create_child_task(new_task_path, &inode,\n                               parent_task, 
fs_scan_config.dir_list[i], NULL);\n        free(new_task_path);\n        if (rc)\n            return rc;\n    }\n\n    return 0;\n}\n\nstatic int process_one_task(robinhood_task_t *p_task,\n                            thread_scan_info_t *p_info,\n                            unsigned int *nb_entries, unsigned int *nb_errors)\n{\n    int rc;\n#ifdef _BENCH_DB\n    /* to map entry_id_t to an integer  we can increment */\n    struct id_map {\n        uint64_t high;\n        uint64_t low;\n    } *volatile fakeid;\n    /* level1 tasks: insert 100k entries with root entry id + N. */\n    if (p_task->depth > 1)\n        return 0;\n#endif\n\n    /* if this is the root task, check that the filesystem is still mounted */\n    if (p_task->parent_task == NULL) {\n        /* retrieve filesystem device id */\n        if (stat(p_task->path, &p_task->dir_md)) {\n            DisplayLog(LVL_CRIT, FSSCAN_TAG,\n                       \"stat failed on %s (%s)\", p_task->path, strerror(errno));\n            DisplayLog(LVL_CRIT, FSSCAN_TAG,\n                       \"Error accessing filesystem: exiting\");\n            Exit(1);\n        }\n        if (check_entry_dev(p_task->dir_md.st_dev, &fsdev, p_task->path, true))\n            p_task->dir_md.st_dev = fsdev;  /* just updated */\n\n        rc = path2id(p_task->path, &p_task->dir_id, &p_task->dir_md);\n        if (rc) {\n            (*nb_errors)++;\n            return rc;\n        }\n    }\n\n    /* As long as the current task path is (strictly)\n     * upper than partial scan root: just lookup, no readdir */\n     if (p_task->partial_scan_root &&\n         (strlen(p_task->path) < strlen(p_task->partial_scan_root))) {\n        char name[RBH_NAME_MAX + 1];\n        const char *next_name, *next_slash;\n\n        /* check path */\n        if (strncmp(p_task->path, p_task->partial_scan_root,\n            strlen(p_task->path))) {\n            DisplayLog(LVL_CRIT, FSSCAN_TAG,\n                       \"ERROR: %s is supposed to be under %s\",\n  
                     p_task->partial_scan_root, p_task->path);\n            (*nb_errors)++;\n            return -EINVAL;\n        }\n\n        next_name = p_task->partial_scan_root + strlen(p_task->path);\n        while (*next_name == '/')\n            next_name++;\n        next_slash = strchr(next_name, '/');\n        if (next_slash) {\n            /* length without final '\\0' */\n            ptrdiff_t len = next_slash - next_name;\n\n            strncpy(name, next_name, len);\n            name[len] = '\\0';\n        } else\n            strcpy(name, next_name);\n\n        DisplayLog(LVL_DEBUG, FSSCAN_TAG, \"Partial scan: processing '%s' in %s\",\n                   name, p_task->path);\n\n        rc = process_one_entry(p_info, p_task, name, -1);\n        if (rc) {\n            (*nb_errors)++;\n            return rc;\n        }\n    } else if (p_task->depth == 0 && fs_scan_config.dir_count > 0) {\n        /* If scan is restricted to subdirectories, create child tasks under\n         * mother task */\n        rc = push_dir_list(p_task);\n        if (rc) {\n            (*nb_errors)++;\n            return rc;\n        }\n    }\n#ifndef _BENCH_DB\n    else\n#else\n    else if (p_task->depth == 0)\n#endif\n    {\n        /* read the directory and process each entry */\n        rc = process_one_dir(p_task, p_info, nb_entries, nb_errors);\n        if (rc)\n            return rc;\n    }\n#ifdef _BENCH_DB\n    int i;\n#endif\n\n    if (p_task->depth > 0)\n#ifdef _BENCH_DB\n        for (i = 1; i < 100000 && !p_info->force_stop; i++)\n#endif\n        {\n            /* Fill dir info and push it to the pileline for checking alerts\n             * on it, and possibly purge it if it is empty for a long time.\n             */\n            entry_proc_op_t *op;\n\n            op = EntryProcessor_Get();\n            if (!op) {\n                DisplayLog(LVL_CRIT, FSSCAN_TAG,\n                           \"CRITICAL ERROR: Failed to allocate a new op\");\n                return 
-ENOMEM;\n            }\n\n            ATTR_MASK_INIT(&op->fs_attrs);\n\n            /* set entry ID */\n            op->entry_id = p_task->dir_id;\n#ifdef _BENCH_DB\n            /* add i to the entry id */\n            fakeid = (struct id_map *)&op->entry_id;\n            fakeid->high += i;\n#endif\n            op->entry_id_is_set = 1;\n\n            /* Id already known */\n            op->pipeline_stage = entry_proc_descr.GET_INFO_DB;\n\n#ifndef _BENCH_DB\n            if (p_task->parent_task) {\n                ATTR_MASK_SET(&op->fs_attrs, parent_id);\n                ATTR(&op->fs_attrs, parent_id) = p_task->parent_task->dir_id;\n            }\n#else\n            ATTR_MASK_SET(&op->fs_attrs, parent_id);\n            ATTR(&op->fs_attrs, parent_id) = p_task->dir_id;\n#endif\n\n            ATTR_MASK_SET(&op->fs_attrs, name);\n            rh_strncpy(ATTR(&op->fs_attrs, name), rh_basename(p_task->path),\n                       RBH_NAME_MAX);\n#ifdef _BENCH_DB\n            sprintf(ATTR(&op->fs_attrs, name) +\n                    strlen(ATTR(&op->fs_attrs, name)), \"%d\", i);\n#endif\n\n            ATTR_MASK_SET(&op->fs_attrs, fullpath);\n            strcpy(ATTR(&op->fs_attrs, fullpath), p_task->path);\n#ifdef _BENCH_DB\n            sprintf(ATTR(&op->fs_attrs, fullpath) +\n                    strlen(ATTR(&op->fs_attrs, fullpath)), \"%d\", i);\n#endif\n\n#ifdef ATTR_INDEX_invalid\n            ATTR_MASK_SET(&op->fs_attrs, invalid);\n            ATTR(&op->fs_attrs, invalid) = false;\n#endif\n\n            ATTR_MASK_SET(&op->fs_attrs, depth);\n            /* depth(/tmp/toto) = 0 */\n            ATTR(&op->fs_attrs, depth) = p_task->depth - 1;\n\n            ATTR_MASK_SET(&op->fs_attrs, dircount);\n            ATTR(&op->fs_attrs, dircount) = *nb_entries;\n\n#ifndef _BENCH_PIPELINE\n#if defined(_LUSTRE) && defined(_MDS_STAT_SUPPORT)\n            stat2rbh_attrs(&p_task->dir_md, &op->fs_attrs,\n                           !(is_lustre_fs && 
global_config.direct_mds_stat));\n#else\n            stat2rbh_attrs(&p_task->dir_md, &op->fs_attrs, true);\n#endif\n#ifdef _LUSTRE\n            if (global_config.lustre_projid) {\n                rc = lustre_project_get_id(p_task->path);\n                if (rc < 0)  {\n                    DisplayLog(LVL_MAJOR, FSSCAN_TAG,\n                               \"Failed to get lustre projid for dir %s: error %d\",\n                               p_task->path, rc);\n                } else {\n                    DisplayLog(LVL_FULL, FSSCAN_TAG, DFID \": projid=%u for %s\",\n                               PFID(&op->entry_id), rc, p_task->path);\n                    ATTR_MASK_SET(&op->fs_attrs, projid);\n                    ATTR(&op->fs_attrs, projid) = rc;\n                }\n            }\n#endif\n#endif\n\n#ifdef _BENCH_DB\n            /* generate cyclic owner, group, type, size, ... */\n            unsigned int u = (i + 17) % 137;\n            if (global_config.uid_gid_as_numbers) {\n                ATTR(&op->fs_attrs, uid).num = u;\n                ATTR(&op->fs_attrs, gid).num = u/8;\n            } else {\n                sprintf(ATTR(&op->fs_attrs, uid).txt, \"user%u\", u);\n                /* 8 user per group */\n                sprintf(ATTR(&op->fs_attrs, gid).txt, \"group%u\", u/8);\n            }\n            switch (i % 2) {\n            case 0:\n                strcpy(ATTR(&op->fs_attrs, type), STR_TYPE_DIR);\n                break;\n            case 1:\n                strcpy(ATTR(&op->fs_attrs, type), STR_TYPE_FILE);\n                break;\n            }\n            ATTR(&op->fs_attrs, size) = ((i % 311) * 1493);\n\n            p_info->entries_handled++;\n#endif\n            /* set update time  */\n            ATTR_MASK_SET(&op->fs_attrs, md_update);\n            ATTR_MASK_SET(&op->fs_attrs, path_update);\n            ATTR(&op->fs_attrs, md_update) = ATTR(&op->fs_attrs, path_update)\n                = time(NULL);\n\n            op->extra_info_is_set = 
0;\n\n#ifndef _BENCH_SCAN\n            /* Push directory to the pipeline */\n            EntryProcessor_Push(op);\n#else\n            EntryProcessor_Release(op);\n#endif\n        }\n    return 0;\n}\n\n/**\n * Thr_scan :\n * main routine for handling tasks.\n */\nstatic void *Thr_scan(void *arg_thread)\n{\n    robinhood_task_t *p_task;\n    int rc;\n\n    struct timeval start_dir;\n    struct timeval end_dir;\n    struct timeval diff;\n\n    thread_scan_info_t *p_info = (thread_scan_info_t *) arg_thread;\n\n    unsigned int nb_entries = 0;\n    unsigned int nb_errors = 0;\n\n    /* Initialize buddy management */\n#ifdef _BUDDY_MALLOC\n    if (BuddyInit(&buddy_config)) {\n        DisplayLog(LVL_CRIT, FSSCAN_TAG,\n                   \"Error Initializing Memory Management\");\n        Exit(1);\n    }\n#endif\n\n    while (!p_info->force_stop) {\n        int task_rc;\n\n        DisplayLog(LVL_FULL, FSSCAN_TAG, \"ThrScan-%d: Waiting for a task\",\n                   p_info->index);\n\n        /* take a task from queue */\n        p_task = GetTask_from_Stack(&tasks_stack);\n\n        /* skip it if the thread was requested to stop */\n        if (p_info->force_stop)\n            break;\n\n        /* ERROR if NULL */\n        if (p_task == NULL) {\n            DisplayLog(LVL_CRIT, FSSCAN_TAG,\n                       \"CRITICAL ERROR: GetTask_from_Stack returned NULL\");\n            Exit(1);\n        }\n\n        /* update thread info */\n        p_info->current_task = p_task;\n        p_info->last_action = time(NULL);\n\n        /* initialize error counters for current task */\n        nb_entries = 0;\n        nb_errors = 0;\n\n        DisplayLog(LVL_FULL, FSSCAN_TAG,\n                   \"ThrScan-%d: Processing %s (depth %u)\",\n                   p_info->index, p_task->path, p_task->depth);\n\n        /* measure task processing time */\n        gettimeofday(&start_dir, NULL);\n\n        task_rc = process_one_task(p_task, p_info, &nb_entries, &nb_errors);\n\n        
gettimeofday(&end_dir, NULL);\n        timersub(&end_dir, &start_dir, &diff);\n\n        /* update thread statistics */\n        timeradd(&diff, &p_info->time_consumed, &p_info->time_consumed);\n        p_info->entries_handled += nb_entries;\n        p_info->entries_errors += nb_errors;\n\n        /* make an average on directory entries */\n        if (nb_entries > 0) {\n            unsigned int rest;\n            p_info->last_processing_time.tv_sec = diff.tv_sec / nb_entries;\n            rest =\n                diff.tv_sec -\n                (p_info->last_processing_time.tv_sec * nb_entries);\n            p_info->last_processing_time.tv_usec =\n                ((1000000 * rest) + diff.tv_usec) / nb_entries;\n        }\n\n        /* terminate processing of current task */\n        rc = RecursiveTaskTermination(p_info, p_task, (task_rc == 0)\n                                      && !p_info->force_stop);\n        if (rc) {\n            DisplayLog(LVL_CRIT, FSSCAN_TAG,\n                       \"CRITICAL ERROR: RecursiveTaskTermination returned %d\",\n                       rc);\n            Exit(1);\n        }\n    }\n\n    p_info->current_task = NULL;\n\n    /* check scan termination status */\n    if (all_threads_idle())\n        signal_scan_finished();\n\n    return NULL;\n}\n\n/**\n * Audit module initialization\n * (called at deamon startup)\n *\n * The function looks at the content of the configuration structure\n * that have been previously parsed.\n *\n * It returns a status code:\n *   0 : initialization successful\n *   -1 : unexpected error at initialization.\n *   EINVAL : a parameter from the config file is invalid.\n */\nint Robinhood_InitScanModule(void)\n{\n    int st;\n    int rc, i;\n\n    /* fill-in be structures with zeros */\n    memset(&tasks_stack, 0, sizeof(tasks_stack));\n\n    /* initialize module, using configuration info */\n\n    /* preallocation parameters */\n    if (fs_scan_config.nb_prealloc_tasks > 0)\n        
SetNbPreallocTasks(fs_scan_config.nb_prealloc_tasks);\n\n    /* initializing task stack */\n\n    st = InitTaskStack(&tasks_stack);\n    if (st)\n        return st;\n\n    /* Initialize locks */\n\n    pthread_mutex_init(&lock_scan, NULL);\n\n    fsdev = get_fsdev();\n\n    if (!strcmp(global_config.fs_type, \"lustre\"))\n        is_lustre_fs = true;\n\n    /* initializing thread attrs */\n\n    pthread_attr_init(&thread_attrs);\n    pthread_attr_setscope(&thread_attrs, PTHREAD_SCOPE_SYSTEM);\n    pthread_attr_setdetachstate(&thread_attrs, PTHREAD_CREATE_JOINABLE);\n\n    /* dynamic allocation */\n    thread_list =\n        MemCalloc(fs_scan_config.nb_threads_scan, sizeof(thread_scan_info_t));\n    if (!thread_list)\n        return ENOMEM;\n\n    /* creating scanning threads  */\n\n    for (i = 0; i < fs_scan_config.nb_threads_scan; i++) {\n\n        thread_list[i].index = i;\n        thread_list[i].last_action = 0;\n        thread_list[i].current_task = NULL;\n\n        thread_list[i].force_stop = false;\n\n        thread_list[i].entries_handled = 0;\n        thread_list[i].entries_errors = 0;\n\n        timerclear(&thread_list[i].time_consumed);\n        timerclear(&thread_list[i].last_processing_time);\n\n        rc = pthread_create(&(thread_list[i].thread_scan), &thread_attrs,\n                            Thr_scan, &(thread_list[i]));\n\n        if (rc != 0) {\n            DisplayLog(LVL_CRIT, FSSCAN_TAG,\n                       \"ERROR %d CREATING SCANNING THREAD: %s\", rc,\n                       strerror(rc));\n            return rc;\n        }\n    }\n\n    return 0;\n\n}\n\n/**\n * Stop scan module\n */\nvoid Robinhood_StopScanModule(void)\n{\n    unsigned int i;\n    int running = 0;\n    char timestamp[128];\n    lmgr_t lmgr;\n\n    P(lock_scan);\n    /* is a scan really running ? 
*/\n    if (root_task != NULL) {\n        running = 1;\n    }\n    V(lock_scan);\n\n    /* terminate scan threads */\n    for (i = 0; i < fs_scan_config.nb_threads_scan; i++) {\n        thread_list[i].force_stop = true;\n    }\n\n    DisplayLog(LVL_EVENT, FSSCAN_TAG,\n               \"Stop request has been sent to all scan threads\");\n\n    /* if there are still threads doing something, wait for them */\n    if (!all_threads_idle())\n        wait_scan_finished();\n\n    /* update scan status in db */\n    if (running) {\n        if (ListMgr_InitAccess(&lmgr) == DB_SUCCESS) {\n            sprintf(timestamp, \"%lu\", (unsigned long)time(NULL));\n            ListMgr_SetVar(&lmgr, LAST_SCAN_END_TIME, timestamp);\n            ListMgr_SetVar(&lmgr, LAST_SCAN_STATUS, SCAN_STATUS_ABORTED);\n            ListMgr_CloseAccess(&lmgr);\n        } else {\n            DisplayLog(LVL_MAJOR, FSSCAN_TAG,\n                       \"WARNING: not able to update scan stats\");\n        }\n    }\n}\n\n/* Start a scan of the filesystem.\n * This creates a root task and push it to the stack of tasks.\n * @param partial_root NULL for full scan; subdir path for partial scan\n * @retval EBUSY if a scan is already running.\n */\nstatic int StartScan(void)\n{\n    robinhood_task_t *p_parent_task;\n    char timestamp[128];\n    char value[128];\n    lmgr_t lmgr;\n    int no_db = 0;\n    uint64_t count = 0LL;\n    int rc;\n\n    /* Lock scanning status */\n    P(lock_scan);\n\n    /* is a scan already running ? */\n    if (root_task != NULL) {\n        V(lock_scan);\n        DisplayLog(LVL_MAJOR, FSSCAN_TAG,\n                   \"An scan is already running on %s\",\n                   partial_scan_root ? 
partial_scan_root : global_config.\n                   fs_path);\n        return EBUSY;\n    }\n\n    /* create a root task */\n    p_parent_task = CreateTask();\n\n    if (p_parent_task == NULL) {\n        V(lock_scan);\n        DisplayLog(LVL_CRIT, FSSCAN_TAG,\n                   \"ERROR creating scan task for %s\",\n                   partial_scan_root ? partial_scan_root : global_config.\n                   fs_path);\n        return -1;\n    }\n\n    if (partial_scan_root) {\n        /* check that partial_root is under FS root */\n        if (strncmp(global_config.fs_path, partial_scan_root,\n                    strlen(global_config.fs_path))) {\n            V(lock_scan);\n            DisplayLog(LVL_CRIT, FSSCAN_TAG,\n                       \"ERROR scan root %s is not under fs root %s\",\n                       partial_scan_root, global_config.fs_path);\n            return -1;\n        }\n        p_parent_task->partial_scan_root = partial_scan_root;\n    }\n\n    /* always start at the root to get info about parent dirs */\n    strcpy(p_parent_task->path, global_config.fs_path);\n    p_parent_task->depth = 0;\n    p_parent_task->task_finished = false;\n\n    /* set the mother task, and remember start time */\n    root_task = p_parent_task;\n    scan_start_time = time(NULL);\n    gettimeofday(&accurate_start_time, NULL);\n\n    if (ListMgr_InitAccess(&lmgr) != DB_SUCCESS) {\n        no_db = 1;\n        DisplayLog(LVL_MAJOR, FSSCAN_TAG,\n                   \"WARNING: won't be able to update scan stats\");\n    }\n\n    if (!no_db) {\n        /* archive previous scan start/end time */\n        if (ListMgr_GetVar\n            (&lmgr, LAST_SCAN_START_TIME, timestamp,\n             sizeof(timestamp)) == DB_SUCCESS)\n            ListMgr_SetVar(&lmgr, PREV_SCAN_START_TIME, timestamp);\n        if (ListMgr_GetVar\n            (&lmgr, LAST_SCAN_END_TIME, timestamp,\n             sizeof(timestamp)) == DB_SUCCESS)\n            ListMgr_SetVar(&lmgr, PREV_SCAN_END_TIME, 
timestamp);\n\n        /* store current scan start time and status in db */\n        sprintf(timestamp, \"%lu\", (unsigned long)scan_start_time);\n        ListMgr_SetVar(&lmgr, LAST_SCAN_START_TIME, timestamp);\n        ListMgr_SetVar(&lmgr, LAST_SCAN_LAST_ACTION_TIME, timestamp);\n        ListMgr_SetVar(&lmgr, LAST_SCAN_STATUS, SCAN_STATUS_RUNNING);\n        /* store the number of scanning threads */\n        sprintf(value, \"%i\", fs_scan_config.nb_threads_scan);\n        ListMgr_SetVar(&lmgr, LAST_SCAN_NB_THREADS, value);\n\n        /* check if it is the first scan (avoid RM_OLD_ENTRIES in this case) */\n        is_first_scan = false;\n        rc = ListMgr_EntryCount(&lmgr, &count);\n\n        if ((rc == DB_SUCCESS) && (count == 0)) {\n            is_first_scan = true;\n            DisplayLog(LVL_EVENT, FSSCAN_TAG,\n                       \"Notice: this is the first scan (DB is empty)\");\n        } else if (rc)\n            DisplayLog(LVL_MAJOR, FSSCAN_TAG,\n                       \"Failed to retrieve entry count from DB: error %d\", rc);\n        else\n            DisplayLog(LVL_DEBUG, FSSCAN_TAG,\n                       \"%\" PRIu64 \" entries in DB before starting the scan\",\n                       count);\n\n        ListMgr_CloseAccess(&lmgr);\n    }\n\n    /* reset threads stats */\n    ResetScanStats(false);\n\n    /* unlock scanning status */\n    V(lock_scan);\n\n    /* start batching alerts */\n    Alert_StartBatching();\n\n    /* insert first task in stack */\n    InsertTask_to_Stack(&tasks_stack, p_parent_task);\n\n    /* indicates that a scan started in logs */\n    FlushLogs();\n\n    return 0;\n}\n\n/**\n * Start a new scan thread in case a previous thread has timed-out.\n * The new thread recovers the timed-out task and them become a standard\n * Thr_scan().\n */\nstatic void *Thr_scan_recovery(void *arg_thread)\n{\n    int st;\n\n    thread_scan_info_t *p_info = (thread_scan_info_t *) arg_thread;\n\n    p_info->last_action = time(NULL);\n\n    
/* Initialize buddy management */\n#ifdef _BUDDY_MALLOC\n    if (BuddyInit(&buddy_config)) {\n        DisplayLog(LVL_CRIT, FSSCAN_TAG,\n                   \"Error Initializing Memory Management\");\n        Exit(1);\n    }\n#endif\n\n    /* terminate and free current task */\n    st = RecursiveTaskTermination(p_info, p_info->current_task, false);\n    if (st) {\n        DisplayLog(LVL_CRIT, FSSCAN_TAG,\n                   \"CRITICAL ERROR: RecursiveTaskTermination returned %d\", st);\n        Exit(1);\n    }\n\n    return Thr_scan(arg_thread);\n\n}\n\n/**\n * Updates the max usage indicator (used for adaptive scan interval).\n */\nstatic void UpdateMaxUsage(void)\n{\n    char tmpval[1024];\n    double val;\n    lmgr_t lmgr;\n\n    if (ListMgr_InitAccess(&lmgr) != DB_SUCCESS) {\n        DisplayLog(LVL_MAJOR, FSSCAN_TAG, \"WARNING: can't update usage stats\");\n        return;\n    }\n\n    if (ListMgr_GetVar(&lmgr, USAGE_MAX_VAR, tmpval, sizeof(tmpval)) ==\n        DB_SUCCESS) {\n        if (sscanf(tmpval, \"%lf\", &val) == 1)\n            usage_max = val;\n    }\n\n    scan_interval = fs_scan_config.min_scan_interval\n        + (100.0 - usage_max) * 0.01 * (fs_scan_config.max_scan_interval -\n                                        fs_scan_config.min_scan_interval);\n\n    /* set current scan period, so it is available to client */\n    sprintf(tmpval, \"%lu\", (unsigned long)scan_interval);\n    ListMgr_SetVar(&lmgr, SCAN_INTERVAL_VAR, tmpval);\n\n    ListMgr_CloseAccess(&lmgr);\n}\n\n/**\n * Check thread's activity or start a scan if it's time.\n */\nint Robinhood_CheckScanDeadlines(void)\n{\n    int st;\n    char tmp_buff[256];\n    char tmp_buff2[256];\n    struct tm paramtm;\n\n    time_t loc_last_scan_time;\n    unsigned int loc_last_duration;\n    bool loc_scan_complete;\n    bool loc_scan_running;\n    time_t loc_start_time;\n    time_t loc_last_action;\n    time_t now;\n\n    /* compute scan interval (depending on last usage max) */\n    
UpdateMaxUsage();\n\n    /* Get scan info */\n\n    /* lock on scan status */\n    P(lock_scan);\n\n    /* retrieve stats */\n\n    loc_last_scan_time = last_scan_time;\n    loc_last_duration = last_duration;\n    loc_scan_complete = last_scan_complete;\n\n    if (root_task != NULL) {\n        unsigned int i;\n        time_t last_action = 0;\n\n        loc_scan_running = true;\n        loc_start_time = scan_start_time;\n\n        for (i = 0; i < fs_scan_config.nb_threads_scan; i++) {\n            if ((thread_list[i].current_task != NULL)\n                && (thread_list[i].last_action > last_action)) {\n                last_action = thread_list[i].last_action;\n            }\n        }\n\n        loc_last_action = last_action;\n    } else {\n        loc_scan_running = false;\n        loc_start_time = 0;\n        loc_last_action = 0;\n    }\n\n    V(lock_scan);\n\n    DisplayLog(LVL_FULL, FSSCAN_TAG, \"Verifying scan deadlines for %s\",\n               global_config.fs_path);\n\n    now = time(NULL);\n\n    /* debug traces */\n\n    if (loc_last_scan_time != 0) {\n        strftime(tmp_buff, 256, \"%Y/%m/%d %T\",\n                 localtime_r(&loc_last_scan_time, &paramtm));\n\n        DisplayLog(LVL_DEBUG, FSSCAN_TAG,\n                   \"last scan done in %u s: %s (%s)\", loc_last_duration,\n                   tmp_buff, (loc_scan_complete ? 
\"complete\" : \"partial\"));\n    }\n\n    if (loc_scan_running) {\n        strftime(tmp_buff, 256, \"%Y/%m/%d %T\",\n                 localtime_r(&loc_start_time, &paramtm));\n        strftime(tmp_buff2, 256, \"%Y/%m/%d %T\",\n                 localtime_r(&loc_last_action, &paramtm));\n        DisplayLog(LVL_DEBUG, FSSCAN_TAG,\n                   \"scan running: started at %s, last action: %s\", tmp_buff,\n                   tmp_buff2);\n    }\n\n    if (loc_scan_complete && !loc_scan_running\n        && (now - loc_last_scan_time >= scan_interval)) {\n        FormatDuration(tmp_buff, 256, scan_interval);\n\n        /* starting a new scan, if it's time */\n\n        DisplayLog(LVL_MAJOR, FSSCAN_TAG,\n                   \"Starting scan of %s (current scan interval is %s)\",\n                   partial_scan_root ? partial_scan_root : global_config.\n                   fs_path, tmp_buff);\n\n        st = StartScan();\n\n        if (st == EBUSY) {\n            DisplayLog(LVL_MAJOR, FSSCAN_TAG,\n                       \"An scan is already running on %s\",\n                       global_config.fs_path);\n        } else if (st != 0) {\n            DisplayLog(LVL_CRIT, FSSCAN_TAG, \"Error in StartScan on %s\",\n                       global_config.fs_path);\n            return st;\n        }\n\n    } else if (!loc_scan_complete && !loc_scan_running\n               && (now - loc_last_scan_time >= fs_scan_config.scan_retry_delay))\n    {\n        /* retry a scan, if the last was incomplete */\n\n        DisplayLog(LVL_MAJOR, FSSCAN_TAG, \"Starting scan of %s\",\n                   partial_scan_root ? 
partial_scan_root : global_config.\n                   fs_path);\n\n        st = StartScan();\n\n        if (st == EBUSY) {\n            DisplayLog(LVL_MAJOR, FSSCAN_TAG,\n                       \"An scan is already running on %s\",\n                       global_config.fs_path);\n        } else if (st != 0) {\n            DisplayLog(LVL_CRIT, FSSCAN_TAG, \"Error in StartScan on %s\",\n                       global_config.fs_path);\n            return st;\n        }\n\n    } else if (loc_scan_running) {\n        int i;\n\n        /* number of threads having a task assigned */\n        unsigned int nb_assigned = 0;\n\n        /* detect and manage hangs */\n\n        /* for each thread, check timeout */\n        for (i = 0; i < fs_scan_config.nb_threads_scan; i++) {\n            if (thread_list[i].current_task != NULL)\n                nb_assigned++;\n\n            if ((thread_list[i].current_task != NULL)\n                && (fs_scan_config.scan_op_timeout != 0)\n                && (time(NULL) - thread_list[i].last_action >\n                    fs_scan_config.scan_op_timeout)) {\n                DisplayLog(LVL_VERB, FSSCAN_TAG,\n                           \"Scan thread #%d looks stuck in %s\", i,\n                           thread_list[i].current_task->path);\n\n                /* check if the task is waiting for a lock */\n                if ((thread_list[i].current_task->parent_task == NULL)\n                    && (TestTaskTermination(thread_list[i].current_task))) {\n                    DisplayLog(LVL_VERB, FSSCAN_TAG,\n                               \"Actually, thread #%d is currently updating candidate list of %s\",\n                               i, global_config.fs_path);\n                } else {\n                    DisplayLog(LVL_MAJOR, FSSCAN_TAG,\n                               \"Hang of thread #%d while it was scanning %s (inactive for %ld sec)\",\n                               i, thread_list[i].current_task->path,\n                               
time(NULL) - thread_list[i].last_action);\n                    RaiseAlert(\"FS scan is blocked\",\n                               \"A thread has been inactive for %ld sec\\n\"\n                               \"while scanning directory %s\",\n                               time(NULL) - thread_list[i].last_action,\n                               thread_list[i].current_task->path);\n\n                    /* if the config says to exit on timeout => do it */\n                    if (fs_scan_config.exit_on_timeout) {\n                        DisplayLog(LVL_CRIT, FSSCAN_TAG,\n                                   \"exit_on_timeout is set in config file => EXITING\");\n                        Exit(ETIMEDOUT);\n                    }\n\n                    /* else restart the hung thread */\n                    if (TerminateThread(thread_list[i].thread_scan) == 0) {\n                        int rc;\n\n                        nb_hang_total++;\n\n                        /* increment the error counter */\n                        thread_list[i].entries_errors++;\n\n                        /* the monitoring thread does not terminate the task itself,\n                         * to avoid blocking it too. We start a recovery thread for\n                         * that. 
This new thread will then become a standard worker\n                         * thread, to replace the terminated one.\n                         */\n                        rc = pthread_create(&(thread_list[i].thread_scan),\n                                            &thread_attrs,\n                                            Thr_scan_recovery,\n                                            &(thread_list[i]));\n\n                        if (rc != 0) {\n                            DisplayLog(LVL_CRIT, FSSCAN_TAG,\n                                       \"ERROR CREATING SCANNING RECOVERY THREAD: %d: %s\",\n                                       rc, strerror(rc));\n                            return rc;\n                        }\n                    }\n                    /* end if thread terminated by monitoring thread */\n                }   /* end if child task */\n\n            }\n            /* end if hang detected */\n        }   /* end of loop on threads */\n\n        /* if no thread has a task assigned, and an scan is running for a while,\n         * there is something anormal: so, terminate the daemon.\n         */\n        if ((nb_assigned == 0) && (fs_scan_config.scan_op_timeout != 0)\n            && (now - loc_last_action > fs_scan_config.scan_op_timeout)\n            && (now - loc_start_time > fs_scan_config.scan_op_timeout)) {\n            DisplayLog(LVL_CRIT, FSSCAN_TAG,\n                       \"A scan on %s is supposed to be running, but no threads are active. Anormal situation. 
Exiting.\",\n                       global_config.fs_path);\n            Exit(1);\n        }\n\n    }\n    /* scan is running */\n    return 0;\n\n}\n\n/**\n * Retrieve some statistics about current and terminated audits.\n * (called by the statistic collector)\n *\n * Take as parameter a structure of statistics to be filled.\n */\nvoid Robinhood_StatsScan(robinhood_fsscan_stat_t *p_stats)\n{\n    /* compute scan interval (depending on last usage max) */\n    UpdateMaxUsage();\n\n    /* lock scan info */\n    P(lock_scan);\n\n    p_stats->last_fsscan_time = last_scan_time;\n    p_stats->last_duration = last_duration;\n    p_stats->scan_complete = last_scan_complete;\n    p_stats->current_scan_interval = scan_interval;\n\n    if (root_task != NULL) {\n        unsigned int i;\n        time_t last_action = 0;\n        struct timeval total_time = { 0, 0 };\n        struct timeval curr_time = { 0, 0 };\n        unsigned int nb_done = 0;\n\n        p_stats->scanned_entries = 0;\n        p_stats->error_count = 0;\n        p_stats->scan_running = true;\n        p_stats->start_time = scan_start_time;\n\n        for (i = 0; i < fs_scan_config.nb_threads_scan; i++) {\n            if ((thread_list[i].current_task != NULL)\n                && (thread_list[i].last_action > last_action)) {\n                last_action = thread_list[i].last_action;\n            }\n\n            /* entry processing time */\n            if (thread_list[i].entries_handled) {\n                timeradd(&thread_list[i].time_consumed, &total_time,\n                         &total_time);\n                timeradd(&thread_list[i].last_processing_time, &curr_time,\n                         &curr_time);\n                p_stats->scanned_entries += thread_list[i].entries_handled;\n                nb_done++;\n            }\n            p_stats->error_count += thread_list[i].entries_errors;\n        }\n\n        p_stats->last_action = last_action;\n\n        /* avg speed */\n        if 
(p_stats->scanned_entries)\n            p_stats->avg_ms_per_entry =\n                ((1000.0 * total_time.tv_sec) +\n                 (1E-3 * total_time.tv_usec)) /\n                (double)(p_stats->scanned_entries);\n        else\n            p_stats->avg_ms_per_entry = 0.0;\n\n        /* current speed */\n        if (nb_done)\n            p_stats->curr_ms_per_entry =\n                ((1000.0 * curr_time.tv_sec) +\n                 (1E-3 * curr_time.tv_usec)) / (double)(nb_done);\n        else\n            p_stats->curr_ms_per_entry = 0.0;\n\n    } else {\n        p_stats->scan_running = false;\n        p_stats->start_time = 0;\n        p_stats->last_action = 0;\n        p_stats->scanned_entries = 0;\n        p_stats->error_count = 0;\n        p_stats->avg_ms_per_entry = 0.0;\n        p_stats->curr_ms_per_entry = 0.0;\n    }\n\n    p_stats->nb_hang = nb_hang_total;\n\n    V(lock_scan);\n\n}\n"
  },
  {
    "path": "src/fs_scan/fs_scan.h",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2007, 2008, 2009 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n/**\n *  Module for filesystem scans.\n *\n */\n\n#ifndef _FSSCAN_H\n#define _FSSCAN_H\n\n/* tag for logs */\n#define FSSCAN_TAG \"FS_Scan\"\n\n#include \"fs_scan_types.h\"\n#include \"fs_scan_main.h\"\n\n/* defined in fs_scan.c */\nextern fs_scan_config_t  fs_scan_config;\nextern run_flags_t       fsscan_flags;\nextern const char       *partial_scan_root;\n\n/* Audit module relative types */\n\n/**\n * Structure of audit statistics.\n */\ntypedef struct robinhood_fsscan_stat__ {\n    /* stats about audits */\n    time_t          last_fsscan_time;\n    time_t          current_scan_interval;\n    unsigned int    last_duration;\n    int             scan_complete;\n    int             scan_running;\n    time_t          start_time;\n    time_t          last_action;\n    unsigned int    nb_hang;\n\n    /* current scan progression */\n    unsigned int    scanned_entries;\n    unsigned int    error_count;\n    double          avg_ms_per_entry;\n    double          curr_ms_per_entry;\n\n} robinhood_fsscan_stat_t;\n\n/**\n * Audit module initialization\n * (called at deamon startup)\n *\n * The function looks at the content of the configuration structure\n * that have been previously parsed.\n *\n * It returns a status code:\n *   0 : initialization successful\n *   -1 : unexpected error at initialization.\n *   EINVAL : a parameter from the config file is invalid.\n */\nint Robinhood_InitScanModule(void);\n\n/**\n * Stop audit module + wait for termination\n */\nvoid Robinhood_StopScanModule(void);\n\n/**\n 
* Wait for scan termination (one shot mode).\n */\nvoid wait_scan_finished(void);\n\n/**\n * Check if audit is to be started and thread hangs.\n * (called by the spooler)\n */\nint Robinhood_CheckScanDeadlines(void);\n\n/**\n * Retrieve some statistics about current and terminated audits.\n * (called by the statistic collector)\n *\n * Take as parameter a structure of statistics to be filled.\n */\nvoid Robinhood_StatsScan(robinhood_fsscan_stat_t *p_stats);\n\n#endif\n"
  },
  {
    "path": "src/fs_scan/fs_scan_main.c",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2007, 2008, 2009 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n/**\n * FS scan stop/start routines\n */\n\n#ifdef HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n\n#include \"fs_scan_main.h\"\n#include \"fs_scan.h\"\n#include \"rbh_misc.h\"\n#include \"rbh_logs.h\"\n#include \"rbh_cfg_helpers.h\"\n#include <pthread.h>\n#include <errno.h>\n#include <unistd.h>\n\nstatic pthread_t scan_starter_thread;\nstatic pthread_attr_t starter_attr;\nstatic bool terminate = false;\n\n/* Scan starter thread */\nstatic void *scan_starter(void *arg)\n{\n    int rc;\n\n    DisplayLog(LVL_VERB, FSSCAN_TAG, \"Launching FS Scan starter thread\");\n\n    if (fsscan_flags & RUNFLG_ONCE) {\n        rc = Robinhood_CheckScanDeadlines();\n        if (rc)\n            DisplayLog(LVL_CRIT, FSSCAN_TAG, \"Error %d checking FS Scan status\",\n                       rc);\n        pthread_exit(NULL);\n        return NULL;\n    }\n\n    /* not a one-shot mode */\n    while (!terminate) {\n        rc = Robinhood_CheckScanDeadlines();\n        if (rc)\n            DisplayLog(LVL_CRIT, FSSCAN_TAG, \"Error %d checking FS Scan status\",\n                       rc);\n\n        /* attente de la boucle suivante */\n        rh_sleep(fs_scan_config.spooler_check_interval);\n    }\n\n    return NULL;\n}\n\n/** Start FS Scan info collector */\nint FSScan_Start(run_flags_t flags, const char *partial_root)\n{\n    int rc;\n\n    fsscan_flags = flags;\n    partial_scan_root = partial_root;\n\n    if (partial_root) {\n        /* check that partial_root is under FS root */\n        if (strncmp\n            
(global_config.fs_path, partial_scan_root,\n             strlen(global_config.fs_path))) {\n            DisplayLog(LVL_CRIT, FSSCAN_TAG,\n                       \"ERROR scan root %s is not under fs root %s\",\n                       partial_scan_root, global_config.fs_path);\n            return EINVAL;\n        }\n    }\n\n    rc = Robinhood_InitScanModule();\n    if (rc)\n        return rc;\n\n    /* start a background thread */\n\n    pthread_attr_init(&starter_attr);\n    pthread_attr_setscope(&starter_attr, PTHREAD_SCOPE_SYSTEM);\n\n    if (pthread_create(&scan_starter_thread, &starter_attr, scan_starter,\n                       NULL))\n        return errno;\n\n    return 0;\n}\n\n/** Wait for scan termination */\nvoid FSScan_Wait(void)\n{\n    wait_scan_finished();\n}\n\n/** Stop FS Scan info collector */\nvoid FSScan_Terminate(void)\n{   /* @TODO */\n    terminate = true;\n\n    Robinhood_StopScanModule();\n}\n\n/** Store FS Scan into database */\nvoid FSScan_StoreStats(lmgr_t *lmgr)\n{\n    robinhood_fsscan_stat_t stats;\n    char tmp_buff[256];\n\n    Robinhood_StatsScan(&stats);\n\n    /* store the number of scanning threads */\n    sprintf(tmp_buff, \"%i\", fs_scan_config.nb_threads_scan);\n    ListMgr_SetVar(lmgr, LAST_SCAN_NB_THREADS, tmp_buff);\n\n    if (stats.scan_running) {\n        if (stats.last_action > 0) {\n            sprintf(tmp_buff, \"%lu\", (unsigned long)stats.last_action);\n            ListMgr_SetVar(lmgr, LAST_SCAN_LAST_ACTION_TIME, tmp_buff);\n        }\n\n        if (stats.scanned_entries) {\n            sprintf(tmp_buff, \"%u\", stats.scanned_entries);\n            ListMgr_SetVar(lmgr, LAST_SCAN_ENTRIES_SCANNED, tmp_buff);\n            sprintf(tmp_buff, \"%u\", stats.error_count);\n            ListMgr_SetVar(lmgr, LAST_SCAN_ERRORS, tmp_buff);\n            sprintf(tmp_buff, \"%.2f\", stats.avg_ms_per_entry);\n            ListMgr_SetVar(lmgr, LAST_SCAN_AVGMSPE, tmp_buff);\n            sprintf(tmp_buff, \"%.2f\", 
stats.curr_ms_per_entry);\n            ListMgr_SetVar(lmgr, LAST_SCAN_CURMSPE, tmp_buff);\n        }\n    }\n    sprintf(tmp_buff, \"%u\", stats.nb_hang);\n    ListMgr_SetVar(lmgr, LAST_SCAN_TIMEOUTS, tmp_buff);\n\n}\n\n/** Dump FS Scan stats to log file */\nvoid FSScan_DumpStats(void)\n{\n    robinhood_fsscan_stat_t stats;\n    struct tm paramtm;\n    char tmp_buff[256];\n    char tmp_buff2[256];\n\n    Robinhood_StatsScan(&stats);\n\n    DisplayLog(LVL_MAJOR, \"STATS\", \"======== FS scan statistics =========\");\n\n    if (stats.last_fsscan_time != 0) {\n        strftime(tmp_buff, 256, \"%Y/%m/%d %T\",\n                 localtime_r(&stats.last_fsscan_time, &paramtm));\n\n        DisplayLog(LVL_MAJOR, \"STATS\", \"last scan  = %s\", tmp_buff);\n\n        FormatDuration(tmp_buff, 256, stats.last_duration);\n\n        DisplayLog(LVL_MAJOR, \"STATS\", \"duration    = %s (%u s)\", tmp_buff,\n                   stats.last_duration);\n        DisplayLog(LVL_MAJOR, \"STATS\", \"status      = %s\",\n                   (stats.scan_complete ? 
\"complete\" : \"incomplete\"));\n    }\n\n    if (stats.current_scan_interval != 0) {\n        FormatDurationFloat(tmp_buff, 256, stats.current_scan_interval);\n        DisplayLog(LVL_MAJOR, \"STATS\", \"current scan interval = %s\", tmp_buff);\n    }\n\n    if (stats.scan_running) {\n        time_t now = time(NULL);\n\n        DisplayLog(LVL_MAJOR, \"STATS\", \"scan is running:\");\n\n        strftime(tmp_buff, 256, \"%Y/%m/%d %T\",\n                 localtime_r(&stats.start_time, &paramtm));\n        FormatDurationFloat(tmp_buff2, 256, now - stats.start_time);\n\n        DisplayLog(LVL_MAJOR, \"STATS\", \"     started at : %s (%s ago)\",\n                   tmp_buff, tmp_buff2);\n\n        strftime(tmp_buff, 256, \"%Y/%m/%d %T\",\n                 localtime_r(&stats.last_action, &paramtm));\n        FormatDurationFloat(tmp_buff2, 256, now - stats.last_action);\n\n        DisplayLog(LVL_MAJOR, \"STATS\", \"     last action: %s (%s ago)\",\n                   tmp_buff, tmp_buff2);\n\n        if (stats.scanned_entries) {\n            double speed;\n\n            DisplayLog(LVL_MAJOR, \"STATS\",\n                       \"     progress   : %u entries scanned (%u errors)\",\n                       stats.scanned_entries, stats.error_count);\n\n            if (stats.curr_ms_per_entry > 0.0)\n                speed =\n                    (1000.0 / stats.curr_ms_per_entry) *\n                    fs_scan_config.nb_threads_scan;\n            else\n                speed = 0.0;\n\n            DisplayLog(LVL_MAJOR, \"STATS\",\n                       \"     inst. speed (potential): %9.2f entries/sec (%4.2f ms/entry/thread)\",\n                       speed, stats.curr_ms_per_entry);\n\n            if (now - stats.start_time > 0)\n                DisplayLog(LVL_MAJOR, \"STATS\",\n                           \"     avg. 
speed  (effective): %9.2f entries/sec (%4.2f ms/entry/thread)\",\n                           (float)stats.scanned_entries / (float)(now -\n                                                                  stats.\n                                                                  start_time),\n                           stats.avg_ms_per_entry);\n        }\n    }\n\n    if (stats.nb_hang > 0)\n        DisplayLog(LVL_MAJOR, \"STATS\", \"scan operation timeouts = %u\",\n                   stats.nb_hang);\n\n}\n\n/* ------------ Config management functions --------------- */\n\n#define FSSCAN_CONFIG_BLOCK  \"FS_Scan\"\n#define IGNORE_BLOCK  \"Ignore\"\n\n#define MINUTE 60\n#define HOUR 3600\n#define DAY (24*HOUR)\n\nstatic void fs_scan_cfg_set_default(void *module_config)\n{\n    fs_scan_config_t *conf = (fs_scan_config_t *) module_config;\n\n#ifdef HAVE_CHANGELOGS\n    /* scan rarely */\n    conf->min_scan_interval = 7 * DAY;\n    conf->max_scan_interval = 30 * DAY;\n#else\n    /* scan often */\n    conf->min_scan_interval = 1 * DAY;\n    conf->max_scan_interval = 7 * DAY;\n#endif\n    conf->scan_retry_delay = HOUR;\n    conf->nb_threads_scan = 2;\n    conf->scan_op_timeout = 0;\n    conf->exit_on_timeout = false;\n    conf->spooler_check_interval = MINUTE;\n    conf->nb_prealloc_tasks = 256;\n\n    conf->ignore_list = NULL;\n    conf->ignore_count = 0;\n    conf->dir_list = NULL;\n    conf->completion_command = NULL;\n}\n\nstatic void fs_scan_cfg_write_default(FILE *output)\n{\n    print_begin_block(output, 0, FSSCAN_CONFIG_BLOCK, NULL);\n#ifdef _LUSTRE_HSM\n    print_line(output, 1, \"min_scan_interval      :    7d\");\n    print_line(output, 1, \"max_scan_interval      :   30d\");\n#else\n    print_line(output, 1, \"min_scan_interval      :    1d\");\n    print_line(output, 1, \"max_scan_interval      :    7d\");\n#endif\n    print_line(output, 1, \"scan_retry_delay       :    1h\");\n    print_line(output, 1, \"nb_threads_scan        :     2\");\n    
print_line(output, 1, \"scan_op_timeout        :     0 (disabled)\");\n    print_line(output, 1, \"exit_on_timeout        :    no\");\n    print_line(output, 1, \"spooler_check_interval :  1min\");\n    print_line(output, 1, \"nb_prealloc_tasks      :   256\");\n    print_line(output, 1, \"ignore                 :  NONE\");\n    print_line(output, 1, \"dir_list               :  NONE\");\n    print_line(output, 1, \"completion_command     :  NONE\");\n    print_end_block(output, 0);\n}\n\n/** add an item to the ignore list of the configuration */\nstatic int add_ignore_item(fs_scan_config_t *conf, config_item_t item,\n                           const char *blk_name, char *msg_out)\n{\n    conf->ignore_list = realloc(conf->ignore_list,\n                           (conf->ignore_count + 1) * sizeof(whitelist_item_t));\n    if (conf->ignore_list == NULL)\n        return ENOMEM;\n\n    conf->ignore_count++;\n\n    /* analyze and fill boolean expression */\n    return GetBoolExpr(item, blk_name,\n                       &conf->ignore_list[conf->ignore_count - 1].bool_expr,\n                       &conf->ignore_list[conf->ignore_count - 1].attr_mask,\n                       msg_out, NULL);\n}\n\n/** add a directroy to the scan list of the configuration */\nstatic int add_scan_dir(fs_scan_config_t *conf, const char *val,\n                        const char *blk_name, char *msg_out)\n{\n    conf->dir_list = realloc(conf->dir_list,\n                             (conf->dir_count + 1) * sizeof(char *));\n    if (conf->dir_list == NULL)\n        return ENOMEM;\n\n    conf->dir_list[conf->dir_count] = strdup(val);\n    conf->dir_count++;\n\n    return 0;\n}\n\n#define critical_err_check(_ptr_, _blkname_) do { if (!_ptr_) {\\\n                    sprintf(msg_out, \"Internal error reading %s block in \" \\\n                            \"config file\", _blkname_); \\\n                    return EFAULT; \\\n                 }\\\n            } while (0)\n\nstatic int 
fs_scan_cfg_read(config_file_t config, void *module_config,\n                            char *msg_out)\n{\n    int rc, index;\n    fs_scan_config_t *conf = (fs_scan_config_t *) module_config;\n    bool scan_intl_set = false;\n    time_t scan_intl = 0;\n    config_item_t fsscan_block;\n\n    static const char *fsscan_allowed[] = {\n        \"scan_interval\", \"min_scan_interval\", \"max_scan_interval\",\n        \"scan_retry_delay\", \"nb_threads_scan\", \"scan_op_timeout\",\n        \"exit_on_timeout\", \"spooler_check_interval\", \"nb_prealloc_tasks\",\n        \"completion_command\", \"scan_only\",\n        IGNORE_BLOCK, NULL\n    };\n\n    const cfg_param_t cfg_params[] = {\n        {\"nb_threads_scan\", PT_INT, PFLG_POSITIVE | PFLG_NOT_NULL,\n         &conf->nb_threads_scan, 0},\n        {\"scan_retry_delay\", PT_DURATION, PFLG_POSITIVE | PFLG_NOT_NULL,\n         &conf->scan_retry_delay, 0},\n        {\"scan_op_timeout\", PT_DURATION, PFLG_POSITIVE, &conf->scan_op_timeout,\n         0},\n        {\"exit_on_timeout\", PT_BOOL, 0, &conf->exit_on_timeout, 0},\n        {\"spooler_check_interval\", PT_DURATION, PFLG_POSITIVE | PFLG_NOT_NULL,\n         &conf->spooler_check_interval, 0},\n        {\"nb_prealloc_tasks\", PT_INT, PFLG_POSITIVE | PFLG_NOT_NULL,\n         &conf->nb_prealloc_tasks, 0},\n        /* completion command can contain wildcards: {cfg}, {fspath} ... */\n        {\"completion_command\", PT_CMD, 0,\n         &conf->completion_command, 0},\n        END_OF_PARAMS\n    };\n\n    /* get FS Scan block */\n    rc = get_cfg_block(config, FSSCAN_CONFIG_BLOCK, &fsscan_block, msg_out);\n    if (rc)\n        return rc == ENOENT ? 
0 : rc;   /* not mandatory */\n\n    /* read scalar parameters */\n    rc = read_scalar_params(fsscan_block, FSSCAN_CONFIG_BLOCK, cfg_params,\n                            msg_out);\n    if (rc)\n        return rc;\n\n    /* parameters with specific management */\n    rc = GetDurationParam(fsscan_block, FSSCAN_CONFIG_BLOCK,\n                          \"min_scan_interval\", PFLG_POSITIVE | PFLG_NOT_NULL,\n                          &conf->min_scan_interval, NULL, NULL, msg_out);\n    if ((rc != 0) && (rc != ENOENT))\n        return rc;\n    else if (rc == 0)\n        scan_intl_set = true;\n\n    rc = GetDurationParam(fsscan_block, FSSCAN_CONFIG_BLOCK,\n                          \"max_scan_interval\", PFLG_POSITIVE | PFLG_NOT_NULL,\n                          &conf->max_scan_interval, NULL, NULL, msg_out);\n    if ((rc != 0) && (rc != ENOENT))\n        return rc;\n    else if (rc == 0)\n        scan_intl_set = true;\n\n    rc = GetDurationParam(fsscan_block, FSSCAN_CONFIG_BLOCK,\n                          \"scan_interval\", PFLG_POSITIVE | PFLG_NOT_NULL,\n                          &scan_intl, NULL, NULL, msg_out);\n    if ((rc != 0) && (rc != ENOENT))\n        return rc;\n    else if (rc == 0) {\n        if (scan_intl_set) {\n            strcpy(msg_out,\n                   \"scan_interval parameter cannot be used with min/max_scan_interval\");\n            return EINVAL;\n        }\n        conf->min_scan_interval = scan_intl;\n        conf->max_scan_interval = scan_intl;\n    }\n\n    /* Find and parse \"ignore\" blocks and \"scan_only\" directives */\n    for (index = 0; index < rh_config_GetNbItems(fsscan_block);\n         index++) {\n        config_item_t curr_item;\n        int extra = 0;\n        char *name;\n        char *val;\n\n        curr_item = rh_config_GetItemByIndex(fsscan_block, index);\n        critical_err_check(curr_item, FSSCAN_CONFIG_BLOCK);\n\n        switch (rh_config_ItemType(curr_item)) {\n        case CONFIG_ITEM_VAR:\n            rc = 
rh_config_GetKeyValue(curr_item, &name, &val, &extra);\n            if (rc)\n                return EINVAL;\n\n            /* process only scan_only directives */\n            if (strcasecmp(name, \"scan_only\") != 0)\n                continue;\n\n            rc = add_scan_dir(conf, val, name, msg_out);\n            if (rc)\n                return rc;\n            break;\n\n        case CONFIG_ITEM_BLOCK:\n            name = rh_config_GetBlockName(curr_item);\n\n            /* process only ignore blocks */\n            if (strcasecmp(name, IGNORE_BLOCK) != 0)\n                continue;\n\n            rc = add_ignore_item(conf, curr_item, name, msg_out);\n            if (rc)\n                return rc;\n            break;\n\n        default:\n            /* other cases: ignore */\n            continue;\n        }\n\n    }   /* Loop on sub-items */\n\n    CheckUnknownParameters(fsscan_block, FSSCAN_CONFIG_BLOCK, fsscan_allowed);\n\n    return 0;\n}\n\n#define RELOAD_TAG  \"FS_Scan_Config\"\n\n/** Update ignore rules */\nstatic void update_ignore(whitelist_item_t *old_items, unsigned int old_count,\n                          whitelist_item_t *new_items, unsigned int new_count,\n                          const char *block_name)\n{\n    unsigned int i;\n\n    if (old_count != new_count) {\n        DisplayLog(LVL_MAJOR, RELOAD_TAG,\n                   \"Ignore rules count changed in block '%s' but cannot be modified dynamically: ignore update cancelled\",\n                   block_name);\n        return;\n    }\n\n    /* compare ignore boolean expression structure */\n    for (i = 0; i < new_count; i++) {\n        if (!attr_mask_equal(&old_items[i].attr_mask, &new_items[i].attr_mask)\n            || compare_boolexpr(&old_items[i].bool_expr,\n                                &new_items[i].bool_expr)) {\n            DisplayLog(LVL_MAJOR, RELOAD_TAG,\n                       \"Ignore expression #%u changed in block '%s'. 
\"\n                       \"Only numerical values can be modified dynamically. \"\n                       \"Skipping parameter update.\", i, block_name);\n            return;\n        }\n    }\n\n    /* if they are all the same, update/check their values */\n\n    for (i = 0; i < new_count; i++) {\n        if (update_boolexpr(&old_items[i].bool_expr, &new_items[i].bool_expr)) {\n            char criteriastr[2048];\n            BoolExpr2str(&old_items[i].bool_expr, criteriastr, 2048);\n            DisplayLog(LVL_EVENT, RELOAD_TAG,\n                       \"Ignore expression #%u in block '%s' has been updated and is now: %s\",\n                       i, block_name, criteriastr);\n        }\n    }\n\n    /* XXX attr_mask is unchanged, since we keep the same expression\n     *  structures */\n\n}   /* end update_ignore */\n\nstatic void free_ignore(whitelist_item_t *p_items, int count)\n{\n    int i;\n\n    if (p_items == NULL)\n        return;\n\n    for (i = 0; i < count; i++)\n        FreeBoolExpr(&p_items[i].bool_expr, false);\n\n    free(p_items);\n}\n\nstatic void free_scan_dirs(char **list, int count)\n{\n    int i;\n\n    if (list == NULL)\n        return;\n\n    /* last list item is NULL */\n    for (i = 0;  i < count; i++)\n        free(list[i]);\n\n    free(list);\n}\n\nstatic int fs_scan_cfg_reload(fs_scan_config_t *conf)\n{\n    /* Parameters that can be modified dynamically */\n\n    if (conf->min_scan_interval != fs_scan_config.min_scan_interval) {\n        DisplayLog(LVL_EVENT, \"FS_Scan_Config\",\n                   FSSCAN_CONFIG_BLOCK \"::min_scan_interval updated: %ld->%ld\",\n                   fs_scan_config.min_scan_interval, conf->min_scan_interval);\n        fs_scan_config.min_scan_interval = conf->min_scan_interval;\n    }\n\n    if (conf->max_scan_interval != fs_scan_config.max_scan_interval) {\n        DisplayLog(LVL_EVENT, \"FS_Scan_Config\",\n                   FSSCAN_CONFIG_BLOCK \"::max_scan_interval updated: %ld->%ld\",\n                
   fs_scan_config.max_scan_interval, conf->max_scan_interval);\n        fs_scan_config.max_scan_interval = conf->max_scan_interval;\n    }\n\n    if (conf->scan_retry_delay != fs_scan_config.scan_retry_delay) {\n        DisplayLog(LVL_EVENT, \"FS_Scan_Config\",\n                   FSSCAN_CONFIG_BLOCK \"::scan_retry_delay updated: %ld>%ld\",\n                   fs_scan_config.scan_retry_delay, conf->scan_retry_delay);\n        fs_scan_config.scan_retry_delay = conf->scan_retry_delay;\n    }\n\n    if (conf->scan_op_timeout != fs_scan_config.scan_op_timeout) {\n        DisplayLog(LVL_EVENT, \"FS_Scan_Config\",\n                   FSSCAN_CONFIG_BLOCK \"::scan_op_timeout updated: %ld->%ld\",\n                   fs_scan_config.scan_op_timeout, conf->scan_op_timeout);\n        fs_scan_config.scan_op_timeout = conf->scan_op_timeout;\n    }\n\n    if (conf->exit_on_timeout != fs_scan_config.exit_on_timeout) {\n        DisplayLog(LVL_EVENT, \"FS_Scan_Config\",\n                   FSSCAN_CONFIG_BLOCK \"::exit_on_timeout updated: %s->%s\",\n                   bool2str(fs_scan_config.exit_on_timeout),\n                   bool2str(conf->exit_on_timeout));\n        fs_scan_config.exit_on_timeout = conf->exit_on_timeout;\n    }\n\n    if (conf->spooler_check_interval != fs_scan_config.spooler_check_interval) {\n        DisplayLog(LVL_EVENT, \"FS_Scan_Config\",\n                   FSSCAN_CONFIG_BLOCK\n                   \"::spooler_check_interval updated: %ld->%ld\",\n                   fs_scan_config.spooler_check_interval,\n                   conf->spooler_check_interval);\n        fs_scan_config.spooler_check_interval = conf->spooler_check_interval;\n    }\n\n    if (compare_cmd\n        (conf->completion_command, fs_scan_config.completion_command)) {\n        DisplayLog(LVL_MAJOR, \"FS_Scan_Config\",\n                   FSSCAN_CONFIG_BLOCK\n                   \"::completion_command changed in config file, but cannot be modified dynamically\");\n        
g_strfreev(conf->completion_command);\n        conf->completion_command = NULL;\n    }\n\n    /* Parameters that canNOT be modified dynamically */\n\n    if (conf->nb_threads_scan != fs_scan_config.nb_threads_scan)\n        DisplayLog(LVL_MAJOR, \"FS_Scan_Config\",\n                   FSSCAN_CONFIG_BLOCK\n                   \"::nb_threads_scan changed in config file, but cannot be modified dynamically\");\n\n    if (conf->nb_prealloc_tasks != fs_scan_config.nb_prealloc_tasks)\n        DisplayLog(LVL_MAJOR, \"FS_Scan_Config\",\n                   FSSCAN_CONFIG_BLOCK\n                   \"::nb_prealloc_tasks changed in config file, but cannot be modified dynamically\");\n\n    /* compare ignore list */\n    update_ignore(fs_scan_config.ignore_list, fs_scan_config.ignore_count,\n                  conf->ignore_list, conf->ignore_count, FSSCAN_CONFIG_BLOCK);\n\n    return 0;\n}\n\nstatic int fs_scan_cfg_set(void *cfg, bool reload)\n{\n    fs_scan_config_t *conf = (fs_scan_config_t *) cfg;\n\n    if (reload)\n        return fs_scan_cfg_reload(conf);\n\n    fs_scan_config = *conf;\n    return 0;\n}\n\nstatic void fs_scan_cfg_write_template(FILE *output)\n{\n    print_begin_block(output, 0, FSSCAN_CONFIG_BLOCK, NULL);\n\n    print_line(output, 1, \"# simple scan interval (fixed)\");\n#ifdef HAVE_CHANGELOGS\n    print_line(output, 1, \"scan_interval      =   2d ;\");\n#else\n    print_line(output, 1, \"scan_interval      =   6h ;\");\n#endif\n    fprintf(output, \"\\n\");\n\n    print_line(output, 1, \"# min/max for adaptive scan interval:\");\n    print_line(output, 1,\n               \"# the more the filesystem is full, the more frequently it is scanned.\");\n#ifdef HAVE_CHANGELOGS\n    print_line(output, 1, \"#min_scan_interval      =   24h ;\");\n    print_line(output, 1, \"#max_scan_interval      =    7d ;\");\n#else\n    print_line(output, 1, \"#min_scan_interval      =    2h ;\");\n    print_line(output, 1, \"#max_scan_interval      =   12h ;\");\n#endif\n    
fprintf(output, \"\\n\");\n    print_line(output, 1,\n               \"# number of threads used for scanning the filesystem\");\n    print_line(output, 1, \"nb_threads_scan        =     2 ;\");\n    fprintf(output, \"\\n\");\n    print_line(output, 1,\n               \"# when a scan fails, this is the delay before retrying\");\n    print_line(output, 1, \"scan_retry_delay       =    1h ;\");\n    fprintf(output, \"\\n\");\n    print_line(output, 1, \"# timeout for operations on the filesystem\");\n    print_line(output, 1, \"scan_op_timeout        =    1h ;\");\n    print_line(output, 1, \"# exit if operation timeout is reached?\");\n    print_line(output, 1, \"exit_on_timeout        =    yes ;\");\n    print_line(output, 1, \"# external command called on scan termination\");\n    print_line(output, 1,\n               \"# special arguments can be specified: {cfg} = config file path,\");\n    print_line(output, 1, \"# {fspath} = path to managed filesystem\");\n    print_line(output, 1,\n               \"#completion_command     =    \\\"/path/to/my/script.sh -f {cfg} -p {fspath}\\\" ;\");\n    fprintf(output, \"\\n\");\n\n    print_line(output, 1,\n               \"# Internal scheduler granularity (for testing and of scan, hangs, ...)\");\n    print_line(output, 1, \"spooler_check_interval =  1min ;\");\n    fprintf(output, \"\\n\");\n    print_line(output, 1, \"# Memory preallocation parameters\");\n    print_line(output, 1, \"nb_prealloc_tasks      =   256 ;\");\n    fprintf(output, \"\\n\");\n    print_begin_block(output, 1, IGNORE_BLOCK, NULL);\n    print_line(output, 2,\n               \"# ignore \\\".snapshot\\\" and \\\".snapdir\\\" directories (don't scan them)\");\n    print_line(output, 2, \"type == directory\");\n    print_line(output, 2, \"and\");\n    print_line(output, 2, \"( name == \\\".snapdir\\\" or name == \\\".snapshot\\\" )\");\n    print_end_block(output, 1);\n    print_end_block(output, 0);\n}\n\nstatic void *fs_scan_cfg_new(void)\n{\n    
return calloc(1, sizeof(fs_scan_config_t));\n}\n\nstatic void fs_scan_cfg_free(void *cfg)\n{\n    fs_scan_config_t *conf;\n\n    if (cfg == NULL)\n        return;\n\n    conf = (fs_scan_config_t *) cfg;\n\n    free_ignore(conf->ignore_list, conf->ignore_count);\n    free_scan_dirs(conf->dir_list, conf->dir_count);\n\n    free(cfg);\n}\n\nmod_cfg_funcs_t fs_scan_cfg_hdlr = {\n    .module_name = \"FS scan\",\n    .new = fs_scan_cfg_new,\n    .free = fs_scan_cfg_free,\n    .set_default = fs_scan_cfg_set_default,\n    .read = fs_scan_cfg_read,\n    .set_config = fs_scan_cfg_set,\n    .write_default = fs_scan_cfg_write_default,\n    .write_template = fs_scan_cfg_write_template\n};\n"
  },
  {
    "path": "src/fs_scan/fs_scan_types.h",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2007, 2008, 2009 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n#ifndef _FSSCAN_TYPES_H\n#define _FSSCAN_TYPES_H\n\n#include \"rbh_const.h\"\n#include \"list_mgr.h\"\n\n#include <sys/types.h>\n#include <semaphore.h>\n#include <pthread.h>\n#include <sys/param.h>\n#include <sys/stat.h>\n#include <stdbool.h>\n\n/* a scanning task */\n\ntypedef struct robinhood_task__ {\n    /* absolute path of the directory to be read */\n    char            path[RBH_PATH_MAX];\n\n    /* relative path of the directory from parent task */\n    char            relpath[RBH_NAME_MAX];\n\n    /* fd to directory, kept until the task is freed for child tasks */\n    int fd;\n\n    /* the relative depth of the directory to be read */\n    unsigned int    depth;\n\n    /* id of this directory */\n    entry_id_t      dir_id;\n\n    /* metadatas of this directory */\n    struct stat     dir_md;\n\n    /* parent task */\n    struct robinhood_task__ *parent_task;\n\n    /* partial scan root, in case of partial scans\n     * or restricted scans */\n    const char *partial_scan_root;\n\n    /* lock for protecting the child list\n     * and the task_finished boolean.\n     */\n    pthread_spinlock_t       child_list_lock;\n\n    /* list of child tasks running */\n\n    struct robinhood_task__ *child_list;\n\n    /* this boolean indicates if the task is finished\n     * (not including child tasks)\n     */\n    bool task_finished;\n\n    /* these pointers are used for chaining a (child) task\n     * into its parent list of childs.\n     */\n    struct robinhood_task__ *prev_child;\n    struct 
robinhood_task__ *next_child;\n\n/* This pointer is used in 2 ways, depending\n   * on the structure status :\n   * - for chaining tasks in the scheduler (in a task_stack_t)\n   * - for chaining free structs in the pool manager\n   */\n    struct robinhood_task__ *next_task;\n\n} robinhood_task_t;\n\n/* We define a maximum value for ordering tasks into the stack,\n * but we however handle cases when it's over.\n * (we will consider that all the tasks over this limit\n * have the same prority)\n */\n#define MAX_TASK_DEPTH  255\n\n/* A stack of tasks ordered by depth,\n * handled by 'task_stack_mngmt' routines.\n */\ntypedef struct tasks_stack__ {\n    pthread_mutex_t     stack_lock; /* lock on the stack */\n    sem_t               sem_tasks;  /* token for available tasks */\n\n    /* Indicates the depth for the first task available */\n    unsigned int        max_task_depth;\n\n    /* list of tasks, ordered by depth */\n    robinhood_task_t   *tasks_at_depth[MAX_TASK_DEPTH + 1];\n\n} task_stack_t;\n\n#endif\n"
  },
  {
    "path": "src/fs_scan/task_stack_mngmt.c",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2007, 2008, 2009 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n/**\n * Module for managing FS scan tasks as a stack\n * with priorities on entry depth.\n */\n\n#ifdef HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n\n#include \"fs_scan.h\"\n#include \"task_stack_mngmt.h\"\n#include \"rbh_logs.h\"\n#include \"rbh_misc.h\"\n\n/* Initialize a stack of tasks */\nint InitTaskStack(task_stack_t *p_stack)\n{\n    unsigned int index;\n    int rc;\n\n    /* initialize each level of the priority stack */\n    for (index = 0; index <= MAX_TASK_DEPTH; index++) {\n        p_stack->tasks_at_depth[index] = NULL;\n    }\n\n    /* no task waiting for now */\n    p_stack->max_task_depth = 0;\n\n    /* initialize the lock and the semaphore for accessing the list */\n    pthread_mutex_init(&p_stack->stack_lock, NULL);\n\n    /* initially, no task available: sem=0 */\n    if ((rc = sem_init(&p_stack->sem_tasks, 0, 0))) {\n        pthread_mutex_destroy(&p_stack->stack_lock);\n        DisplayLog(LVL_CRIT, FSSCAN_TAG, \"ERROR initializing semaphore\");\n        return rc;\n    }\n\n    return 0;\n\n}\n\n/* insert a task in the stack */\nvoid InsertTask_to_Stack(task_stack_t *p_stack, robinhood_task_t *p_task)\n{\n    unsigned int prof = p_task->depth;\n\n    /* don't distinguish priorities over a given depth */\n    if (prof > MAX_TASK_DEPTH)\n        prof = MAX_TASK_DEPTH;\n\n    /* take the lock on stack */\n    P(p_stack->stack_lock);\n\n    /* insert the task at the good depth */\n    p_task->next_task = p_stack->tasks_at_depth[prof];\n    p_stack->tasks_at_depth[prof] = p_task;\n\n  
  /* update max_task_depth, if needed */\n    if (prof > p_stack->max_task_depth)\n        p_stack->max_task_depth = prof;\n\n    /* release the stack lock */\n    V(p_stack->stack_lock);\n\n    /* unblock waiting worker threads */\n    sem_post_safe(&p_stack->sem_tasks);\n\n}\n\n/* take a task (blocking until there is a task in the stack) */\nrobinhood_task_t *GetTask_from_Stack(task_stack_t *p_stack)\n{\n    robinhood_task_t *p_task;\n    int index;\n\n    /* wait for a task */\n    sem_wait_safe(&p_stack->sem_tasks);\n\n    /* lock the stack */\n    P(p_stack->stack_lock);\n\n    /* The scan is a 'depth first' scan: directly go to the highest depth. */\n    p_task = p_stack->tasks_at_depth[p_stack->max_task_depth];\n\n    /* sanity check */\n    if (p_task == NULL) {\n        V(p_stack->stack_lock);\n        DisplayLog(LVL_CRIT, FSSCAN_TAG, \"UNEXPECTED ERROR: NO TASK FOUND\");\n        return NULL;\n    }\n\n    /* update the list for this depth */\n    p_stack->tasks_at_depth[p_stack->max_task_depth] = p_task->next_task;\n\n    /* if the list at current depth is empty, we need to\n     * update max_task_depth.\n     */\n    if (p_task->next_task == NULL) {\n        for (index = p_stack->max_task_depth; index >= 0; index--) {\n            if (p_stack->tasks_at_depth[index] != NULL) {\n                p_stack->max_task_depth = index;\n                break;\n            }\n        }\n        /* no item found */\n        if (index < 0)\n            p_stack->max_task_depth = 0;\n    }\n\n    /* unlock the stack */\n    V(p_stack->stack_lock);\n\n    /* returns pointer to the task */\n    return p_task;\n\n}\n"
  },
  {
    "path": "src/fs_scan/task_stack_mngmt.h",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2007, 2008, 2009 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n/**\n * Module for managing task stack.\n *\n */\n\n#ifndef _TASK_STACK_MNGMT_H\n#define _TASK_STACK_MNGMT_H\n\n#include \"fs_scan_types.h\"\n\n/* initialize a task stack */\nint InitTaskStack(task_stack_t *p_stack);\n\n/* insert a task in the stack */\nvoid InsertTask_to_Stack(task_stack_t *p_stack, robinhood_task_t *p_task);\n\n/* take a task in the stack (block until there is a task available) */\nrobinhood_task_t *GetTask_from_Stack(task_stack_t *p_stack);\n\n#endif\n"
  },
  {
    "path": "src/fs_scan/task_tree_mngmt.c",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2007, 2008, 2009 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n/**\n * Tools for managing the tasks and their tree.\n */\n\n#ifdef HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n\n#include \"rbh_logs.h\"\n#include \"Memory.h\"\n#include \"task_tree_mngmt.h\"\n#include <string.h>\n#include <stdio.h>\n#include <unistd.h>\n\n#define sP(_lock_)  pthread_spin_lock(&(_lock_))\n#define sV(_lock_)  pthread_spin_unlock(&(_lock_))\n\n#ifdef _ONE_SHOT\n#define TIME_NOT_SET ((unsigned int)-1)\n#endif\n\n/* defaut pool size for preallocation tasks */\nstatic size_t nb_tasks_prealloc = 256;\nstatic pthread_mutex_t mutex_spool = PTHREAD_MUTEX_INITIALIZER;\nstatic robinhood_task_t *tasks_pool = NULL;\nstatic mem_stat_t stat_mem_tach = { 0, 0 };\n\n/* Set chunk size for preallocation mechanism */\nvoid SetNbPreallocTasks(size_t nb_prealloc)\n{\n    nb_tasks_prealloc = nb_prealloc;\n}\n\nvoid TasksMemInfo(mem_stat_t *p_mem_stat)\n{\n    *p_mem_stat = stat_mem_tach;\n}\n\n/* Allocate and initialize a size structure */\nrobinhood_task_t *CreateTask()\n{\n    robinhood_task_t *p_task;\n\n    GET_PREALLOC(p_task, tasks_pool, nb_tasks_prealloc,\n                 robinhood_task_t, next_task, mutex_spool, stat_mem_tach);\n\n    if (p_task == NULL)\n        return NULL;\n\n    /* zero all fields, except fd */\n    memset(p_task, 0, sizeof(robinhood_task_t));\n    p_task->fd = -1;\n\n    /* initialize spin lock */\n    pthread_spin_init(&p_task->child_list_lock, 0);\n\n    return p_task;\n}\n\n/* Free task resources */\nint FreeTask(robinhood_task_t *p_task)\n{\n    if (p_task->fd != -1)\n 
       close(p_task->fd); /* check rc? */\n    pthread_spin_destroy(&p_task->child_list_lock);\n\n    /* put it back to the allocation pool */\n    RELEASE_PREALLOC(p_task, tasks_pool, next_task, mutex_spool, stat_mem_tach);\n\n    return 0;\n}\n\n/* Add a child task (must be called by parent task) */\nvoid AddChildTask(robinhood_task_t *p_parent_task,\n                  robinhood_task_t *p_child_task)\n{\n    /* set parent task for this sub-task */\n    p_child_task->parent_task = p_parent_task;\n\n    /* lock the sub-task list of parent */\n    sP(p_parent_task->child_list_lock);\n\n    /* add the sub-task as first list item */\n    if (p_parent_task->child_list == NULL) {\n        p_child_task->prev_child = NULL;\n        p_child_task->next_child = NULL;\n        p_parent_task->child_list = p_child_task;\n    } else {\n        p_child_task->prev_child = NULL;\n        p_child_task->next_child = p_parent_task->child_list;\n        p_parent_task->child_list->prev_child = p_child_task;\n        p_parent_task->child_list = p_child_task;\n    }\n    sV(p_parent_task->child_list_lock);\n\n}\n\n/* Remove a child task from a parent task.\n * This is called by a child task, when it finishes.\n * Return TRUE if the parent task and all its children are completed,\n * FALSE otherwise.\n */\nint RemoveChildTask(robinhood_task_t *p_parent_task,\n                    robinhood_task_t *p_child_task)\n{\n    int done;\n\n    /* take the lock on sub-task list */\n    sP(p_parent_task->child_list_lock);\n\n    /* remove child task from list */\n    if (p_child_task->next_child != NULL)\n        p_child_task->next_child->prev_child = p_child_task->prev_child;\n\n    if (p_child_task->prev_child != NULL)\n        p_child_task->prev_child->next_child = p_child_task->next_child;\n    else\n        /* it was the head item */\n        p_parent_task->child_list = p_child_task->next_child;\n\n    done = (p_parent_task->child_list == NULL)\n        && (p_parent_task->task_finished);\n\n    
sV(p_parent_task->child_list_lock);\n\n    /* this task is now orphan */\n    p_child_task->parent_task = NULL;\n\n    return done;\n\n}\n\n/* Tag a task as completed i.e. finished its own work\n * (but it can possibly still have sub-tasks running).\n * Return TRUE if all sub-tasks are terminated too,\n * FALSE otherwise.\n */\nbool FlagTaskAsFinished(robinhood_task_t *p_task)\n{\n    bool done;\n\n    /* take the lock */\n    sP(p_task->child_list_lock);\n\n    p_task->task_finished = true;\n\n    done = (p_task->child_list == NULL);\n\n    sV(p_task->child_list_lock);\n\n    return done;\n}\n\n/* Test is a task is completely done\n * (its own work + its sub-tasks).\n */\nbool TestTaskTermination(robinhood_task_t *p_task)\n{\n    bool done;\n\n    /* take the lock */\n    sP(p_task->child_list_lock);\n\n    done = (p_task->task_finished && (p_task->child_list == NULL));\n\n    sV(p_task->child_list_lock);\n\n    return done;\n}\n"
  },
  {
    "path": "src/fs_scan/task_tree_mngmt.h",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2007, 2008, 2009 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n/**\n * Tools for managing tasks and their tree organisation.\n */\n\n#ifndef _TREE_TACHES_H\n#define _TREE_TACHES_H\n\n#include \"fs_scan_types.h\"\n#include \"Memory.h\"\n\n/* Set the number of preallocated tasks for the pool.\n */\nvoid SetNbPreallocTasks(size_t nb_prealloc);\n\n/* Create and initialize a task */\nrobinhood_task_t *CreateTask(void);\n\n/* Free the resources of a task */\nint FreeTask(robinhood_task_t *p_task);\n\n/* Add a child task (called by the parent) */\nvoid AddChildTask(robinhood_task_t *p_parent_task,\n                  robinhood_task_t *p_child_task);\n\n/* Remove a child task from a parent task.\n * This is called by a child task, when it finishes.\n * Return TRUE if the parent task and all its children are completed,\n * FALSE otherwise.\n */\nint RemoveChildTask(robinhood_task_t *p_parent_task,\n                    robinhood_task_t *p_child_task);\n\n/**\n * Tag a task as completed i.e. finished its own work\n * (but it can possibly still have sub-tasks running).\n * \\retval true if all sub-tasks are terminated too.\n * \\retval false otherwise.\n */\nbool FlagTaskAsFinished(robinhood_task_t *p_task);\n\n/* Test if a task is totally finished (i.e. the parent task\n * is finished, and all its children too )\n */\nbool TestTaskTermination(robinhood_task_t *p_task);\n\nvoid TasksMemInfo(mem_stat_t *p_mem_stat);\n\n#endif\n"
  },
  {
    "path": "src/include/Makefile.am",
    "content": "\nSUFFIXES = .def\n\nTYPEGEN=$(top_srcdir)/scripts/type_gen.pl\n\n.def.h:\n\t$(TYPEGEN) $< $*.h\n\nnoinst_HEADERS=db_types.h  list_mgr.h RW_Lock.h \\\n\t\tuidgidcache.h Memory.h rbh_const.h rbh_logs.h \\\n\t\trbh_cfg.h rbh_misc.h config_parsing.h \\\n\t\tglobal_config.h entry_processor.h status_manager.h\\\n\t\tfs_scan_main.h chglog_reader.h policy_run.h\\\n\t\txplatform_print.h lustre_extended_types.h \\\n\t\tpolicy_rules.h queue.h  \\\n\t\tentry_proc_hash.h list.h \\\n        lustre/lustre_errno.h update_params.h \\\n        db_schema.h db_schema.def pipeline_types.h \\\n        rbh_params.h rbh_types.h rbh_boolexpr.h rbh_cfg_helpers.h \\\n        rbh_modules.h rbh_basename.h\n\ndb_schema.h: db_schema.def $(TYPEGEN)\nall: db_schema.h\n\nCLEANFILES = db_schema.h\n\nindent:\n\t$(top_srcdir)/scripts/indent.sh\n"
  },
  {
    "path": "src/include/Memory.h",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2007, 2008, 2009 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n/**\n *  \\file  Memory.h\n *  \\brief Definition of memory management routines.\n */\n\n#ifndef _MEMORY_H\n#define _MEMORY_H\n\n#include <stdlib.h>\n#include <errno.h>\n\n#ifdef _BUDDY_MALLOC\n\n#include \"BuddyMalloc.h\"\n\n#define MemAlloc(_a)       BuddyMallocExit(_a)\n#define MemCalloc(_s1, _s2) BuddyCalloc(_s1, _s2)\n#define MemRealloc(_p, _s)   BuddyRealloc((caddr_t)(_p), _s)\n#define MemFree(_a)        BuddyFree((caddr_t) (_a))\n#define MemErrno            BuddyErrno\n\n#define GetPreferedPool(_n, _s)  BuddyPreferedPoolCount(_n, _s)\n\n#else\n\n#define MemAlloc(s)       malloc(s)\n#define MemCalloc(_n, _s)   calloc((_n), (_s))\n#define MemRealloc(p, s)  realloc(p, s)\n#define MemFree(p)        free(p)\n#define MemErrno            errno\n\n#define GetPreferedPool(_n, _s)  (_n)\n\n#endif\n\n/** memory pool stats */\ntypedef struct mem_stat_t {\n    unsigned int nb_prealloc;\n    unsigned int nb_used;\n} mem_stat_t;\n\n#ifndef _DEBUG_MEMORY\n\n/**\n *\n * STUFF_PREALLOC: Allocates a pool of pre-allocated entries.\n *\n * This macro Allocates a pool of pre-allocated entries. It calls\n * malloc to get the spool as an arry and then chains all the\n * entries together. 
Each entry is supposed to have a specific\n * 'next' field, a pointer to an object of its own type, to be\n * used as a pointer to the next entry in the pool.\n *\n * If BuddyMalloc is used, it is supposed to be already initialised.\n *\n * @param pool the preallocted pool that we want to init.\n * @param nb the number of entries to be allocated.\n * @param type the type of the entries to be allocated.\n * @param name_next the name of the field, in structure of type 'type' which pointer to the next entry.\n *\n * @return  nothing (this is a macro), but pool will be NULL if an error occures.\n *\n */\n\n#define STUFF_PREALLOC(_pool, _nb, _type, name_next, memstats)            \\\ndo {                                                                      \\\n  int _i = 0 ;                                                            \\\n  unsigned int _prefered = 0 ;                                            \\\n                                                                          \\\n  _prefered = GetPreferedPool(_nb, sizeof(_type));                        \\\n  _pool= NULL ;                                                           \\\n                                                                          \\\n  if ((_pool = (_type *)MemCalloc(_prefered, sizeof(_type))) != NULL) {   \\\n      memstats.nb_prealloc += _prefered ;                                 \\\n      for (_i = 0 ; _i < _prefered ; _i++) {                              \\\n          if (_i != _prefered - 1)                                        \\\n            _pool[_i].name_next = &(_pool[_i+1]) ;                        \\\n          else                                                            \\\n            _pool[_i].name_next = NULL ;                                  \\\n      }                                                                   \\\n  }                                                                       \\\n} while (0)\n\n/**\n *\n * GET_PREALLOC: Gets an entry in a 
preallocated pool.\n *\n * This macro is used to get an entry from a pre-allocated pool. If the pool is\n * empty, the macro STUFF_PREALLOC will be called with the same last four\n * arguments to extend the pool. If nb is set to zero during this call,\n * STUFF_PREALLOC is not called and no entry is get from the pool that is empty.\n *\n * @param entry the entry we need.\n * @param pool the preallocted pool that we want to init.\n * @param nb the number of entries to be allocated.\n * @param type the type of the entries to be allocated.\n * @param name_next the name of the field, in structure of type 'type' which\n * pointer to the next entry.\n *\n * @return  nothing (this is a macro), but entry will be NULL if an error\n * occures.\n *\n */\n#define GET_PREALLOC(entry, pool, nb, type, name_next, mutex, memstats) \\\ndo {                                                                    \\\n  pthread_mutex_lock(&mutex);                                           \\\n                                                                        \\\n  if ((pool == NULL) && (nb != 0))                                      \\\n    STUFF_PREALLOC(pool, nb, type, name_next, memstats) ;               \\\n                                                                        \\\n  if (pool != NULL) {                                                   \\\n      memstats.nb_used += 1 ;                                           \\\n      entry = pool ;                                                    \\\n      pool = entry->name_next ;                                         \\\n  }                                                                     \\\n  else                                                                  \\\n   entry = NULL ;                                                       \\\n                                                                        \\\n  pthread_mutex_unlock(&mutex);                                         \\\n} while (0)\n\n/**\n *\n * 
RELEASE_PREALLOC: Releases an entry and puts it back to the pool.\n *\n * When an entry is no used any more, this macro is used to put it\n * back to the pool, so that it could be reuse later. The released\n * entry is chained to the pool, through the 'name_next' field.\n *\n * @param entry the entry to be released.\n * @param pool the pool to which the entry belongs.\n * @param name_next the name of the field, in structure of type 'type' which pointer to the next entry.\n *\n * @return nothing (this is a macro).\n *\n */\n#define RELEASE_PREALLOC(entry, pool, name_next, mutex , memstats)      \\\ndo {                                                                    \\\n      pthread_mutex_lock(&mutex);                                       \\\n      entry->name_next = pool ;                                         \\\n      pool = entry ;                                                    \\\n      memstats.nb_used -= 1 ;                                           \\\n      pthread_mutex_unlock(&mutex);                                     \\\n} while (0)\n\n#else\n\n#define GET_PREALLOC(entry, pool, nb, type, name_next, mutex, memstats) \\\n        (entry = (type *)malloc(sizeof(type)))\n\n#define RELEASE_PREALLOC(entry, pool, name_next, mutex , memstats)      \\\n        (free(entry))\n\n#endif\n\n#endif\n"
  },
  {
    "path": "src/include/RW_Lock.h",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2007, 2008, 2009 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n/**\n * \\file RW_Lock.h\n * \\brief This file contains the defintions of the functions and types for the\n *        RW lock management.\n */\n\n#ifndef _RW_LOCK_H\n#define _RW_LOCK_H\n\n#include <pthread.h>\n/* My habit with mutex */\n#ifndef P\n#ifndef DEBUG\n#define P(mutex) pthread_mutex_lock(&mutex)\n#define V(mutex)  pthread_mutex_unlock(&mutex)\n#else\n#define P(mutex){ int rc ; \\\n        if((rc = pthread_mutex_lock(&mutex)) != 0) \\\n            printf(\"  --> Erreur P: %d %d\\n\", rc, errno) ;}\n#define V(mutex){ int rc ; \\\n        if((rc = pthread_mutex_unlock(&mutex)) != 0) \\\n            printf(\"  --> Erreur V: %d %d\\n\", rc, errno) ;}\n#endif\n#endif\n\n/* Type representing the lock itself */\ntypedef struct _RW_LOCK {\n    unsigned int    nbr_active;\n    unsigned int    nbr_waiting;\n    unsigned int    nbw_active;\n    unsigned int    nbw_waiting;\n    pthread_mutex_t mutexProtect;\n    pthread_cond_t  condWrite;\n    pthread_cond_t  condRead;\n    pthread_mutex_t mcond;\n} rw_lock_t;\n\nint rw_lock_init(rw_lock_t *plock);\nint rw_lock_destroy(rw_lock_t *plock);\nint P_w(rw_lock_t *plock);\nint V_w(rw_lock_t *plock);\nint P_r(rw_lock_t *plock);\nint V_r(rw_lock_t *plock);\nint rw_lock_downgrade(rw_lock_t *plock);\n\n#endif /* _RW_LOCK */\n"
  },
  {
    "path": "src/include/chglog_reader.h",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2009 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n/**\n * \\file    chglog_reader.h\n * \\author  Th. Leibovici\n * \\brief   Interface for Lustre MDT Changelog processing.\n */\n\n/**\n * \\addtogroup CHANGE_LOGS\n * @{\n */\n#ifndef _CHGLOG_READER_H\n#define _CHGLOG_READER_H\n\n#include \"config_parsing.h\"\n#include \"rbh_const.h\"\n#include \"list_mgr.h\"\n#include <stdbool.h>\n\n#define MDT_NAME_MAX  32\n#define READER_ID_MAX 16\n\ntypedef struct mdt_def_t {\n    char mdt_name[MDT_NAME_MAX];\n    char reader_id[READER_ID_MAX];\n} mdt_def_t;\n\n/** Configuration for ChangeLog reader Module */\ntypedef struct chglog_reader_config_t {\n    /** List of MDTs (used for opening ChangeLogs) */\n    mdt_def_t *mdt_def;\n    unsigned int mdt_count;\n\n    /* nbr of changelog records to be agregated for llapi_changelog_clear() */\n    int batch_ack_count;\n\n    bool force_polling;\n    time_t polling_interval;\n\n    /* Maximum number of operations to keep in the internal queue. */\n    int queue_max_size;\n\n    /* Age of the opration we keep in the internal queue before we\n     * push them to thepipeline. */\n    time_t queue_max_age;\n\n    /* Interval at which we have to check whether operation in the\n     * internal queue have aged. */\n    time_t queue_check_interval;\n\n    /* Max delay to update last committed changelog record */\n    time_t commit_update_max_delay;\n\n    /* Max delta of record id to update last committed changelog record */\n    int64_t commit_update_max_delta;\n\n    /* Options suported by the MDS. 
LU-543 and LU-1331 are related to\n     * events in changelog, where a rename is overriding a destination\n     * file. */\n    bool mds_has_lu543;\n    bool mds_has_lu1331;\n\n} chglog_reader_config_t;\n\n/** start ChangeLog Readers\n * \\param mdt_index -1 for all\n */\nint cl_reader_start(run_flags_t flags, int mdt_index);\n\n/** terminate ChangeLog Readers */\nint cl_reader_terminate(void);\n\n/** wait for ChangeLog Readers termination */\nint cl_reader_wait(void);\n\n/** Release last changelog records, and dump the final stats. */\nint cl_reader_done(void);\n\n/** dump changelog processing stats */\nint cl_reader_dump_stats(void);\n\n/** store changelog stats to db.\n * Must be called after cl_reader_dump_stats() as\n * cl_reader_store_stats() updates last_report time\n */\nvoid cl_reader_store_stats(lmgr_t *lmgr);\n\n/** config handlers */\nextern mod_cfg_funcs_t cl_reader_cfg_hdlr;\n\n#endif\n\n/** @} */\n"
  },
  {
    "path": "src/include/config_parsing.h",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2004-2009 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n/**\n * \\file config_parsing.h\n * \\author Th. Leibovici\n *\n * \\brief High-level functions for browsing syntax tree.\n */\n/**\n * \\addtogroup CONFIG_PARSING\n * @{\n */\n#ifndef _CONFIG_PARSING_H\n#define _CONFIG_PARSING_H\n\n#include <stdlib.h>\n#include <stdio.h>\n#include <sys/types.h>\n#include <stdbool.h>\n\n/* opaque type */\ntypedef caddr_t config_file_t;\ntypedef caddr_t config_item_t;\n\ntypedef enum {\n    CONFIG_ITEM_BLOCK = 1,\n    CONFIG_ITEM_VAR,\n    CONFIG_ITEM_BOOL_EXPR,\n    CONFIG_ITEM_SET\n} config_item_type;\n\n/**\n * Reads the content of a configuration file and\n * stores it in a memory structure.\n * \\return NULL on error.\n */\nconfig_file_t rh_config_ParseFile(char *file_path);\n\n/**\n * If config_ParseFile returns a NULL pointer,\n * config_GetErrorMsg returns a detailed message\n * to indicate the reason for this error.\n */\nchar *rh_config_GetErrorMsg(void);\n\n/**\n * config_Print:\n * Print the content of the syntax tree\n * to a file.\n */\nvoid rh_config_Print(FILE *output, config_file_t config);\n\n/** Free the memory structure that store the configuration. 
*/\nvoid rh_config_Free(config_file_t config);\n\n/**\n * Indicates how many main blocks are defined into the config file.\n * \\return A positive value if no error.\n *         Else return a negative error code.\n */\nint rh_config_GetNbBlocks(config_file_t config);\n\n/**\n * Retrieves a given block from the config file, from its index\n */\nconfig_item_t rh_config_GetBlockByIndex(config_file_t config,\n                                        unsigned int block_no);\n\n/**\n * Return the name of a block\n */\nchar *rh_config_GetBlockName(config_item_t block);\n\n/**\n * Return the block identifier, if it exists\n */\nchar *rh_config_GetBlockId(config_item_t block);\n\n/**\n * Indicates how many items are defined in a block\n */\nint rh_config_GetNbItems(config_item_t block);\n\n/**\n * Count how many items with the given name are defined in a block\n */\nint rh_config_CountItemNames(config_item_t block, const char *name);\n\n/**\n * Count how many blocks with the given name are in config file\n */\nint rh_config_CountBlockNames(config_file_t cfg, const char *name);\n\n/**\n * Retrieves an item from a given block and the subitem index.\n */\nconfig_item_t rh_config_GetItemByIndex(config_item_t block,\n                                       unsigned int item_no);\n\n/**\n * Indicates which type of item it is\n */\nconfig_item_type rh_config_ItemType(config_item_t item);\n\n/* indicates which type of block content */\nconfig_item_type rh_config_ContentType(config_item_t block);\n\n/**\n * Retrieves a key-value peer from a CONFIG_ITEM_VAR\n */\nint rh_config_GetKeyValue(config_item_t item,\n                          char **var_name, char **var_value,\n                          int *have_extra_args);\n\n/**\n * Returns the number of arguments\n */\nint rh_config_GetExtraArgs(config_item_t item, char ***p_extra_arg_array);\n\n/**\n * Returns a block or variable with the specified name. 
This name can be \"BLOCK::SUBBLOCK::SUBBLOCK\".\n *\n * @param[in,out] ensure_unique Pointer to boolean. If set to true in input,\n *                              the function checks the item name is unique.\n *                              If it is not, the boolean is set to false in\n *                              output and the conflicting item is returned\n *                              (second item found).\n */\nconfig_item_t rh_config_FindItemByName(config_file_t config, const char *name,\n                                       bool *ensure_unique);\n\n/**\n * Directly returns the value of the key with the specified name.\n * This name can be \"BLOCK::SUBBLOCK::SUBBLOCK::VARNAME\".\n *\n * @param[in,out] ensure_unique Pointer to boolean. If set to true in input,\n *                              the function checks the item name is unique.\n *                              If it is not, the boolean is set to false in\n *                              output and the conflicting item is returned\n *                              (second item found).\n */\nchar *rh_config_FindKeyValueByName(config_file_t config, const char *key_name,\n                                   bool *ensure_unique);\n\n/**\n * Returns a block or variable with the specified name from the given block.\n *\n * @param[in,out] ensure_unique Pointer to boolean. If set to true in input,\n *                              the function checks the item name is unique.\n *                              If it is not, the boolean is set to false in\n *                              output and the conflicting item is returned\n *                              (second item found).\n */\nconfig_item_t rh_config_GetItemByName(config_item_t block, const char *name,\n                                      bool *ensure_unique);\n\n/**\n * Directly returns the value of the key with the specified name\n * relative to the given block.\n *\n * @param[in,out] ensure_unique Pointer to boolean. 
If set to true in input,\n *                              the function checks the item name is unique.\n *                              If it is not, the boolean is set to false in\n *                              output and the conflicting item is returned\n *                              (second item found).\n */\nchar *rh_config_GetKeyValueByName(config_item_t block, const char *key_name,\n                                  bool *ensure_unique);\n\n/**\n * Get item line\n */\nint rh_config_GetItemLine(config_item_t item);\n\n/**\n * Check if the item has been read\n * (to check for unknown parameters).\n */\nbool rh_config_IsRead(config_item_t item);\n\n#endif\n\n/**\n * @}\n */\n"
  },
  {
    "path": "src/include/db_schema.def",
    "content": "%header\n\n/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2009 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n/**\n * \\file   lustre_hsm_types.h\n * \\brief  Defines data types for Lustre-HSM purpose.\n */\n\n#include <sys/types.h>\n#include <sys/param.h>\n#include <errno.h>\n#include <string.h>\n#include \"rbh_types.h\"\n#include \"global_config.h\"\n\n#ifdef _LUSTRE\n#include \"lustre_extended_types.h\"\n#endif\n\n#if (defined (_LUSTRE) && defined(_HAVE_FID))\n\n#define FID_PK\ntypedef lustre_fid entry_id_t;\n\n#define entry_id_equal( _p_id_1, _p_id_2 ) ( ((_p_id_1)->f_seq == (_p_id_2)->f_seq) && ((_p_id_1)->f_oid == (_p_id_2)->f_oid)  )\n\n#else\n\n#define INUM_PK\ntypedef struct __entry_id__\n{\n  uint64_t fs_key;\n  ino_t inode;\n} entry_id_t;\n\n#define entry_id_equal( _p_id_1, _p_id_2 ) ( ((_p_id_1)->fs_key == (_p_id_2)->fs_key) && ((_p_id_1)->inode == (_p_id_2)->inode)  )\n\n#endif\n\nstatic int lmgr_gen_depth( void * tgt, const void * src )\n{\n    const char * fullpath = (char*)src;\n    unsigned int * p_depth = (unsigned int *)tgt;\n\n    const char     *curr;\n    unsigned int   nb1;\n    unsigned int   nb2;\n    /* depth = number of '/' - 1 - depth of root fs.\n     * E.g.: root=\"/mnt/lustre\", path=\"/mnt/lustre/dir/foo\", depth=4-2-1=1\n     */\n\n    nb1 = 0;\n    curr = global_config.fs_path;\n    while ( ( curr = strchr( curr, '/' ) ) )\n    {\n        curr++;\n        nb1++;\n    }\n\n    nb2 = 0;\n    curr = fullpath;\n    while ( ( curr = strchr( curr, '/' ) ) )\n    {\n        curr++;\n        nb2++;\n    }\n\n    (*p_depth) = nb2 - nb1 - 1;\n\n   return 0;\n}\n\ntypedef union {\n    
int  num;\n    char txt[RBH_LOGIN_MAX];\n} uidgid_u;\n \n%attrdef\n\n# /!\\ Entry attribute type must fit with DB type\n# name,\tC type,\tdb type, size (C and db), flags (INIT_ONLY, FREQ_ACCESS, ANNEX_INFO, DIR_ATTR or SLINK_ATTR)\n\n# path information\nparent_id,      entry_id_t,    DB_ID,          0, FREQ_ACCESS | INDEXED | DNAMES\nname,           char, DB_TEXT, RBH_NAME_MAX, FREQ_ACCESS | DNAMES\npath_update,    unsigned int, DB_UINT, 0, FREQ_ACCESS | DNAMES\n\n# built from NAMES table\nfullpath,   char, DB_TEXT, RBH_PATH_MAX, FUNC_ATTR | DNAMES\n\n#generated from fullpath\ndepth, \tunsigned int, DB_UINT, 0, GENERATED, fullpath, lmgr_gen_depth\n\n# directory meta-attributes\ndircount,       unsigned int,  DB_UINT,        0, DIR_ATTR, type, NULL\navgsize,        uint64_t,      DB_BIGUINT,     0, DIR_ATTR, type, NULL\n\n# info in main table\n# POSIX attrs\nuid,        uidgid_u,   DB_UIDGID,  0,   FREQ_ACCESS\ngid,        uidgid_u,   DB_UIDGID,  0,   FREQ_ACCESS\nprojid,  unsigned int,   DB_UINT,    0, FREQ_ACCESS\nsize,       uint64_t,   DB_BIGUINT, 0,   FREQ_ACCESS\nblocks,     uint64_t,   DB_BIGUINT, 0,   FREQ_ACCESS\ncreation_time,  unsigned int,   DB_UINT,    0, FREQ_ACCESS\nlast_access, \tunsigned int, DB_UINT, 0, FREQ_ACCESS\nlast_mod, \tunsigned int, DB_UINT, 0, FREQ_ACCESS\nlast_mdchange, unsigned int,   DB_UINT, 0,   FREQ_ACCESS\ntype,\t    \tchar,   DB_ENUM_FTYPE, 32, FREQ_ACCESS\nmode,       unsigned short, DB_USHORT, 0, FREQ_ACCESS\nnlink,      unsigned int, DB_UINT, 0, FREQ_ACCESS\n# metadata update\nmd_update,\t unsigned int, DB_UINT, 0, FREQ_ACCESS\n# FIXME only for POSIX or Lustre 1.8\ninvalid, \t bool, DB_BOOL, 0, FREQ_ACCESS\n\n# fileclasses and last update\nfileclass,        char,           DB_TEXT,   1024,   FREQ_ACCESS | SEPD_LIST\nclass_update,     unsigned int,   DB_UINT,      0,   FREQ_ACCESS\n\n# info that is set once for each file\nstripe_info, \tstripe_info_t,  DB_STRIPE_INFO, 0,   INIT_ONLY\nstripe_items,   stripe_items_t, 
DB_STRIPE_ITEMS, 0,  INIT_ONLY\n\n# link content in annex info\nlink,          char, DB_TEXT, RBH_PATH_MAX,     ANNEX_INFO | SLINK_ATTR\n\n#only for removed entries (read-only)\nrm_time, unsigned int, DB_UINT, 0, REMOVED\n"
  },
  {
    "path": "src/include/db_types.h",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2008, 2009 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n/**\n * \\file   db_types.h\n * \\author Th. Leibovici\n * \\brief  This file defines database specific types.\n */\n/**\n * \\addtogroup LIST_MANAGER\n * @{\n */\n#ifndef _DB_TYPES_H\n#define _DB_TYPES_H\n\n#include \"rbh_const.h\"\n#include <stdbool.h>\n\n#ifdef _MYSQL\n\n#define DB_ENGINE_NAME    \"MySQL\"\n\n#include <mysql/mysql.h>\n\ntypedef MYSQL       db_conn_t;\ntypedef MYSQL_RES  *result_handle_t;\n\n/** specific database configuration */\ntypedef struct db_config_t {\n    char server[256];\n    char db[256];\n    char user[256];\n    char password[256];\n    int port;\n    char socket[RBH_PATH_MAX];\n    char engine[1024];\n    char tokudb_compression[50];\n} db_config_t;\n\n#elif defined(_SQLITE)\n\n#define DB_ENGINE_NAME    \"SQLite\"\n\n#include <sqlite3.h>\n\ntypedef sqlite3 *db_conn_t;\n\ntypedef struct result_handle_t {\n    char            **result_array;\n    unsigned int      curr_row;\n    int               nb_rows;\n    int               nb_cols;\n} result_handle_t;\n\ntypedef struct db_config_t {\n    char         filepath[RBH_PATH_MAX];\n    unsigned int retry_delay_microsec;  /* retry time when busy */\n} db_config_t;\n\n#else\n#error \"No database type was specified\"\n#endif\n\n#endif\n\n/**\n * @}\n */\n"
  },
  {
    "path": "src/include/entry_proc_hash.h",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2013 CEA/DAM\n * Copyright (C) 2013 Cray Inc.\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n/**\n * Misc tools for managing entry processor pipeline\n */\n\n#ifndef _ENTRY_PROC_HASH_H\n#define _ENTRY_PROC_HASH_H\n\n#include <glib.h>\n\n/* A hash table slot. */\nstruct id_hash_slot {\n    pthread_mutex_t      lock;\n    struct rh_list_head  list;   /* list of ops */\n    unsigned int         count;\n};\n\n/* A hash table. */\nstruct id_hash {\n    unsigned int         hash_size;\n    struct id_hash_slot  slot[];\n};\n\n/** Return a suitable hash table size for the given entry count */\nunsigned int max_count_to_hash_size(unsigned int max_count);\n\n/**\n * Creates a new hash table for operation entries.\n * @return the new hash table.\n */\nstruct id_hash *id_hash_init(const unsigned int hash_size, bool use_lock);\n\n/* display stats about the hash */\nvoid id_hash_stats(struct id_hash *id_hash, const char *log_str);\n\n/* dump all values in the hash */\nvoid id_hash_dump(struct id_hash *id_hash, bool parent);\n\n/**\n * Murmur3 uint64 finalizer\n * from: https://github.com/aappleby/smhasher/blob/master/src/MurmurHash3.cpp\n */\nstatic inline uint64_t __hash64(uint64_t k)\n{\n    k ^= k >> 33;\n    k *= 0xff51afd7ed558ccdLLU;\n    k ^= k >> 33;\n    k *= 0xc4ceb9fe1a85ec53LLU;\n    k ^= k >> 33;\n    return k;\n}\n\nstatic inline uint64_t id_hash64(const entry_id_t *p_id)\n{\n#ifdef FID_PK\n    return __hash64(p_id->f_seq ^ p_id->f_oid);\n#else\n    return __hash64(p_id->fs_key ^ p_id->inode);\n#endif\n}\n\nstatic inline unsigned int hash_id(const entry_id_t *p_id, 
unsigned int modulo)\n{\n    return id_hash64(p_id) % modulo;\n}\n\nstatic inline unsigned int hash_name(const entry_id_t *p_id,\n                                     const char *name, unsigned int modulo)\n{\n    return (id_hash64(p_id) ^ g_str_hash(name)) % modulo;\n}\n\n/* return a slot pointer. */\nstatic inline struct id_hash_slot *get_hash_slot(struct id_hash *id_hash,\n                                                 const entry_id_t *p_id)\n{\n    return &id_hash->slot[hash_id(p_id, id_hash->hash_size)];\n}\n\n/* return a slot pointer. */\nstatic inline struct id_hash_slot *get_name_hash_slot(struct id_hash *name_hash,\n                                                      const entry_id_t *\n                                                      parent_id,\n                                                      const char *name)\n{\n    return &name_hash->slot[hash_name(parent_id, name, name_hash->hash_size)];\n}\n\n#endif\n"
  },
  {
    "path": "src/include/entry_processor.h",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2008-2015 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n/**\n * \\file   entry_processor.h\n * \\author Th. Leibovici\n * \\brief  This file describes entry processor types and calls.\n */\n/**\n * \\addtogroup ENTRY_PROCESSOR\n * @{\n */\n\n#ifndef _ENTRY_PROC_H\n#define _ENTRY_PROC_H\n\n#include \"list.h\"\n#include \"config_parsing.h\"\n#include \"rbh_boolexpr.h\"\n#include <stdint.h>\n#include \"list_mgr.h\"\n\n/* Tag for Logs */\n#define ENTRYPROC_TAG   \"EntryProc\"\n\n/* === pipeline stage flags ===  */\n\n/* parallelism */\n#define STAGE_FLAG_SEQUENTIAL    0x00000001\n#define STAGE_FLAG_FORCE_SEQ     0x00000002 /* temporarily force sequential */\n#define STAGE_FLAG_MAX_THREADS   0x00000004 /* hard coded limit */\n#define STAGE_FLAG_PARALLEL      0x00000008 /* if set, refer to max threads in\n                                               configuration */\n\n/* synchronism */\n#define STAGE_FLAG_SYNC          0x00000010\n#define STAGE_FLAG_ASYNC         0x00000020\n\n/* constraint for entries with same ID */\n#define STAGE_FLAG_ID_CONSTRAINT 0x00000100\n\n/* === common types === */\n\n/* forward declaration */\nstruct entry_proc_op_t;\n\n/**\n * Definition of pipeline stage functions\n */\ntypedef int (*step_function_t) (struct entry_proc_op_t *, lmgr_t *lmgr);\ntypedef int (*step_batch_function_t) (struct entry_proc_op_t **, int count,\n                                      lmgr_t *lmgr);\n\n/**\n * Callback to check if 2 operations can be batched together.\n */\ntypedef bool(*test_batchable_func_t) (struct entry_proc_op_t *,\n                          
            struct entry_proc_op_t *,\n                                      attr_mask_t *full_mask);\n\n/**\n * Definition of a pipeline stage\n */\ntypedef struct pipeline_stage_t {\n    unsigned int        stage_index;      /**< index of this pipeline stage */\n    const char         *stage_name;       /**< name of this pipeline stage */\n    step_function_t     stage_function;   /**< function for performing the stage */\n    step_batch_function_t stage_batch_function;  /**< function for performing\n                                            the stage with operation batching */\n    test_batchable_func_t test_batchable; /**< function to check if 2 operations\n                                             are batchable for the batch step */\n    int                 stage_flags;      /**< stage qualifiers */\n    unsigned int        max_thread_count; /**< 0 = UNLIMITED */\n\n} pipeline_stage_t;\n\n/**\n * This structure indicates pipeline steps index and limits\n */\ntypedef struct pipeline_descr_t {\n    unsigned int    stage_count;\n    unsigned int    GET_ID;\n    unsigned int    GET_INFO_DB;\n    unsigned int    GET_INFO_FS;\n    unsigned int    GC_OLDENT;\n    unsigned int    DB_APPLY;\n} pipeline_descr_t;\n\n/* pipeline currently in operation */\nextern pipeline_stage_t *entry_proc_pipeline;\nextern pipeline_descr_t  entry_proc_descr;\nextern void             *entry_proc_arg;    /* pipeline specific arguments */\n\n/* include purpose specific pipeline definitions:\n * These includes MUST define:\n * - pipeline_stage_t entry_proc_pipeline[] array\n * - PIPELINE_STAGE_COUNT\n * - op_extra_info_t type\n */\n#include \"pipeline_types.h\"\n\n/**\n * callback function definition\n * @param struct entry_proc_op_t * : the structure associated to the entry\n * @param void * : InfoCollector specific parameter\n */\ntypedef int (*callback_func_t) (lmgr_t *, struct entry_proc_op_t *, void *);\n\n/**\n * function prototype for freeing extra_info\n */\ntypedef void 
(*free_func_t) (void *);\n\n/** operation submitted to the pipeline */\ntypedef struct entry_proc_op_t {\n    /** current stage in pipeline */\n    unsigned int    pipeline_stage;\n\n    /* what is set in this structure ? */\n    unsigned int    entry_id_is_set:1;\n    unsigned int    db_attr_is_set:1;\n    unsigned int    fs_attr_is_set:1;\n    unsigned int    extra_info_is_set:1;\n\n    /* entry exists in db */\n    unsigned int    db_exists:1;\n\n    /* internal flag for pipeline management */\n    unsigned int    being_processed:1;\n    unsigned int    id_is_referenced:1;\n    unsigned int    name_is_referenced:1;\n\n    /* fid needs to be retrieved from db. This is a workaround for\n     * Lustre servers that do not have LU-543. */\n    unsigned int    get_fid_from_db:1;\n\n    /* for changelog unlink record only, determine in pipeline if file\n     * is last and must be completely removed from DB. This is a\n     * workaround for Lustre servers that do not have LU-1331\n     * (extended records/CLF_RENAME_LAST). */\n    unsigned int    check_if_last_entry:1;\n\n    /* for pipeline flush: indicate if not seen entries must be cleaned */\n    unsigned int    gc_entries:1;\n    /* for pipeline flush: indicate if not seen paths must be cleaned\n     * (preserve entries). Used for partial scans. 
*/\n    unsigned int    gc_names:1;\n\n    operation_type_e db_op_type;\n    callback_func_t callback_func;\n    void           *callback_param;\n\n    /* === Entry information === */\n    entry_id_t      entry_id;\n\n    /* list of attrs to be retrieved from DB */\n    attr_mask_t     db_attr_need;\n    /* list of attrs to be retrieved from FS */\n    attr_mask_t     fs_attr_need;\n\n    /* attrs from DB (cached) */\n    attr_set_t      db_attrs;\n    /* attrs from FS (new) */\n    attr_set_t      fs_attrs;\n    /* true if the striping in DB is up-to-date (do not require a DB update)*/\n    bool            db_stripe_ok;\n\n    op_extra_info_t extra_info;\n    free_func_t     extra_info_free_func;\n\n    /* ========================= */\n\n    union {\n        time_t      changelog_inserted;  /* used by changelog reader */\n        struct      timeval start_processing_time;   /* used by pipeline */\n    } timestamp;\n\n    /* double chained list for pipeline */\n    struct rh_list_head list;\n\n    /* double chained list for hash storage (used by constraint on id) */\n    struct rh_list_head id_hash_list;\n\n    /* double chained list for hash storage (used by constraint on parent/name)\n     */\n    struct rh_list_head name_hash_list;\n\n} entry_proc_op_t;\n\n/* test attribute from filesystem, or else from DB */\n#define ATTR_FSorDB_TEST(_entry_op_p, _attr) \\\n        (ATTR_MASK_TEST(&(_entry_op_p)->fs_attrs, _attr) || \\\n         ATTR_MASK_TEST(&(_entry_op_p)->db_attrs, _attr))\n\n/* get attribute from filesystem, or else from DB */\n#define ATTR_FSorDB(_entry_op_p, _attr) \\\n        (ATTR_MASK_TEST(&(_entry_op_p)->fs_attrs, _attr) ? 
\\\n         ATTR(&(_entry_op_p)->fs_attrs, _attr) :           \\\n         ATTR(&(_entry_op_p)->db_attrs, _attr))\n\n#define NEED_ANYSTATUS(_op) ((_op)->fs_attr_need.status != 0)\n#define NEED_GETSTATUS(_op, _i) ((_op)->fs_attr_need.status & SMI_MASK(_i))\n#define NEED_GETSTRIPE(_op) ((_op)->fs_attr_need.std & \\\n                             (ATTR_MASK_stripe_info | ATTR_MASK_stripe_items))\n#define NEED_GETPATH(_op) ((_op)->fs_attr_need.std & \\\n                            (ATTR_MASK_fullpath | ATTR_MASK_name | \\\n                             ATTR_MASK_parent_id | ATTR_MASK_depth))\n#define NEED_GETATTR(_op) ((_op)->fs_attr_need.std & POSIX_ATTR_MASK)\n#define NEED_READLINK(_op) ((_op)->fs_attr_need.std & ATTR_MASK_link)\n#define NEED_GETPROJID(_op) ((_op)->fs_attr_need.std & ATTR_MASK_projid)\n\n/** config handlers */\nextern mod_cfg_funcs_t entry_proc_cfg_hdlr;\n\n/* ===== entry processor calls ===== */\n\n/**\n *  Initialize entry processor pipeline\n */\nint EntryProcessor_Init(pipeline_flavor_e flavor, run_flags_t flags, void *arg);\n\n/**\n * Terminate EntryProcessor\n * \\param flush_ops: wait the queue to be flushed\n */\nint EntryProcessor_Terminate(bool flush_ops);\n\n/**\n * This function adds a new operation to the queue\n */\nvoid EntryProcessor_Push(entry_proc_op_t *p_entry);\n\n/**\n * Advise that the entry is ready for next step of the pipeline.\n * @param next_stage The next stage to be performed for this entry\n * @param remove This flag indicates that the entry must be removed\n *        from pipeline (basically after the last step).\n */\nint EntryProcessor_Acknowledge(entry_proc_op_t *p_op,\n                               unsigned int next_stage, bool remove);\n\n/** Acknowledge a batch of operations */\nint EntryProcessor_AcknowledgeBatch(entry_proc_op_t **p_op, unsigned int count,\n                                    unsigned int next_stage, bool remove);\n\n/**\n * Set entry id.\n */\nstatic void inline 
EntryProcessor_SetEntryId(entry_proc_op_t *p_op,\n                                             const entry_id_t *p_id)\n{\n    p_op->entry_id_is_set = 1;\n    p_op->entry_id = *p_id;\n\n    /* @TODO: remember this reference about this entry id (to check id\n     * constraints) */\n}\n\n/**\n *  Returns a clean new entry_proc_op_t structure.\n */\nentry_proc_op_t *EntryProcessor_Get(void);\n\n/**\n * Release an entry op.\n */\nvoid EntryProcessor_Release(entry_proc_op_t *p_op);\n\n/**\n * Dump info about pipeline stages\n */\nvoid EntryProcessor_DumpCurrentStages(void);\n\n/**\n * Unblock processing in a stage.\n */\nvoid EntryProcessor_Unblock(int stage);\n\n#endif\n/**\n * @}\n */\n"
  },
  {
    "path": "src/include/fs_scan_main.h",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2008, 2009 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n/**\n * \\file    fs_scan_main.h\n * \\author  Th. Leibovici\n * \\brief   Interface for FS Scan info provider.\n */\n\n/**\n * \\addtogroup FS_SCAN\n * @{\n */\n#ifndef _FS_SCAN_MAIN_H\n#define _FS_SCAN_MAIN_H\n\n#include \"config_parsing.h\"\n#include \"policy_rules.h\"\n#include <stdbool.h>\n\n/** start scanning module */\nint FSScan_Start(run_flags_t flags, const char *partial_root);\n\n/** terminate scanning module */\nvoid FSScan_Terminate(void);\n\n/** wait for scan termination */\nvoid FSScan_Wait(void);\n\n/** dump scan stats */\nvoid FSScan_DumpStats(void);\n\n/** store scan stats in db */\nvoid FSScan_StoreStats(lmgr_t *lmgr);\n\n/** Configuration of the FS scan Module */\ntypedef struct fs_scan_config_t {\n    /* scan options */\n\n    unsigned int    nb_threads_scan;\n    time_t          min_scan_interval;\n    time_t          max_scan_interval;\n    time_t          scan_retry_delay;\n    time_t          scan_op_timeout;\n    bool            exit_on_timeout;\n\n    /**\n     * interval of the spooler (checks for audits to be launched,\n     * thread hangs, ...) 
*/\n    time_t          spooler_check_interval;\n\n    /** memory management */\n    unsigned        nb_prealloc_tasks;\n\n    /** ignore list (bool expr) */\n    whitelist_item_t *ignore_list;\n    unsigned int    ignore_count;\n\n    /** list of directories to scan (if different from fs_root) */\n    char          **dir_list;\n    unsigned int    dir_count;\n\n    char          **completion_command;\n\n} fs_scan_config_t;\n\n/** config handlers */\nextern mod_cfg_funcs_t fs_scan_cfg_hdlr;\n\n#endif\n\n/** @} */\n"
  },
  {
    "path": "src/include/global_config.h",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2008, 2009 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n/**\n * \\file  global_config.h\n * \\brief Global configuration parameters\n */\n#ifndef _GLB_CFG_H\n#define _GLB_CFG_H\n\n#include \"rbh_cfg.h\"\n#include \"rbh_const.h\"\n#include <sys/param.h>  /* for RBH_PATH_MAX */\n#include <stdio.h>\n#include <stdbool.h>\n\ntypedef enum {\n    FSKEY_ERROR = 0,\n    FSKEY_FSNAME,\n    FSKEY_FSID,\n    FSKEY_DEVID\n} fs_key_t;\n\n/**\n * General Robinhood configuration\n */\ntypedef struct global_config_t {\n    /* filesystem description */\n    char    fs_path[RBH_PATH_MAX];\n    char    fs_type[FILENAME_MAX];\n\n    /* lock file */\n    char    lock_file[RBH_PATH_MAX];\n\n    fs_key_t fs_key;\n\n    /* behavior flags */\n    bool    stay_in_fs;\n    bool    check_mounted;\n    bool    last_access_only_atime;\n    bool    uid_gid_as_numbers;\n\n#if defined(_LUSTRE) && defined(_MDS_STAT_SUPPORT)\n    /** Direct stat to MDS on Lustre filesystems */\n    bool    direct_mds_stat;\n#endif\n\n#ifdef _LUSTRE\n    /* Lustre project ID support */\n    bool    lustre_projid;\n#endif\n} global_config_t;\n\n/** global config structure available to all modules */\nextern global_config_t global_config;\n\n/** handlers for global config */\nextern mod_cfg_funcs_t global_cfg_hdlr;\n\n#endif\n"
  },
  {
    "path": "src/include/list.h",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright 2013 Cray Inc. All Rights Reserved.\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n/**\n *\n * \\file    list.h\n * \\brief   Double link list implementation.\n *\n * This uses a similar API as the linux kernel, with an extra prefix to\n * avoid a name space conflict with MySQL's my_list.h.\n */\n\n#ifndef _RH_LIST_H\n#define _RH_LIST_H\n\n#include <stdlib.h>\n\nstruct rh_list_head {\n    struct rh_list_head *next;\n    struct rh_list_head *prev;\n};\n\n/* Initialize a list or an element. */\nstatic inline void rh_list_init(struct rh_list_head *head)\n{\n    head->next = head;\n    head->prev = head;\n}\n\n/* Add to the head. */\nstatic inline void rh_list_add(struct rh_list_head *l,\n                               struct rh_list_head *head)\n{\n    head->next->prev = l;\n    l->next = head->next;\n    l->prev = head;\n    head->next = l;\n}\n\n/* Add to the tail. */\nstatic inline void rh_list_add_tail(struct rh_list_head *l,\n                                    struct rh_list_head *head)\n{\n    head->prev->next = l;\n    l->next = head;\n    l->prev = head->prev;\n    head->prev = l;\n}\n\n/* Remove from a list. */\nstatic inline void rh_list_del(struct rh_list_head *l)\n{\n    l->next->prev = l->prev;\n    l->prev->next = l->next;\n}\n\n/* Remove from a list and re-initialize. */\nstatic inline void rh_list_del_init(struct rh_list_head *l)\n{\n    rh_list_del(l);\n    l->next = NULL;\n    l->prev = NULL;\n}\n\n/* Add list2 after list1. list2 must not be empty. 
*/\nstatic inline void rh_list_splice_tail(struct rh_list_head *list1,\n                                       const struct rh_list_head *list2)\n{\n    struct rh_list_head *last = list1->prev;\n\n    list1->prev = list2->prev;\n    last->next = list2->next;\n\n    list1->prev->next = list1;\n    last->next->prev = last;\n}\n\n/* Cut list1 from the first entry up to, and including, the\n * position. Store the result in list2. */\nstatic inline void rh_list_cut_head(struct rh_list_head *list1,\n                                    struct rh_list_head *pos,\n                                    struct rh_list_head *list2)\n{\n    list2->next = list1->next;\n    list2->prev = pos;\n\n    list1->next = pos->next;\n    list1->next->prev = list1;\n\n    list2->next->prev = list2;\n    list2->prev->next = list2;\n}\n\n/* return non-zero if the list is empty. */\nstatic inline int rh_list_empty(const struct rh_list_head *head)\n{\n    return head->next == head;\n}\n\n/* Return a pointer to the structure containing a list element. */\n#define rh_list_entry(ptr, type, member) \\\n    ((type *)((char *)(ptr) -(unsigned long)(&((type *)0)->member)))\n\n/* Return a pointer to the first entry in the list. */\n#define rh_list_first_entry(ptr, type, member) \\\n    rh_list_entry((ptr)->next, type, member)\n\n/* Return a pointer to the last entry in the list. */\n#define rh_list_last_entry(ptr, type, member) \\\n    rh_list_entry((ptr)->prev, type, member)\n\n/* Iterate over a list. l is the cursor. */\n#define rh_list_for_each_entry(l, head, member)              \\\n    for (l = rh_list_entry((head)->next, typeof(*l), member); \\\n         &l->member != (head);                               \\\n         l = rh_list_entry(l->member.next, typeof(*l), member))\n\n/* Iterate over a list. l is the cursor. 
*/\n#define rh_list_for_each_entry_reverse(l, head, member)         \\\n    for (l = rh_list_entry((head)->prev, typeof(*l), member);   \\\n         &l->member != (head);                                  \\\n         l = rh_list_entry(l->member.prev, typeof(*l), member))\n\n/* Iterate over a list in reverse. l is the cursor, tmp stores the\n * next entry. l can be removed during the iteration. */\n#define rh_list_for_each_entry_safe_reverse(l, tmp, head, member)       \\\n    for (l = rh_list_entry((head)->prev, typeof(*l), member),           \\\n             tmp = rh_list_entry(l->member.prev, typeof(*l), member);   \\\n         &l->member != (head);                                          \\\n         l = tmp,                                                       \\\n             tmp = rh_list_entry(l->member.prev, typeof(*l), member))\n\n/* Iterate in a list starting after any element in it. */\n#define rh_list_for_each_entry_after(l, head, start, member)   \\\n    for (l =  rh_list_entry(start->member.next, typeof(*l), member); \\\n        &l->member != (head);                                  \\\n        l = rh_list_entry(l->member.next, typeof(*l), member))\n\n#endif /* _RH_LIST_H */\n"
  },
  {
    "path": "src/include/list_mgr.h",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2008, 2009 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n/**\n * \\file       list_mgr.h\n * \\brief      List Manager Module (interface to database).\n * \\addtogroup LIST_MANAGER\n * @{\n */\n\n#ifndef _LISTMGR_H\n#define _LISTMGR_H\n\n#include <string.h>\n#include <stdint.h>\n#include <stdbool.h>\n#include <glib.h>\n\n/* database specific types */\n#include \"db_types.h\"\n#include \"rbh_const.h\"\n#include \"config_parsing.h\"\n#include \"assert.h\"\n\n/* error codes */\n#define DB_SUCCESS              0\n/* XXX 1: unused */\n#define DB_NOT_EXISTS           2\n#define DB_ALREADY_EXISTS       3\n#define DB_END_OF_LIST          4\n#define DB_OUT_OF_DATE          5\n#define DB_CONNECT_FAILED       6\n#define DB_REQUEST_FAILED       7\n#define DB_BUFFER_TOO_SMALL     8\n#define DB_NO_MEMORY            9\n#define DB_ATTR_MISSING        10\n#define DB_NOT_SUPPORTED       11\n#define DB_INVALID_ARG         12\n#define DB_READ_ONLY_ATTR      13\n#define DB_NOT_ALLOWED         14\n#define DB_TRG_NOT_EXISTS      15\n#define DB_DEADLOCK            16\n#define DB_BAD_SCHEMA          17\n#define DB_NEED_ALTER          18\n#define DB_RBH_SIG_SHUTDOWN    19\n\nstatic inline const char *lmgr_err2str(int err)\n{\n    switch (err) {\n    case DB_SUCCESS:\n        return \"success\";\n    case DB_NOT_EXISTS:\n        return \"entry doesn't exist\";\n    case DB_ALREADY_EXISTS:\n        return \"entry already exists\";\n    case DB_END_OF_LIST:\n        return \"end of list\";\n    case DB_OUT_OF_DATE:\n        return \"obsolete info\";\n    case DB_CONNECT_FAILED:\n        
return \"connection failure\";\n    case DB_REQUEST_FAILED:\n        return \"request error\";\n    case DB_BUFFER_TOO_SMALL:\n        return \"buffer is too small\";\n    case DB_NO_MEMORY:\n        return \"out of memory\";\n    case DB_ATTR_MISSING:\n        return \"missing attribute\";\n    case DB_NOT_SUPPORTED:\n        return \"operation not supported\";\n    case DB_INVALID_ARG:\n        return \"invalid argument\";\n    case DB_READ_ONLY_ATTR:\n        return \"read-only attribute\";\n    case DB_NOT_ALLOWED:\n        return \"operation not allowed\";\n    case DB_TRG_NOT_EXISTS:\n        return \"trigger doesn't exist\";\n    case DB_DEADLOCK:\n        return \"deadlock or timeout\";\n    case DB_BAD_SCHEMA:\n        return \"invalid DB schema\";\n    case DB_NEED_ALTER:\n        return \"schema needs to be altered\";\n    case DB_RBH_SIG_SHUTDOWN:\n        return \"robinhood signal shutdown\";\n    default:\n        return \"unknown error\";\n    }\n}\n\n/* Tag in logfile */\n#define LISTMGR_TAG     \"ListMgr\"\n\ntypedef enum {\n    RS_FILE_OK      = 0, /* non-empty file/symlink can be recovered */\n    RS_FILE_DELTA   = 1,  /* file recovered at previous version */\n    RS_FILE_EMPTY   = 2,  /* empty file recovered */\n    RS_NON_FILE     = 3,    /* non-file recovered */\n    RS_NOBACKUP     = 4,    /* entry can't be recovered: no backup */\n    RS_ERROR        = 5,   /* recovery error */\n    RS_COUNT\n} recov_status_t;\n\n#define DB_IS_NULL(_p_v) (((_p_v)->type == DB_TEXT) \\\n    && ((_p_v)->value_u.val_str == NULL))\n\n#define LIST_SEP_CHAR    '+'\n#define LIST_SEP_STR     \"+\"\n/** for use in printf (in case it includes special char) */\n#define LIST_SEP_STR_ESC  LIST_SEP_STR\n\n/* String representation in database (not in config file)\n *\n * When adding a new type, fix the database enum in\n * listmgr_init.c:append_field_def() */\n#define STR_TYPE_LINK   \"symlink\"\n#define STR_TYPE_DIR    \"dir\"\n#define STR_TYPE_FILE   \"file\"\n#define 
STR_TYPE_CHR    \"chr\"\n#define STR_TYPE_BLK    \"blk\"\n#define STR_TYPE_FIFO   \"fifo\"\n#define STR_TYPE_SOCK   \"sock\"\n\n/* application specific types:\n * these includes MUST define:\n * - entry_id_t type\n * - entry_id_equal( entry_id_t * p_id1, entry_id_t * p_id2) macro or function\n * - entry_info_t type\n * - field_info_t field_infos[] array\n */\n#include \"db_schema.h\"\n\ntypedef union {\n    const char         *val_str;\n    int                 val_int;\n    unsigned int        val_uint;\n    short               val_short;\n    unsigned short      val_ushort;\n    long long           val_bigint;\n    unsigned long long  val_biguint;\n    bool                val_bool;\n    entry_id_t          val_id;\n} db_type_u;\n\n/** value from DB with the associated type */\ntypedef struct db_value_t {\n    db_type_e  type;\n    db_type_u  value_u;\n} db_value_t;\n\n/** table switch */\ntypedef enum {\n    TAB_MAIN,\n    TAB_ANNEX,\n    TAB_STRIPE_INFO,\n    TAB_STRIPE_ITEMS,\n    TAB_VARS,\n    TAB_IDMAP\n} db_tables_t;\n\ntypedef enum {\n    OPIDX_GET,\n    OPIDX_INSERT,\n    OPIDX_UPDATE,\n    OPIDX_RM,\n\n    OPCOUNT\n} op_idx_e;\n\n/** Connection related information for a thread */\ntypedef struct lmgr_t {\n    db_conn_t       conn;\n    unsigned int    last_commit;   /*< 0 if last operation was committed */\n    bool            force_commit;  /*< force commit on next operation */\n    unsigned int    retry_delay;   /*< current retry delay */\n    unsigned int    retry_count;   /*< nbr of retries */\n    struct timeval  first_error; /*< time of first retried error */\n    const char     *last_err_func;  /*< function of the last error */\n    int             last_err_line;  /*< line of the last error */\n\n    /* operation statistics */\n    unsigned int    nbop[OPCOUNT];\n\n} lmgr_t;\n\n/** List manager configuration */\ntypedef struct lmgr_config_t {\n    db_config_t     db_config;\n    unsigned int    commit_behavior;   /* 0: autocommit, 1: commit every\n 
                           transaction, <n>: commit every <n> transactions */\n    time_t connect_retry_min;   /* min retry delay when connection is lost */\n    time_t connect_retry_max;   /* max retry delay when connection is lost */\n\n    /** enable accounting */\n    bool            acct;\n} lmgr_config_t;\n\n/** config handlers */\nextern mod_cfg_funcs_t lmgr_cfg_hdlr;\n\n/** Cancel SQL retries flag.\n * Set in SIGTERM handler.\n */\nextern volatile bool lmgr_cancel_retry;\n\n/** indicate if batched requests can be done simultaneously\n * (risk of deadlock on ACCT table).\n */\nbool lmgr_parallel_batches(void);\n\n/** Container to associate an ID with its pathname. */\ntypedef struct wagon {\n    entry_id_t   id;\n    char        *fullname;\n} wagon_t;\n\n/* opaque types */\nstruct lmgr_iterator_t;\nstruct lmgr_report_t;\nstruct lmgr_profile_t;\nstruct lmgr_rm_list_t;\n\n/** Options for iterators */\ntypedef struct lmgr_iter_opt_t {\n    unsigned int list_count_max;    /* max entries to be returned by iterator or\n                                       report */\n    unsigned int force_no_acct:1;   /* don't use acct table for reports */\n    unsigned int allow_no_attr:1;   /* allow returning entries if no attr is\n                                       available */\n} lmgr_iter_opt_t;\n\n#define LMGR_ITER_OPT_INIT {.list_count_max = 0, .force_no_acct = 0, \\\n                            .allow_no_attr = 0}\n\ntypedef struct attr_mask {\n    uint32_t std;     /**< standard attribute mask */\n    uint32_t status;  /**< status attribute mask */\n    uint64_t sm_info; /**< attribute mask of status managers info */\n} attr_mask_t;\n\n/** helper to display masks */\n#define DMASK \"%#\"PRIX32\"/%#\"PRIX32\"/%#\"PRIX64\n#define PMASK(_pm) (_pm)->std, (_pm)->status, (_pm)->sm_info\n\nstatic const attr_mask_t null_mask = { 0 };\n\n/** indicate if no bit is set in the attr mask\n * get the whole mask structure, as it can be convenient\n * to test the output of 
attr_mask_and.\n */\nstatic inline bool attr_mask_is_null(const attr_mask_t mask)\n{\n    return (mask.std == 0 && mask.status == 0 && mask.sm_info == 0);\n}\n\n/** indicate attrs mask equals */\nstatic inline bool attr_mask_equal(const attr_mask_t *mask1,\n                                   const attr_mask_t *mask2)\n{\n    return (mask1->std == mask2->std) && (mask1->status == mask2->status)\n        && (mask1->sm_info == mask2->sm_info);\n}\n\n/** logical AND of 2 masks */\nstatic inline attr_mask_t attr_mask_and(const attr_mask_t *mask1,\n                                        const attr_mask_t *mask2)\n{\n    attr_mask_t mask_out;\n\n    mask_out.std = mask1->std & mask2->std;\n    mask_out.status = mask1->status & mask2->status;\n    mask_out.sm_info = mask1->sm_info & mask2->sm_info;\n\n    return mask_out;\n}\n\n/** remove bits from second mask */\nstatic inline attr_mask_t attr_mask_and_not(const attr_mask_t *mask1,\n                                            const attr_mask_t *mask2)\n{\n    attr_mask_t mask_out;\n\n    mask_out.std = mask1->std & ~mask2->std;\n    mask_out.status = mask1->status & ~mask2->status;\n    mask_out.sm_info = mask1->sm_info & ~mask2->sm_info;\n\n    return mask_out;\n}\n\n/** add missing bits from second mask */\nstatic inline attr_mask_t attr_mask_or_not(const attr_mask_t *mask1,\n                                           const attr_mask_t *mask2)\n{\n    attr_mask_t mask_out;\n\n    mask_out.std = mask1->std | ~mask2->std;\n    mask_out.status = mask1->status | ~mask2->status;\n    mask_out.sm_info = mask1->sm_info | ~mask2->sm_info;\n\n    return mask_out;\n}\n\n/** logical OR of 2 masks */\nstatic inline attr_mask_t attr_mask_or(const attr_mask_t *mask1,\n                                       const attr_mask_t *mask2)\n{\n    attr_mask_t mask_out;\n\n    mask_out.std = mask1->std | mask2->std;\n    mask_out.status = mask1->status | mask2->status;\n    mask_out.sm_info = mask1->sm_info | mask2->sm_info;\n\n    return 
mask_out;\n}\n\n/** part of attr index that contains flags */\n#define ATTR_INDEX_FLG_MASK     0xFF000000\n/** flags included to index value for status and sm_info */\n#define ATTR_INDEX_FLG_STATUS   0x01000000\n#define ATTR_INDEX_FLG_SMINFO   0x02000000\n/** specific value for getting entry count report */\n#define ATTR_INDEX_FLG_COUNT    0x04000000\n/** unspecified attribute index */\n#define ATTR_INDEX_FLG_UNSPEC   0x08000000\n\n/** convert an attribute index to the index in status array */\nstatic inline unsigned int attr2status_index(unsigned int index)\n{\n    assert(index & ATTR_INDEX_FLG_STATUS);\n    return index & ~ATTR_INDEX_FLG_STATUS;\n}\n\n/** convert an attribute index to the index in sm_info array */\nstatic inline unsigned int attr2sminfo_index(unsigned int index)\n{\n    assert(index & ATTR_INDEX_FLG_SMINFO);\n    return index & ~ATTR_INDEX_FLG_SMINFO;\n}\n\n/** Set of attributes for a FS entry */\ntypedef struct attr_set_t {\n    /** attributes in the structure */\n    attr_mask_t     attr_mask;\n    /** associated values */\n    entry_info_t    attr_values;\n} attr_set_t;\n\n/** static attr_set_t initializer.\n * First item is a structure.\n */\n#define ATTR_SET_INIT { {0} }\n\n/** initialize attr mask */\nstatic inline void ATTR_MASK_INIT(attr_set_t *p_set)\n{\n    memset(&p_set->attr_mask, 0, sizeof(p_set->attr_mask));\n}\n\n/** callback function for 'attrs_for_each'\n * the iteration stops if callback function returns < 0\n */\ntypedef int (*attr_cb_t) (unsigned int attr_index, void *args);\n\n/** iterate on all attributes */\nint attrs_for_each(attr_cb_t func, void *args);\n\n/** iterator on attr indexes\n * @param init   initial value of the iteration.\n * @param cookie must initially store -1\n * @return next iterator value, -1 when the loop ends.\n */\nint attr_index_iter(unsigned int init, int *cookie);\n\nextern unsigned int sm_inst_count;  /* defined in 'status_manager.c' */\nextern unsigned int sm_attr_count;  /* defined in 
'status_manager.c' */\n\nstatic inline bool attr_mask_test_index(const attr_mask_t *p_mask,\n                                        unsigned int index)\n{\n    if (index & ATTR_INDEX_FLG_STATUS) {\n        assert(attr2status_index(index) < sm_inst_count);\n\n        /* remove the bit and test in status mask */\n        return p_mask->status & (1 << attr2status_index(index));\n    } else if (index & ATTR_INDEX_FLG_SMINFO) {\n        assert(attr2sminfo_index(index) < sm_attr_count);\n\n        /* remove the bit and test in sm_info mask */\n        return p_mask->sm_info & (1LL << attr2sminfo_index(index));\n    } else {\n        assert(index < (sizeof(p_mask->std) * CHAR_BIT));\n        /* test standard mask */\n        return p_mask->std & (1 << index);\n    }\n}\n\n#include <inttypes.h>\nstatic inline void attr_mask_set_index(attr_mask_t *mask, unsigned int index)\n{\n    if (index & ATTR_INDEX_FLG_STATUS) {\n        assert(attr2status_index(index) < sm_inst_count);\n\n        /* remove the flag and set bit in status mask */\n        mask->status |= (1 << attr2status_index(index));\n    } else if (index & ATTR_INDEX_FLG_SMINFO) {\n        assert(attr2sminfo_index(index) < sm_attr_count);\n\n        /* remove the flag and set bit in sm_info mask */\n        mask->sm_info |= (1LL << attr2sminfo_index(index));\n    } else {\n        assert(index < (sizeof(mask->std) * CHAR_BIT));\n\n        /* set standard mask */\n        mask->std |= (1 << index);\n    }\n}\n\nstatic inline void attr_mask_unset_index(attr_mask_t *mask, unsigned int index)\n{\n    if (index & ATTR_INDEX_FLG_STATUS) {\n        assert(attr2status_index(index) < sm_inst_count);\n\n        /* remove the flag and unset bit in status mask */\n        mask->status &= ~(1 << attr2status_index(index));\n    } else if (index & ATTR_INDEX_FLG_SMINFO) {\n        assert(attr2sminfo_index(index) < sm_attr_count);\n\n        /* remove the flag and unset bit in sm_info mask */\n        mask->sm_info &= ~(1LL << 
attr2sminfo_index(index));\n    } else {\n        assert(index < (sizeof(mask->std) * CHAR_BIT));\n\n        /* set standard mask */\n        mask->std &= ~(1 << index);\n    }\n}\n\n#define ATTR_MASK_SET(_p_set, _attr_name) \\\n        ((_p_set)->attr_mask.std |= ATTR_MASK_##_attr_name)\n#define ATTR_MASK_UNSET(_p_set, _attr_name) \\\n        ((_p_set)->attr_mask.std &= ~ATTR_MASK_##_attr_name)\n#define ATTR_MASK_TEST(_p_set, _attr_name) \\\n        !!((_p_set)->attr_mask.std & ATTR_MASK_##_attr_name)\n#define ATTR(_p_set, _attr_name) ((_p_set)->attr_values._attr_name)\n\n/* status mask is in a dedicated mask */\n#define SMI_MASK(_smi_idx)  (1 << (_smi_idx))\n#define ATTR_MASK_STATUS_SET(_p_set, _smi_idx) \\\n        ((_p_set)->attr_mask.status |= SMI_MASK(_smi_idx))\n#define ATTR_MASK_STATUS_UNSET(_p_set, _smi_idx) \\\n        ((_p_set)->attr_mask.status &= ~ SMI_MASK(_smi_idx))\n#define ATTR_MASK_STATUS_TEST(_p_set, _smi_idx) \\\n        !!((_p_set)->attr_mask.status & SMI_MASK(_smi_idx))\n#define STATUS_ATTR(_p_set, _smi_idx) \\\n        ((_p_set)->attr_values.sm_status[(_smi_idx)])\n\n/* policy specific attributes are in a dedicated mask */\n#define ATTR_MASK_INFO_SET(_p_set, _smi, _attr_idx) \\\n    ((_p_set)->attr_mask.sm_info |= smi_info_bit((_smi), (_attr_idx)))\n#define ATTR_MASK_INFO_UNSET(_p_set, _smi, _attr_idx) \\\n    ((_p_set)->attr_mask.sm_info &= ~smi_info_bit((_smi), (_attr_idx)))\n#define ATTR_MASK_INFO_TEST(_p_set, _smi, _attr_idx) \\\n    !!((_p_set)->attr_mask.sm_info & smi_info_bit((_smi), (_attr_idx)))\n#define SMI_INFO(_p_set, _smi, _attr_idx) \\\n    ((_p_set)->attr_values.sm_info[(_smi)->sm_info_offset+(_attr_idx)])\n\n#define POSIX_ATTR_MASK (ATTR_MASK_size | ATTR_MASK_blocks | ATTR_MASK_uid \\\n                         | ATTR_MASK_gid | ATTR_MASK_last_access \\\n                         | ATTR_MASK_last_mod | ATTR_MASK_type \\\n                         | ATTR_MASK_mode | ATTR_MASK_nlink \\\n                         | 
ATTR_MASK_last_mdchange)\n\n/** unset read-only attributes from mask */\nvoid attr_mask_unset_readonly(attr_mask_t *mask);\n\n/** comparators for filters */\ntypedef enum {\n    EQUAL,\n    NOTEQUAL,\n    LESSTHAN,\n    MORETHAN,\n    LESSTHAN_STRICT,\n    MORETHAN_STRICT,\n    LIKE,\n    UNLIKE,\n    RLIKE,\n    ILIKE,      /* case insensitive */\n    IUNLIKE,    /* case insensitive */\n    IN,\n    NOTIN,\n    ISNULL,\n    NOTNULL,\n} filter_comparator_t;\n\n/** filter values associated to db_type field in field_infos array */\ntypedef struct value_list {\n    unsigned int    count;\n    db_type_u      *values;\n} value_list_t;\n\ntypedef union filter_value {\n    db_type_u       value;\n    value_list_t    list;\n} filter_value_t;\n\n#define FV_NULL { {NULL} }\n\n/** simple filter definition */\ntypedef struct lmgr_simple_filter_t {\n    unsigned int         filter_count;\n\n    int                 *filter_flags;\n    unsigned int        *filter_index;\n    filter_comparator_t *filter_compar;\n    filter_value_t      *filter_value;\n\n    /** for internal memory management */\n    unsigned int         prealloc;\n} lmgr_simple_filter_t;\n\n/** Sort types */\ntypedef enum {\n    SORT_NONE, /**< no sorting */\n    SORT_ASC,  /**< sort from lower value to higher */\n    SORT_DESC  /**< sort from higher value to lower */\n} sort_order_t;\n\n/* needed here for defining filters, obj_type_t... 
*/\n#include \"policy_rules.h\"\n\n/** string representation in DB */\nstatic const char *type_db_name[] = {\n    NULL,\n    STR_TYPE_LINK,\n    STR_TYPE_DIR,\n    STR_TYPE_FILE,\n    STR_TYPE_CHR,\n    STR_TYPE_BLK,\n    STR_TYPE_FIFO,\n    STR_TYPE_SOCK\n};\n\nstatic const inline char *type2db(obj_type_t type)\n{\n    if (type > TYPE_SOCK)\n        return type_db_name[TYPE_NONE];\n\n    return type_db_name[type];\n}\n\nstatic inline obj_type_t db2type(const char *str)\n{\n    obj_type_t i;\n\n    for (i = TYPE_NONE + 1; i <= TYPE_SOCK; i++) {\n        if (!strcasecmp(str, type_db_name[i]))\n            return i;\n    }\n    return TYPE_NONE;\n}\n\n/** generic filter type */\ntypedef struct lmgr_filter_t {\n    enum { FILTER_SIMPLE, FILTER_BOOLEXPR } filter_type;\n    union {\n        lmgr_simple_filter_t simple_filter;\n        struct bool_node_t *boolean_expr;   /* not supported yet */\n    } filter_u;\n\n} lmgr_filter_t;\n\n/* for cleaner code */\n#define filter_simple   filter_u.simple_filter\n#define filter_boolexpr filter_u.boolean_expr\n\n/** specifies result order */\ntypedef struct lmgr_sort_type_t {\n    unsigned int    attr_index;\n    sort_order_t    order;\n} lmgr_sort_type_t;\n\n/* -------- Main functions -------- */\n\nenum lmgr_init_flags {\n    LIF_REPORT_ONLY = (1 << 0), /**< report only, no action on DB schema */\n    LIF_ALTER_DB    = (1 << 1), /**< allow altering DB (insert/drop fields) */\n    LIF_ALTER_NODISP = (1 << 2), /**< INTERNAL USE ONLY */\n};\n\n/** Initialize the List Manager */\nint ListMgr_Init(enum lmgr_init_flags flags);\n\n/** Create a connection to the database for current thread */\nint ListMgr_InitAccess(lmgr_t *p_mgr);\n\n/** Close a connection to the database */\nint ListMgr_CloseAccess(lmgr_t *p_mgr);\n\n/**\n * Set force commit behavior.\n * Default is false;\n */\nvoid ListMgr_ForceCommitFlag(lmgr_t *p_mgr, bool force_commit);\n\n/**\n * Check if the last operation was really committed\n * @return true if the last 
operation has been committed,\n * @return false if commit is deferred.\n */\nbool ListMgr_GetCommitStatus(lmgr_t *p_mgr);\n\n/**\n * Tests if this entry exists in the database.\n * @param p_mgr pointer to a DB connection\n * @param p_id pointer to an entry identifier\n * @return 1 if entry exists\n * @return 0 if doesn't exist\n * @return a negative value on error\n */\nint ListMgr_Exists(lmgr_t *p_mgr, const entry_id_t *p_id);\n\n#ifdef _LUSTRE\n/**\n * Check that validator is matching for the given entry.\n * @param p_mgr pointer to a DB connection\n * @param p_id pointer to an entry identifier (including validator)\n * @param validator VALID_EXISTS, VALID_NOSTRIPE, or validator value.\n * @return DB_OUT_OF_DATE if stripe doesn't match, and remove stripe info.\n * @return DB_NOT_EXISTS  if there is no stripe info available.\n * @return DB_SUCCESS     if stripe is valid.\n */\n#define VALID_EXISTS -2 /* check if the stripe exists */\n#define VALID_NOSTRIPE -1   /* check if the stripe is empty */\nint ListMgr_CheckStripe(lmgr_t *p_mgr, const entry_id_t *p_id, int validator);\n\nvoid free_stripe_items(stripe_items_t *p_stripe_items);\n\n#endif\n\n/**\n * Retrieves an entry from database.\n */\nint ListMgr_Get(lmgr_t *p_mgr, const entry_id_t *p_id, attr_set_t *p_info);\n\n/**\n * Retrieve the FID from the database given the parent FID and the\n * file name.\n */\nint ListMgr_Get_FID_from_Path(lmgr_t *p_mgr, const entry_id_t *parent_fid,\n                              const char *name, entry_id_t *fid);\n\n/**\n * Releases resources of an attr set.\n */\nvoid ListMgr_FreeAttrs(attr_set_t *p_attrs);\n\n/**\n * Inserts a new entry to the database.\n */\nint ListMgr_Insert(lmgr_t *p_mgr, entry_id_t *p_id,\n                   attr_set_t *p_info, bool update_if_exists);\n/**\n * Insert a batch of entries into the database.\n * All entries must have the same attr mask.\n */\nint ListMgr_BatchInsert(lmgr_t *p_mgr, entry_id_t **p_ids,\n                        attr_set_t 
**p_attrs, unsigned int count,\n                        bool update_if_exists);\n\n/**\n * Modifies an existing entry in the database.\n */\nint ListMgr_Update(lmgr_t *p_mgr, const entry_id_t *p_id,\n                   const attr_set_t *p_update_set);\n\n/**\n * Applies a modification to all entries that match the specified filter.\n */\nint ListMgr_MassUpdate(lmgr_t *p_mgr, const lmgr_filter_t *p_filter,\n                       const attr_set_t *p_attr_set);\n\n/** remove callback function */\ntypedef void (*rm_cb_func_t) (const entry_id_t *);\n\n/**\n * Removes a name from the database. Remove the entry if last is true.\n */\nint ListMgr_Remove(lmgr_t *p_mgr, const entry_id_t *p_id,\n                   const attr_set_t *p_attr_set, bool last);\n\n/**\n * Removes all entries that match the specified filter.\n */\nint ListMgr_MassRemove(lmgr_t *p_mgr, const lmgr_filter_t *p_filter,\n                       rm_cb_func_t);\n\n/**\n * Atomically replace an entry with another, and relink childs in the namespace if needed.\n */\nint ListMgr_Replace(lmgr_t *p_mgr, entry_id_t *old_id, attr_set_t *old_attrs,\n                    entry_id_t *new_id, attr_set_t *new_attrs,\n                    bool src_is_last, bool update_target_if_exists);\n\n/**\n * Soft Rm functions.\n * \\addtogroup SOFT_RM_FUNCTIONS\n * @{\n */\n\n/**\n * Remove an entry from the main database, and insert it to secondary table\n * for delayed removal.\n * \\param p_old_attrs contains rm_time\n */\nint ListMgr_SoftRemove(lmgr_t *p_mgr, const entry_id_t *p_id,\n                       attr_set_t *p_old_attrs);\n\n/**\n * Soft remove a set of entries according to a filter.\n */\nint ListMgr_MassSoftRemove(lmgr_t *p_mgr, const lmgr_filter_t *p_filter,\n                           time_t rm_time, rm_cb_func_t);\n\n/**\n * Definitely remove an entry from the delayed removal table.\n */\nint ListMgr_SoftRemove_Discard(lmgr_t *p_mgr, const entry_id_t *p_id);\n\n/**\n * Initialize a list of items removed 'softly', 
sorted by expiration time.\n * Selecting 'expired' entries is done using an rm_time criteria in p_filter\n */\nstruct lmgr_rm_list_t *ListMgr_RmList(lmgr_t *p_mgr, lmgr_filter_t *filter,\n                                      const lmgr_sort_type_t *p_sort_type);\n\n/**\n * Get next entry to be removed.\n */\nint ListMgr_GetNextRmEntry(struct lmgr_rm_list_t *p_iter,\n                           entry_id_t *p_id, attr_set_t *p_attrs);\n\n/**\n * Releases rmlist resources.\n */\nvoid ListMgr_CloseRmList(struct lmgr_rm_list_t *p_iter);\n\n/**\n * Get entry to be removed from its fid.\n */\nint ListMgr_GetRmEntry(lmgr_t *p_mgr, const entry_id_t *p_id,\n                       attr_set_t *p_attrs);\n\n/** @} */\n\n/**\n * Create a (persitent) table to tag entries.\n * \\param filter indicate this applies to a restricted set of entries.\n * \\param reset indicate if the table is cleaned in case it already exists.\n */\nint ListMgr_CreateTag(lmgr_t *p_mgr, const char *tag_name,\n                      lmgr_filter_t *p_filter, bool reset);\n/** destroy a tag */\nint ListMgr_DestroyTag(lmgr_t *p_mgr, const char *tag_name);\n\n/**\n * Tag an entry (in the set specified by CreateTag filter)\n */\nint ListMgr_TagEntry(lmgr_t *p_mgr, const char *tag_name,\n                     const entry_id_t *p_id);\n/**\n * Return an iterator on non-tagged entries (in the set specified by CreateTag filter)\n */\nstruct lmgr_iterator_t *ListMgr_ListUntagged(lmgr_t *p_mgr,\n                                             const char *tag_name,\n                                             const lmgr_iter_opt_t *p_opt);\n\n#ifdef _HSM_LITE\n\n/**\n * Filesystem recovery from backup.\n * \\addtogroup RECOVERY_FUNCTIONS\n * @{\n */\n\ntypedef struct _lmgr_recov_stat {\n    unsigned long long  total;\n\n    /* recovery status count */\n    unsigned long long  status_count[RS_COUNT];\n    uint64_t            status_size[RS_COUNT];\n} lmgr_recov_stat_t;\n\n/* Filesystem recovery functions  */\n\n/**\n *  
Initialize a recovery process.\n *  \\param p_filter[in] (optional) filter partial filesystem recovery\n *  \\retval DB_SUCCESS the recovery process successfully started;\n *          the stats indicate the recovery states we can expect.\n *  \\retval DB_ALREADY_EXISTS a recovery process already started\n *          and was not properly completed.\n *  \\retval error   another error occurred.\n */\nint ListMgr_RecovInit(lmgr_t *p_mgr, const lmgr_filter_t *p_filter,\n                      lmgr_recov_stat_t *p_stats);\n\n/**\n * Clear the recovery table.\n * /!\\ all previously unrecovered entries will be lost\n */\nint ListMgr_RecovReset(lmgr_t *p_mgr);\n\n/**\n *  List entries by recovery status.\n *  \\param st type of entries to be listed\n *  (done, failed, to be done, all)\n */\ntypedef enum { RT_ALL, RT_TODO, RT_DONE, RT_FAILED } recov_type_e;\nstruct lmgr_iterator_t *ListMgr_RecovList(lmgr_t *p_mgr, recov_type_e st);\n\n/**\n *  Continue a recovery process (returns an iterator on entry list),\n *  possibly using the specified filter.\n *  \\retval iterator must be release using ListMgr_CloseIterator()\n */\nstruct lmgr_iterator_t *ListMgr_RecovResume(lmgr_t *p_mgr, const char *dir_path,\n                       bool retry, /* also retry previously erroneous entries */\n                       const lmgr_iter_opt_t *p_opt);\n\n/** @param last_status last status of the entry (-1: not processed yet) */\nint ListMgr_RecovGetNext(struct lmgr_iterator_t *p_iter, entry_id_t *p_id,\n                         attr_set_t *p_info, recov_status_t *last_status);\n\nint ListMgr_RecovComplete(lmgr_t *p_mgr, lmgr_recov_stat_t *p_stats);\n\nint ListMgr_RecovStatus(lmgr_t *p_mgr, lmgr_recov_stat_t *p_stats);\n\nint ListMgr_RecovSetState(lmgr_t *p_mgr, const entry_id_t *p_id,\n                          recov_status_t status);\n\n/** @} */\n\n#elif defined(HAVE_RM_POLICY)\n/* only keep fullpath by default */\n#if 0\n#define SOFTRM_MASK (POSIX_ATTR_MASK | ATTR_MASK_fullpath | 
ATTR_MASK_rm_time)\nPOSIX, fullpath, fields with REMOVED flag, fields asked by status manager.\n#endif\n#endif\n/**\n * Function for handling iterators.\n *\n * \\addtogroup ITERATOR_FUNCTIONS\n * @{\n */\n/**\n * Retrieves an iterator on entries that match the given filter.\n */\nstruct lmgr_iterator_t *ListMgr_Iterator(lmgr_t *p_mgr,\n                                         const lmgr_filter_t *p_filter,\n                                         const lmgr_sort_type_t *p_sort_type,\n                                         const lmgr_iter_opt_t *p_opt);\n/**\n * Get next entry from iterator.\n */\nint ListMgr_GetNext(struct lmgr_iterator_t *p_iter,\n                    entry_id_t *p_id, attr_set_t *p_info);\n\n/**\n * Release iterator resources.\n */\nvoid ListMgr_CloseIterator(struct lmgr_iterator_t *p_iter);\n\n/** @} */\n\n/**\n * Function for handling namespace and child entries\n *\n * \\addtogroup NAMESPACE_FUNCTIONS\n * @{\n */\n/**\n * Get the list of children of a given parent (or list of parents).\n * \\param parent_list       [in]  list of parents to get the child of\n * \\param parent_count      [in]  number of ids in parent list\n * \\param child_id_list     [out] array of child ids\n * \\param child_attr_list   [out] array of child attrs\n * \\param child_count       [out] number of returned children\n *\n * ListMgr_FreeAttrs() must be called on each child attribute\n * and child_id_list and child_attr_list must be freed with MemFree()\n */\nint ListMgr_GetChild(lmgr_t *p_mgr, const lmgr_filter_t *p_filter,\n                     const wagon_t *parent_list, unsigned int parent_count,\n                     attr_mask_t attr_mask,\n                     wagon_t **child, attr_set_t **child_attr_list,\n                     unsigned int *child_count);\n\n/** @} */\n\n/**\n * Types and functions for building reports about FS content.\n *\n * \\addtogroup REPORT_FUNCTIONS\n * @{\n */\n\n/** type of report that can be done on each attr */\ntypedef enum {\n    
/* 0 = no specific operation */\n    REPORT_MIN = 1,\n    REPORT_MAX,\n    REPORT_AVG,\n    REPORT_SUM,\n    REPORT_COUNT,   /* special: no attribute assigned */\n    REPORT_COUNT_DISTINCT,\n    REPORT_GROUP_BY\n} report_type_t;\n\n/** describe a report field */\ntypedef struct report_field_descr_t {\n    int                 attr_index; /* ATTR_INDEX_FLG_COUNT for count */\n    report_type_t       report_type;\n    sort_order_t        sort_flag;\n\n    bool                filter;   /**< is there a filter on this value ? */\n    filter_comparator_t filter_compar;\n    filter_value_t      filter_value;\n} report_field_descr_t;\n\n/* profile is based on LOG2(size)\n * -> FLOOR(LOG2(size)/5)\n */\n#define SZ_PROFIL_COUNT     10\n#define SZ_MIN_BY_INDEX(_i) (((_i) == 0) ? 0 : (1LL << (((_i) - 1) * 5)))\n/** size profile descriptor */\ntypedef struct size_range__ {\n    uint64_t  min_size;  /* max_size[i] is min_size[i+1] */\n    char     *title;\n} size_range_t;\n\nstatic const __attribute__ ((__unused__))\nsize_range_t size_range[SZ_PROFIL_COUNT] = {\n    {SZ_MIN_BY_INDEX(0), \"0\"},\n    {SZ_MIN_BY_INDEX(1), \"1~31\"},\n    {SZ_MIN_BY_INDEX(2), \"32~1K-\"},\n    {SZ_MIN_BY_INDEX(3), \"1K~31K\"},\n    {SZ_MIN_BY_INDEX(4), \"32K~1M-\"},\n    {SZ_MIN_BY_INDEX(5), \"1M~31M\"},\n    {SZ_MIN_BY_INDEX(6), \"32M~1G-\"},\n    {SZ_MIN_BY_INDEX(7), \"1G~31G\"},\n    {SZ_MIN_BY_INDEX(8), \"32G~1T-\"},\n    {SZ_MIN_BY_INDEX(9), \"+1T\"}\n};\n\n/** size profile values */\ntypedef struct size_profile__ {\n    uint64_t    file_count[SZ_PROFIL_COUNT];\n} size_profile_t;\n\ntypedef union {\n    size_profile_t  size;\n    /* TODO mtime, ... 
*/\n} profile_u;\n\n/** describe a profile field */\ntypedef struct profile_field_descr_t {\n    unsigned int attr_index;\n\n    /* sort range ratio */\n    unsigned int range_ratio_start; /* index of selected range ratio */\n    unsigned int range_ratio_len;   /* nbr of ranges in the selected range\n                                       (0=none) */\n    sort_order_t range_ratio_sort;  /* sort order for this range */\n} profile_field_descr_t;\n\n/**\n * Builds a report from database.\n */\nstruct lmgr_report_t *ListMgr_Report(lmgr_t *p_mgr,\n        const report_field_descr_t *report_desc_array,\n        unsigned int report_descr_count,\n        const profile_field_descr_t *profile_desc,   /* optional */\n        const lmgr_filter_t *p_filter,\n        const lmgr_iter_opt_t *p_opt);\n\n/**\n * Get next report entry.\n * @param p_value_count is IN/OUT parameter. IN: size of output array. OUT: nbr\n * of fields set in array.\n */\nint ListMgr_GetNextReportItem(struct lmgr_report_t *p_iter,\n                              db_value_t *p_value, unsigned int *p_value_count,\n                              profile_u * p_profile);\n\n/**\n * Releases report resources.\n */\nvoid ListMgr_CloseReport(struct lmgr_report_t *p_iter);\n\n/**\n * Get the number of entries in DB.\n */\nint ListMgr_EntryCount(lmgr_t *p_mgr, uint64_t *count);\n\n/**\n * Retrieve profile (on size, atime, mtime, ...)\n * (by status, by user, by group, ...)\n * @param profile_descr information about the attribute to be profiled\n * @param report_descr  information about other fields of the report\n                        (field to group on and field to sort on)\n * @param report_descr_count number of items in report_descr\n */\nstruct lmgr_profile_t *ListMgr_Profile(lmgr_t *p_mgr,\n                                       const profile_field_descr_t *\n                                       profile_descr,\n                                       const report_field_descr_t *\n                                
       report_descr,\n                                       unsigned int report_descr_count,\n                                       const lmgr_filter_t *p_filter,\n                                       const lmgr_iter_opt_t *p_opt);\n/**\n * Get next profile entry.\n * @param p_profile the profile structure\n * @param p_value array of values of report_descr\n * @param p_value_count is IN/OUT parameter. IN: size of output array. OUT: nbr of fields set in array.\n */\nint ListMgr_GetNextProfile(struct lmgr_profile_t *p_iter,\n                           profile_u * p_profile,\n                           db_value_t *p_value, unsigned int *p_value_count);\n\n/**\n * Releases profile resources.\n */\nvoid ListMgr_CloseProfile(struct lmgr_profile_t *p_iter);\n\n/** @} */\n\n/**\n * Persistent variable management\n *\n * \\addtogroup PERSISTENT_VARS_MGMT\n * @{\n */\n/*\n * Name of variables stored in database\n */\n#define LAST_POLICY_START_SUFFIX   \"_start\"\n#define LAST_POLICY_END_SUFFIX     \"_end\"\n#define LAST_POLICY_TRIGGER_SUFFIX \"_trigger\"  /* trigger type and target */\n#define LAST_POLICY_STATUS_SUFFIX  \"_status\" /* status & stats about last run */\n#define CURR_POLICY_START_SUFFIX   \"_start_current\"  /* start of current run */\n#define CURR_POLICY_TRIGGER_SUFFIX \"_trigger_current\" /* trigger of current run\n                                                       */\n\n#define FS_PATH_VAR         \"FS_Path\"\n#define ROOT_ID_VAR         \"RootId\"\n#define USAGE_MAX_VAR       \"MaxUsage\"\n#define SCAN_INTERVAL_VAR   \"ScanInterval\"\n#define NEXT_MAINT_VAR      \"NextMaintenance\"\n\n// Scan statistics\n#define LAST_SCAN_START_TIME  \"LastScanStartTime\"\n#define LAST_SCAN_END_TIME    \"LastScanEndTime\"\n#define LAST_SCAN_PROCESSING_END_TIME \"LastScanProcessingEndTime\"\n#define LAST_SCAN_STATUS      \"LastScanStatus\"\n#define LAST_SCAN_LAST_ACTION_TIME \"LastScanLastActionTime\"\n#define LAST_SCAN_ENTRIES_SCANNED  
\"LastScanEntriesScanned\"\n#define LAST_SCAN_ERRORS      \"LastScanErrors\"\n#define LAST_SCAN_TIMEOUTS    \"LastScanTimeouts\"\n#define LAST_SCAN_AVGMSPE     \"LastScanAvgMsPerEntry\"\n#define LAST_SCAN_CURMSPE     \"LastScanCurMsPerEntry\"\n#define LAST_SCAN_NB_THREADS  \"LastScanNbThreads\"\n\n#define PREV_SCAN_START_TIME  \"PrevScanStartTime\"\n#define PREV_SCAN_END_TIME    \"PrevScanEndTime\"\n\n#define SCAN_STATUS_DONE       \"done\"\n#define SCAN_STATUS_RUNNING    \"running\"\n#define SCAN_STATUS_ABORTED    \"aborted\"\n#define SCAN_STATUS_INCOMPLETE \"incomplete\"\n#define SCAN_STATUS_PARTIAL    \"partial\"\n\n/* Old changelog statitics */\n#define CL_LAST_READ_REC_ID_OLD   \"ChangelogLastId\"\n#define CL_LAST_READ_REC_TIME_OLD \"ChangelogLastRecTime\"\n#define CL_LAST_READ_TIME_OLD     \"ChangelogLastTime\"\n#define CL_DIFF_INTERVAL_OLD      \"ChangelogDiffInt\"\n/* Old CL counters: <prefix>_<event_name> */\n#define CL_COUNT_PREFIX_OLD       \"ChangelogCount\"\n#define CL_DIFF_PREFIX_OLD        \"ChangelogDiff\"\n/* format for this one was <prefix>_<mdt_name> */\n#define CL_LAST_COMMITTED_OLD     \"ChangelogLastCommit\"\n\n/* New changelog statitics.\n * Variable name is <name>_<mdt_name>\n * Format of value is rec_id:rec_time(epoch.us):step_time(epoch.us)\n */\n#define CL_LAST_READ_REC        \"CL_LastRead\"\n#define CL_LAST_PUSHED_REC      \"CL_LastPushed\"\n#define CL_LAST_COMMITTED_REC   \"CL_LastCommit\"\n#define CL_LAST_CLEARED_REC     \"CL_LastCleared\"\n#define CL_DIFF_INTERVAL        \"CL_DiffInt\"\n\n/* new CL counters:  <prefix>_<mdt_name>_<event_name> */\n#define CL_COUNT_PREFIX         \"CL_Count\"\n#define CL_DIFF_PREFIX          \"CL_Diff\"\n\n#define MAX_VAR_LEN     1024\n/**\n *  Gets variable value.\n */\nint ListMgr_GetVar(lmgr_t *p_mgr, const char *varname, char *value,\n                   int bufsize);\n\n/**\n *  Sets variable value.\n *  @param value size must not exceed 1024 (size of DB field).\n */\nint ListMgr_SetVar(lmgr_t 
*p_mgr, const char *varname, const char *value);\n\n/** @} */\n\n/**\n *  Functions for handling filters\n *\n * \\addtogroup FILTER_FUNCTIONS\n * @{\n */\n\n/** Initialize a simple filter structure */\nint lmgr_simple_filter_init(lmgr_filter_t *p_filter);\n\nenum filter_flags {\n    FILTER_FLAG_NOT     = (1 << 0), /**< negation of the current test */\n    FILTER_FLAG_OR      = (1 << 1), /**< use OR instead of AND\n                                         (which is the default) */\n    FILTER_FLAG_BEGIN   = (1 << 2), /**<  start a section with parenthesis */\n    FILTER_FLAG_END     = (1 << 3), /**< ends a section with parenthesis */\n    FILTER_FLAG_NOT_BEGIN = (1 << 4), /**< negate the whole expression until\n                                           NOT_END.\n                     * NOT_BEGIN is for expressions like: NOT ( <x> ...\n                     * and is to be terminated by NOT_END.\n                     * whereas BEGIN + NOT will result in (NOT (<x>) ...\n                     */\n    FILTER_FLAG_NOT_END    = (1 << 5), /**< terminates a NOT_BEGIN */\n    FILTER_FLAG_ALLOW_NULL = (1 << 6), /** null value is allowed to match\n                                           the condition */\n\n    FILTER_FLAG_ALLOC_STR  = (1 << 7), /** for internal usage: string in filter\n                                           is allocated */\n    FILTER_FLAG_ALLOC_LIST = (1 << 8), /** for internal usage: list in filter\n                                           is allocated */\n    FILTER_FLAG_BEGIN_BLOCK = (1 << 9), /**< start a section with parenthesis */\n    FILTER_FLAG_END_BLOCK   = (1 << 10), /**< ends a section with parenthesis */\n};\n\n/** Add a criteria to a simple filter */\nint lmgr_simple_filter_add(lmgr_filter_t *p_filter,\n                           unsigned int attr_index,\n                           filter_comparator_t comparator,\n                           filter_value_t value, enum filter_flags flag);\n\n/* check if the given attribute is part of a 
filter */\nint lmgr_filter_check_field(const lmgr_filter_t *p_filter,\n                            unsigned int attr_index);\n\n/**\n * Add a criteria to a simple filter or modify it if it already exists in the\n * filter\n */\nint lmgr_simple_filter_add_or_replace(lmgr_filter_t *p_filter,\n                                      unsigned int attr_index,\n                                      filter_comparator_t comparator,\n                                      filter_value_t value,\n                                      enum filter_flags flag);\n\n/**\n * Add a criteria to a simple filter if it does not already exist in the filter\n */\nint lmgr_simple_filter_add_if_not_exist(lmgr_filter_t *p_filter,\n                                        unsigned int attr_index,\n                                        filter_comparator_t comparator,\n                                        filter_value_t value,\n                                        enum filter_flags flag);\n\n/** release a filter structure */\nint lmgr_simple_filter_free(lmgr_filter_t *p_filter);\n\nstruct sm_instance;\nstruct time_modifier;\n\n/**\n * Convert simple expressions to ListMgr filter (append filter).\n * Imbrications of AND and OR filters produced by\n * convert_boolexpr_to_simple_filter() are only supported by listmgr_iterators.\n * Callers that use convert_boolexpr_to_simple_filter() must take care not using\n * \"OR\" expression if they are using other listmgr calls.\n * @param[in]     boolexpr  the boolean expression to be converted.\n * @param[in,out] filter    the output filter to be appended.\n * @param[in]     smi       the current status manager (if any).\n * @param[in]     time_mod  time modifier for maintenance mode.\n * @param[in]     flags     filter flags\n * @param[in]     op_ctx    default boolean operation\n */\nint convert_boolexpr_to_simple_filter(struct bool_node_t *boolexpr,\n                                      lmgr_filter_t *filter,\n                                      
const struct sm_instance *smi,\n                                      const struct time_modifier *time_mod,\n                                      enum filter_flags flags,\n                                      bool_op_t op_ctx);\n\n/** Set a complex filter structure */\nint lmgr_set_filter_expression(lmgr_filter_t *p_filter,\n                               struct bool_node_t *boolexpr);\n\n/**\n * Check that all fields in filter are in the given mask of supported attributes\n * @param index if not NULL, it is set to the index of the unsupported filter.\n *              and -1 for other errors.\n */\nint lmgr_check_filter_fields(lmgr_filter_t *p_filter, attr_mask_t attr_mask,\n                             int *index);\n\n/** Convert a set notation (eg. \"3,5-8,12\") to a list of values\n * \\param type[in] the type of output array (DB_INT, DB_UINT, ...)\n * \\param p_list[out] list of values (the function allocates a buffer for\n *                    p_list->values)\n */\nint lmgr_range2list(const char *set, db_type_e type, value_list_t *p_list);\n\n/** @} */\n\n/**\n * If p_target_attrset attributes are unset,\n * retrieve them from p_source_attrset.\n * \\param update if the attribute is set in both src and tgt,\n *        this boolean indicates if it must be updated in the target.\n */\nvoid ListMgr_MergeAttrSets(attr_set_t *p_target_attrset,\n                           const attr_set_t *p_source_attrset, bool update);\n\n/** return the mask of attributes that differ */\nattr_mask_t ListMgr_WhatDiff(const attr_set_t *p_tgt,\n                             const attr_set_t *p_src);\n\n/** print attribute value to display to the user\n * @param[in,out] str  Allocated GString to be appended.\n * @param quote string to quote string types (eg. 
\"'\").\n * @return 0 on success, a negative value on error.\n */\nint ListMgr_PrintAttr(GString *str, db_type_e type,\n                      const db_type_u *value_ptr, const char *quote);\n\n/**\n * Same as ListMgr_PrintAttr, except that the value is passed by pointer\n * instead of db_type_u.\n * @param[in,out] str  Allocated GString to be appended.\n * @return 0 on success, a negative value on error.\n */\nint ListMgr_PrintAttrPtr(GString *str, db_type_e type, void *value_ptr,\n                         const char *quote);\n\n/**\n * Generate fields automatically from already existing fields,\n * and check the target mask is satisfied.\n */\nint ListMgr_GenerateFields(attr_set_t *p_set, attr_mask_t target_mask);\n\n/** Check mask compatibility for request batching. */\nbool lmgr_batch_compat(attr_mask_t m1, attr_mask_t m2);\n\n/** Add begin or end block. */\nint lmgr_simple_filter_add_block(lmgr_filter_t *, enum filter_flags);\n\n/**\n * Check if conditions can be translated to SQL statement for DB query\n */\nbool cond2sql_ok(bool_node_t *boolexpr,\n                      const struct sm_instance *smi,\n                      const struct time_modifier *time_mod);\n\n/* return a sort_order_t or a negative value on error */\nint str2sort_order(const char *str);\n\n#endif\n\n/** @} */\n"
  },
  {
    "path": "src/include/lustre/lustre_errno.h",
    "content": ""
  },
  {
    "path": "src/include/lustre_extended_types.h",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2009 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n/**\n * \\file   lustre_extended_types.h\n * \\brief  Specific types for handling lustre data.\n */\n#ifndef _LUSTRE_EXTRA_TYPES_H\n#define _LUSTRE_EXTRA_TYPES_H\n\n#ifdef _LUSTRE\n#ifndef LPX64\n#define LPX64 \"%#llx\"\n#endif\n\n#ifndef LPX64i\n#define LPX64i \"%llx\"\n#endif\n\n#ifndef LPU64\n#define LPU64 \"%llu\"\n#endif\n\n#include <sys/types.h>\n#include <asm/types.h>\n#endif\n\n#include <assert.h>\n#define LASSERT assert\n\n#ifndef _LUSTRE_API_HEADER\n#include <lustre/liblustreapi.h>\n#else\n#include <lustre/lustreapi.h>\n#endif\n\n#ifndef LOV_MAX_STRIPE_COUNT\n/* in old lustre versions, this is not defined in lustre_user.h */\n#define LOV_MAX_STRIPE_COUNT 160\n#endif\n\n#ifndef DFID_NOBRACE\n#define DFID_NOBRACE LPX64\":0x%x:0x%x\"\n#endif\n\n#ifndef XATTR_NAME_LOV\n#define XATTR_NAME_LOV \"trusted.lov\"\n#endif\n\n#ifdef FID_LEN\n#define RBH_FID_LEN (2*FID_LEN)\n#else\n#define RBH_FID_LEN 64\n#endif\n\n/* missing prototypes in lustre1.8 */\n#if defined(HAVE_LLAPI_GETPOOL_INFO) && !defined(_HAVE_FID)\nextern int llapi_get_poollist(const char *name, char **poollist, int list_size,\n                              char *buffer, int buffer_size);\nextern int llapi_get_poolmembers(const char *poolname, char **members,\n                                 int list_size, char *buffer, int buffer_size);\n#endif\n\n#ifndef HAVE_OBD_STATFS\nstruct obd_statfs {\n    __u64 os_type;\n    __u64 os_blocks;\n    __u64 os_bfree;\n    __u64 os_bavail;\n    __u64 os_files;\n    __u64 os_ffree;\n    __u8 os_fsid[40];\n    
__u32 os_bsize;\n    __u32 os_namelen;\n    __u64 os_maxbytes;\n    __u32 os_state; /* positive error code on server */\n    __u32 os_spare1;\n    __u32 os_spare2;\n    __u32 os_spare3;\n    __u32 os_spare4;\n    __u32 os_spare5;\n    __u32 os_spare6;\n    __u32 os_spare7;\n    __u32 os_spare8;\n    __u32 os_spare9;\n};\n#endif\n\n#ifdef HAVE_CHANGELOGS\n\n#include <stdbool.h>\n\n/*\n * Untangle the various changes of Lustre userspace changelog\n * API. Originally, there was the \"struct changelog_rec\". Then the\n * \"struct changelog_ext_rec\" was added in Lustre 2.5, and all records\n * given to the applications were converted to that format by\n * liblustreapi. Then in Lustre 2.7, the commit 0f22e4 removed \"struct\n * changelog_ext_rec\" and introduced the flexible format.\n *\n * Define HAVE_FLEX_CL for 2.7 Lustre, use HAVE_CHANGELOG_EXTEND_REC\n * for Lustre 2.4->2.6, and nothing for the older versions.\n *\n * Add accessors to make sense of all that:\n *\n * rh_rename_one_record: if the changelog is a CL_RENAME,\n * rh_rename_one_record() will return false if it is followed by a\n * CL_EXT record. Since the LU-1331 fix, rename operations use only\n * one changelog record.\n *\n * rh_get_cl_cr_name(): return a pointer to cr_name\n */\n\n#if HAVE_DECL_CLF_RENAME\n/* Lustre 2.7 */\n#define CL_REC_TYPE struct changelog_rec\n#define HAVE_FLEX_CL    /* Flexible changelogs */\n\nstatic inline bool rh_is_rename_one_record(const struct changelog_rec *rec)\n{\n    return rec->cr_flags & CLF_RENAME;\n}\n\nstatic inline char *rh_get_cl_cr_name(const struct changelog_rec *rec)\n{\n    return changelog_rec_name((struct changelog_rec *)rec);\n}\n\n/* This doesn't make sense anymore but it is still defined by Lustre\n * 2.7. */\n#undef HAVE_CHANGELOG_EXTEND_REC\n\n#elif HAVE_CHANGELOG_EXTEND_REC\n/* Lustre 2.3 to 2.6. 
*/\n#define CL_REC_TYPE struct changelog_ext_rec\n\nstatic inline bool rh_is_rename_one_record(const struct changelog_ext_rec *rec)\n{\n    return rec->cr_flags & CLF_EXT_VERSION;\n}\n\nstatic inline char *rh_get_cl_cr_name(const struct changelog_ext_rec *rec)\n{\n    /* Don't use changelog_rec_name() because the cr_name has been\n     * moved by changelog_extend_rec(). So cr_name is always at the\n     * same spot, rename or not. */\n    return (char *)rec->cr_name;\n}\n\n#else\n/* Lustre 2.1 to 2.2 */\n#define CL_REC_TYPE struct changelog_rec\n\nstatic inline bool rh_is_rename_one_record(const struct changelog_rec *rec)\n{\n    return false;\n}\n\nstatic inline char *rh_get_cl_cr_name(const struct changelog_rec *rec)\n{\n    return (char *)rec->cr_name;\n}\n\n#endif\n\n#endif /* HAVE_CHANGELOGS */\n\n#ifndef LOV_PATTERN_F_RELEASED\n#define LOV_PATTERN_F_RELEASED  0x80000000  /* HSM released file */\n#endif\n\n#ifdef _HAVE_FID\n\n/* The following stuff is to decode link EA from userspace */\n\n#include <byteswap.h>\n#include <assert.h>\n\n/* undefined types in lustre_idl */\n#define be32_to_cpu(x) bswap_32(x)\n#define be64_to_cpu(x) (__u64)bswap_64(x)\n#define CLASSERT assert\n#define LASSERTF(a, b, c) assert(a)\ntypedef void *lnet_nid_t;\ntypedef time_t cfs_time_t;\n\n#ifdef _LUSTRE_IDL_HEADER\n/* lustre_idl.h references many undefined symbols\n * in functions or structures we don't need.\n * So ignore the warnings. 
*/\n#pragma GCC push_options\n#pragma GCC diagnostic ignored \"-Wimplicit-function-declaration\"\n#include <lustre/lustre_idl.h>\n#pragma GCC pop_options\n#else\n\n/* Workaround for lustre 2.6.0 (waiting for LU-3613):\n * if XATTR_NAME_LINK is not defined in Lustre headers,\n * we have to define it by ourselves until patch for LU-3613 lands.\n */\n#ifndef XATTR_NAME_LINK\n#define XATTR_NAME_LINK \"trusted.link\"\n#endif\n\nstruct link_ea_header {\n    __u32 leh_magic;\n    __u32 leh_reccount;\n    __u64 leh_len;  /* total size */\n    /* future use */\n    __u32 padding1;\n    __u32 padding2;\n};\n\n/** Hardlink data is name and parent fid.\n * Stored in this crazy struct for maximum packing and endian-neutrality\n */\nstruct link_ea_entry {\n        /** __u16 stored big-endian, unaligned */\n    unsigned char lee_reclen[2];\n    unsigned char lee_parent_fid[sizeof(struct lu_fid)];\n    char lee_name[0];\n} __attribute__ ((packed));\n\nstatic inline void fid_be_to_cpu(struct lu_fid *dst, const struct lu_fid *src)\n{\n    dst->f_seq = be64_to_cpu(src->f_seq);\n    dst->f_oid = be32_to_cpu(src->f_oid);\n    dst->f_ver = be32_to_cpu(src->f_ver);\n}\n\nenum fid_seq {\n    FID_SEQ_OST_MDT0 = 0,\n    FID_SEQ_LLOG = 1,   /* unnamed llogs */\n    FID_SEQ_ECHO = 2,\n    FID_SEQ_OST_MDT1 = 3,\n    FID_SEQ_OST_MAX = 9,    /* Max MDT count before OST_on_FID */\n    FID_SEQ_LLOG_NAME = 10, /* named llogs */\n    FID_SEQ_RSVD = 11,\n    FID_SEQ_IGIF = 12,\n    FID_SEQ_IGIF_MAX = 0x0ffffffffULL,\n    FID_SEQ_IDIF = 0x100000000ULL,\n    FID_SEQ_IDIF_MAX = 0x1ffffffffULL,\n    FID_SEQ_START = 0x200000000ULL,\n    FID_SEQ_LOCAL_FILE = 0x200000001ULL,\n    FID_SEQ_DOT_LUSTRE = 0x200000002ULL,\n    FID_SEQ_LOCAL_NAME = 0x200000003ULL,\n    FID_SEQ_SPECIAL = 0x200000004ULL,\n    FID_SEQ_QUOTA = 0x200000005ULL,\n    FID_SEQ_QUOTA_GLB = 0x200000006ULL,\n    FID_SEQ_ROOT = 0x200000007ULL,  /* Located on MDT0 */\n    FID_SEQ_NORMAL = 0x200000400ULL,\n    FID_SEQ_LOV_DEFAULT = 
0xffffffffffffffffULL\n};\nstatic inline int fid_seq_is_rsvd(const __u64 seq)\n{\n    return (seq > FID_SEQ_OST_MDT0 && seq <= FID_SEQ_RSVD);\n};\n\nstatic inline int fid_seq_is_idif(const __u64 seq)\n{\n    return seq >= FID_SEQ_IDIF && seq <= FID_SEQ_IDIF_MAX;\n}\n\nstatic inline int fid_is_idif(const struct lu_fid *fid)\n{\n    return fid_seq_is_idif(fid->f_seq);\n}\n\nstatic inline int fid_seq_is_igif(const __u64 seq)\n{\n    return seq >= FID_SEQ_IGIF && seq <= FID_SEQ_IGIF_MAX;\n}\n\nstatic inline int fid_is_igif(const struct lu_fid *fid)\n{\n    return fid_seq_is_igif(fid->f_seq);\n}\n\nstatic inline int fid_is_sane(const struct lu_fid *fid)\n{\n    return fid != NULL &&\n        ((fid->f_seq >= FID_SEQ_START && fid->f_ver == 0) ||\n         fid_is_igif(fid) || fid_is_idif(fid) || fid_seq_is_rsvd(fid->f_seq));\n}\n#endif\n\nstruct lu_buf {\n    void *lb_buf;\n    ssize_t lb_len;\n};\n\nstruct linkea_data {\n        /**\n         ** Buffer to keep link EA body.\n         **/\n    struct lu_buf *ld_buf;\n        /**\n         ** The matched header, entry and its length in the EA\n         **/\n    struct link_ea_header *ld_leh;\n    struct link_ea_entry *ld_lee;\n    int ld_reclen;\n};\n\n#define LINKEA_NEXT_ENTRY(ldata)        \\\n        (struct link_ea_entry *)((char *)ldata.ld_lee + ldata.ld_reclen)\n\n#define LINKEA_FIRST_ENTRY(ldata)       \\\n        (struct link_ea_entry *)(ldata.ld_leh + 1)\n\n#endif /* _HAVE_FID */\n\n#endif\n"
  },
  {
    "path": "src/include/pipeline_types.h",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2008, 2009 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n/**\n * \\file  lustre_hsm_pipeline.h\n * \\brief This file describes EntryProcessor pipeline.\n */\n\n#ifndef _PIPELINE_DEF_H\n#define _PIPELINE_DEF_H\n\n#ifdef HAVE_CHANGELOGS\ntypedef struct changelog_record {\n    CL_REC_TYPE  *p_log_rec;\n    char         *mdt;\n} changelog_record_t;\n#endif\n\n/** purpose specific information attached to a pipeline operation */\ntypedef struct op_extra_info_t {\n#ifdef HAVE_CHANGELOGS\n    /** changelog record info */\n    changelog_record_t  log_record;\n\n    /** is this entry from changelog ?*/\n    unsigned int        is_changelog_record:1;\n#endif\n} op_extra_info_t;\n\nstatic void inline extra_info_init(op_extra_info_t *p_extra_info)\n{\n}\n\n/** pipeline definitions */\nextern pipeline_stage_t std_pipeline[];\nextern const pipeline_descr_t std_pipeline_descr;\n\nextern pipeline_stage_t diff_pipeline[];\nextern const pipeline_descr_t diff_pipeline_descr;\n\ntypedef enum {\n    STD_PIPELINE,\n    DIFF_PIPELINE,\n} pipeline_flavor_e;\n\n/* specific argument for diff pipeline (accessible as entry_proc_arg) */\ntypedef struct _diff_arg {\n    enum { NO_APPLY = 0, APPLY_FS, APPLY_DB } apply;\n    attr_mask_t     diff_mask;\n    const char     *db_tag;\n    FILE           *lovea_file;\n    FILE           *fid_remap_file;\n    unsigned int    recov_from_backend:1;\n} diff_arg_t;\n\n#endif\n"
  },
  {
    "path": "src/include/policy_rules.h",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2008, 2009, 2010 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n/**\n * \\file  policy_rules.h\n * \\brief policy rules definition\n */\n\n#ifndef _POLICIES_H\n#define _POLICIES_H\n\n#include \"rbh_boolexpr.h\"\n#include \"rbh_params.h\"\n#include \"list_mgr.h\"\n#include <sys/time.h>\n\n/** whitelist item is just a boolean expression */\ntypedef struct whitelist_item_t {\n    bool_node_t     bool_expr;\n    attr_mask_t     attr_mask; /**< summary of attributes involved in boolean\n                                    expression */\n} whitelist_item_t;\n\n#define POLICY_NAME_LEN  128\n#define RULE_ID_LEN      128\n#define FILESET_ID_LEN   128\n#define HINTS_LEN       4096\n\ntypedef struct rbh_params action_params_t;\n\n/* fileset definition */\ntypedef struct fileset_item_t {\n    char fileset_id[FILESET_ID_LEN];\n\n    /** condition for files to be in this fileset */\n    bool_node_t definition;\n    /** summary of attributes involved in boolean expression */\n    attr_mask_t attr_mask;\n\n    /* user tunable */\n    unsigned int matchable:1;   /* is the fileset matchable or is it a temporary\n                                 * fileset to build another one? */\n    /* flags for internal management */\n    unsigned int used_in_policy:1;  /* is the fileset referenced in a policy? */\n\n    /* action parameters for policies (merged with parameters from \"policy\" and\n     * \"rule\", and overrides them).\n     * Each hash table key is a policy name (lower case),\n     * and the associated value is an action_params_t structure. 
*/\n    GHashTable *policy_action_params;\n\n    /** @TODO aggregation policy */\n\n} fileset_item_t;\n\n/**\n * Return the action parameters associated to a fileset for the given policy.\n */\naction_params_t *get_fileset_policy_params(const fileset_item_t *fileset,\n                                           const char *policy_name);\n\n/* what to do with the entry after the policy action.\n * returned by action_function */\ntypedef enum {\n    PA_NONE = 0,\n    PA_RM_ONE,\n    PA_RM_ALL,\n    PA_UPDATE\n} post_action_e;\n\ntypedef int (*db_cb_func_t) (void *cb_arg, operation_type_e op,\n                             const entry_id_t *id, const attr_set_t *attrs);\n\ntypedef int (*action_func_t) (const entry_id_t *id, attr_set_t *attrs,\n                              const action_params_t *params,\n                              post_action_e *what_after, db_cb_func_t db_cb_fn,\n                              void *db_cb_arg);\n\ntypedef enum {\n    ACTION_UNSET, /**< not set */\n    ACTION_NONE,  /**< explicit noop */\n    ACTION_FUNCTION,\n    ACTION_COMMAND\n} action_type_e;\n\nstruct action_func_info {\n    action_func_t call;\n    char *name;\n};\n\ntypedef struct policy_action {\n    action_type_e type;\n    union {\n        char **command;\n        struct action_func_info func;\n    } action_u; /* command for ACTION_COMMAND,\n                 * function for ACTION_FUNCTION, ... */\n} policy_action_t;\n\n/** policy rule */\ntypedef struct rule_item_t {\n    char rule_id[RULE_ID_LEN];\n\n    fileset_item_t **target_list;\n    unsigned int target_count;\n\n    /** condition for purging/migrating files */\n    bool_node_t condition;\n\n    /** if specified, overrides policy defaults */\n    policy_action_t action;\n    /** merged with default action_params from the policy and overrides them.\n     *  merged with fileclass action_params (overridden by them). 
*/\n    action_params_t action_params;\n\n    /** attributes involved in condition, action and action_params */\n    attr_mask_t attr_mask;\n\n} rule_item_t;\n\n/** list of rules for a policy */\ntypedef struct policy_rules_t {\n    whitelist_item_t   *whitelist_rules;\n    unsigned int        whitelist_count;\n\n    fileset_item_t    **ignore_list;\n    unsigned int        ignore_count;\n\n    rule_item_t        *rules; /* one of them can be the default policy */\n    unsigned int        rule_count;\n\n    /* minimum set of attributes to check rules and build action_params */\n    attr_mask_t         run_attr_mask;\n\n} policy_rules_t;\n\n#define NO_POLICY(p_list) (((p_list)->whitelist_count + (p_list)->ignore_count \\\n                           + (p_list)->rule_count) == 0)\n\nstatic bool inline has_default_policy(policy_rules_t *list)\n{\n    int i;\n\n    for (i = 0; i < list->rule_count; i++) {\n        if (!strcasecmp(list->rules[i].rule_id, \"default\"))\n            return true;\n    }\n    return false;\n}\n\n/* ======================================================================\n * Function for managing all policy configuration (migration, purge, unlink)\n * ======================================================================*/\n\n/** config handlers */\nextern mod_cfg_funcs_t policies_cfg_hdlr;\n\n/** policy descriptor */\ntypedef struct policy_descr_t {\n    /** @TODO store policy info a persistent way for later check */\n    char                name[POLICY_NAME_LEN];\n    bool_node_t         scope;\n    attr_mask_t         scope_mask;\n\n    /* In the case of 'multi-action' status managers,indicate the implemented\n     * action. 
*/\n    char               *implements;\n\n    /* status of entries for which an action is running...\n     * (used to check status of outstanding entries)\n     */\n    const char         *status_current;\n    struct sm_instance *status_mgr;\n    policy_action_t     default_action;\n\n    /* attr index of the sort order (e.g. last_mod, creation_time, ...) */\n    /* default value for policy_run_config_t.lru_sort_attr */\n    unsigned int        default_lru_sort_attr;\n    /* default value for policy_run_config_t.lru_sort_order */\n    sort_order_t        default_lru_sort_order;\n\n    policy_rules_t      rules;\n\n    /* does this policy manage deleted entries? */\n    bool                manage_deleted;\n} policy_descr_t;\n\ntypedef struct policies_t {\n    policy_descr_t     *policy_list;\n    unsigned int        policy_count;\n\n    /* status mask for all policies that provide a get_status() function */\n    attr_mask_t         global_status_mask;\n\n    fileset_item_t     *fileset_list;\n    unsigned int        fileset_count;\n    attr_mask_t         global_fileset_mask;    /**< mask for all filesets */\n\n    /* is there any policy that manages deleted entries? 
*/\n    unsigned int        manage_deleted:1;\n\n} policies_t;\nextern struct policies_t policies;\n\n/**\n * Test if a policy exists and gives its index in policies.policy_list.\n * \\param[out] index index in the policies.policy_list array.\n */\nbool policy_exists(const char *name, int *index);\n\n/** Indicate if any policy manages deleted entries */\n\nstatic inline bool has_deletion_policy(void)\n{\n    return !!policies.manage_deleted;\n}\n\n/** determine the fileclasses an entry matches for reports (report != no)*/\nint match_classes(const entry_id_t *id, attr_set_t *p_attrs_new,\n                  const attr_set_t *p_attrs_cached);\n\n/* return values for matching */\ntypedef enum {\n    POLICY_MATCH = 0,\n    POLICY_NO_MATCH,\n    POLICY_MISSING_ATTR,\n    POLICY_ERR\n} policy_match_t;\n\n/** time modifier */\ntypedef struct time_modifier {\n    double time_factor;\n    time_t time_min;\n} time_modifier_t;\n\n/** retrieve fileset structure from its name */\nfileset_item_t *get_fileset_by_name(const policies_t *policies,\n                                    const char *name);\n\n/** get the first matching policy case for the given file\n *  \\param pp_fileset(out) set to the matching fileset\n *         or NULL for the default policy case\n */\nrule_item_t *policy_case(const policy_descr_t *policy,\n                         const entry_id_t *p_entry_id,\n                         const attr_set_t *p_entry_attr,\n                         fileset_item_t **pp_fileset);\n\n/** get the policy case for the given fileclass.\n *  \\param pp_fileset is set to the matching fileset\n *         or NULL for the default policy case\n */\nrule_item_t *class_policy_case(const policy_descr_t *policy,\n                               const char *class_id,\n                               fileset_item_t **pp_fileset);\n\n/** test if an entry is in policy scope */\npolicy_match_t match_scope(const policy_descr_t *pol, const entry_id_t *id,\n                           const 
attr_set_t *attrs, bool warn);\n\n/** Add status attributes mask according to all matching policy scopes.\n * @param tolerant If false, display a warning and don't set a status in the\n *                 mask attributes are missing to check the scope.\n *                 If true, set a status in the mask if the entry can't be\n *                 matched against a scope (no warning is issued).\n */\nvoid add_matching_scopes_mask(const entry_id_t *id, const attr_set_t *attr,\n                              bool tolerant, uint32_t *mask);\n\n/** @TODO RBHv3 check if all these functions are used */\n\n/**\n * Check if an entry has a chance to be matched in any policy condition.\n * (does not report warnings if attrs are missing).\n * \\param pp_fileset(out) the matched fileclass.\n */\npolicy_match_t policy_match_all(const policy_descr_t *policy,\n                                const entry_id_t *p_entry_id,\n                                const attr_set_t *p_entry_attr,\n                                const time_modifier_t *time_mod,\n                                fileset_item_t **pp_fileset);\n\n/* @TODO: for commands only:\n * Some specific strings in parameter values are interpreted and replaced,\n * according to the given entry id and its attributes:\n * {fid}, {path}, {name}, {rule}, {fileclass}, {ost_pool}.\n * For action functions: get these values directly from id and attrs arguments.\n */\n\n/* Check if an entry is whitelisted for the given policy.\n * \\param pp_fileset(out) the matched fileclass.\n */\npolicy_match_t is_whitelisted(const policy_descr_t *policy,\n                              const entry_id_t *p_entry_id,\n                              const attr_set_t *p_entry_attr,\n                              fileset_item_t **fileset);\n\n/** determine if a class is whitelisted for the given policy */\nbool class_is_whitelisted(const policy_descr_t *policy, const char *class_id);\n\n/* check if entry matches a boolean expression */\npolicy_match_t 
entry_matches(const entry_id_t *p_entry_id,\n                             const attr_set_t *p_entry_attr,\n                             bool_node_t *p_node,\n                             const time_modifier_t *p_pol_mod,\n                             const struct sm_instance *smi);\n\n/* read an action params block from config */\nint read_action_params(config_item_t param_block, action_params_t *params,\n                       attr_mask_t *mask, char *msg_out);\n\n/* parse policy action value from config */\nint parse_policy_action(const char *name, const char *value,\n                        char **extra, unsigned int extra_cnt,\n                        policy_action_t *action,\n                        attr_mask_t *mask, char *msg_out);\n\n/**\n * Convert criteria to ListMgr data\n * \\param p_comp        IN: the condition to be converted\n * \\param p_attr_index  OUT: related attribute index\n * \\param p_compar      OUT: listmgr comparator\n * \\param db_type_u     OUT: value\n * \\param p_must_release OUT: set to true if the db_type_u.val_str string must\n *                            be released.\n * \\return -1 if this is not a criteria stored in DB.\n */\nstruct sm_instance;\nint criteria2filter(const compare_triplet_t *p_comp,\n                    unsigned int *p_attr_index, filter_comparator_t *p_compar,\n                    filter_value_t *p_value, bool *p_must_release,\n                    const struct sm_instance *smi,\n                    const time_modifier_t *time_mod);\n\n#endif\n"
  },
  {
    "path": "src/include/policy_run.h",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2009-2016 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n/**\n * \\file policy_run.h\n * \\brief This module tr/iggers migrations to HSM or external storage.\n */\n#ifndef _GENERIC_POLICIES_H\n#define _GENERIC_POLICIES_H\n\n#include \"rbh_const.h\"\n#include \"config_parsing.h\"\n#include \"policy_rules.h\"\n#include \"queue.h\"\n#include <sys/types.h>\n#include \"rbh_logs.h\"\n\n/**\n *  Trigger type\n */\ntypedef enum {\n    TRIG_ALWAYS = 0, /**< always trigger policy at scheduled interval */\n    TRIG_CONDITION, /**<  check a condition at scheduled interval */\n    /* TODO 2.6 add conditions: rbh idle, host under a given load, external command or module ... */\n} trigger_type_t;\n\n/** target for a policy run */\ntypedef enum {\n    TGT_NONE = 0, /* if specified as a module parameter: check all triggers. 
*/\n    TGT_FS,     /* apply policies to the filesystem */\n#ifdef _LUSTRE\n    TGT_OST,    /* apply policies to the specified OST */\n    TGT_POOL,   /* apply policies to the specified pool of OSTs */\n    TGT_PROJID, /* apply policies to the specified project id */\n#endif\n    TGT_USER,   /* apply policies to the specified user */\n    TGT_GROUP,  /* apply policies to the specified group */\n    TGT_FILE,   /* check/apply policies to the specified file */\n    TGT_CLASS,  /* apply policies to the specified fileclass */\n} policy_target_t;\n\n/* TODO actions can be:\n *   1) policy wide (always the same for a given policy)\n *   2) trigger wide: specified for each trigger (?)\n *   3) policy_case wide: different for each policy case\n */\n\ntypedef enum {\n    PCT_THRESHOLD,    /**< threshold is specified as a percentage */\n    VOL_THRESHOLD,    /**< threshold is specified as a (used) size (in Bytes) */\n    COUNT_THRESHOLD,  /**< threshold is specified as a number of entries */\n    CNTPCT_THRESHOLD  /**< threshold is specified as a percentage of\n                           the number of available inodes */\n} trigger_value_type_t;\n\ntypedef union {\n    ull_t   volume;\n    ull_t   count;\n    double  percent;\n} threshold_u;\n\ntypedef struct trigger_item_t {\n    trigger_type_t      trigger_type;\n    policy_target_t     target_type;\n\n    char              **list; /**< list of groups or user for user/group\n                                   triggers. NULL=> apply to all. 
*/\n    unsigned int        list_size;\n    time_t              check_interval;\n\n    unsigned int        max_action_nbr;\n    ull_t               max_action_vol;\n\n    trigger_value_type_t hw_type;\n    /* volume, percent, or count */\n    threshold_u         hw_u;\n\n    trigger_value_type_t lw_type;\n    /* volume, percent, or count */\n    threshold_u         lw_u;\n\n    /* min time to wait between 2 trigger applications */\n    time_t              post_trigger_wait;\n\n    /* trigger options: */\n    /* raise alert when it is triggered */\n    bool                alert_hw;\n\n    /* raise alert when it cannot reach low threshold */\n    bool                alert_lw;\n\n    /* action params (overrides policy action params) */\n    action_params_t     action_params;\n    attr_mask_t         params_mask;\n\n} trigger_item_t;\n\nstatic inline char *trigger2str(const trigger_item_t *trig)\n{\n    if (trig->trigger_type == TRIG_ALWAYS)\n        return \"scheduled\";\n    /* else: condition to be checked at scheduled interval */\n\n    switch (trig->target_type) {\n    case TGT_FS:\n        return \"global_usage\";\n#ifdef _LUSTRE\n    case TGT_OST:\n        return \"ost_usage\";\n    case TGT_POOL:\n        return \"pool_usage\";\n    case TGT_PROJID:\n        return \"projid_usage\";\n#endif\n    case TGT_USER:\n        return \"user_usage\";\n    case TGT_GROUP:\n        return \"group_usage\";\n\n    case TGT_NONE:\n        RBH_BUG(\"TGT_NONE: not an expected trigger\");\n    case TGT_CLASS:    /* only for manual actions */\n    case TGT_FILE: /* only for manual actions */\n        RBH_BUG(\"No trigger expected on files or fileclass: \"\n                \"only for manual actions\");\n    }\n    return NULL;\n}\n\ntypedef enum {\n    MS_INVALID = -1,  /**< Wrong value */\n    MS_NONE    = 0,   /**< No matching */\n    MS_CACHE_ONLY,    /**< Use only cached information for matching */\n    MS_AUTO_ATTRS,    /**< Complete cached information by retrieving 
missing\n                           attributes and attributes that are required to be\n                           up to date (path excluded) */\n    MS_AUTO_ALL,      /**< Complete cached information by retrieving missing\n                           attributes and attributes that are required to be\n                           up to date (including path) */\n    MS_FORCE_UPDT,    /**< Force using up to date information */\n} match_source_t;\n\n\ntypedef struct policy_run_config_t {\n    unsigned int        nb_threads;\n    unsigned int        queue_size;\n    unsigned int        db_request_limit;\n\n    unsigned int        max_action_nbr; /**< can also be specified in each\n                                             trigger */\n    ull_t               max_action_vol; /**< can also be specified in each\n                                             trigger */\n    trigger_item_t     *trigger_list;\n    unsigned int        trigger_count;\n\n    time_t              check_action_status_delay;\n    time_t              action_timeout;\n\n    /** interval for reporting progress of current policy run */\n    time_t              report_interval;\n\n    /* maintenance related option */\n    /** is this policy influenced by maintenance mecanism */\n    bool                maintenance_sensitive;\n    /** time window to start modifying time conditions */\n    time_t              pre_maintenance_window;\n    /** minimal time condition */\n    time_t              maint_min_apply_delay;\n\n    /** min error percentage to suspend current policy (0=disable) */\n    double              suspend_error_pct;\n    /** min error count to suspend current policy (0=disable) */\n    unsigned int        suspend_error_min;\n\n    /** attr index of the sort order (e.g. last_mod, creation_time, ...).\n     * overrides default_lru_sort_attr (from policy descr). 
*/\n    unsigned int        lru_sort_attr;\n    /* overrides default_lru_sort_order */\n    sort_order_t        lru_sort_order;\n\n    /** if specified, overrides default_action from the policy descriptor.\n     * Can then be overriden by rules. */\n    policy_action_t     action;\n\n    /** default action parameters for the policy.\n     *  They can be overriden by action params from rule and fileset. */\n    action_params_t     action_params;\n\n    /** attributes used in action and action_params */\n    attr_mask_t         run_attr_mask;\n\n    bool                check_action_status_on_startup;\n    bool                recheck_ignored_entries;\n\n    /** report policy actions in report file? */\n    bool                report_actions;\n\n    /** Delay (in milliseconds) for resubmitting entries\n     * to a scheduler after it delayed an entry. */\n    unsigned int        reschedule_delay_ms;\n\n    /** number of action schedulers */\n    int                      sched_count;\n    /** list of actions schedulers */\n    const struct action_scheduler **schedulers;\n    /** configuration of action schedulers */\n    void                   **sched_cfg;\n\n    /** source information for checking policy rules before scheduling */\n    match_source_t       pre_sched_match;\n    /** source information for checking policy rules before action\n     * (after scheduling) */\n    match_source_t       post_sched_match;\n\n    /** command to execute before each policy run */\n    char          **pre_run_command;\n    /** command to execute after each policy run */\n    char          **post_run_command;\n\n} policy_run_config_t;\n\ntypedef struct counters_t {\n    ull_t count;\n    ull_t vol;\n    ull_t blocks;\n    ull_t targeted;\n} counters_t;\n\n/** add counters together */\nstatic inline void counters_add(counters_t *dst, const counters_t *src)\n{\n    dst->count += src->count;\n    dst->vol += src->vol;\n    dst->blocks += src->blocks;\n    dst->targeted += 
src->targeted;\n}\n\n/** test if a counter is zero */\nstatic inline bool counter_is_set(const counters_t *c)\n{\n    return (c->count != 0 || c->vol != 0 || c->blocks != 0 || c->targeted != 0);\n}\n\n/** test if any of the counter fields reached a limit.\n * @param c the counter\n * @param l the limits\n */\nstatic inline bool counter_reached_limit(const counters_t *c,\n                                         const counters_t *l)\n{\n    return (((l->count != 0) && (c->count >= l->count))\n            || ((l->vol != 0) && (c->vol >= l->vol))\n            || ((l->blocks != 0) && (c->blocks >= l->blocks))\n            || ((l->targeted != 0) && (c->targeted >= l->targeted)));\n}\n\n/** test if a specified target has been reached */\nstatic inline bool counter_not_reached(const counters_t *c,\n                                       const counters_t *t)\n{\n    return (((t->count != 0) && (c->count < t->count))\n            || ((t->vol != 0) && (c->vol < t->vol))\n            || ((t->blocks != 0) && (c->blocks < t->blocks))\n            || ((t->targeted != 0) && (c->targeted < t->targeted)));\n}\n\ntypedef struct __action_summary {\n    time_t          policy_start;\n    time_t          last_report;\n    counters_t      action_ctr;\n    unsigned int    skipped;\n    unsigned int    errors;\n} action_summary_t;\n\ntypedef enum {\n    TRIG_NOT_CHECKED,   /* not checked yet */\n    TRIG_BEING_CHECKED, /* currently beeing checked */\n    TRIG_RUNNING,       /* current policy run for this trigger */\n    TRIG_OK,            /* no run is needed */\n    TRIG_NO_LIST,       /* no file list available */\n    TRIG_NOT_ENOUGH,    /* not enough candidates */\n    TRIG_CHECK_ERROR,   /* Misc Error */\n    TRIG_ABORTED,       /* aborted purge */\n    TRIG_UNSUPPORTED    /* Trigger not supported in this mode */\n} trigger_status_t;\n\n/* Info about each trigger */\ntypedef struct trigger_status__ {\n    time_t              last_check;  /* the last time this trigger was tested */\n   
 trigger_status_t    status;\n\n    /* total of triggered actions since startup */\n    counters_t          total_ctr;\n    /* last triggered actions */\n    counters_t          last_ctr;\n\n    /* its usage, the last time it was checked for OST and global FS triggers */\n    double              last_usage;\n    /* for inode based thresholds there is also percentage */\n    double              last_count;\n} trigger_info_t;\n\n/* policy runtime information */\ntypedef struct policy_info_t {\n    policy_descr_t         *descr;        /**< point to policy descriptor */\n    policy_run_config_t    *config;       /**< policy run configuration */\n    const action_params_t  *trigger_action_params;  /**< action parameters from\n                                                         trigger */\n    entry_queue_t           queue;        /**< processing queue */\n    pthread_t              *threads;      /**< worker threads array (size in config) */\n    pthread_t               trigger_thr;  /**< trigger checker thread */\n    lmgr_t                  lmgr;         /**< db connexion for triggers */\n    trigger_info_t         *trigger_info; /**< stats about policy triggers */\n    dev_t                   fs_dev;       /**< to check if filesystem is\n                                               unmounted */\n    struct sched_res_t     *sched_res;    /**< internal state of schedulers\n                                           *   (see config to known their count) */\n    action_summary_t        progress;\n    time_t                  first_eligible;\n    time_modifier_t        *time_modifier;\n    time_t                  gcd_interval; /**< gcd of check intervals\n                                               (gcd(policy triggers)) */\n    run_flags_t             flags;        /**< from policy_opt */\n    bool                    aborted;      /**< abort status */\n    bool                    stopping;     /**< current run is stopping */\n    volatile bool           
waiting;      /**< a thread is already trying to\n                                               join the trigger thread */\n} policy_info_t;\n\n/**\n * Scheduler init function prototype.\n * @param[in]   config          Scheduler configuration created by its config handlers.\n * @param[out]  p_sched_data    Private context allocated by this function.\n */\ntypedef int (*sched_init_func_t)(void *config, void **p_sched_data);\n\n/**\n *  Reset a scheduler.\n *  Empty all its internal queues and reset its state for a new policy run.\n */\ntypedef int (*sched_reset_func_t)(void *sched_data);\n\ntypedef enum {\n    SCHED_OK            = 0,    /**< Entry taken into account by scheduler */\n    SCHED_DELAY         = 1,    /**< Wait before submitting new entries */\n    SCHED_SKIP_ENTRY    = 2,    /**< Skip the entry for the  current run */\n    SCHED_STOP_RUN      = 3,    /**< Stop submitting entries for the current\n                                 *   policy run. Already submitted tasks\n                                 *   keep running. Queued entries in previous\n                                 *   schedulers are canceled.\n                                 */\n    SCHED_KILL_RUN      = 4,    /**< Abort the policy run and cancel in-flight\n                                     tasks in next schedulers. 
*/\n} sched_status_e;\n\n/**\n * Function to be called by an action scheduler to trigger an action,\n * or skip an entry.\n * @param st    SCHED_OK if the action is to be performed.\n *              Another status in other cases (stop run, etc.)\n */\ntypedef void (*sched_cb_t)(void *udata, sched_status_e st);\n\n\n/**\n * Scheduler function prototype.\n * @param[in,out] sched_data   Scheduler private context created by\n *                             sched_init.\n * @param[in]     id           Entry id for the action to be scheduled.\n * @param[in]     attrs        Entry attributes for the action to be scheduled.\n * @param[in]     cb           Function to be called by the scheduler to trigger\n *                             an action, or skip an entry.\n * @param[in]     udata        Argument to be passed to cb function.\n *\n * @return  A positive code (sched_status_e) for policy workflow control.\n *          A negative value (-errno) on error.\n */\ntypedef int (*sched_func_t)(void *sched_data,\n                            const entry_id_t *id,\n                            const attr_set_t *attrs,\n                            sched_cb_t cb,\n                            void *udata);\n\n/** Action scheduler (implemented by plugins) */\ntypedef struct action_scheduler {\n    const char            *sched_name;      /**< Scheduler name */\n    const ctx_cfg_funcs_t *sched_cfg_funcs; /**< Configuration handlers */\n    sched_init_func_t      sched_init_func; /**< Scheduler initialization\n                                              function */\n    sched_reset_func_t     sched_reset_func;/**< Scheduler reset function */\n    attr_mask_t            sched_attr_mask; /**< Needed attributes to make the\n                                              scheduling decision */\n    sched_func_t           sched_schedule;  /**< Function to invoke the\n                                              scheduler */\n} action_scheduler_t;\n\n/** policies runtime config */\ntypedef 
struct policy_run_config_list_t {\n    policy_run_config_t *configs;\n    unsigned int         count;\n} policy_run_config_list_t;\n\n/** defined in policies/policy_run_cfg.c */\nextern policy_run_config_list_t run_cfgs;\n\n/** config handlers */\nextern mod_cfg_funcs_t policy_run_cfg_hdlr;\n\ntypedef union {\n    int         index;\n    const char *name;\n} target_u;\n\ntypedef struct policy_opt_t {\n    policy_target_t     target;\n    run_flags_t         flags;\n    target_u            optarg_u;\n    double              usage_pct;   /**< target pct for purges */\n\n    /* limits from command line */\n    unsigned int        max_action_nbr;\n    ull_t               max_action_vol;\n} policy_opt_t;\n\n/**\n * Start a policy module instance (workers, triggers...).\n * @param[out] policy   This structure is filled with all policy run information\n *                      and resources.\n * @param[in] policy_descr Describes the policy to be managed.\n * @param[in] p_config     Policy run configuration.\n * @param[in] options      Run options from command line.\n */\nint policy_module_start(policy_info_t *policy, /* out */\n                        policy_descr_t *policy_descr,  /* in */\n                        policy_run_config_t *p_config, /* in */\n                        const policy_opt_t *options);  /* in */\nint policy_module_stop(policy_info_t *policy);\nint policy_module_wait(policy_info_t *policy);\nvoid policy_module_dump_stats(policy_info_t *policy);\n\n/* update trigger intervals,\n * update gcd_interval\n */\nvoid policy_module_update_check_interval(policy_info_t *policy);\n\n#endif\n"
  },
  {
    "path": "src/include/queue.h",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2009 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n/**\n * \\file queue.h\n * \\brief Module for managing the queue of files to be purged.\n */\n\n#include <semaphore.h>\n#include <time.h>\n#include <pthread.h>\n\n#ifndef _QUEUE_MNGMT_H\n#define _QUEUE_MNGMT_H\n\ntypedef struct entry_queue_t {\n    /* cyclic array of entries */\n    void          **queue;\n\n    /* size and indexes */\n    unsigned int    array_size;\n    unsigned int    queue_size;\n    unsigned int    first_index;\n    unsigned int    last_index;\n\n    /* mutex for accessing the queue */\n    pthread_mutex_t queue_lock;\n\n    /* token for free slots */\n    sem_t           sem_empty;\n    /* token for filled slots */\n    sem_t           sem_full;\n\n    /* ==== stats ==== */\n\n    time_t          last_submitted;\n    time_t          last_unqueued;\n    time_t          last_ack;\n\n    /* idle threads */\n    unsigned int    nb_thr_waiting;\n\n    /* array of status count */\n    unsigned int   *status_array;\n    unsigned int    status_count;\n\n    /* special fields for counting feedback info */\n    unsigned long long *feedback_array;\n    unsigned int    feedback_count;\n\n} entry_queue_t;\n\n/**\n * Queue initialization.\n * @param queue_size: buffer size (over this count, inserts are blocking).\n * @param max_status: the max value for status (will keep track of acknowledgments from 0 to this value)\n * @param feedback_count: the number of feedback values from workers\n */\nint CreateQueue(entry_queue_t *p_queue, unsigned int queue_size,\n                unsigned int max_status, 
unsigned int feedback_count);\n\n/**\n * Reset status info\n */\nvoid Reset_StatusCount(entry_queue_t *p_queue);\n\n/**\n * Reset feedback info at given index\n */\nvoid Reset_Feedback(entry_queue_t *p_queue, unsigned int feedback_index);\n\n/**\n * Insert an entry to the queue.\n * Can be blocking if the queue is full.\n */\nint Queue_Insert(entry_queue_t *p_queue, void *entry);\n\n/**\n * Get an entry from the queue.\n * The call is blocking until there is an element available\n * in the queue.\n */\nint Queue_Get(entry_queue_t *p_queue, void **p_ptr);\n\n/**\n * Acknwoledge when an entry has been handled.\n * Indicates the status and optionnal feedback info (as unsigned long long\n * array). To be called by the worker thread.\n */\nvoid Queue_Acknowledge(entry_queue_t *p_queue, unsigned int status,\n                       unsigned long long *feedback_array,\n                       unsigned int feedback_count);\n\nvoid RetrieveQueueStats(entry_queue_t *p_queue, unsigned int *p_nb_thr_wait,\n                        unsigned int *p_nb_items, time_t *p_last_submitted,\n                        time_t *p_last_unqueued, time_t *p_last_ack,\n                        unsigned int *status_array,\n                        unsigned long long *feedback_array);\n\n#endif\n"
  },
  {
    "path": "src/include/rbh_basename.h",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2015 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n#ifndef _RBH_BASENAME_H\n#define _RBH_BASENAME_H\n\n/**\n * Ensure to use the GNU version of basename\n * that does not modify its input argument.\n */\nconst char *rh_basename(const char *path);\n\n#endif\n"
  },
  {
    "path": "src/include/rbh_boolexpr.h",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2008-2014 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n/**\n * \\file  rbh_boolexpr.h\n * \\brief condition types.\n */\n\n#ifndef _BOOLEXPR_H\n#define _BOOLEXPR_H\n\n#include \"rbh_const.h\"\n#include <strings.h>\n#include <stdbool.h>\n#include <time.h>\n\n/* define ATTR_INDEX_ ... */\n#include \"db_schema.h\"\n\ntypedef enum {\n    COMP_NONE = 0,              /**<     not set */\n    COMP_GRTHAN,                /**<     > */\n    COMP_GRTHAN_EQ,             /**<     >= */\n    COMP_LSTHAN,                /**<     < */\n    COMP_LSTHAN_EQ,             /**<     <= */\n    COMP_EQUAL,                 /**<     == */\n    COMP_DIFF,                  /**<     != */\n    COMP_LIKE,                  /**<     regexp matching */\n    COMP_UNLIKE,                /**<     regexp not matching */\n} compare_direction_t;\n\ntypedef enum {\n    CRITERIA_TREE = 0,\n    CRITERIA_PATH,\n    CRITERIA_NAME,\n    CRITERIA_INAME,\n    CRITERIA_TYPE,\n    CRITERIA_OWNER,\n    CRITERIA_GROUP,\n    CRITERIA_SIZE,\n    CRITERIA_DEPTH,\n    CRITERIA_DIRCOUNT,\n    CRITERIA_NLINK,\n    CRITERIA_LAST_ACCESS,\n    CRITERIA_LAST_MOD,\n    CRITERIA_LAST_MDCHANGE,\n    CRITERIA_CREATION,\n    CRITERIA_RMTIME,\n#ifdef _LUSTRE\n    CRITERIA_POOL,\n    CRITERIA_OST,\n    CRITERIA_PROJID,\n#endif\n    CRITERIA_FILECLASS,\n    CRITERIA_STATUS,\n    /* /!\\ str2criteria relies on the fact that CRITERIA_XATTR is the first\n     * non-standard criteria */\n    CRITERIA_XATTR,\n    CRITERIA_SM_INFO,\n} compare_criteria_t;\n\n#define NO_CRITERIA ((compare_criteria_t)-1)\n\n#define XATTR_PREFIX    
\"xattr\"\n\n/* /!\\ str2criteria relies on the fact that CRITERIA_XATTR is the first\n     * non-standard criteria */\n#define MAX_CRITERIA CRITERIA_SM_INFO\n\nconst char *criteria2str(compare_criteria_t crit);\n\nstruct sm_instance;\nstruct sm_info_def;\ncompare_criteria_t str2criteria(const char *str, const struct sm_instance *smi,\n                                const struct sm_info_def **ppdef,\n                                unsigned int *idx);\n\n#define LRU_ATTR_NONE  (ATTR_INDEX_FLG_UNSPEC)\n#define LRU_ATTR_INVAL (ATTR_INDEX_FLG_UNSPEC | 0x1)\n\n#define ALLOWED_LRU_ATTRS_STR \"none, creation, last_access, last_mod, \"\\\n                              \"rm_time, size, or status manager specific.\"\n\n/**\n * Return the attribute index for the given lru_sort_attr string.\n * @retval LRU_ATTR_NONE  'lru_sort_attr = none' (no sorting)\n * @retval LRU_ATTR_INVAL invalid lru_sort_attr.\n */\nunsigned int str2lru_attr(const char *str, const struct sm_instance *smi);\n\ntypedef enum {\n    BOOL_ERR = 0,\n    BOOL_NOT,\n    BOOL_OR,\n    BOOL_AND\n} bool_op_t;\n\ntypedef enum {\n    TYPE_NONE = 0,\n    TYPE_LINK,\n    TYPE_DIR,\n    TYPE_FILE,\n    TYPE_CHR,\n    TYPE_BLK,\n    TYPE_FIFO,\n    TYPE_SOCK\n} obj_type_t;\n\n/** string representation in policies */\nstatic const char *type_cfg_name[] = {\n    \"?\",\n    \"symlink\",\n    \"directory\",\n    \"file\",\n    \"char\",\n    \"block\",\n    \"fifo\",\n    \"socket\"\n};\n\nstatic inline const char *type2str(obj_type_t type)\n{\n    if (type > TYPE_SOCK)\n        return type_cfg_name[TYPE_NONE];\n\n    return type_cfg_name[type];\n}\n\nstatic inline obj_type_t str2type(const char *str)\n{\n    obj_type_t i;\n\n    for (i = TYPE_NONE; i <= TYPE_SOCK; i++) {\n        if (!strcasecmp(str, type_cfg_name[i]))\n            return i;\n    }\n    return TYPE_NONE;\n}\n\ntypedef union {\n    char               str[RBH_PATH_MAX]; /**< for all conditions based on a\n                                               
string */\n    unsigned long long size;      /**< for size-based conditions */\n    unsigned int       integer;   /**< for int base conditions */\n    time_t             duration;  /**< for last access and last mod condition */\n    obj_type_t         type;      /**< for conditions based on object type */\n} compare_value_t;\n\nenum compare_flags {\n    CMP_FLG_ANY_LEVEL   = (1 << 0), /**< Indicates that the compare triplet is\n                                     for matching any level of directories. */\n    CMP_FLG_INSENSITIVE = (1 << 1), /**< case insensitive string matching */\n};\n\n/* whitelist rules are defined by a tree of comparators */\n\n/** <attribute> <comparator> <value> triplet */\ntypedef struct compare_triplet_t {\n    enum compare_flags  flags;\n    compare_criteria_t  crit;\n    char                attr_name[RBH_NAME_MAX]; /**< for xattrs, or status\n                                                      manager specific attr */\n    compare_direction_t op;\n    compare_value_t     val;\n} compare_triplet_t;\n\n/** Type of boolean expression: unary, binary or criteria */\ntypedef enum {\n    NODE_CONSTANT,  /**< boolean constant (TRUE or FALSE) */\n    NODE_CONDITION,\n    NODE_UNARY_EXPR,\n    NODE_BINARY_EXPR\n} node_type_t;\n\n/** Recursive definition of a Boolean expression */\ntypedef struct bool_node_t {\n    node_type_t node_type;\n    union {\n        compare_triplet_t *condition;   /**< for final condition on any field */\n        bool               constant;    /**< true or false */\n        struct {\n            bool_op_t           bool_op; /**< boolean operator */\n            struct bool_node_t *expr1;   /**< for unary or binary operators */\n            struct bool_node_t *expr2;   /**< for binary operators */\n\n            /* this tag indicates if expressions 1 and 2\n             * are allocated by the owner of this structure\n             * (boolean expression or set of classes) */\n            unsigned int owner:1;\n        } 
bool_expr;\n    } content_u;\n} bool_node_t;\n\n/** give the  string for a compare oparation */\nconst char *op2str(compare_direction_t comp);\n\n/** Create a boolean condition */\nint CreateBoolCond(bool_node_t *p_out_node, compare_direction_t compar,\n                   compare_criteria_t crit, compare_value_t val,\n                   enum compare_flags flags);\n\n/** Append a boolean condition with bool op = AND */\nint AppendBoolCond(bool_node_t *p_in_out_node, compare_direction_t compar,\n                   compare_criteria_t crit, compare_value_t val,\n                   enum compare_flags flags);\n\n/** Return a constant boolean expression (true or false) */\nint ConstantBoolExpr(bool constant, bool_node_t *p_bool_node);\n\n/**\n * Free a boolean expression structure\n */\nint FreeBoolExpr(bool_node_t *p_expr, bool free_top_node);\n\n/**\n * Print a boolean expression to a string.\n */\nint BoolExpr2str(bool_node_t *p_bool_node, char *out_str, size_t str_size);\n\n/**\n * Compare 2 boolean expressions\n * @return 1 if expression structure changed.\n * @return 0 if they have the same structure,\n * @return  -1 on error.\n */\nint compare_boolexpr(const bool_node_t *expr1, const bool_node_t *expr2);\n\n/**\n * Update the numerical values of a boolean expression.\n * /!\\ compare_boolexpr() must have returned 0 (else, unguarantied behavior).\n * @param tgt Boolean expression to be updated\n * @param src Boolean expression to take values from.\n * @return true if expression values have been changed\n * @return false if nothing has been changed\n */\nbool update_boolexpr(bool_node_t *tgt, const bool_node_t *src);\n\n/**\n * Set attribute value in attrs, given the criteria name and\n * the text representation of the value.\n */\nstruct attr_set_t;\nint set_attr_value_from_strings(const char *name, const char *val,\n                                struct attr_set_t *attrs, const struct sm_instance *smi);\n\n\n#endif\n"
  },
  {
    "path": "src/include/rbh_cfg.h",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2004-2009 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n/**\n *  \\file rbh_cfg.h\n *  \\brief Module for configuration management and parsing.\n *\n * \\addtogroup CONFIG_PARSING\n * @{\n */\n\n#ifndef _ROBINHOOD_CONFIG_H\n#define _ROBINHOOD_CONFIG_H\n\n#include \"config_parsing.h\"\n#include <sys/types.h>\n#include <stdio.h>\n#include <stdint.h>\n#include <stdbool.h>\n#include <limits.h>\n\n/**\n * Generic definitions for config management\n * \\addtogroup MODULE_CONFIG_FUNCTIONS\n * @{\n */\n\n/** configuration functions for modules */\ntypedef struct mod_cfg_funcs {\n    const char  *module_name;\n    void *      (*new)(void);      /**< allocate a new config structure */\n    void        (*free)(void *);   /**< free a config structure */\n\n    void        (*set_default)(void *);  /**< fill config structure with default\n                                              parameters */\n    /** Read parameters from config file */\n    int         (*read)(config_file_t config, void *cfg, char *msg_out);\n    /** Set the module config */\n    int         (*set_config)(void *cfg, bool reload);\n\n    void        (*write_default)(FILE *output);      /**< write defaults */\n    void        (*write_template)(FILE *output);     /**< write a template */\n} mod_cfg_funcs_t;\n\n/** configuration function for context-dependant modules\n * (configuration is included in another module's block)\n * that can have multiple instances.\n * e.g. 
scheduling parameters in <policy>_parameters.\n */\ntypedef struct ctx_cfg_funcs {\n    const char  *module_name;\n    void *      (*new)(void);      /**< allocate a new config structure */\n    void        (*free)(void *);   /**< free a config structure */\n\n    void        (*set_default)(void *);  /**< fill config structure with default\n                                              parameters */\n    /** Read parameters from a given block */\n    int         (*read_from_block)(config_item_t block, void *cfg,\n                                   char *msg_out);\n    /** Update the module config when reloading the configuration\n     * (ctx is context dependant) */\n    int         (*update)(void *ctx, void *cfg);\n\n    void        (*write_default)(int indent, FILE *output);  /**< write defaults */\n    void        (*write_template)(int indent, FILE *output); /**< write a template */\n} ctx_cfg_funcs_t;\n\n\n/** @} */\n\n/* Get config file for the current process (can be used to replace '{cfg}' in\n * external commands) */\nconst char *config_file_path(void);\n\n/* behavior flags for all modules */\ntypedef enum run_flags {\n    RUNFLG_DRY_RUN      = (1 << 0),\n    RUNFLG_IGNORE_POL   = (1 << 1),\n    RUNFLG_ONCE         = (1 << 2),\n    RUNFLG_NO_LIMIT     = (1 << 3),\n    RUNFLG_CHECK_ONLY   = (1 << 4),  /* only check triggers, don't purge */\n    RUNFLG_NO_GC        = (1 << 5),  /* don't clean orphan entries after scan */\n    RUNFLG_FORCE_RUN    = (1 << 6),  /* force running policy even if no scan was\n                                        complete */\n} run_flags_t;\n\n/* Config module masks:\n * Global, Log, and List Manager are always initialized.\n * Entry processor, Info Collector, policy runs are optional\n * are optionnal.\n */\n#define MODULE_MASK_ENTRY_PROCESSOR 0x00000001\n#define MODULE_MASK_FS_SCAN         0x00000002\n#define MODULE_MASK_EVENT_HDLR      0x00000004\n#define MODULE_MASK_POLICY_RUN      0x00000008\n\n#define MODULE_MASK_ALWAYS      
    0x10000000\n\n/**\n * Read robinhood's configuration file and set modules configuration.\n * if everything is OK, returns 0 and fills the structure\n * else, returns an error code and sets a contextual error message in\n * err_msg_out.\n */\nint rbh_cfg_load(int module_mask, char *file_path, char *msg_out);\n\n/**\n * Reload robinhood's configuration file (the one used for last call to rbh_cfg_load())\n * and change only parameters that can be modified on the fly.\n */\nint rbh_cfg_reload(int curr_module_mask);\n\n/**\n * Write a documented template of configuration file,\n * to the given file path.\n * returns 0 on success, else it returns a posix error code.\n */\nint rbh_cfg_write_template(FILE *stream);\n\n/**\n * Write all default configuration values,\n * to the given file path.\n * returns 0 on success, else it returns a posix error code.\n */\nint rbh_cfg_write_default(FILE *stream);\n\n/* ==== Tools for writing config templates ==== */\n\nvoid print_begin_block(FILE *output, unsigned int indent,\n                       const char *blockname, const char *id);\nvoid print_end_block(FILE *output, unsigned int indent);\nvoid print_line(FILE *output, unsigned int indent, const char *format, ...);\n\n#endif\n\n/** @} */\n"
  },
  {
    "path": "src/include/rbh_cfg_helpers.h",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2004-2009 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n/**\n *  \\file rbh_cfg_helpers.h\n *  \\brief Helper functions for parsing configuration values.\n *\n * \\addtogroup CONFIG_PARSING\n * @{\n */\n\n#ifndef _RBH_CFG_HLPR_H\n#define _RBH_CFG_HLPR_H\n\n#include \"rbh_cfg.h\"\n\n/* parameter flags */\ntypedef enum param_flags {\n    PFLG_MANDATORY          = (1 << 0),\n    PFLG_ABSOLUTE_PATH      = (1 << 1),\n    PFLG_REMOVE_FINAL_SLASH = (1 << 2),\n    PFLG_NO_WILDCARDS       = (1 << 3),\n    PFLG_MAIL               = (1 << 4),\n    PFLG_STDIO_ALLOWED      = (1 << 5),\n\n    /* for int and float params */\n    PFLG_POSITIVE           = (1 << 6),\n    PFLG_NOT_NULL           = (1 << 7),\n\n    /* float params only */\n    PFLG_ALLOW_PCT_SIGN     = (1 << 8),\n\n    /* extra flags for values in policy expressions */\n    PFLG_ALLOW_ANY_DEPTH    = (1 << 9),    /**< allow '**' */\n    PFLG_NO_SLASH           = (1 << 10),\n    PFLG_COMPARABLE         = (1 << 11),\n    PFLG_XATTR              = (1 << 12),\n    PFLG_STATUS             = (1 << 13),  /**< only allowed in some particular\n                                               context (policy scope) */\n\n    PFLG_NOT_EMPTY          = PFLG_NOT_NULL\n} param_flags_t;\n\n/* ==== Tools for retrieving parameters from conf and checking them ==== */\n\n/* constraint flags on parameters */\n\n/**\n *  Retrieve a string parameter and check its format\n *  @return 0 on success\n *          ENOENT if the parameter does not exist in the block\n *          EINVAL if the parameter does not satisfy restrictions\n */\nint 
GetStringParam(config_item_t block, const char *block_name,\n                   const char *var_name, param_flags_t flags, char *target,\n                   unsigned int target_size, char ***extra_args_tab,\n                   unsigned int *nb_extra_args, char *err_msg);\n/**\n *  Retrieve a boolean parameter and check its format\n *  @return 0 on success\n *          ENOENT if the parameter does not exist in the block\n *          EINVAL if the parameter does not satisfy restrictions\n */\nint GetBoolParam(config_item_t block, const char *block_name,\n                 const char *var_name, param_flags_t flags, bool *target,\n                 char ***extra_args_tab, unsigned int *nb_extra_args,\n                 char *err_msg);\n\n/**\n *  Retrieve a duration parameter and check its format\n *  @return 0 on success\n *          ENOENT if the parameter does not exist in the block\n *          EINVAL if the parameter does not satisfy restrictions\n */\nint GetDurationParam(config_item_t block, const char *block_name,\n                     const char *var_name, param_flags_t flags, time_t *target,\n                     char ***extra_args_tab,\n                     unsigned int *nb_extra_args, char *err_msg);\n/**\n *  Retrieve a size parameter and check its format\n *  @return 0 on success\n *          ENOENT if the parameter does not exist in the block\n *          EINVAL if the parameter does not satisfy restrictions\n */\nint GetSizeParam(config_item_t block, const char *block_name,\n                 const char *var_name, param_flags_t flags,\n                 unsigned long long *target, char ***extra_args_tab,\n                 unsigned int *nb_extra_args, char *err_msg);\n\n/**\n *  Retrieve an integer parameter and check its format\n *  @return 0 on success\n *          ENOENT if the parameter does not exist in the block\n *          EINVAL if the parameter does not satisfy restrictions\n */\nint GetIntParam(config_item_t block, const char *block_name,\n         
       const char *var_name, param_flags_t flags, int *target,\n                char ***extra_args_tab, unsigned int *nb_extra_args,\n                char *err_msg);\n\n/**\n *  Retrieve a 64 bits integer parameter and check its format.\n *  (a suffix can be used in config file).\n *  @return 0 on success\n *          ENOENT if the parameter does not exist in the block\n *          EINVAL if the parameter does not satisfy restrictions\n */\nint GetInt64Param(config_item_t block, const char *block_name,\n                  const char *var_name, param_flags_t flags, uint64_t *target,\n                  char ***extra_args_tab, unsigned int *nb_extra_args,\n                  char *err_msg);\n\n/**\n *  Retrieve a float parameter and check its format\n *  @return 0 on success\n *          ENOENT if the parameter does not exist in the block\n *          EINVAL if the parameter does not satisfy restrictions\n */\nint GetFloatParam(config_item_t block, const char *block_name,\n                  const char *var_name, param_flags_t flags, double *target,\n                  char ***extra_args_tab, unsigned int *nb_extra_args,\n                  char *err_msg);\n\n/**\n * Get a Config block and check it is unique.\n */\nint get_cfg_block(config_file_t config, const char *name, config_item_t *item,\n                  char *msg_out);\n\n/**\n * Get a config sub-block and check it is unique.\n */\nint get_cfg_subblock(config_item_t block, const char *name,\n                     config_item_t *subblock, char *msg_out);\n\n\n/**\n * Types and function to parse a list of simple scalar configuration variables (with no extra args).\n */\ntypedef enum {\n    PT_STRING,\n    PT_BOOL,\n    PT_DURATION,\n    PT_SIZE,\n    PT_INT,\n    PT_INT64,\n    PT_FLOAT,\n    PT_CMD,\n    PT_TYPE\n} cfg_param_type;\n\n/** generic config parsing using structure {type, name, flags, tgtptr} */\n\ntypedef struct cfg_param_t {\n    const char     *name;   /* NULL for last name */\n    cfg_param_type  
type;\n    param_flags_t   flags;\n    void           *ptr;\n    size_t          ptrsize;\n} cfg_param_t;\n#define END_OF_PARAMS {NULL, 0, 0, NULL, 0}\n\nint read_scalar_params(config_item_t block, const char *block_name,\n                       const cfg_param_t *params, char *msgout);\n\n/**\n * Build a policy boolean expression from the given block\n * \\param[in] smi  When specifying a policy scope, indicates the related\n *                 status manager ('status' criteria is policy dependent)\n */\n#include \"rbh_boolexpr.h\"\n#include \"list_mgr.h\"\nstruct sm_instance;\nint GetBoolExpr(config_item_t block, const char *block_name,\n                bool_node_t *p_bool_node, attr_mask_t *p_attr_mask,\n                char *err_msg, const struct sm_instance *smi);\n\n/**\n * Build a policy boolean expression from a union/intersection of filesets\n */\nstruct policies_t;\nint GetSetExpr(config_item_t block, const char *block_name,\n               bool_node_t *p_bool_node, attr_mask_t *p_attr_mask,\n               const struct policies_t *policies, char *err_msg);\n\n/**\n * Check that no unknown parameter or block is found.\n * @param param_array NULL terminated array of allowed parameters.\n */\nvoid CheckUnknownParameters(config_item_t block, const char *block_name,\n                            const char *const *param_array);\n\n#endif\n\n/** @} */\n"
  },
  {
    "path": "src/include/rbh_const.h",
    "content": "#ifndef _RBH_CONST_H\n#define _RBH_CONST_H\n\n#include <limits.h>\n\n#define RBH_PATH_MAX    PATH_MAX\n#define RBH_NAME_MAX    256\n#define MAX_POOL_LEN    17     /* LOV_MAXPOOLNAME + 1 */\n#define RBH_LOGIN_MAX\t128    /* user/group max name length */\n\n#define MAIL_ADDRESS_MAX 256\n\n#endif\n"
  },
  {
    "path": "src/include/rbh_logs.h",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2007, 2008, 2009 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n/**\n *  \\file rbh_logs.h\n *  \\brief Logs management.\n */\n\n#ifndef _ROBINHOOD_LOGS_H\n#define _ROBINHOOD_LOGS_H\n\n#include \"config_parsing.h\"\n#include \"rbh_const.h\"\n#include \"rbh_cfg.h\"\n#include <stdarg.h>\n#include <stdbool.h>\n#include <stdio.h>  /* for FILE */\n#include <sys/param.h>  /* for RBH_PATH_MAX */\n\ntypedef enum {\n    LVL_CRIT    =  0,\n    LVL_MAJOR   =  2,\n    LVL_EVENT   =  5,\n    LVL_VERB    =  10,\n    LVL_DEBUG   =  50,\n    LVL_FULL    = 100\n} log_level;\n\ntypedef struct log_config__ {\n    log_level   debug_level;\n    bool        force_debug_level; /**< do not take the configuration into\n                                    *   account, use a value from cmd line */\n    char        log_file[RBH_PATH_MAX];\n    bool        force_log_file; /**< do not take the configuration into\n                                 * account, use a path from cmd line */\n    char        report_file[RBH_PATH_MAX];\n\n    char        alert_mail[MAIL_ADDRESS_MAX];\n    char        alert_file[RBH_PATH_MAX];\n\n    char        changelogs_file[RBH_PATH_MAX];\n\n    int         syslog_facility;\n    int         syslog_priority;\n\n    /* batching of alerts:\n     * 0=unlimited, 1=no batching,\n     * >1 maximum number of reported alerts per summary\n     */\n    int         batch_alert_max;\n\n    time_t      stats_interval;\n\n    /* display entry attributes for each entry in alert reports */\n    bool        alert_show_attrs;\n    bool        log_process;  /* display process name in 
the log line header */\n    bool        log_host;     /* display hostname in the log line header */\n\n} log_config_t;\n\n/* Allow forcing log files etc... */\nextern log_config_t log_config;\n\n/** config parsing helpers */\nextern mod_cfg_funcs_t log_cfg_hdlr;\n\n/* Converts a debug level string to the associated\n * verbosity level.\n * Returns (log_level)-1 in case of an error.\n */\nlog_level str2debuglevel(char *str);\n\n/**\n * Force debug level.\n * Won't be overridden by configuration.\n */\nvoid force_debug_level(log_level level);\n\n/**\n * Force log file.\n * Won't be overridden by configuration.\n */\nvoid force_log_file(const char *file);\n\n/**\n * Indicates if traces of the given level are to be displayed.\n */\nint TestDisplayLevel(log_level level);\n\n/* Open log and report files,\n * Returns -1 and sets error in case of an error.\n */\nint InitializeLogs(const char *prog_name);\n\n/* flush logs */\nvoid FlushLogs(void);\n\n/**\n * Adjust log levels of external components (such as libraries) we get\n * messages from.\n */\nvoid rbh_adjust_log_level_external(void);\n\n/**\n * Display a log message.\n * This should not be called directly but used via the DisplayLog macro below.\n */\nvoid DisplayLogFn(log_level debug_level, const char *tag,\n                  const char *format, ...)\n    __attribute__ ((format(printf, 3, 4)));\n\n/**\n * Display a log message (variable arguments version).\n * This should not be called directly but used via the vDisplayLog macro below.\n */\nvoid vDisplayLogFn(log_level debug_level, const char *tag, const char *format,\n                   va_list ap);\n\n/**\n * Emit a log record if the message is of high enough importance.\n */\n#define DisplayLog(dbg_level, tag, ...) 
\\\n    do { \\\n        if (log_config.debug_level >= (dbg_level)) \\\n            DisplayLogFn((dbg_level), (tag), __VA_ARGS__); \\\n    } while (0)\n\n/**\n * va_list-version of DisplayLog.\n */\n#define vDisplayLog(dbg_level, tag, format, args) \\\n    do { \\\n        if (log_config.debug_level >= (dbg_level)) \\\n            vDisplayLogFn((dbg_level), (tag), (format), args); \\\n    } while (0)\n\n/* Abort due to a bug */\n#define RBH_BUG(_msg)   do { DisplayLog(LVL_CRIT, \\\n                                 \"BUG\", \"in %s::%s(), line %u: %s\",   \\\n                                 __FILE__, __func__, __LINE__, _msg); \\\n                             FlushLogs(); \\\n                             abort();   \\\n                        } while (0)\n\n/* Displays a line in the report file */\nvoid DisplayReport(const char *format, ...)\n    __attribute__ ((format(printf, 1, 2))); /* 1=format 2=params */\n\n#ifdef HAVE_CHANGELOGS\nvoid DisplayChangelogs(const char *format, ...)\n    __attribute__ ((format(printf, 1, 2))); /* 1=format 2=params */\n#endif\n\n/* Displays a line in the alert file / send a mail */\nvoid RaiseAlert(const char *title, const char *format, ...)\n    __attribute__ ((format(printf, 2, 3))); /* 2=format 3=params */\n\nvoid RaiseEntryAlert(const char *alert_name,    /* alert name (if set) */\n                     const char *alert_string,  /* alert description */\n                     const char *entry_path,    /* entry path */\n                     const char *entry_info);   /* alert related attributes */\n\n/* Start grouping several entry alerts in the same email */\nvoid Alert_StartBatching(void);\nvoid Alert_EndBatching(void);\n\n/* Wait for next stat deadline */\nvoid WaitStatsInterval(void);\n\n#endif\n"
  },
  {
    "path": "src/include/rbh_misc.h",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2004-2010 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n/**\n * \\file  rbh_misc.h\n * \\brief Common tools for parsing, converting, checking FS status...\n */\n#ifndef _ROBINHOOD_MISC_H\n#define _ROBINHOOD_MISC_H\n\n#include \"xplatform_print.h\"\n#include \"list_mgr.h\"\n#include <sys/stat.h>\n#ifndef __FreeBSD__\n#include <sys/vfs.h>\n#endif\n#include <dirent.h>\n#include <stdint.h>\n#include <stdbool.h>\n#include <glib.h>\n#include <semaphore.h>\n#include <unistd.h>\n#include \"rbh_logs.h\"\n\n/* displaying FID */\n#ifndef _HAVE_FID\n#undef DFID\n#undef DFID_NOBRACE\n#undef PFID\n#undef SFID\n#undef RFID\n\n#define DFID \"%\"PRIX64\"/%\"PRI_STI\n#define DFID_NOBRACE DFID\n#define PFID(_pid) (_pid)->fs_key, (_pid)->inode\n#define SFID \"0X%\"PRIX64\"/%\"PRI_STI\n#define RFID(_pid) &((_pid)->fs_key), &((_pid)->inode)\n#define FID_SCAN_CNT 2\n#define RBH_FID_LEN 64\n\n#else\n#define FID_SCAN_CNT 3\n#endif\n\n/**\n * Common info\n */\n#define DEFAULT_CFG_VAR \"RBH_CFG_DEFAULT\"\n\n/**\n *  Miscellaneous parsing macros\n */\n#define EMPTY_STRING(s)       ((s)[0] == '\\0')\n#define FINAL_SLASH(s)        ((strlen(s) > 1) && (s[strlen(s)-1] == '/'))\n#define SLASH_IN(s)           (strchr(s, '/') != NULL)\n#define REMOVE_FINAL_SLASH(s) ((s)[strlen(s)-1] = '\\0')\n#define IS_ABSOLUTE_PATH(s)   (((s)[0]) && ((s)[0] == '/'))\n\n#define ANY_LEVEL_MATCH(_s_)  (strstr(_s_, \"**\") != NULL)\n\n#define WILDCARDS_IN(s) (strchr(s, '*') || strchr(s, '?') || strchr(s, '[') \\\n                         || strchr(s, ']') || strchr(s, '{') || strchr(s, '}'))\n#define 
STAR_SLASH_BEGIN(s) (((s)[0] == '*') && ((s)[1] == '/'))\n\n#define GSTRING_SAFE(_g) (((_g) == NULL || ((_g)->str == NULL)) ? \"\" : \\\n                            (_g)->str)\n#define GSTRING_EMPTY(_g) (EMPTY_STRING(GSTRING_SAFE(_g)))\n\n#define bool2str(_b_)   ((_b_) ? \"yes\" : \"no\")\n\n#define rh_strncpy(_s1, _s2, _sz) do { \\\n        if (_sz > 0) {                 \\\n            strncpy(_s1, _s2, _sz);    \\\n            (_s1)[_sz-1] = '\\0';       \\\n        }                              \\\n    } while (0)\n\n/**\n *  Other useful definitions\n */\n#define MIN2(_a_, _b_) ((_a_) < (_b_) ? (_a_) : (_b_))\n#define MIN3(_a_, _b_, _c_) (MIN2(MIN2((_a_) , (_b_)) , (_c_)))\n\n#define MAX2(_a_, _b_) ((_a_) > (_b_) ? (_a_) : (_b_))\n#define MAX3(_a_, _b_, _c_) (MAX2(MAX2((_a_) , (_b_)) , (_c_)))\n\n#ifndef P\n#define P(_mutex_) pthread_mutex_lock(&(_mutex_))\n#endif\n#ifndef V\n#define V(_mutex_) pthread_mutex_unlock(&(_mutex_))\n#endif\n\n#ifdef __GNUC__\n#define likely(x)       __builtin_expect(!!(x), 1)\n#define unlikely(x)     __builtin_expect(!!(x), 0)\n\n/**\n * GCC hint for unreachable code\n * See: https://gcc.gnu.org/onlinedocs/gcc/Other-Builtins.html\n */\n#define UNREACHED       __builtin_unreachable\n\n#else\n#define likely(x)       (x)\n#define unlikely(x)     (x)\n#endif\n\n#ifndef MEMBER_SIZE\n#define MEMBER_SIZE(_type, _member) sizeof(((_type *)0)->_member)\n#endif\n\n#ifndef ARRAY_SIZE\n#define ARRAY_SIZE(_array) (sizeof(_array) / sizeof((_array)[0]))\n#endif\n\n/**\n * Send a mail\n */\nint SendMail(const char *recipient, const char *subject, const char *message);\n\n/**\n * Search for Robinhood config file\n */\nint SearchConfig(const char *cfg_in, char *cfg_out, bool *changed,\n                 char *unmatched, size_t max_len);\n\n/**\n * This function is blocking as long as the lock file is present.\n * Optionaly updates an action timestamp, at each test.\n */\nvoid TestLockFile(time_t *p_last_action);\n\n/**\n * Convert a POSIX attribute 
structure (returned by lstat)\n * to a robinhood attribute set.\n * @param size_info indicates if size info is set in the stat structure.\n */\nvoid stat2rbh_attrs(const struct stat *p_inode, attr_set_t *p_attr_set,\n                    bool size_info);\n\n/**\n * Convert a robinhood attribute set to a posix struct stat.\n */\nvoid rbh_attrs2stat(const attr_set_t *p_attr_set, struct stat *p_inode);\n\n/* convert file mode to DB type string */\nconst char *mode2type(mode_t mode);\n\n/**\n * Retrieve the name associated to a user (or the text representation of its\n * uid if the user doesn't exist)\n */\nchar *uid2str(uid_t uid, char *username);\n\n/**\n * Retrieve the name associated to a user (or the text representation of its\n * uid if the user doesn't exist).\n */\nchar *gid2str(gid_t gid, char *groupname);\n\n/**\n * Check mount point and FS type.\n * Also return the associated device number.\n * (for STAY_IN_FS security option).\n */\nint check_fs_info(const char *path, const char *expected_type,\n                  dev_t *p_fs_dev, char *fsname_out,\n                  bool check_mounted, bool save_fs);\n\n/**\n * Initialize filesystem access and retrieve current devid/fs_key\n * - global_config must be set\n * - initialize mount_point, fsname and dev_id\n */\nint InitFS(void);\n\n/**\n * This is to be called after a dev_id change was detected\n * return 0 if fskey is unchanged and update mount_point, fsname and dev_id\n * else, return -1\n */\nint ResetFS(void);\n\n/**\n *  Check that FS path is the same as the last time.\n */\nint CheckLastFS(void);\n\n/* retrieve FS info */\nconst char *get_mount_point(unsigned int *plen);\n#ifdef _HAVE_FID\nconst char *get_fid_dir(void);\nconst char *get_dot_lustre_dir(void);\nconst entry_id_t *get_dot_lustre_fid(void);\nconst entry_id_t *get_fid_fid(void);\n#endif\n\nconst char *get_fsname(void);\ndev_t get_fsdev(void);\nuint64_t get_fskey(void);\nconst entry_id_t *get_root_id(void);\n\n/**\n * extract relative path from 
full path.\n */\nint relative_path(const char *fullpath, const char *root, char *rel_path);\n\n/**\n * create parent directory, and return its id (even if it already exists).\n */\nint create_parent_of(const char *child_path, entry_id_t *p_parent_id);\n\n/**\n * create an object with the given attributes.\n */\nint create_from_attrs(const attr_set_t *attrs_in,\n                      attr_set_t *attrs_out,\n                      entry_id_t *new_id, bool overwrite, bool setstripe);\n\nenum path_check_return {\n    PCR_NO_CHANGE = 0, /**< no attribute updated */\n    PCR_UPDATED,    /**< at least an attribute is updated. */\n    PCR_ORPHAN,\n};\n/**\n * Update parent id, name and/or full path, according to attr_mask.\n * @return one of the values defined in enum path_check_return.\n */\nenum path_check_return path_check_update(const entry_id_t *p_id,\n                                         const char *fid_path,\n                                         attr_set_t *p_attrs,\n                                         attr_mask_t attr_mask);\n\n#ifdef _LUSTRE\n\n/** initialize access to lustre */\nint Lustre_Init(void);\n\n/** Retrieve stripe info for a file */\nint File_GetStripeByPath(const char *entry_path, stripe_info_t *p_stripe_info,\n                         stripe_items_t *p_stripe_items);\n\nint File_GetStripeByDirFd(int dirfd, const char *fname,\n                          stripe_info_t *p_stripe_info,\n                          stripe_items_t *p_stripe_items);\n/**\n * check if a file has data on the given OST.\n */\nbool DataOnOST(size_t fsize, unsigned int ost_index,\n               const stripe_info_t *sinfo, const stripe_items_t *sitems);\n\n/**\n * compute the number of blocks of a file on a given OST.\n */\nblkcnt_t BlocksOnOST(blkcnt_t blocks, unsigned int ost_index,\n                     const stripe_info_t *sinfo,\n                     const stripe_items_t *sitems);\n\n#ifdef HAVE_LLAPI_GETPOOL_INFO\n/** Create a file with the given stripe information 
*/\nint CreateStriped(const char *path, const stripe_info_t *old_stripe,\n                  int overwrite);\nint CreateWithoutStripe(const char *path, mode_t mode, int overwrite);\n#endif\n\n#ifdef _HAVE_FID\nint BuildFidPath(const entry_id_t *p_id /* IN */ , char *path /* OUT */);\nint Lustre_GetFullPath(const entry_id_t *p_id, char *fullpath,\n                       unsigned int len);\nint Lustre_GetFidFromPath(const char *fullpath, entry_id_t *p_id);\nint Lustre_GetFidByFd(int fd, entry_id_t *p_id);\nint Lustre_GetNameParent(const char *path, int linkno,\n                         lustre_fid *pfid, char *name, int namelen);\n\n#define FID_IS_ZERO(_pf) (((_pf)->f_seq == 0) && ((_pf)->f_oid == 0))\n\n#endif\n\n#ifdef HAVE_CHANGELOGS\n/* if the FS has changelogs, define function for converting changelog time */\nstatic inline time_t cltime2sec(uint64_t cltime)\n{\n    /* extract secs from time field */\n    return cltime >> 30;\n}\n\nstatic inline unsigned int cltime2nsec(uint64_t cltime)\n{\n    /* extract nanosecs: */\n    return cltime & ((1 << 30) - 1);\n}\n#endif\n\n/** Retrieve OST usage info ('ost df') */\nint Get_OST_usage(const char *fs_path, unsigned int ost_index,\n                  struct statfs *ost_statfs);\n\n#ifdef HAVE_LLAPI_GETPOOL_INFO\n/** Retrieve pool usage info */\nint Get_pool_usage(const char *poolname, struct statfs *pool_statfs);\n#endif\n\n/**\n * Retrieve file information from MDS.\n * @return 0 on success, -errno on error.\n */\nint lustre_mds_stat(const char *fullpath, int parentfd, struct stat *inode);\n#ifdef _HAVE_FID\nint lustre_mds_stat_by_fid(const entry_id_t *p_id, struct stat *inode);\n#endif\n\n#ifndef _MDT_SPECIFIC_LOVEA\n/**\n * build LOVEA buffer from stripe information\n * @return size of significant information in buffer.\n */\nssize_t BuildLovEA(const entry_id_t *p_id, const attr_set_t *p_attrs,\n                   void *buff, size_t buf_sz);\n#endif\n\n#endif /* lustre */\n\n/**\n * Shoot a thread.\n */\nint 
TerminateThread(pthread_t thread_id);\n\n/**\n * Clean termination of the daemon + display message in log\n */\nvoid Exit(int error_code);\n\n/**\n * Format functions\n */\nchar *FormatFileSize(char *buff, size_t str_sz, uint64_t file_size);\nchar *FormatDuration(char *buff, size_t str_sz, time_t duration);\nchar *FormatDurationFloat(char *buff, size_t str_sz, time_t duration);\n\n#ifdef _LUSTRE\n/**\n * Append a list of stripes to a GString.\n * @param[in,out] str  Allocated GString.\n */\nvoid append_stripe_list(GString *str, const stripe_items_t *p_stripe_items,\n                        bool brief);\n\n/**\n * Lustre Project ID\n * \\return positive project id on success, -errno on error\n */\nint lustre_project_get_id(const char *pathname);\n\n#endif\n\n/*\n * Parsing functions\n */\n\n/**\n * Convert a string to a boolean\n * @return -1 on error.\n */\nint str2bool(const char *str);\n\n/**\n * Convert a string to an integer\n * @return -1 on error.\n */\nstatic inline int str2int(const char *str)\n{\n    char suffix[256];\n    int nb_read, value;\n\n    if (str == NULL)\n        return -1;\n\n    nb_read = sscanf(str, \"%d%s\", &value, suffix);\n\n    if (nb_read <= 0)\n        return -1;  /* invalid format */\n\n    if ((nb_read == 1) || (suffix[0] == '\\0'))\n        return value;   /* no suffix => 0K */\n    else\n        return -1;\n}\n\n/**\n * Convert a string to a long integer\n * @return -1 on error.\n */\nlong long str2bigint(const char *str);\n\n/**\n * Convert a string to a duration in seconds\n * @return -1 on error.\n */\nint str2duration(const char *str);\n\n/**\n * Convert a string to a size (in bytes)\n * @return -1 on error.\n */\nuint64_t str2size(const char *str);\n\n/** parse date/time yyyymmdd[HH[MM[SS]]] */\ntime_t str2date(const char *str);\n\n/** convert mode to rwxrwxrwx string */\nconst char *mode_string(mode_t mode, char *buf);\n\n/**\n *  Print attributes to a GString.\n *  @param[in,out] str           Allocated GString (contents is 
overwritten).\n *  @param         overide_mask  If != 0, override attrmask with this one.\n *  @param         brief         Brief notation for diff output.\n */\nvoid print_attrs(GString *str, const attr_set_t *p_attr_set,\n                 attr_mask_t overide_mask, bool brief);\n\n/**\n *  Apply attribute changes\n *  \\param change_mask mask of attributes to be changed\n */\nint ApplyAttrs(const entry_id_t *p_id,\n               const attr_set_t *p_attr_new, const attr_set_t *p_attr_old,\n               attr_mask_t change_mask, bool dry_run);\n\n/** Compute greatest common divisor (GCD) of 2 numbers */\nunsigned int gcd(unsigned int x, unsigned int y);\n\n/** Ensure that the thread is suspended for a given amount\n * of time, event if the process gets interrupts.\n */\nvoid rh_sleep(unsigned int seconds);\n\n/* signal safe semaphore ops with error logging */\n/* man (3) sem_wait/sem_post: on error, the value of the semaphore is left\n * unchanged */\nstatic inline void sem_wait_safe(sem_t *sem)\n{\n    while (sem_wait(sem)) {\n        if (errno != EINTR && errno != EAGAIN)\n            DisplayLog(LVL_CRIT, \"sem\", \"ERROR: sem_wait operation failed: %s\", \\\n                       strerror(errno));\n    }\n}\n\nstatic inline void sem_post_safe(sem_t *sem)\n{\n    while (sem_post(sem)) {\n            if (errno != EINTR && errno != EAGAIN)\n                DisplayLog(LVL_CRIT, \"sem\",\n                           \"ERROR: sem_post operation failed: %s\",\n                           strerror(errno));\n    }\n}\n\n/**\n * Interuptible sleep.\n * returns when _v != 0.\n */\n#define rh_intr_sleep(_s, _v) do { \\\n        unsigned int _i; for (_i = 0; (_i < _s) && !(_v); _i++) rh_sleep(1); \\\n    } while (0)\n\n#define rh_usleep(_usec) usleep(_usec)\n\n/** replace a pattern in a string with another sub-string\n * \\param str_in_out must be large enough to receive\n *  the resulting string, and cannot exceed 1024.\n */\nint str_subst(char *str_in_out, const char 
*to_be_replaced,\n              const char *replacement);\n\nstatic inline void subst_char(char *str, char c1, char c2)\n{\n    char *curr;\n    for (curr = str; *curr != '\\0'; curr++)\n        if (*curr == c1)\n            *curr = c2;\n}\n\n/** escape every special character in a regex\n *\n * \\param dest      the string to copy the escaped regex to\n * \\param dest_size the size of dest (including the terminating char)\n * \\param src       the null terminated string representing the regex to\n *                  escape\n * \\param charset   a string that contains every char to escape\n *\n * \\return          0 on success, -error_code on error\n */\nint str_escape_charset(char *dest, size_t dest_size, const char *src,\n                       char *charset);\n\n/**\n * Callback function to parse command output.\n * The function can freely modify line contents\n * without impacting program working.\n *\n * \\param[in,out] cb_arg    argument passed to command_call\n * \\param[in]     line      the line to be parsed\n * \\param[in]     size      size of the line buffer\n * \\param[in]     stream    fileno of the stream the line comes from\n */\ntypedef int (*parse_cb_t) (void *cb_arg, char *line, size_t size, int stream);\n\n/**\n * Callback function for execute_shell_command() that redirects stderr to\n * to robinhood log.\n * @param arg[in] arg   Desired log level, cast to (void *).\n */\nint cb_stderr_to_log(void *arg, char *line, size_t size, int stream);\n\n/**\n * Execute a shell command and call cb_func for each output line\n * (ignore output if cb_func is null).\n */\nint execute_shell_command(char **cmd, parse_cb_t cb_func, void *cb_arg);\n\n/**\n * Quote an argument for shell commande line.\n * The caller must free the returned string. 
*/\nchar *quote_shell_arg(const char *arg);\n\n/**\n * Get the mask for placeholders in the given string.\n * @param[in] str string to be parsed.\n * @param[in] str_descr string context description to be displayed in\n *                      error messages (e.g. \"cfg_block::foo_param line 42\").\n * @param[our] err this boolean is set to true if an syntax in encountered.\n */\nattr_mask_t params_mask(const char *str, const char *str_descr, bool *err);\n\nstruct sm_instance;\n/**\n * Replace special parameters {cfg}, {fspath}, ... in the given string.\n * Result string is allocated by the function and must be released using\n * g_free().\n * @param[in] str_in    Input string with {} placeholders.\n * @param[in] str_descr String description (for logging).\n * @param[in] p_id      Pointer to entry id (if the command is executed on an\n *                      entry).\n * @param[in] p_attrs   Pointer to entry attrs (if the command is executed on an\n *                      entry).\n * @param[in] params    List of action parameters.\n * @param[in] subst_array   char** of param1, value1,\n *                      param2, value2, ..., NULL, NULL.\n * @param[in] smi       When applying a policy, pointer to the current status\n *                      manager instance.\n * @param[in] quote     If true, escape and quote the replaced values as shell\n *                      arguments.\n * @param[in] strict_braces If true, only allow braces for variable names like\n *                      {var}.\n */\nchar *subst_params(const char *str_in,\n                   const char *str_descr,\n                   const entry_id_t *p_id,\n                   const attr_set_t *p_attrs,\n                   const action_params_t *params,\n                   const char **subst_array,\n                   const struct sm_instance *smi,\n                   bool quote, bool strict_braces);\n\n/**\n * Replace special parameters {cfg}, {fspath}, ... 
in the given string.\n * Result is formated as argc/argv for shell by the function in cmd_out,\n * cmd_out.av must be released using g_strfreev().\n * returns 0 on success, -errno on error.\n * @param[in] cmd_in    Input command, av contains strings with {} placeholders.\n * @param[in] str_descr String description (for logging).\n * @param[in] p_id      Pointer to entry id (if the command is executed on an\n *                      entry).\n * @param[in] p_attrs   Pointer to entry attrs (if the command is executed on an\n *                      entry).\n * @param[in] params    List of action parameters.\n * @param[in] subst_array char** of param1, value1, param2, value2, ...,\n *                      NULL, NULL.\n * @param[in] smi       When applying a policy, pointer to the current status\n *                      manager instance.\n * @param[in] strict_braces If true, only allow braces for variable names like\n *                      {var}.\n * @param[out] cmd_out  parsed argc/argv after subst\n */\nint subst_shell_params(char **cmd_in,\n                       const char *str_descr,\n                       const entry_id_t *p_id,\n                       const attr_set_t *p_attrs,\n                       const action_params_t *params,\n                       const char **subst_array,\n                       const struct sm_instance *smi,\n                       bool strict_braces, char ***cmd_out);\n\n/**\n * concatenate a string array into a string\n * The returned string must be freed with free().\n */\nchar *concat_cmd(char **argv);\n\n/** compare commands */\nint compare_cmd(char **c1, char **c2);\n\n/** convert to upper case */\nvoid upperstr(char *str);\n/** convert to lower case */\nvoid lowerstr(char *str);\n\n/** recursively create a directoy and return its id */\nint mkdir_recurse(const char *full_path, mode_t mode, entry_id_t *dir_id);\n\n/**\n * Get id for the given path.\n * @param[in] st stat struct if available.\n */\nint path2id(const char *path, 
entry_id_t *id, const struct stat *st);\n\nint set_uid_val(const char *username, db_type_u *val);\nint set_gid_val(const char *groupname, db_type_u *val);\nconst char *id_as_str(db_type_u *val);\n\n#endif\n"
  },
  {
    "path": "src/include/rbh_modules.h",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2015-2016 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n/**\n * \\file   modules.h\n * \\author Henri Doreau\n * \\brief  Dynamic modules management\n */\n\n#ifndef _RBH_MODULES_H\n#define _RBH_MODULES_H\n\n#include \"policy_rules.h\"\n#include \"policy_run.h\"\n#include \"status_manager.h\"\n\n\n/**\n * Operations exposed by robinhood dynamic modules. These should be invoked\n * using the static inline wrappers defined below.\n *\n * 'mod_get_name' and 'mod_get_version' are mandatory.\n * Other operations are optional.\n */\nstruct rbh_module_operations {\n    const char         *(*mod_get_name)(void);\n    int                 (*mod_get_version)(void);\n    status_manager_t   *(*mod_get_status_manager)(void);\n    action_func_t       (*mod_get_action)(const char *);\n    action_scheduler_t *(*mod_get_scheduler)(const char *);\n};\n\n/** current version of modules */\n#define RBH_MODULE_VERSION  1\n\ntypedef struct rbh_module {\n    const char                      *name;      /**< Module name */\n    int                              version;   /**< Module version */\n    void                            *sym_hdl;   /**< Private dlsym handle */\n    struct rbh_module_operations     mod_ops;   /**< Module operation vector */\n} rbh_module_t;\n\n\n/**\n * Get the status manager associated to a robinhood dynamic module. This\n * function will dlopen() the appropriate module if need be. 
The library handle\n * will then remain cached until module_unload_all() is called.\n *\n * \\param[in] name Module name from which to acquire the SM.\n *\n * \\return The status manager or NULL on error\n */\nstatus_manager_t *module_get_status_manager(const char *name);\n\n/**\n * Get an action function from a robinhood dynamic module. Actions function\n * names are of the form <module_name>.<action>. We expect the whole string\n * here. This function will dlopen() the appropriate module if need be. The\n * library handle will then remain cached until module_unload_all() is called.\n *\n * \\param[in] name  The function name, <module_name>.<action>\n *\n * \\return A pointer to the desired function or NULL if no suitable action\n *         of this name was found.\n */\naction_func_t module_get_action(const char *name);\n\n/**\n * Get an action scheduler from a robinhood dynamic module.\n * Scheduler are names of the form <module_name>.<sched_name>.\n * We expect the whole string here.\n * This function will dlopen() the appropriate module if needed.\n * The library handle will then remain cached until module_unload_all() is\n * called.\n *\n * \\param[in] name  The scheduler name: <module_name>.<sched_name>.\n *\n * \\return A pointer to the desired scheduler or NULL if no item matches this\n *         name.\n */\naction_scheduler_t *module_get_scheduler(const char *name);\n\n/**\n * Release resources associated to robinhood dynamic modules.\n *\n * \\return 0 on success, appropriate negative error code on failure\n */\nint module_unload_all(void);\n\n#endif\n"
  },
  {
    "path": "src/include/rbh_params.h",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2015 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n/**\n * @file   rbh_params.h\n * @author Thomas Leibovici\n * @author Henri Doreau\n * @brief  Handling a generic list of key/values.\n */\n\n#ifndef _RBH_PARAMS_H\n#define _RBH_PARAMS_H\n\n#include <glib.h>\n#include <stdbool.h>\n\nstruct rbh_params {\n    GHashTable *param_set;\n};\n\ntypedef int (*rbh_params_iter_t)(const char *key, const char *val, void *udata);\n\n/**\n * Create or update a key-value item in the param set\n * @param override (bool) allow overriding a previous value.\n */\nint rbh_param_set(struct rbh_params *params, const char *key,\n                  const char *value, bool override);\n\n/** get a key-value item by key name */\nconst char *rbh_param_get(const struct rbh_params *params, const char *key);\n\n/** empty the parameter list and release memory */\nvoid rbh_params_free(struct rbh_params *params);\n\n/** behavior flags for rbh_params_serialize() */\ntypedef enum {\n    RBH_PARAM_CSV       = (1 << 0), /**< dump attrs to CSV format */\n    RBH_PARAM_COMPACT   = (1 << 1)  /**< generate a compact output\n                                         (e.g. 
eliminates superfluous spaces) */\n} rbh_param_flags_e;\n\n/**\n * Convert a list of variables to a parameter set that can be used\n * for instance as exclude_set parameter of rbh_params_serialize().\n * This allows more efficient searches in this set.\n * @param[out]  params Output parameter set.\n * @param[in]   list   List of parameter names (list terminated by NULL).\n * @param[in]   key_values List items are {key1, value1, key2, value2, ...}\n */\nint rbh_list2params(struct rbh_params *params, const char **list,\n                    bool key_values);\n\n/**\n * Serialize a parameter list to a string.\n *\n * @param[in]     params The parameter list to serialize.\n * @param[in/out] str    Allocated GString to write the parameters to.\n * @param[in]     exclude_set Set of parameters to exclude from output\n * @param[in]     flags  Behavior flags for the serialization.\n *\n * @return 0 on success, non-zero value on failure.\n */\nint rbh_params_serialize(const struct rbh_params *params, GString *str,\n                         const struct rbh_params *exclude_set,\n                         rbh_param_flags_e flags);\n\n/**\n * Invoke a callback on all items of the parameter set.\n * Iteration stops if the callback returns a non-zero value,\n * which is then propagated back to the caller.\n *\n * @param[in]       params  The parameter set to iterate over.\n * @param[in]       cb      The processing callback.\n * @param[in,out]   udata   User data to be passed in to the callback.\n *\n * @return 0 on success or first non-zero value returned by the callback.\n */\nint rbh_params_foreach(const struct rbh_params *params, rbh_params_iter_t cb,\n                       void *udata);\n\n\n/**\n * copy source parameters to a target parameter set.\n */\nint rbh_params_copy(struct rbh_params *tgt, const struct rbh_params *src);\n\n#endif\n"
  },
  {
    "path": "src/include/rbh_types.h",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2008-2014 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n#ifndef _RBH_TYPE_H\n#define _RBH_TYPE_H\n\n#include <stdint.h>\n\n/** shorter alias for \"unsigned long long\" */\ntypedef unsigned long long ull_t;\n\n#ifdef _LUSTRE\n/* stripe info for Lustre */\n\ntypedef struct stripe_item_t {\n    unsigned int ost_idx; /* ost index */\n    unsigned int ost_gen; /* always 0 ? */\n    uint64_t obj_id;      /* object index on OST */\n    uint64_t obj_seq;     /* sequence from object fid */\n} stripe_item_t;\n\ntypedef struct stripe_info_t {\n    uint64_t       stripe_size;\n    unsigned int   stripe_count;\n    char           pool_name[MAX_POOL_LEN];\n#ifdef HAVE_LLAPI_FSWAP_LAYOUTS\n    int            validator;\n#endif\n} stripe_info_t;\n\ntypedef struct stripe_items_t {\n    unsigned int   count;\n    stripe_item_t *stripe;   /* list of stripe pieces */\n} stripe_items_t;\n#else\ntypedef int stripe_items_t; /* dummy type */\ntypedef int stripe_info_t; /* dummy type */\n#endif\n\n/** type of fields in database */\ntypedef enum {\n    DB_ID = 0,       /**< entry id */\n    DB_STRIPE_INFO,  /**< stripe info */\n    DB_STRIPE_ITEMS,  /**< stripe items */\n    DB_TEXT,    /**< string/text        */\n    DB_INT,     /**< signed integer     */\n    DB_UINT,    /**< unsigned integer   */\n    DB_SHORT,   /**< short integer     */\n    DB_USHORT,  /**< short unsigned integer   */\n    DB_BIGINT,  /**< 64 bits integer    */\n    DB_BIGUINT, /**< 64 bits unsigned integer */\n    DB_BOOL,    /**< boolean            */\n    DB_ENUM_FTYPE, /**< file type enumeration */\n    
DB_UIDGID, /**< type depending on uid/gid format (configuration driven) */\n} db_type_e;\n\n/** generic function for generating fields:\n * 1st parameter points to the field to be generated.\n * 2nd parameter is the source field.\n */\ntypedef int (*gen_func_t)(void *, const void *);\n\n\n/** generic field definition for all applications */\ntypedef struct field_info_t {\n    char          *field_name;\n    db_type_e      db_type;\n    unsigned int   db_type_size; /**< size for strings */\n    int            flags;\n    off_t          offset;\n    int            gen_index;   /* source attr index for generating this info */\n    gen_func_t     gen_func;    /* function for automatic generation */\n} field_info_t;\n\n/* access pattern for fields in database */\n#define INIT_ONLY    0x00000001 /* set at insert only: stored in an annex table (can't be modified) */\n#define ANNEX_INFO   0x00000002 /* annex information, rarely accessed: stored in an annex table */\n#define FREQ_ACCESS  0x00000004 /* frequently updated, or used as select filter: stored in the main table */\n#define REMOVED      0x00000008 /* this attribute only applies to removed entries */\n#define SEPD_LIST    0x00000010 /* list with separators (text) */\n#define DNAMES       0x01000000 /* field in DNAMES table. 
*/\n#define FUNC_ATTR    0x02000000 /* special attr built using a DB function */\n#define GENERATED    0x10000000 /* field not stored in database: generated in SELECT requests (read-only) */\n#define INDEXED      0x20000000 /* this field must be indexed */\n#define DIR_ATTR     0x40000000 /* need to aggregate directory info (specific DB request) */\n#define SLINK_ATTR   0x80000000 /* specific attr for symlinks */\n\n/** type of operation to be performed on database */\ntypedef enum operation_type_e {\n    OP_TYPE_NONE = 0,\n    OP_TYPE_INSERT,\n    OP_TYPE_UPDATE,\n    OP_TYPE_REMOVE_ONE,         /* remove name only; inode still exists */\n    OP_TYPE_REMOVE_LAST,        /* remove last name to inode and inode */\n    OP_TYPE_SOFT_REMOVE\n} operation_type_e;\n\n#endif\n"
  },
  {
    "path": "src/include/status_manager.h",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2008, 2009, 2010 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n/**\n * \\file  status_manager.h\n * \\brief status manager definitions\n */\n\n#ifndef _STATUS_MGR_H\n#define _STATUS_MGR_H\n\n#include \"list_mgr.h\"\n#include \"policy_rules.h\"\n#include \"rbh_cfg_helpers.h\"\n\nstruct sm_instance;\n\n/** function prototype to get the status of an entry. */\ntypedef int (*sm_status_func_t)(struct sm_instance *smi,\n                                const entry_id_t *id,\n                                const attr_set_t *attrs,\n                                attr_set_t *refreshed_attrs);\n\n/**\n * Changelog callback can indicate an  action for the record or the related\n * entry. 
Actions are ordered by priority (if a policy returns a higher value\n * than others, this corresponding action is undertaken).\n */\ntypedef enum {\n    PROC_ACT_NONE = 0,         /* no specific action */\n    PROC_ACT_RM_ALL,           /* remove entry from DB */\n    PROC_ACT_SOFTRM_IF_EXISTS, /* soft remove the entry if it was in DB */\n    PROC_ACT_SOFTRM_ALWAYS,    /* insert into SOFTRM even if it was not in DB */\n} proc_action_e;\n\n#ifdef HAVE_CHANGELOGS\n/** function prototype for changelog callback */\ntypedef int (*sm_cl_cb_func_t)(struct sm_instance *smi,\n                               const CL_REC_TYPE * logrec,\n                               const entry_id_t *id, const attr_set_t *attrs,\n                               attr_set_t *refreshed_attrs, bool *getit,\n                               proc_action_e *rec_action);\n#endif\n\n/** function prototype for status manager \"executor\" */\ntypedef int (*sm_executor_func_t)(struct sm_instance *smi,\n                                  const char *implements,\n                                  const policy_action_t *action,\n                                  /* arguments for the action : */\n                                  const entry_id_t *id, attr_set_t *attrs,\n                                  const action_params_t *params,\n                                  post_action_e *what_after,\n                                  db_cb_func_t db_cb_fn, void *db_cb_arg);\n\n/** function prototype for action callbacks\n * @param[in,out] smi        Status manager instance\n * @param[in]     implements Action type name\n * @param[in]     id         The impacted entry id\n * @param[in,out] attrs      Entry attributes\n * @param[in,out] what_after What to do with the entry (already set by the\n *                           action, but can be overriden in this action\n *                           callback).\n */\ntypedef int (*sm_action_cb_func_t)(struct sm_instance *smi,\n                                   const char 
*implements, int action_status,\n                                   const entry_id_t *id, attr_set_t *attrs,\n                                   post_action_e *what_after);\n\n/** When an entry is deleted, this function indicates what action is to be taken\n * regarding the given status manager (remove from DB, move to softrm, ...)\n */\ntypedef proc_action_e (*softrm_filter_func_t)(struct sm_instance *smi,\n                                              const entry_id_t *id,\n                                              const attr_set_t *attrs);\n\n/** Function to undelete an entry.\n *  If multiple status managers can undelete an entry,\n *  the first one, or the best one (option driven) creates it,\n *  and the next status managers are called with already_recovered = true.\n * @param[in] p_old_id old entry id\n * @param[in] p_old_attrs old entry attributes (from SOFTRM table)\n * @param [in,out] p_new_id new entry id (already set if\n *                          already_recovered==true)\n * @param [in,out] p_new_attrs new entry attributes (already set if\n *                             already_recovered==true)\n * @param [out] p_new_attrs new entry attributes\n */\ntypedef recov_status_t (*undelete_func_t)(struct sm_instance *smi,\n                                          const entry_id_t *p_old_id,\n                                          const attr_set_t *p_old_attrs,\n                                          entry_id_t *p_new_id,\n                                          attr_set_t *p_new_attrs,\n                                          bool already_recovered);\n\ntypedef int (*init_func_t)(struct sm_instance *smi, run_flags_t flags);\n\n#define SM_NAME_MAX 128\n\ntypedef enum {\n    SM_SHARED       = (1 << 0), /**< indicate the status manager can be shared\n                                     between policies */\n    SM_DELETED      = (1 << 1), /**< this status manager can manage deleted\n                                     entries */\n    
SM_MULTI_ACTION = (1 << 2)  /**< this status manager handles multiple types\n                                     of actions */\n} sm_flags;\n\n/** descriptor of SM specific info */\ntypedef struct sm_info_def {\n    const char     *user_name;  /**< full name for user interface (config,\n                                     display...) */\n    const char     *db_name;    /**< short name for db storage */\n    db_type_e       db_type;\n    unsigned int    db_type_size; /**< size for strings */\n    db_type_u       db_default;   /**< default value */\n    cfg_param_type  crit_type;    /**< type for config criteria */\n} sm_info_def_t;\n\n/** Status manager definition */\ntypedef struct status_manager {\n    const char          *name;\n    sm_flags             flags;\n\n    /** possible values for status */\n    const char         **status_enum;\n    unsigned int         status_count;\n\n    /** number of policy specific information */\n    unsigned int         nb_info;\n    /** type and size of policy specific information */\n    const sm_info_def_t *info_types;\n\n    /** masks of needed attributes (cached or fresh) to get the status of an entry */\n    attr_mask_t          status_needs_attrs_cached;\n    attr_mask_t          status_needs_attrs_fresh;\n\n    /** retrieve the status of an entry */\n    sm_status_func_t     get_status_func;\n\n#ifdef HAVE_CHANGELOGS\n    /** callback for changelogs */\n    sm_cl_cb_func_t      changelog_cb;\n#endif\n\n    /** for multi-action status managers, check the status manager knows\n     * the given action name */\n      bool (*check_action_name)(const char *);\n\n    /** callback for policy actions (action_name) */\n    sm_action_cb_func_t  action_cb;\n\n    /** If provided, the status manager wraps the action run */\n    sm_executor_func_t   executor;\n\n    /* ---- mask and function to manage deleted entries ---- */\n\n    /** needed attributes to determine if the entry is to be moved to softrm */\n    attr_mask_t          
softrm_filter_mask;\n    /** determine if a deleted entry must be inserted to softrm table */\n    softrm_filter_func_t softrm_filter_func;\n\n    /** mask of attributes to be saved in SOFTRM table (needed to re-create the\n     * inode, schedule the 'remove' policy and recover/rebind the entry). */\n    attr_mask_t          softrm_table_mask;\n\n    /** undelete an entry */\n    undelete_func_t      undelete_func;  /**< NULL if the status manager can't\n                                              run 'undelete' */\n\n    /* XXX about full disaster recovery: must recreate all metadata\n     * (incl. symlinks => need link field)\n     * not only the entries managed by the policy. */\n\n    /* ---- setup functions ---- */\n\n    /** functions to load Status Manager configuration */\n    const mod_cfg_funcs_t *cfg_funcs;\n\n    /** Initialize status manager resources */\n    init_func_t          init_func;\n\n} status_manager_t;\n\n/** Status manager instance.\n * There can be one instance of a status manager\n * by policy, in the case status manager is not shared.\n */\ntypedef struct sm_instance {\n    /** status manager instance name:\n     * If the status manager is shared between policies,\n     * it just consists of the status manager name.\n     * Else, it is named as the policy.\n     * The corresponding DB field name is:\n     *    <policy_name(truncated)> + \"_status\".\n     */\n    char            *instance_name;\n    /** name of the related field in DB, using for storing status. */\n    char            *db_field;\n    /** name for user interface (config, reports...) */\n    char            *user_name;\n    /** pointer to the status manager definition */\n    const status_manager_t *sm;\n    /** instance index: useful for status attribute index. 
*/\n    unsigned int     smi_index;\n\n    /** offset of specific info in attr_set_t.sm_info array */\n    unsigned int     sm_info_offset;\n\n    /** translated masks to get status */\n    attr_mask_t      status_mask_fresh;\n    attr_mask_t      status_mask_cached;\n\n    /** translated mask for softrm filter */\n    attr_mask_t      softrm_filter_mask;\n\n    /** translated mask to insert into SOFTRM table */\n    attr_mask_t      softrm_table_mask;\n\n    /** status manager global context */\n    void            *context;\n\n} sm_instance_t;\n\n/** number of loaded status manager instances */\nextern unsigned int sm_inst_count;  /* defined in 'status_manager.c' */\n\n/** number of status manager specific informations */\nextern unsigned int sm_attr_count;\n\nstatic inline bool is_std_attr(unsigned int index)\n{\n    return (((index & ATTR_INDEX_FLG_MASK) == 0) && (index < ATTR_COUNT));\n}\n\nstatic inline bool is_status(unsigned int index)\n{\n    return ((index & ATTR_INDEX_FLG_STATUS) &&\n            (attr2status_index(index) < sm_inst_count));\n}\n\nstatic inline bool is_sm_info(unsigned int index)\n{\n    return ((index & ATTR_INDEX_FLG_SMINFO) &&\n            (attr2sminfo_index(index) < sm_attr_count));\n}\n\n/** pointers to SM specific information */\nstruct _sm_attr_info {\n    const char          *db_attr_name;\n    const char          *user_attr_name;\n    const sm_info_def_t *def;\n    sm_instance_t       *smi;\n};\nextern struct _sm_attr_info *sm_attr_info;\n\n/** allocate status array */\nvoid sm_status_ensure_alloc(char const ***p_tab);\n/** free status array */\nvoid sm_status_free(char const ***p_tab);\n\n/** allocate sm_info array */\nvoid sm_info_ensure_alloc(void ***p_tab);\n/** free info array */\nvoid sm_info_free(void ***p_tab);\n\n/** create a status manager instance */\nsm_instance_t *create_sm_instance(const char *pol_name, const char *sm_name);\n\n/** get the Nth status manager instance */\nsm_instance_t *get_sm_instance(unsigned int 
n);\n\n/** wraps config handlers for all status managers */\nextern mod_cfg_funcs_t smi_cfg_hdlr;\n\n/** set status and attribute masks of status manager instances,\n * once they are all loaded */\nvoid smi_update_masks(void);\n\n/** initialize all status managers (if they have init functions)\n * @param flags daemon runtime flags\n */\nint smi_init_all(run_flags_t flags);\n\n/** get the constant string that matches the input string\n * @param[in] sm status manager that manages the matched status name\n * @param[in] in_str status name to match\n */\nconst char *get_status_str(const status_manager_t *sm, const char *in_str);\n\n/** return the list of allowed statuses for a status manager\n * (to be displayed in command help).\n * @param[in]     sm   status manager to query for its status list\n * @param[in,out] buf  buffer to write status list string\n * @param[in]     sz   buffer size\n * @return buf\n */\nchar *allowed_status_str(const status_manager_t *sm, char *buf, int sz);\n\n#ifdef HAVE_CHANGELOGS\n/** Call changelog callbacks for all status manager instances\n * @param[in]     logrec   incoming changelog record\n * @param[in]     id       related entry id\n * @param[in]     attrs    related entry attrs (current)\n * @param[out]    refreshed_attrs  updated entry attrs\n * @param[in,out] status_need   points to the mask of needed attributes\n *                              to determine entry status.\n * @param[in]     status_mask   mask of status managers that apply to the entry\n *                              (determined by policy scopes).\n * @param[out]    post_action  action to take with this changelog record or\n *                             the related entry.\n */\nint run_all_cl_cb(const CL_REC_TYPE *logrec,\n                  const entry_id_t  *id,\n                  const attr_set_t  *attrs,\n                  attr_set_t        *refreshed_attrs,\n                  attr_mask_t       *status_need,\n                  uint32_t           status_mask,\n   
               proc_action_e     *post_action);\n#endif\n\n/** When an entry is deleted, this function indicates what action is to be taken\n * by querying all status managers (remove from DB, move to softrm, ...)\n */\nproc_action_e match_all_softrm_filters(const entry_id_t *id,\n                                       const attr_set_t *attrs);\n\n/** return a mask with n bits 1 starting from offset.\n * e.g. bit_range(5,3) = 011100000\n */\nstatic inline uint64_t bit_range(unsigned int offset, unsigned int bits)\n{\n    return ((1LL << bits) - 1) << offset;\n}\n\n/** return the mask of all statuses */\nstatic inline uint32_t all_status_mask(void)\n{\n    return bit_range(0, sm_inst_count);\n}\n\n/** return the mask of all specific info */\nstatic inline uint64_t all_sm_info_mask(void)\n{\n    return bit_range(0, sm_attr_count);\n}\n\n/**\n * As status managers don't know their instance index in advance,\n * they provide generic masks as if there were only their own status and\n * attributes.\n * This macro is a helper for setting a mask of policy-specific attributes.\n */\n#define GENERIC_INFO_OFFSET  (0)\n#define GENERIC_INFO_BIT(_i) (1LL << (_i))\n\nstatic inline unsigned int smi_status_index(const sm_instance_t *smi)\n{\n    return ATTR_INDEX_FLG_STATUS | smi->smi_index;\n}\n\n/** return the attribute index of the <n>th status manager specific info */\nstatic inline unsigned int smi_info_index(const sm_instance_t *smi,\n                                          unsigned int n)\n{\n    return ATTR_INDEX_FLG_SMINFO | (smi->sm_info_offset + n);\n}\n\n/** return the sm_info mask of <n>th status manager specific info */\nstatic inline uint64_t smi_info_bit(const sm_instance_t *smi, unsigned int n)\n{\n    return 1LL << (smi->sm_info_offset + n);\n}\n\n/** return the attribute mask for all specific info of the status manager */\nstatic inline uint64_t smi_info_bits(const sm_instance_t *smi)\n{\n    return bit_range(smi->sm_info_offset, smi->sm->nb_info);\n}\n\n/** 
helper to set/overwrite a SM info */\nint set_sm_info(const sm_instance_t *smi, attr_set_t *pattrs,\n                unsigned int attr_index, void *val);\n\n/** Translate a generic mask SMI_MASK(0) and GENERIC_INFO_OFFSET to all status\n * and info masks */\nattr_mask_t translate_all_status_mask(attr_mask_t mask);\n\n/**\n * Return needed attributes to determine entry status for the given\n * status manager instance.\n * @param fresh true, to get the list of attributes that must be up-to-date,\n *              false, to get the list of attribute that can be cached\n *                  (retrieved from DB).\n */\nstatic inline attr_mask_t smi_needed_attrs(const sm_instance_t *smi,\n                                           bool fresh)\n{\n    if (smi == NULL)\n        return null_mask;\n\n    if (fresh)\n        return smi->status_mask_fresh;\n    else\n        return smi->status_mask_cached;\n}\n\n/**\n * Get attribute mask to get status in the given mask.\n * Note: it doesn't check policy scope, as is its supposed to\n * be checked to build the input mask.\n */\nstatic inline attr_mask_t attrs_for_status_mask(uint32_t status_mask,\n                                                bool fresh)\n{\n    int i = 0;\n    uint32_t m;\n    attr_mask_t ret = { 0 };\n\n    for (i = 0, m = 1; i < sm_inst_count; i++, m <<= 1) {\n        if (status_mask & m) {\n            attr_mask_t attr_need = smi_needed_attrs(get_sm_instance(i), fresh);\n\n            ret = attr_mask_or(&ret, &attr_need);\n        }\n    }\n\n    return ret;\n}\n\n/** indicate if the status manager handles file deletion */\nstatic inline bool smi_manage_deleted(sm_instance_t *smi)\n{\n    if (smi == NULL)\n        return false;\n    /* the status manager handles file removal */\n    return smi->sm->flags & SM_DELETED;\n}\n\n/** indicate if the status manager handles several types of actions */\nstatic inline bool smi_multi_action(sm_instance_t *smi)\n{\n    if (smi == NULL)\n        return false;\n    /* 
the status manager handles multiple types of actions */\n    return smi->sm->flags & SM_MULTI_ACTION;\n}\n\n/** check the status manager knows the given action name */\nstatic inline bool smi_support_action(sm_instance_t *smi, const char *name)\n{\n    if (smi == NULL || smi->sm == NULL || smi->sm->check_action_name == NULL)\n        return false;\n    return smi->sm->check_action_name(name);\n}\n\n/**\n * Retrieve the mask of attributes to be saved in SOFTRM table for all policies.\n * (needed to re-create the inode, schedule the 'remove' policy,\n * and recover/rebind the entry).\n */\nstatic inline attr_mask_t sm_softrm_fields(void)\n{\n    attr_mask_t all = null_mask;\n    int i = 0;\n    sm_instance_t *smi;\n\n    /** XXX based on policies or status managers? what about the scope? */\n    while ((smi = get_sm_instance(i)) != NULL) {\n        if (smi_manage_deleted(smi)) {\n            all = attr_mask_or(&all, &smi->softrm_table_mask);\n        }\n        i++;\n    }\n    return all;\n}\n\n/**\n * Retrieve the mask of attributes to check if an entry must be saved in SOFTRM table.\n */\nstatic inline attr_mask_t sm_softrm_mask(void)\n{\n    attr_mask_t all = null_mask;\n    int i = 0;\n    sm_instance_t *smi;\n\n    /** XXX based on policies or status managers? what about the scope? 
*/\n    while ((smi = get_sm_instance(i)) != NULL) {\n        if (smi_manage_deleted(smi))\n            all = attr_mask_or(&all, &smi->softrm_filter_mask);\n        i++;\n    }\n    return all;\n}\n\n/** build a string with the list of statuses in the given mask */\nchar *name_status_mask(uint32_t status_mask, char *buf, int sz);\n\n/** retrieve a status manager from its name */\nsm_instance_t *smi_by_name(const char *smi_name);\n\n/** Search the given attribute name (status or policy specific info).\n * If there is no smi in the context, name must be of the form '<sm_instance_name>.<attr_name>'\n * else, it can be just <attr_name> (implicit sm_instance name).\n * @return   0 on success, < 0 on failure.\n * @retval  -ENOENT if the requested attribute is not set in attributes structure.\n * @retval  -EINVAL if status manager or attr name is invalid.\n */\nint sm_attr_get(const sm_instance_t *smi, const attr_set_t *p_attrs,\n                const char *name, void **val, const sm_info_def_t **ppdef,\n                unsigned int *attr_index);\n\n#endif\n"
  },
  {
    "path": "src/include/uidgidcache.h",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2007, 2008, 2009 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n/**\n *\n * \\file    uidgidcache.h\n * \\brief   Cache user and groups relative informations.\n *\n * Cache user and groups relative informations\n */\n#ifndef UIDGID_CACHE_H\n#define UIDGID_CACHE_H\n\n#include \"config.h\"\n\n#if HAVE_SYS_TYPES_H\n#   include <sys/types.h>\n#endif\n\n#include \"Memory.h\"\n\n#include <grp.h>\n#include <pwd.h>\n\nint InitUidGid_Cache(void);\n\nconst struct passwd *GetPwUid(uid_t owner);\nconst struct group *GetGrGid(gid_t gid);\n\n/* Cache statistics */\nextern unsigned int pw_nb_set;\nextern unsigned int pw_nb_get;\nextern unsigned int gr_nb_set;\nextern unsigned int gr_nb_get;\n\n#endif\n"
  },
  {
    "path": "src/include/update_params.h",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2008, 2009, 2010, 2014 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n/**\n * \\file  update_params.h\n * \\brief Db attribute update strategy\n */\n\n#ifndef _UPDT_PARAMS_H\n#define _UPDT_PARAMS_H\n\n#include \"config_parsing.h\"\n#include \"list_mgr.h\"\n\n/**\n * Update behaviors\n * \\addtogroup Update management\n * @{\n */\n\n/**\n * path and metadata update strategies\n */\nenum updt_when {\n    UPDT_NEVER,     /* get info once, and never refresh it */\n    UPDT_ALWAYS,    /* always update info when processing an entry */\n    UPDT_ON_EVENT,  /* get info on related event */\n    UPDT_ON_EVENT_PERIODIC, /* default: get info on related event, with a\n                             * min interval + periodic update (max interval) */\n    UPDT_PERIODIC   /* update info periodically */\n};\n\n/**\n * update policy item\n */\ntypedef struct updt_param_item_t {\n    enum updt_when      when;\n    unsigned int        period_min;    /* 0=no min */\n    unsigned int        period_max;    /* 0=no periodic update */\n} updt_param_item_t;\n\n/**\n *  update parameters\n */\ntypedef struct updt_params_t {\n    updt_param_item_t   md;\n#ifdef _HAVE_FID\n    updt_param_item_t   path;\n#endif\n    updt_param_item_t   fileclass; /* only never/always/periodic allowed */\n} updt_params_t;\n\n/**\n *  Check if the fileclass needs to be updated\n */\nbool need_fileclass_update(const attr_set_t *p_attrs);\n\n/**\n *  Check if path or metadata needs to be updated\n *  \\param p_allow_event [out] if set to true, the path\n *         must be updated on related event.\n */\ntypedef 
enum { UPDT_PATH, UPDT_MD } type_info_t;\nbool need_info_update(const attr_set_t *p_attrs, bool *update_on_event,\n                      type_info_t type_info);\n\n#define need_path_update(_pa, _pu)    need_info_update((_pa), (_pu), UPDT_PATH)\n#define need_md_update(_pa, _pu)      need_info_update((_pa), (_pu), UPDT_MD)\n\n/** config handlers */\nextern mod_cfg_funcs_t updt_params_hdlr;\n\n/** make parameters available to all modules */\nextern updt_params_t updt_params;\n/** @} */\n\n#endif\n"
  },
  {
    "path": "src/include/xplatform_print.h",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2007-2009 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.ceciL.info) and that you\n * accept its terms.\n */\n/**\n * \\file xplatform_print.h\n * \\brief Define printing formats for aL platforms.\n */\n\n#ifndef XPLATFORM_PRINT_H\n#define XPLATFORM_PRINT_H\n\n#include \"config.h\"\n#include <inttypes.h>\n\n#if SIZEOF_DEV_T == 8\n#\tdefine PRI_DT PRIX64\n#else\n#\tdefine PRI_DT PRIX32\n#endif\n\n#define PRI_TT \"lu\"\n\n#define PRI_SZ \"zu\"\n\n#if SIZEOF_PTHREAD_T == 8\n#\tdefine PRI_PTH \"Lx\"\n#else\n#\tdefine PRI_PTH \"x\"\n#endif\n\n#if SIZEOF_NLINK_T == 8\n#\tdefine PRI_STNL \"Lu\"\n#else\n#\tdefine PRI_STNL \"u\"\n#endif\n\n#define PRI_STSZ \"zu\"\n\n\n#if SIZEOF_INO_T == 8\n#if __WORDSIZE == 64\n#       define PRI_STI \"lu\"\n#else\n#       define PRI_STI \"Lu\"\n#endif\n#else\n#       define PRI_STI \"u\"\n#endif\n\n#endif /* XPLATFORM_PRINT_H */\n"
  },
  {
    "path": "src/list_mgr/Makefile.am",
    "content": "AM_CFLAGS= $(CC_OPT) $(DB_CFLAGS) $(PURPOSE_CFLAGS)\n\nnoinst_LTLIBRARIES=liblistmgr.la\n\nif USE_MYSQL_DB\nDB_WRAPPER_SRC=mysql_wrapper.c\nendif\n\nif USE_SQLITE_DB\nDB_WRAPPER_SRC=sqlite_wrapper.c\nendif\n\nif HSM_LITE\nDB_PURPOSE_SRC=listmgr_recov.c\nendif\n\nif LUSTRE\nLUSTRE_SRC=listmgr_stripe.c listmgr_stripe.h\nendif\n\nliblistmgr_la_SOURCES=\tlistmgr_init.c listmgr_common.c listmgr_common.h \\\n\t\t\tlistmgr_get.c listmgr_insert.c $(LUSTRE_SRC) \\\n\t\t\tlistmgr_update.c listmgr_filters.c listmgr_remove.c listmgr_iterators.c \\\n\t\t\tlistmgr_tags.c listmgr_reports.c listmgr_config.c listmgr_internal.h database.h \\\n\t\t\tlistmgr_vars.c listmgr_ns.c $(DB_WRAPPER_SRC) $(DB_PURPOSE_SRC)\n\nindent:\n\t$(top_srcdir)/scripts/indent.sh\n"
  },
  {
    "path": "src/list_mgr/database.h",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2009 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n#ifndef _GENERIC_DB_H\n#define _GENERIC_DB_H\n\n#include \"list_mgr.h\"\n\n#define MAIN_TABLE\t        \"ENTRIES\"\n#define DNAMES_TABLE        \"NAMES\"\n#define ANNEX_TABLE\t        \"ANNEX_INFO\"\n#define STRIPE_INFO_TABLE\t\"STRIPE_INFO\"\n#define STRIPE_ITEMS_TABLE\t\"STRIPE_ITEMS\"\n#define SOFT_RM_TABLE       \"SOFT_RM\"\n#define VAR_TABLE           \"VARS\"\n#define ACCT_TABLE          \"ACCT_STAT\"\n#define ACCT_TRIGGER_INSERT \"ACCT_ENTRY_INSERT\"\n#define ACCT_TRIGGER_UPDATE \"ACCT_ENTRY_UPDATE\"\n#define ACCT_TRIGGER_DELETE \"ACCT_ENTRY_DELETE\"\n#define ACCT_FIELD_COUNT    \"count\"\n#define ACCT_DEFAULT_OWNER  \"unknown\"\n#define ACCT_DEFAULT_GROUP  \"unknown\"\n#define ACCT_DEFAULT_PROJID 0\n#define SZRANGE_FUNC        \"sz_range\"\n#define ONE_PATH_FUNC       \"one_path\"\n#define THIS_PATH_FUNC      \"this_path\"\n\n/* for HSM flavors only */\n#define  RECOV_TABLE     \"RECOVERY\"\n\n/* name of sz fields */\n#define ACCT_SIZE_PREFIX \"sz\"\nstatic const __attribute__ ((__unused__))\nchar * sz_field[SZ_PROFIL_COUNT] =\n{\n    ACCT_SIZE_PREFIX\"0\",\n    ACCT_SIZE_PREFIX\"1\",\n    ACCT_SIZE_PREFIX\"32\",\n    ACCT_SIZE_PREFIX\"1K\",\n    ACCT_SIZE_PREFIX\"32K\",\n    ACCT_SIZE_PREFIX\"1M\",\n    ACCT_SIZE_PREFIX\"32M\",\n    ACCT_SIZE_PREFIX\"1G\",\n    ACCT_SIZE_PREFIX\"32G\",\n    ACCT_SIZE_PREFIX\"1T\"\n};\n\nextern lmgr_config_t lmgr_config;\n\n/* -------------------- Connexion management ---------------- */\n\n/* create client connection */\nint            db_connect( db_conn_t * conn 
);\n\n/* close connection */\nint            db_close_conn( db_conn_t * conn );\n\n\n/* -------------------- SQL queries/result management ---------------- */\n\n/* execute sql directive (optionally with returned result) */\nint            db_exec_sql( db_conn_t * conn, const char *query, result_handle_t * p_result );\n\n/* like db_exec_sql, but expects duplicate key or no such table errors */\nint            db_exec_sql_quiet( db_conn_t * conn, const char *query,\n                                  result_handle_t * p_result );\n\n/* get the next record from result */\nint            db_next_record( db_conn_t * conn,\n                               result_handle_t * p_result,\n                               char *outtab[], unsigned int outtabsize );\n\n/* retrieve number of records in result */\nint            db_result_nb_records( db_conn_t * conn, result_handle_t * p_result );\n\n/* free result resources */\nint            db_result_free( db_conn_t * conn, result_handle_t * p_result );\n\n/* indicate if the error is retryable (transaction must be restarted) */\nbool db_is_retryable(int db_err);\n\ntypedef enum {DBOBJ_TABLE, DBOBJ_TRIGGER, DBOBJ_FUNCTION, DBOBJ_PROC, DBOBJ_INDEX} db_object_e;\n\nstatic inline const char *dbobj2str(db_object_e ot)\n{\n    switch(ot)\n    {\n        case DBOBJ_TABLE:    return \"table\";\n        case DBOBJ_TRIGGER:  return \"trigger\";\n        case DBOBJ_FUNCTION: return \"function\";\n        case DBOBJ_PROC:     return \"procedure\";\n        case DBOBJ_INDEX:    return \"index\";\n    }\n    return NULL;\n}\n\n\n/** remove a database component (table, trigger, function, ...) 
*/\nint            db_drop_component( db_conn_t * conn, db_object_e obj_type, const char *name );\n\n/**\n * check a component exists in the database\n * \\param arg depends on the object type: src table for triggers, NULL for others.\n */\nint db_check_component(db_conn_t *conn, db_object_e obj_type, const char *name, const char *arg);\n\n\n/* create a trigger */\nint            db_create_trigger( db_conn_t * conn, const char *name, const char *event,\n                               const char *table, const char *body );\n\n/* -------------------- miscellaneous routines ---------------- */\n\n/* escape a string in a SQL request */\nint db_escape_string(db_conn_t *conn, char *str_out, size_t out_size, const char *str_in);\n\n/* retrieve error message */\nchar          *db_errmsg( db_conn_t * conn, char *errmsg, unsigned int buflen );\n\n/** list table fields, their type, and default value */\nint db_list_table_info(db_conn_t * conn, const char *table,\n                       char **field_tab, char **type_tab, char **default_tab,\n                       unsigned int outtabsize,\n                       char *inbuffer, unsigned int inbuffersize);\n\n/* id of the last inserted row */\nunsigned long long db_last_id( db_conn_t * conn );\n\ntypedef enum { TRANS_NEXT, TRANS_SESSION } what_trans_e;\ntypedef enum { TXL_SERIALIZABLE,\n               TXL_REPEATABLE_RD,\n               TXL_READ_COMMITTED,\n               TXL_READ_UNCOMMITTED } tx_level_e;\n\n/** set transaction level (optimize performance or locking) */\nint db_transaction_level(db_conn_t * conn, what_trans_e what_tx, tx_level_e tx_level);\n\n\n#endif\n"
  },
  {
    "path": "src/list_mgr/listmgr_common.c",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2008, 2009, 2010 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n#ifdef HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n\n#include \"list_mgr.h\"\n#include \"listmgr_common.h\"\n#include \"database.h\"\n#include \"rbh_logs.h\"\n#include \"rbh_misc.h\"\n#include \"Memory.h\"\n#include \"listmgr_stripe.h\"\n#include \"xplatform_print.h\"\n#include <stdio.h>\n\nvolatile bool lmgr_cancel_retry = false;\n\nvoid printdbtype(db_conn_t *pconn, GString *str, db_type_e type,\n                 const db_type_u *value_ptr)\n{\n    switch (type) {\n    case DB_ID:\n        {\n            DEF_PK(tmpstr);\n\n            /* convert id to str */\n            entry_id2pk(&value_ptr->val_id, tmpstr);\n            g_string_append_printf(str, DPK, tmpstr);\n            break;\n        }\n    case DB_UIDGID:\n        if (global_config.uid_gid_as_numbers) {\n            g_string_append_printf(str, \"%d\", value_ptr->val_int);\n            break;\n        }\n        /* UID/GID is TEXT. Fall throught ... 
*/\n\n    case DB_TEXT:\n        {\n            if (value_ptr->val_str == NULL) {\n                g_string_append(str, \"NULL\");\n            } else if (!pconn) {\n                /* don't escape if no DB connection is given */\n                g_string_append_printf(str, \"'%s'\", value_ptr->val_str);\n            } else {\n                /* length required by MySQL manual */\n                int len = 2 * strlen(value_ptr->val_str) + 1;\n                char *tmpstr = MemAlloc(len);\n\n                /* escape special characters in value */\n                db_escape_string(pconn, tmpstr, len, value_ptr->val_str);\n                g_string_append_printf(str, \"'%s'\", tmpstr);\n                MemFree(tmpstr);\n            }\n            break;\n        }\n    case DB_INT:\n        g_string_append_printf(str, \"%d\", value_ptr->val_int);\n        break;\n    case DB_UINT:\n        g_string_append_printf(str, \"%u\", value_ptr->val_uint);\n        break;\n    case DB_SHORT:\n        g_string_append_printf(str, \"%hd\", value_ptr->val_short);\n        break;\n    case DB_USHORT:\n        g_string_append_printf(str, \"%hu\", value_ptr->val_ushort);\n        break;\n    case DB_BIGINT:\n        g_string_append_printf(str, \"%lld\", value_ptr->val_bigint);\n        break;\n    case DB_BIGUINT:\n        g_string_append_printf(str, \"%llu\", value_ptr->val_biguint);\n        break;\n    case DB_BOOL:\n        if (value_ptr->val_bool)\n            g_string_append(str, \"1\");\n        else\n            g_string_append(str, \"0\");\n        break;\n    case DB_ENUM_FTYPE:\n        /* don't escape: type value is trusted (not from user) */\n        g_string_append_printf(str, \"'%s'\", value_ptr->val_str);\n        break;\n    case DB_STRIPE_INFO:\n    case DB_STRIPE_ITEMS:\n        RBH_BUG(\"Unsupported DB type\");\n    }\n}\n\n/** print attribute value to display to the user\n * @param quote string to quote string types (eg. 
\"'\") */\nint ListMgr_PrintAttr(GString *str, db_type_e type,\n                      const db_type_u *value_ptr, const char *quote)\n{\n    switch (type) {\n    case DB_ID:\n        g_string_append_printf(str, DFID, PFID(&value_ptr->val_id));\n        return 0;\n\n    case DB_TEXT:\n        g_string_append_printf(str, \"%s%s%s\", quote, value_ptr->val_str, quote);\n        return 0;\n\n    case DB_UIDGID:\n        if (global_config.uid_gid_as_numbers)\n            g_string_append_printf(str, \"%d\", value_ptr->val_int);\n        else\n            g_string_append_printf(str, \"%s%s%s\", quote, value_ptr->val_str,\n                                   quote);\n        return 0;\n\n    case DB_INT:\n        g_string_append_printf(str, \"%d\", value_ptr->val_int);\n        return 0;\n\n    case DB_UINT:\n        g_string_append_printf(str, \"%u\", value_ptr->val_uint);\n        return 0;\n\n    case DB_SHORT:\n        g_string_append_printf(str, \"%hd\", value_ptr->val_short);\n        return 0;\n\n    case DB_USHORT:\n        g_string_append_printf(str, \"%hu\", value_ptr->val_ushort);\n        return 0;\n\n    case DB_BIGINT:\n        g_string_append_printf(str, \"%lld\", value_ptr->val_bigint);\n        return 0;\n\n    case DB_BIGUINT:\n        g_string_append_printf(str, \"%llu\", value_ptr->val_biguint);\n        return 0;\n\n    case DB_BOOL:\n        g_string_append_c(str, value_ptr->val_bool ? 
'1' : '0');\n        return 0;\n\n    case DB_ENUM_FTYPE:\n        g_string_append_printf(str, \"%s%s%s\", quote, value_ptr->val_str, quote);\n        return 0;\n\n    case DB_STRIPE_INFO:\n    case DB_STRIPE_ITEMS:\n        RBH_BUG(\"Unsupported DB type\");\n    }\n    DisplayLog(LVL_CRIT, LISTMGR_TAG, \"Error: unhandled type %d in %s\",\n               type, __func__);\n    return -EINVAL;\n}\n\nint ListMgr_PrintAttrPtr(GString *str, db_type_e type, void *value_ptr,\n                         const char *quote)\n{\n    db_type_u u;\n\n    assign_union(&u, type, value_ptr);\n    return ListMgr_PrintAttr(str, type, &u, quote);\n}\n\n/* return the number of parsed items (1) on success */\nint parsedbtype(char *str_in, db_type_e type, db_type_u *value_out)\n{\n    int rc;\n    int tmp;\n    switch (type) {\n    case DB_ID:\n        /* convert str to id */\n        rc = pk2entry_id(NULL, str_in, &value_out->val_id);\n        if (rc)\n            return 0;\n        return 1;\n\n    case DB_TEXT:\n    case DB_ENUM_FTYPE:\n        value_out->val_str = str_in;\n        return 1;\n\n    case DB_UIDGID:\n        if (global_config.uid_gid_as_numbers) {\n            return sscanf(str_in, \"%d\", &value_out->val_int);\n        } else {\n            value_out->val_str = str_in;\n            return 1;\n        }\n\n    case DB_INT:\n        return sscanf(str_in, \"%d\", &value_out->val_int);\n\n    case DB_UINT:\n        return sscanf(str_in, \"%u\", &value_out->val_uint);\n\n    case DB_SHORT:\n        return sscanf(str_in, \"%hd\", &value_out->val_short);\n\n    case DB_USHORT:\n        return sscanf(str_in, \"%hu\", &value_out->val_ushort);\n\n    case DB_BIGINT:\n        return sscanf(str_in, \"%lld\", &value_out->val_bigint);\n\n    case DB_BIGUINT:\n        return sscanf(str_in, \"%llu\", &value_out->val_biguint);\n\n    case DB_BOOL:\n        rc = sscanf(str_in, \"%d\", &tmp);\n        if (rc > 0)\n            value_out->val_bool = !(tmp == 0);\n        return rc;\n\n    
case DB_STRIPE_INFO:\n    case DB_STRIPE_ITEMS:\n        RBH_BUG(\"Unsupported DB type\");\n        return 0;\n    }\n    DisplayLog(LVL_CRIT, LISTMGR_TAG, \"Error: unhandled type %d in %s\", type,\n               __func__);\n    return 0;\n}\n\nstatic void separated_list2db(const char *list, char *db, int size)\n{\n    snprintf(db, size, LIST_SEP_STR_ESC \"%s\" LIST_SEP_STR_ESC, list);\n}\n\nstatic void separated_list2match(const char *list, char *db, int size)\n{\n    /* <item>  is matched using expression '%+<item>+%' */\n    snprintf(db, size, \"%%\" LIST_SEP_STR_ESC \"%s\" LIST_SEP_STR_ESC \"%%\", list);\n}\n\nstatic void separated_db2list(const char *db, char *list, int size)\n{\n    int len = MIN(strlen(db) - 2, size - 1);\n    strncpy(list, db + 1, len);\n    list[len] = '\\0';\n}\n\nvoid separated_db2list_inplace(char *list)\n{\n    int len = strlen(list);\n    int i;\n    for (i = 1; i < len - 1; i++)\n        list[i - 1] = list[i];\n    list[len - 2] = '\\0';\n}\n\nbool match_table(table_enum t, unsigned int attr_index)\n{\n    return ((t == T_MAIN) && is_main_field(attr_index)) ||\n        ((t == T_DNAMES) && is_names_field(attr_index)) ||\n        ((t == T_ANNEX) && is_annex_field(attr_index)) ||\n        ((t == T_RECOV) && is_recov_field(attr_index)) ||\n        ((t == T_SOFTRM || t == T_TMP_SOFTRM)\n         && is_softrm_field(attr_index)) ||\n        ((t == T_ACCT) && (is_acct_field(attr_index)\n                           || is_acct_pk(attr_index))) ||\n        ((t == T_ACCT_PK) && is_acct_pk(attr_index)) ||\n        ((t == T_ACCT_VAL) && is_acct_field(attr_index));\n}\n\n/** get the table for the given attr index */\nstatic inline table_enum field2table(int i)\n{\n    if (is_main_field(i))\n        return T_MAIN;\n    else if (is_names_field(i))\n        return T_DNAMES;\n    else if (is_annex_field(i))\n        return T_ANNEX;\n    else if (i == ATTR_INDEX_stripe_info)\n        return T_STRIPE_INFO;\n    else if (i == ATTR_INDEX_stripe_items)\n    
    return T_STRIPE_ITEMS;\n    else\n        return T_NONE;\n}\n\n/* precomputed masks for testing attr sets efficiently\n   (global = zero initially) */\nattr_mask_t main_attr_set = { 0 };\nattr_mask_t names_attr_set = { 0 };\nattr_mask_t annex_attr_set = { 0 };\nattr_mask_t gen_attr_set = { 0 };\nattr_mask_t stripe_attr_set = { 0 };\nattr_mask_t dir_attr_set = { 0 };\nattr_mask_t slink_attr_set = { 0 };\nattr_mask_t acct_attr_set = { 0 };\nattr_mask_t acct_pk_attr_set = { 0 };\nattr_mask_t softrm_attr_set = { 0 };\nattr_mask_t readonly_attr_set = { 0 };\nattr_mask_t func_attr_set = { 0 };\n\nvoid init_attrset_masks(const lmgr_config_t *lmgr_config)\n{\n    int i, cookie;\n    attr_mask_t tmp_mask;\n\n    main_attr_set = null_mask;\n    names_attr_set = null_mask;\n    annex_attr_set = null_mask;\n    gen_attr_set = null_mask;\n    stripe_attr_set = null_mask;\n    dir_attr_set = null_mask;\n    slink_attr_set = null_mask;\n    acct_attr_set = null_mask;\n    acct_pk_attr_set = null_mask;\n    softrm_attr_set = null_mask;\n    readonly_attr_set = null_mask;\n    func_attr_set = null_mask;\n\n    /* Always set them, even if accounting is disabled. 
*/\n    acct_pk_attr_set.std |= ATTR_MASK_uid;\n    acct_pk_attr_set.std |= ATTR_MASK_gid;\n#ifdef _LUSTRE\n    /* in case lustre_projid is enabled, allow instant stats per projid */\n    if (global_config.lustre_projid)\n        acct_pk_attr_set.std |= ATTR_MASK_projid;\n#endif\n    acct_pk_attr_set.std |= ATTR_MASK_type;\n    acct_pk_attr_set.status |= all_status_mask();\n\n    /* The following fields must be in SOFT_RM table:\n     * Posix attributes + fullpath + fields with REMOVED flag\n     * + fields indicated by status managers for SOFT_RM.\n     */\n    softrm_attr_set.std |= POSIX_ATTR_MASK | ATTR_MASK_fullpath;\n    tmp_mask = sm_softrm_fields();\n    softrm_attr_set = attr_mask_or(&softrm_attr_set, &tmp_mask);\n\n    /* size: also used for size range stats */\n    acct_attr_set.std |= ATTR_MASK_size | ATTR_MASK_blocks;\n\n    cookie = -1;\n    while ((i = attr_index_iter(0, &cookie)) != -1) {\n        /* is it read only ? */\n        if (is_read_only_field(i))\n            attr_mask_set_index(&readonly_attr_set, i);\n\n        /* (fields with remove flag) other flags set previously */\n        if (test_field_flag(i, REMOVED))\n            attr_mask_set_index(&softrm_attr_set, i);\n\n        /* The ID field is both in NAMES and MAIN. 
(XXX not an attribute) */\n        if (is_names_field(i))\n            attr_mask_set_index(&names_attr_set, i);\n\n        if (is_main_field(i))\n            attr_mask_set_index(&main_attr_set, i);\n        else if (is_gen_field(i))\n            attr_mask_set_index(&gen_attr_set, i);\n        else if (is_annex_field(i))\n            attr_mask_set_index(&annex_attr_set, i);\n        else if (is_stripe_field(i))\n            attr_mask_set_index(&stripe_attr_set, i);\n        else if (is_dirattr(i))\n            attr_mask_set_index(&dir_attr_set, i);\n        else if (is_funcattr(i))\n            attr_mask_set_index(&func_attr_set, i);\n\n        /* not mutually exclusive with previous */\n        if (is_slinkattr(i))\n            attr_mask_set_index(&slink_attr_set, i);\n    }\n}\n\n/** return byte address where the given attribute is stored */\nstatic inline void *attr_address(attr_set_t *attrs, int attr_index)\n{\n    return (char *)&attrs->attr_values + field_infos[attr_index].offset;\n}\n\n/** const version */\nstatic inline const void *attr_address_const(const attr_set_t *attrs,\n                                             int attr_index)\n{\n    return (char *)&attrs->attr_values + field_infos[attr_index].offset;\n}\n\n/**\n * Add source info of generated fields to attr mask.\n * only apply to std attrs.\n */\nvoid add_source_fields_for_gen(uint32_t *std_mask)\n{\n    int i;\n    uint32_t mask = 1;\n\n    /* add attr mask for source info of generated fields */\n    for (i = 0; i < ATTR_COUNT; i++, mask <<= 1) {\n        if (((*std_mask) & mask)\n            && ((field_infos[i].flags & GENERATED)\n                || (field_infos[i].flags & DIR_ATTR))\n            && (field_infos[i].gen_index != -1)) {\n            (*std_mask) |= (1 << field_infos[i].gen_index);\n        }\n    }\n}\n\n/** generate fields */\nvoid generate_fields(attr_set_t *p_set)\n{\n    int i;\n    uint32_t mask = 1;\n\n    /* only work on standard attributes */\n    for (i = 0; i < 
ATTR_COUNT; i++, mask <<= 1) {\n        if ((p_set->attr_mask.std & mask)\n            && (field_infos[i].flags & GENERATED)) {\n            const void *src_data;\n            void *tgt_data;\n\n            if (field_infos[i].gen_func == NULL) {\n                /* cannot generate a field without a function */\n                DisplayLog(LVL_DEBUG, LISTMGR_TAG,\n                           \"generated field without generation function: %s\",\n                           field_infos[i].field_name);\n                p_set->attr_mask.std &= ~mask;\n                continue;\n            }\n\n            /* is it generated from another field ? */\n            if (field_infos[i].gen_index != -1) {\n                uint32_t src_mask = (1 << field_infos[i].gen_index);\n                /* is source set? */\n                if ((p_set->attr_mask.std & src_mask) == 0) {\n                    DisplayLog(LVL_FULL, LISTMGR_TAG,\n                               \"Source info '%s' of generated field '%s' is not set \"\n                               \"in the database\",\n                               field_infos[field_infos[i].gen_index].field_name,\n                               field_infos[i].field_name);\n                    p_set->attr_mask.std &= ~mask;\n                    continue;\n                }\n\n                src_data = attr_address_const(p_set, field_infos[i].gen_index);\n            } else {\n                /* nothing needed to generate it */\n                src_data = NULL;\n            }\n\n            tgt_data = attr_address(p_set, i);\n\n            if (field_infos[i].gen_func(tgt_data, src_data) != 0)\n                p_set->attr_mask.std &= ~mask;\n            else\n                DisplayLog(LVL_FULL, LISTMGR_TAG, \"Field '%s' auto-generated\",\n                           field_infos[i].field_name);\n\n        }   /* end if generated */\n    }   /* end for attr list */\n}\n\n/**\n * Generate fields automatically from already existing fields,\n * and 
check the target mask is satisfied.\n */\nint ListMgr_GenerateFields(attr_set_t *p_set, attr_mask_t target_mask)\n{\n    attr_mask_t save_mask = p_set->attr_mask;\n\n    /* are there generated fields that are not set for the target */\n    if (target_mask.std & gen_attr_set.std) {\n        /* try to generate missing fields */\n        p_set->attr_mask.std |= (target_mask.std & gen_attr_set.std);\n        generate_fields(p_set);\n\n        /* still missing? */\n        if (target_mask.std & ~p_set->attr_mask.std) {\n            DisplayLog(LVL_VERB, LISTMGR_TAG,\n                       \"Field still missing (can't be generated): %#\" PRIX32,\n                       target_mask.std & ~p_set->attr_mask.std);\n            /* never leave the function with less info than when entering! */\n            p_set->attr_mask.std |= save_mask.std;\n            return DB_ATTR_MISSING;\n        }\n    }\n\n    /* never leave the function with less info than when entering! */\n    p_set->attr_mask.std |= save_mask.std;\n\n    return DB_SUCCESS;\n}\n\n/* function attr_index, arg table, function_name, {arguments} */\ntypedef struct function_def {\n    int attr_index;\n    table_enum arg_table;\n    char *fn_name;\n    char **fn_args;\n} function_def_t;\n\nstatic const function_def_t functions[] = {\n    {ATTR_INDEX_fullpath, T_DNAMES, THIS_PATH_FUNC,\n     (char *[]){\"parent_id\", \"name\", NULL}},\n    {-1, 0, NULL, NULL}\n};\n\nstatic const function_def_t *get_function_by_attr(int attr_index)\n{\n    int i;\n    for (i = 0; functions[i].fn_name != NULL; i++) {\n        if (functions[i].attr_index == attr_index)\n            return &functions[i];\n    }\n    return NULL;\n}\n\n/* print function call */\nstatic void print_func_call(GString *str, int func_index, const char *prefix)\n{\n    const function_def_t *func = get_function_by_attr(func_index);\n    char **args;\n    if (func == NULL)   /* unexpected: BUG */\n        RBH_BUG(\"call for non-function attr\");\n\n    
g_string_append_printf(str, \"%s(\", func->fn_name);\n    for (args = func->fn_args; *args != NULL; args++) {\n        if (args == func->fn_args)  /* first arg */\n            g_string_append_printf(str, \"%s%s\", prefix, *args);\n        else\n            g_string_append_printf(str, \",%s%s\", prefix, *args);\n    }\n    g_string_append(str, \")\");\n}\n\n/** test if there are read only fields in the given mask\n * and print an error in this case.\n * @retval false if not.\n * @retval true if some fields in attr_mask are read only.\n */\n\nstatic bool _check_read_only_fields(const attr_mask_t *mask,\n                                    const char *file_name,\n                                    const char *func_name)\n{\n    attr_mask_t tmp;\n\n    if (!readonly_fields(*mask))\n        return false;\n\n    tmp = attr_mask_and(mask, &readonly_attr_set);\n    DisplayLog(LVL_CRIT, LISTMGR_TAG, \"Error in %s:%s: attributes \" DMASK\n               \" are read-only\", file_name, func_name, PMASK(&tmp));\n    return true;\n}\n\n#define check_read_only_fields(_m) \\\n            _check_read_only_fields(_m, __FILE__, __func__)\n\n/**\n * @param table T_MAIN, T_ANNEX, T_ACCT\n * @param prefix\n * @param suffix\n * @param separator\n * @return nbr of fields\n */\nint attrmask2fieldlist(GString *str, attr_mask_t attr_mask, table_enum table,\n                       const char *prefix, const char *suffix,\n                       attrset_op_flag_e flags)\n{\n    int i, cookie;\n    unsigned int nbfields = 0;\n    bool leading_comma = flags & AOF_LEADING_SEP;\n\n    /* optim: exit immediately if no field matches */\n    if ((table == T_MAIN) && !main_fields(attr_mask))\n        return 0;\n    if ((table == T_ANNEX) && !annex_fields(attr_mask))\n        return 0;\n    if ((table == T_DNAMES) && !names_fields(attr_mask))\n        return 0;\n\n    if ((table == T_STRIPE_INFO) || (table == T_STRIPE_ITEMS))\n        return -DB_NOT_SUPPORTED;\n\n    cookie = -1;\n    while ((i = 
attr_index_iter(0, &cookie)) != -1) {\n        if (attr_mask_test_index(&attr_mask, i)) {\n            if (match_table(table, i)) {\n                if (leading_comma || (nbfields > 0))\n                    g_string_append(str, \",\");\n\n                if (is_funcattr(i) &&\n                    /* exception: fullpath is a real field in SOFT_RM\n                       and temporary softrm table */\n                    !((table == T_SOFTRM || table == T_TMP_SOFTRM)\n                      && (i == ATTR_INDEX_fullpath))) {\n                    print_func_call(str, i, prefix);\n                    if (suffix && suffix[0])\n                        g_string_append_printf(str, \"%s\", suffix);\n                } else {\n                    g_string_append_printf(str, \"%s%s%s\", prefix, field_name(i),\n                                           suffix);\n                }\n                nbfields++;\n            }\n        }\n    }\n    return nbfields;\n}\n\n/**\n * Generate operation like incrementation or decrementation on fields.\n * @param str\n * @param attr_mask\n * @param table T_MAIN, T_ANNEX, T_ACCT\n * @param prefix\n * @param operation\n * @return nbr of fields\n */\nint attrmask2fieldoperation(GString *str, attr_mask_t attr_mask,\n                            table_enum table, const char *prefix,\n                            operation_type operation)\n{\n    int i, cookie;\n    unsigned int nbfields = 0;\n    char operator;\n\n    if (operation == OT_SUBTRACT)\n        operator = '-';\n    else\n        operator = '+';\n\n    cookie = -1;\n    while ((i = attr_index_iter(0, &cookie)) != -1) {\n        if (attr_mask_test_index(&attr_mask, i)) {\n            if (match_table(table, i)) {\n                g_string_append_printf(str,\n                                       \"%s%s=CAST(%s as SIGNED)%cCAST(%s%s as SIGNED)\",\n                                       nbfields == 0 ? 
\"\" : \",\", field_name(i),\n                                       field_name(i), operator, prefix,\n                                       field_name(i));\n                nbfields++;\n            }\n        }\n    }\n    return nbfields;\n}\n\n/**\n * Generate comparaison on fields.\n * @param str\n * @param attr_mask\n * @param table T_MAIN, T_ANNEX, T_ACCT\n * @param left_prefix\n * @param right_prefix\n * @param comparator\n * @param separator\n * @return nbr of fields\n */\nint attrmask2fieldcomparison(GString *str, attr_mask_t attr_mask,\n                             table_enum table, const char *left_prefix,\n                             const char *right_prefix,\n                             const char *comparator, const char *separator)\n{\n    int i, cookie;\n    unsigned int nbfields = 0;\n\n    cookie = -1;\n    while ((i = attr_index_iter(0, &cookie)) != -1) {\n        if (attr_mask_test_index(&attr_mask, i)) {\n            if (match_table(table, i)) {\n                g_string_append_printf(str, \"%s %s%s%s%s%s \",\n                                       nbfields == 0 ? 
\"\" : separator,\n                                       left_prefix, field_name(i), comparator,\n                                       right_prefix, field_name(i));\n                nbfields++;\n            }\n        }\n    }\n    return nbfields;\n}\n\nstatic void print_attr_value(lmgr_t *p_mgr, GString *str,\n                             const attr_set_t *p_set, unsigned int attr_index)\n{\n    char tmp[1024];\n    db_type_u typeu;\n    db_type_e t;\n\n    if (attr_index < ATTR_COUNT) {\n        assign_union(&typeu, field_infos[attr_index].db_type,\n                     attr_address_const(p_set, attr_index));\n\n        if (is_sepdlist(attr_index)) {\n            separated_list2db(typeu.val_str, tmp, sizeof(tmp));\n            typeu.val_str = tmp;\n        }\n        t = field_infos[attr_index].db_type;\n    } else if (is_status_field(attr_index)) {\n        unsigned int status_idx = attr2status_index(attr_index);\n\n        assign_union(&typeu, DB_TEXT, p_set->attr_values.sm_status[status_idx]);\n        t = DB_TEXT;\n    } else if (is_sm_info_field(attr_index)) {\n        unsigned int info_idx = attr2sminfo_index(attr_index);\n\n        t = sm_attr_info[info_idx].def->db_type;\n        assign_union(&typeu, t, (char *)p_set->attr_values.sm_info[info_idx]);\n    } else\n        RBH_BUG(\"Attribute index is not in a valid range\");\n\n    printdbtype(&p_mgr->conn, str, t, &typeu);\n}\n\n/**\n * @param table T_MAIN, T_ANNEX\n * @return nbr of fields\n */\nint attrset2valuelist(lmgr_t *p_mgr, GString *str, const attr_set_t *p_set,\n                      table_enum table, attrset_op_flag_e flags)\n{\n    int i, cookie;\n    unsigned int nbfields = 0;\n    bool leading_comma = flags & AOF_LEADING_SEP;\n\n    if ((table == T_STRIPE_INFO) || (table == T_STRIPE_ITEMS))\n        return -DB_NOT_SUPPORTED;\n\n    cookie = -1;\n    while ((i = attr_index_iter(0, &cookie)) != -1) {\n        if (attr_mask_test_index(&p_set->attr_mask, i)) {\n            if 
(match_table(table, i)) {\n                if (leading_comma || (nbfields > 0))\n                    g_string_append(str, \",\");\n\n                print_attr_value(p_mgr, str, p_set, i);\n                nbfields++;\n            }\n        }\n    }\n    return nbfields;\n}\n\n/**\n * @param table T_MAIN, T_ANNEX\n * @return nbr of fields\n */\nint attrset2updatelist(lmgr_t *p_mgr, GString *str, const attr_set_t *p_set,\n                       table_enum table, attrset_op_flag_e flags)\n{\n    int i, cookie;\n    unsigned int nbfields = 0;\n    bool leading_comma = flags & AOF_LEADING_SEP;\n    bool generic_value = flags & AOF_GENERIC_VAL;\n\n    if ((table == T_STRIPE_INFO) || (table == T_STRIPE_ITEMS))\n        return -DB_NOT_SUPPORTED;\n\n    if (check_read_only_fields(&p_set->attr_mask))\n        return -DB_READ_ONLY_ATTR;\n\n    cookie = -1;\n    while ((i = attr_index_iter(0, &cookie)) != -1) {\n        if (attr_mask_test_index(&p_set->attr_mask, i)\n            && match_table(table, i)) {\n            if (leading_comma || (nbfields > 0))\n                g_string_append(str, \",\");\n\n            g_string_append_printf(str, \"%s=\", field_name(i));\n\n            if (generic_value)\n                g_string_append_printf(str, \"VALUES(%s)\", field_name(i));\n            else\n                print_attr_value(p_mgr, str, p_set, i);\n\n            nbfields++;\n        }\n    }\n    return nbfields;\n}\n\nint fullpath_attr2db(const char *attr, char *db)\n{\n    DEF_PK(root_pk);\n    char rel[RBH_PATH_MAX];\n\n    /* fullpath 2 relative */\n    if (relative_path(attr, global_config.fs_path, rel)) {\n        DisplayLog(LVL_MAJOR, LISTMGR_TAG,\n                   \"fullpath %s is not under FS root %s\", attr,\n                   global_config.fs_path);\n        return -EINVAL;\n    }\n    /* prefix with root id */\n    entry_id2pk(get_root_id(), PTR_PK(root_pk));\n    sprintf(db, \"%s/%s\", root_pk, rel);\n    return 0;\n}\n\nvoid fullpath_db2attr(const char 
*db, char *attr)\n{\n    DEF_PK(id_from_db);\n    DEF_PK(root_pk);\n\n    entry_id2pk(get_root_id(), PTR_PK(root_pk));\n    const char *c = strchr(db, '/');\n    if (!c) {\n        DisplayLog(LVL_MAJOR, LISTMGR_TAG,\n                   \"Unexpected path format from DB: '%s'\", db);\n        /* use c = db */\n        c = db;\n    } else {\n        memset(PTR_PK(id_from_db), 0, sizeof(id_from_db));\n        strncpy(id_from_db, db, (ptrdiff_t) (c - db));\n\n        /* check FS root */\n        if (strcmp(root_pk, id_from_db) != 0) {\n            DisplayLog(LVL_EVENT, LISTMGR_TAG,\n                       \"Entry has incomplete path in DB: \"\n                       \"parent_id='%s', relative_path='%s'\", id_from_db, c + 1);\n            /* copy as is */\n            sprintf(attr, \"%s\", db);\n            return;\n        }\n        c++;    /* skip '/' */\n    }\n\n    /* relative 2 full */\n    if (!strcmp(global_config.fs_path, \"/\"))    /* FS root is '/' */\n        sprintf(attr, \"/%s\", c);\n    else\n        sprintf(attr, \"%s/%s\", global_config.fs_path, c);\n}\n\nint result2attrset(table_enum table, char **result_tab,\n                   unsigned int res_count, attr_set_t *p_set)\n{\n    int i, cookie;\n    unsigned int nbfields = 0;\n    db_type_u typeu;\n\n    cookie = -1;\n    while ((i = attr_index_iter(0, &cookie)) != -1) {\n        if (attr_mask_test_index(&p_set->attr_mask, i)\n            && match_table(table, i)) {\n            if (log_config.debug_level >= LVL_FULL && result_tab != NULL) {\n                DisplayLog(LVL_FULL, LISTMGR_TAG, \"result[%u]: %s = %s\",\n                           nbfields, field_name(i),\n                           result_tab[nbfields] ? 
result_tab[nbfields] :\n                           \"<null>\");\n            }\n\n            /* Parse nbfield'th value */\n            if (nbfields >= res_count) {\n                return DB_BUFFER_TOO_SMALL;\n            }\n#ifdef _LUSTRE\n            if (i < ATTR_COUNT && field_infos[i].db_type == DB_STRIPE_INFO) {\n                if ((result_tab == NULL)\n                    || (result_tab[nbfields] == NULL)\n                    || (result_tab[nbfields + 1] == NULL)\n                    || (result_tab[nbfields + 2] == NULL)) {\n                    /* must skip 3 columns in this case */\n                    attr_mask_unset_index(&p_set->attr_mask, i);\n                    nbfields += 3;\n                    continue;\n                }\n                ATTR(p_set, stripe_info).stripe_count =\n                    atoi(result_tab[nbfields]);\n                ATTR(p_set, stripe_info).stripe_size =\n                    atoi(result_tab[nbfields + 1]);\n                rh_strncpy(ATTR(p_set, stripe_info).pool_name,\n                           result_tab[nbfields + 2], MAX_POOL_LEN);\n\n                /* stripe count, stripe size and pool_name */\n                nbfields += 3;\n                continue;\n            } else\n#endif\n            if ((result_tab == NULL) || (result_tab[nbfields] == NULL)) {\n                attr_mask_unset_index(&p_set->attr_mask, i);\n                nbfields++;\n                continue;\n            } else\n                if (!parsedbtype(result_tab[nbfields], field_type(i), &typeu)) {\n                DisplayLog(LVL_CRIT, LISTMGR_TAG,\n                           \"Error: cannot parse field value '%s' (position %u) for %s\",\n                           result_tab[nbfields], nbfields, field_name(i));\n                RBH_BUG(\"DB value cannot be parsed: DB may be corrupted\");\n                attr_mask_unset_index(&p_set->attr_mask, i);\n                nbfields++;\n                continue;\n            }\n\n            if ((i == 
ATTR_INDEX_fullpath) && (table != T_SOFTRM)) {\n                /* special case for fullpath which must be converted from\n                   relative to aboslute */\n                /* fullpath already includes root for SOFT_RM table */\n                fullpath_db2attr(typeu.val_str, ATTR(p_set, fullpath));\n            } else if (is_status_field(i)) {\n                unsigned int status_idx = attr2status_index(i);\n\n                /* allocate status array */\n                sm_status_ensure_alloc(&p_set->attr_values.sm_status);\n                /* get the matching status from status enum */\n                p_set->attr_values.sm_status[status_idx] =\n                    get_status_str(get_sm_instance(status_idx)->sm,\n                                   typeu.val_str);\n\n                /* status = '' => not set */\n                if (p_set->attr_values.sm_status[status_idx] == NULL)\n                    attr_mask_unset_index(&p_set->attr_mask, i);\n            } else if (is_sm_info_field(i)) {\n                unsigned int info_idx = attr2sminfo_index(i);\n\n                /* allocate info array */\n                sm_info_ensure_alloc(&p_set->attr_values.sm_info);\n\n                /* allocate a copy of the value */\n                p_set->attr_values.sm_info[info_idx] =\n                    dup_value(field_type(i), typeu);\n\n                /* status = '' => not set */\n                if (p_set->attr_values.sm_info[info_idx] == NULL)\n                    attr_mask_unset_index(&p_set->attr_mask, i);\n            } else if (is_sepdlist(i))\n                /* note: C size is db_type_size+1 */\n                separated_db2list(typeu.val_str, attr_address(p_set, i),\n                                  field_infos[i].db_type_size + 1);\n            else\n                union_get_value(attr_address(p_set, i), field_infos[i].db_type,\n                                &typeu);\n            nbfields++;\n        }\n    }\n    return 0;\n\n}\n\nchar 
*compar2str(filter_comparator_t compar)\n{\n    switch (compar) {\n    case EQUAL:\n        return \"=\";\n    case NOTEQUAL:\n        return \"<>\";\n    case LESSTHAN:\n        return \"<=\";\n    case MORETHAN:\n        return \">=\";\n    case LESSTHAN_STRICT:\n        return \"<\";\n    case MORETHAN_STRICT:\n        return \">\";\n#ifdef _MYSQL\n        /* MySQL is case insensitive.\n         * To force case-sensitivity, use BINARY keyword. */\n    case LIKE:\n        return \" LIKE BINARY \";\n    case UNLIKE:\n        return \" NOT LIKE BINARY \";\n    case RLIKE:\n        return \" RLIKE BINARY \";\n    case ILIKE:\n        return \" LIKE \";\n    case IUNLIKE:\n        return \" NOT LIKE \";\n#else\n    case LIKE:\n        return \" LIKE \";\n    case UNLIKE:\n        return \" NOT LIKE \";\n#endif\n    case IN:\n        return \" IN \";\n    case NOTIN:\n        return \" NOT IN \";\n    case ISNULL:\n        return \" IS NULL\";\n    case NOTNULL:\n        return \" IS NOT NULL\";\n    }\n\n    DisplayLog(LVL_CRIT, LISTMGR_TAG,\n               \"Default sign for filter: should never happen !!!\");\n    return \"=\";\n}\n\n/**\n * @param filter_str initialized GString.\n * @retval FILTERDIR_NONE if there is no filter on dirattrs\n * @retval FILTERDIR_EMPTY if the test is 'dircount == 0' (no junction needed)\n * @retval FILTERDIR_NONEMPTY if the test is on dircount != 0, >= 0, condition on avgsize\n *                           junction needed, depending on the filter\n *                           test looks like \"dirattr >= x\"\n */\nfilter_dir_e dir_filter(lmgr_t *p_mgr, GString *filter_str,\n                        const lmgr_filter_t *p_filter,\n                        unsigned int *dir_attr_index, const char *prefix)\n{\n    int i;\n\n    if (p_filter->filter_type == FILTER_SIMPLE) {\n        for (i = 0; i < p_filter->filter_simple.filter_count; i++) {\n            unsigned int index = p_filter->filter_simple.filter_index[i];\n            if 
(!is_dirattr(index))\n                continue;\n\n            /* condition about empty directory (dircount == 0)? */\n            if ((index == ATTR_INDEX_dircount)\n                && (p_filter->filter_simple.filter_value[i].value.val_uint == 0)\n                && (p_filter->filter_simple.filter_compar[i] == EQUAL)) {\n                DisplayLog(LVL_FULL, LISTMGR_TAG,\n                           \"Special filter on empty directory\");\n\n                /* empty directories are not in parent_ids of NAMES table */\n                if (filter_str != NULL) {   /* allow passing no string */\n                    if (prefix)\n                        g_string_append_printf(filter_str,\n                                               \"%s.id NOT IN (SELECT distinct(parent_id) \"\n                                               \"FROM \" DNAMES_TABLE \")\",\n                                               prefix);\n                    else\n                        g_string_append(filter_str,\n                                        \"id NOT IN (SELECT distinct(parent_id) \"\n                                        \"FROM \" DNAMES_TABLE \")\");\n                }\n                if (dir_attr_index != NULL) /* allow passing no index */\n                    *dir_attr_index = index;\n\n                return FILTERDIR_EMPTY;\n            } else {\n                /* dirattrN <comparator> */\n                if (filter_str != NULL) {\n                    g_string_append_printf(filter_str, \"dirattr%s\",\n                                           compar2str(p_filter->filter_simple.\n                                                      filter_compar[i]));\n\n                    /* value: (list only apply to OSTs XXX for now) */\n                    db_type_u typeu =\n                        p_filter->filter_simple.filter_value[i].value;\n                    printdbtype(&p_mgr->conn, filter_str,\n                                field_infos[index].db_type, &typeu);\n             
   }\n\n                if (dir_attr_index != NULL)\n                    *dir_attr_index = index;\n                return FILTERDIR_OTHER;\n            }\n        }\n    }\n    return FILTERDIR_NONE;\n}\n\n/**\n * build filter for stored FUNCTIONs\n * @param filter_str    initialized GString\n * @return the number of filtered values\n */\nint func_filter(lmgr_t *p_mgr, GString *filter_str,\n                const lmgr_filter_t *p_filter, table_enum table,\n                attrset_op_flag_e flags)\n{\n    int i;\n    char param1[128];\n    char param2[128];\n    unsigned int nb_fields = 0;\n    bool leading_and = flags & AOF_LEADING_SEP;\n    bool prefix_table = flags & AOF_PREFIX;\n\n    if (p_filter->filter_type == FILTER_SIMPLE) {\n        for (i = 0; i < p_filter->filter_simple.filter_count; i++) {\n            unsigned int index = p_filter->filter_simple.filter_index[i];\n\n            if (is_funcattr(index)) {\n                db_type_u typeu;\n\n                param1[0] = '\\0';\n                param2[0] = '\\0';\n\n                if (filter_str == NULL) /* no need to check other stuff */\n                    goto end_of_loop;\n\n                /* add prefixes or parenthesis, etc. 
*/\n                if (leading_and || (nb_fields > 0)) {\n                    if (p_filter->filter_simple.\n                        filter_flags[i] & FILTER_FLAG_OR)\n                        g_string_append(filter_str, \" OR \");\n                    else\n                        g_string_append(filter_str, \" AND \");\n                }\n\n                if (p_filter->filter_simple.filter_flags[i] & FILTER_FLAG_BEGIN)\n                    g_string_append(filter_str, \"(\");\n\n                if (p_filter->filter_simple.filter_flags[i] & FILTER_FLAG_NOT) {\n                    /* NOT (x <cmp> <val>) */\n                    g_string_append(filter_str, \" NOT (\");\n                }\n\n                if (index == ATTR_INDEX_fullpath) {\n                    char relative[RBH_PATH_MAX];\n\n                    if (fullpath_attr2db\n                        (p_filter->filter_simple.filter_value[i].value.val_str,\n                         relative)) {\n                        /* condition is always false */\n                        g_string_append(filter_str, \"FALSE\");\n                        return 1;\n                    }\n                    typeu.val_str = relative;\n\n                    /* if the filter applies to DNAMES, exactly filter on each\n                     * row, else, filter on any path */\n                    if (table == T_DNAMES) {\n                        if (prefix_table) {\n                            snprintf(param1, sizeof(param1), \"%s.parent_id\",\n                                     table2name(table));\n                            snprintf(param2, sizeof(param2), \"%s.name\",\n                                     table2name(table));\n                        } else {\n                            rh_strncpy(param1, \"parent_id\", sizeof(param1));\n                            rh_strncpy(param2, \"name\", sizeof(param2));\n                        }\n\n                        if (p_filter->filter_simple.\n                            
filter_flags[i] & FILTER_FLAG_ALLOW_NULL)\n                            g_string_append(filter_str, \"(\");\n\n                        g_string_append_printf(filter_str,\n                                               THIS_PATH_FUNC \"(%s,%s)%s\",\n                                               param1, param2,\n                                               compar2str(p_filter->\n                                                          filter_simple.\n                                                          filter_compar[i]));\n                        printdbtype(&p_mgr->conn, filter_str,\n                                    field_infos[index].db_type, &typeu);\n\n                        if (p_filter->filter_simple.\n                            filter_flags[i] & FILTER_FLAG_ALLOW_NULL)\n                            g_string_append_printf(filter_str,\n                                                   \" OR \" THIS_PATH_FUNC\n                                                   \"(%s,%s) IS NULL)\", param1,\n                                                   param2);\n                    } else {\n                        if (prefix_table)\n                            snprintf(param1, sizeof(param1), \"%s.id\",\n                                     table2name(table));\n                        else\n                            rh_strncpy(param1, \"id\", sizeof(param1));\n\n                        if (p_filter->filter_simple.\n                            filter_flags[i] & FILTER_FLAG_ALLOW_NULL)\n                            g_string_append(filter_str, \"(\");\n\n                        g_string_append_printf(filter_str,\n                                               ONE_PATH_FUNC \"(%s)%s\", param1,\n                                               compar2str(p_filter->\n                                                          filter_simple.\n                                                          filter_compar[i]));\n                        printdbtype(&p_mgr->conn, 
filter_str,\n                                    field_infos[index].db_type, &typeu);\n\n                        if (p_filter->filter_simple.\n                            filter_flags[i] & FILTER_FLAG_ALLOW_NULL)\n                            g_string_append_printf(filter_str,\n                                                   \" OR \" ONE_PATH_FUNC\n                                                   \"(%s) IS NULL)\", param1);\n                    }\n                }\n\n                /* add closing parenthesis, etc... */\n                if (p_filter->filter_simple.filter_flags[i] & FILTER_FLAG_NOT)\n                    /* NOT (x <cmp> <val>) */\n                    g_string_append(filter_str, \")\");\n\n                if (p_filter->filter_simple.filter_flags[i] & FILTER_FLAG_END)\n                    g_string_append(filter_str, \")\");\n\n end_of_loop:\n                nb_fields++;\n            }\n        }\n    }\n    return nb_fields;\n}\n\nstatic void attr2filter_field(GString *str, table_enum table,\n                              unsigned int attr, bool prefix_table)\n{\n    if (match_table(table, attr)\n        || (table == T_NONE && !is_stripe_field(attr))) {\n        /* exception: fullpath is a real field in SOFT_RM and temporary softrm\n           table */\n        if (is_funcattr(attr) && !((table == T_SOFTRM || table == T_TMP_SOFTRM)\n                                   && (attr == ATTR_INDEX_fullpath))) {\n            char prefix[128] = \"\";\n\n            if (prefix_table)\n                snprintf(prefix, sizeof(prefix), \"%s.\",\n                         table2name(table ==\n                                    T_NONE ? field2table(attr) : table));\n\n            print_func_call(str, attr, prefix);\n        } else {    /* std field */\n\n            if (prefix_table)\n                g_string_append_printf(str, \"%s.\",\n                                       table2name(table ==\n                                                  T_NONE ? 
field2table(attr) :\n                                                  table));\n\n            g_string_append(str, field_name(attr));\n        }\n    } else if ((table == T_STRIPE_ITEMS || table == T_NONE)\n               && (field_type(attr) == DB_STRIPE_ITEMS)) {\n        if (prefix_table)\n            g_string_append_printf(str, \"%s.\", STRIPE_ITEMS_TABLE);\n\n        g_string_append(str, \"ostidx\");\n    } else if ((table == T_STRIPE_INFO || table == T_NONE)\n               && (field_type(attr) == DB_STRIPE_INFO)) {\n        /* XXX Assume that the only possible filter here is on pool_name */\n        if (prefix_table)\n            g_string_append_printf(str, \"%s.\", STRIPE_INFO_TABLE);\n\n        g_string_append(str, \"pool_name\");\n    }\n}\n\n/** find index of table for begin_and blocks. */\nstatic int find_my_table_idx(const lmgr_filter_t *p_filter, int cur_index)\n{\n   /*\n    * find table where begin_end block is required\n    * search forward for BEGIN and backward for END\n    */\n    if (p_filter->filter_simple.filter_flags[cur_index]\n        & (FILTER_FLAG_BEGIN_BLOCK | FILTER_FLAG_END_BLOCK)) {\n        int j = 0;\n        int fc = p_filter->filter_simple.filter_count;\n\n        /*\n         * If begin block, search forward for next filter that is not\n         * a begin block\n         * for end block perform backward search\n         */\n        if (p_filter->filter_simple.filter_flags[cur_index]\n            & FILTER_FLAG_BEGIN_BLOCK)\n            for (j = cur_index; j < fc; j++) {\n                if (!(p_filter->filter_simple.filter_flags[j]\n                    & (FILTER_FLAG_BEGIN_BLOCK\n                       | FILTER_FLAG_END_BLOCK)))\n                    break;\n            }\n        else\n            for (j = cur_index; j >= 0; j--) {\n                if (!(p_filter->filter_simple.filter_flags[j]\n                    & (FILTER_FLAG_BEGIN_BLOCK\n                       | FILTER_FLAG_END_BLOCK)))\n                    break;\n          
  }\n        return j;\n    }\n    else\n        return cur_index;\n}\n\nint filter2str(lmgr_t *p_mgr, GString *str, const lmgr_filter_t *p_filter,\n               table_enum table, attrset_op_flag_e flags)\n{\n    int i;\n    unsigned int nbfields = 0;\n    db_type_u typeu;\n    bool leading_and = flags & AOF_LEADING_SEP;\n    bool prefix_table = flags & AOF_PREFIX;\n\n    if (p_filter->filter_type == FILTER_SIMPLE) {\n\n        for (i = 0; i < p_filter->filter_simple.filter_count; i++) {\n            unsigned int index = p_filter->filter_simple.filter_index[\n                                                find_my_table_idx(p_filter, i)];\n            bool case_sensitive = true;\n            bool match = match_table(table, index)\n                || ((table == T_STRIPE_ITEMS) && (index < ATTR_COUNT)\n                    && (field_infos[index].db_type == DB_STRIPE_ITEMS))\n                || ((table == T_STRIPE_INFO) && (index < ATTR_COUNT)\n                    && (field_infos[index].db_type == DB_STRIPE_INFO));\n\n            /* filter on generated fields are not allowed */\n            if (is_dirattr(index)) {\n                DisplayLog(LVL_FULL, LISTMGR_TAG,\n                           \"Special filter on dir attribute '%s'\",\n                           field_name(index));\n                continue;\n            } else if (is_gen_field(index)) {\n                DisplayLog(LVL_DEBUG, LISTMGR_TAG,\n                           \"Ignoring filter on generated field '%s'\",\n                           field_name(index));\n                continue;\n            }\n\n            if (match || (table == T_NONE)) {\n                /*\n                 * if this is a begin_end block add new parenthesing\n                 * layer and continue\n                 * skip new block creation if table is not correct\n                 */\n                switch(p_filter->filter_simple.filter_flags[i]) {\n                    case FILTER_FLAG_BEGIN_BLOCK:\n                        
if (nbfields || leading_and)\n                            g_string_append(str, \" AND (\");\n                        else\n                            g_string_append_c(str, '(');\n                        nbfields = 0;\n                        continue;\n                        break;\n                    case FILTER_FLAG_END_BLOCK:\n                        g_string_append_c(str, ')');\n                        continue;\n                        break;\n                    case FILTER_FLAG_OR | FILTER_FLAG_BEGIN_BLOCK:\n                        if (nbfields)\n                            g_string_append(str, \" OR (\");\n                        else\n                            g_string_append_c(str, '(');\n                        nbfields = 0;\n                        continue;\n                        break;\n                }\n\n                /* add prefixes or parenthesis, etc. */\n                if (leading_and || (nbfields > 0)) {\n                    if (p_filter->filter_simple.\n                        filter_flags[i] & FILTER_FLAG_OR)\n                        g_string_append(str, \" OR \");\n                    else\n                        g_string_append(str, \" AND \");\n                }\n\n                /* NOT_BEGIN is for expressions like: NOT ( <x> ...\n                 * and is to be terminated by END.\n                 * whereas BEGIN + NOT will result in (NOT (<x>) ...\n                 */\n                if (p_filter->filter_simple.\n                    filter_flags[i] & FILTER_FLAG_NOT_BEGIN)\n                    g_string_append(str, \"NOT (\");\n                if (p_filter->filter_simple.filter_flags[i] & FILTER_FLAG_BEGIN)\n                    g_string_append_c(str, '(');\n\n                if (p_filter->filter_simple.filter_flags[i] & FILTER_FLAG_NOT) {\n                    if (p_filter->filter_simple.\n                        filter_flags[i] & FILTER_FLAG_ALLOW_NULL)\n                        /* (NOT (x <cmp> <val>) OR x IS NULL) */\n    
                    g_string_append(str, \" (NOT (\");\n                    else\n                        /* NOT (x <cmp> <val>) */\n                        g_string_append(str, \" NOT (\");\n                } else if (p_filter->filter_simple.\n                           filter_flags[i] & FILTER_FLAG_ALLOW_NULL)\n                    /* (x <cmp> <val> OR x IS NULL) */\n                    g_string_append_c(str, '(');\n\n                /* If the field is a VARBINARY and the matching must be\n                 * insensitive, convert it to varchar */\n                if ((p_filter->filter_simple.filter_compar[i] == ILIKE\n                     || p_filter->filter_simple.filter_compar[i] == IUNLIKE)\n                    && field_infos[index].db_type == DB_TEXT) {\n                    case_sensitive = false;\n                    g_string_append(str, \"CONVERT(\");\n                }\n            }\n\n            // Avoid messing up the query with unecessary table JOINS\n            if (p_filter->filter_simple.filter_flags[i]\n                 & (FILTER_FLAG_BEGIN_BLOCK | FILTER_FLAG_END_BLOCK))\n                continue;\n\n            /* append field name or function call */\n            attr2filter_field(str, table, index, prefix_table);\n\n            if (!case_sensitive)\n                g_string_append(str, \" USING latin1)\");\n\n            if (match_table(table, index)\n                || (table == T_NONE && !is_stripe_field(index))) {\n                /* append comparator */\n                if (is_sepdlist(index)) {\n                    /* always match '%+<item>+%' => use LIKE and UNLIKE */\n                    if (p_filter->filter_simple.filter_compar[i] == EQUAL)\n                        p_filter->filter_simple.filter_compar[i] = LIKE;\n                    else if (p_filter->filter_simple.filter_compar[i] ==\n                             NOTEQUAL)\n                        p_filter->filter_simple.filter_compar[i] = UNLIKE;\n                }\n                
g_string_append(str,\n                                compar2str(p_filter->filter_simple.\n                                           filter_compar[i]));\n\n                /* no expected value after IS NULL or IS NOT NULL */\n                if (p_filter->filter_simple.filter_compar[i] != ISNULL\n                    && p_filter->filter_simple.filter_compar[i] != NOTNULL) {\n                    /* fullpath already includes root for SOFT_RM table */\n                    if ((index == ATTR_INDEX_fullpath) && (table != T_SOFTRM)) {\n                        char relative[RBH_PATH_MAX];\n\n                        if (fullpath_attr2db\n                            (p_filter->filter_simple.filter_value[i].value.\n                             val_str, relative)) {\n                            /* condition is always false */\n                            g_string_append(str, \"FALSE\");\n                        } else {\n                            typeu.val_str = relative;\n                            printdbtype(&p_mgr->conn, str,\n                                        field_infos[index].db_type, &typeu);\n                        }\n                    } else {\n                        char tmp[1024];\n\n                        if (is_sepdlist(index)) {\n                            /* match '%+<item>+%' */\n                            separated_list2match(p_filter->filter_simple.\n                                                 filter_value[i].value.val_str,\n                                                 tmp, sizeof(tmp));\n                            typeu.val_str = tmp;\n                        } else\n                            /* single value (list only apply to OSTs XXX for\n                             * now) */\n                            typeu =\n                                p_filter->filter_simple.filter_value[i].value;\n\n                        printdbtype(&p_mgr->conn, str, field_type(index),\n                                    &typeu);\n             
       }\n                }\n                nbfields++;\n            } else if ((table == T_STRIPE_ITEMS || table == T_NONE)\n                       && (field_type(index) == DB_STRIPE_ITEMS)) {\n                /* single value or a list? */\n                if (p_filter->filter_simple.filter_compar[i] == IN\n                    || (p_filter->filter_simple.filter_compar[i] == NOTIN)) {\n                    unsigned int j;\n                    db_type_u *list;\n\n                    g_string_append_printf(str, \"%s(\",\n                                           compar2str(p_filter->filter_simple.\n                                                      filter_compar[i]));\n\n                    list = p_filter->filter_simple.filter_value[i].list.values;\n                    for (j = 0;\n                         j < p_filter->filter_simple.filter_value[i].list.count;\n                         j++) {\n                        g_string_append_printf(str, \"%s%u\", j == 0 ? \"\" : \",\",\n                                               list[j].val_uint);\n                    }\n                    g_string_append(str, \")\");\n                } else {    /* single value */\n\n                    g_string_append_printf(str, \"%s%u\",\n                                           compar2str(p_filter->filter_simple.\n                                                      filter_compar[i]),\n                                           p_filter->filter_simple.\n                                           filter_value[i].value.val_uint);\n                }\n                nbfields++;\n            } else if ((table == T_STRIPE_INFO || table == T_NONE)\n                       && (field_type(index) == DB_STRIPE_INFO)) {\n                g_string_append_printf(str, \"%s'%s'\",\n                                       compar2str(p_filter->filter_simple.\n                                                  filter_compar[i]),\n                                       
p_filter->filter_simple.filter_value[i].\n                                       value.val_str);\n                nbfields++;\n            }\n\n            if (match || table == T_NONE) {\n\n                if (p_filter->filter_simple.filter_flags[i] & FILTER_FLAG_NOT) {\n                    if (p_filter->filter_simple.filter_flags[i]\n                        & FILTER_FLAG_ALLOW_NULL) {\n                        /* (NOT (x <cmp> <val>) OR x IS NULL) */\n                        g_string_append(str, \") OR \");\n                        attr2filter_field(str, table, index, prefix_table);\n                        g_string_append(str, \" IS NULL)\");\n                    } else\n                        /* NOT (x <cmp> <val>) */\n                        g_string_append(str, \")\");\n                } else if (p_filter->filter_simple.filter_flags[i]\n                           & FILTER_FLAG_ALLOW_NULL) {\n                    /* OR x IS NULL */\n                    g_string_append(str, \" OR \");\n                    attr2filter_field(str, table, index, prefix_table);\n                    g_string_append(str, \" IS NULL)\");\n                }\n\n                if (p_filter->filter_simple.filter_flags[i] & FILTER_FLAG_END)\n                    g_string_append(str, \")\");\n                if (p_filter->filter_simple.\n                    filter_flags[i] & FILTER_FLAG_NOT_END)\n                    g_string_append(str, \")\");\n            }\n        }   /* end for */\n    } else {\n        return -DB_NOT_SUPPORTED;\n    }\n    return nbfields;\n}   /* filter2str */\n\nconst char *dirattr2str(unsigned int attr_index)\n{\n    switch (attr_index) {\n    case ATTR_INDEX_dircount:\n        return \"COUNT(*)\";\n    case ATTR_INDEX_avgsize:\n        return \"ROUND(AVG(size),0)\";\n    default:\n        DisplayLog(LVL_CRIT, LISTMGR_TAG, \"Unexpected attr index %u in %s\",\n                   attr_index, __func__);\n        return NULL;\n    }\n}\n\n/** Helper to build a where 
clause from a list of fields to be filtered\n * @param where initialized empty GString.\n * @param[out] counts count of filter fields in each table.\n * @return the number of created filters.\n */\nint filter_where(lmgr_t *p_mgr, const lmgr_filter_t *p_filter,\n                 struct field_count *counts, GString *where,\n                 attrset_op_flag_e flags)\n{\n    int nb; /* can be < 0 */\n    unsigned int all = 0;\n\n    /* on which table are the filters ?  */\n    nb = filter2str(p_mgr, where, p_filter, T_MAIN,\n                    (flags & AOF_LEADING_SEP) | AOF_PREFIX);\n    if (nb > 0) {\n        counts->nb_main += nb;\n        all += nb;\n    }\n\n    nb = filter2str(p_mgr, where, p_filter, T_ANNEX,\n                    (all > 0 ? AOF_LEADING_SEP : 0) |\n                    (flags & AOF_LEADING_SEP) | AOF_PREFIX);\n    if (nb > 0) {\n        counts->nb_annex += nb;\n        all += nb;\n    }\n\n    if ((flags & AOF_SKIP_NAME) == 0) {\n        nb = filter2str(p_mgr, where, p_filter, T_DNAMES,\n                        (all > 0 ? AOF_LEADING_SEP : 0) |\n                        (flags & AOF_LEADING_SEP) | AOF_PREFIX);\n        if (nb > 0) {\n            counts->nb_names += nb;\n            all += nb;\n        }\n    }\n\n    /* stripes are only managed for Lustre filesystems */\n#ifdef _LUSTRE\n    nb = filter2str(p_mgr, where, p_filter, T_STRIPE_INFO,\n                    (all > 0 ? AOF_LEADING_SEP : 0) |\n                    (flags & AOF_LEADING_SEP) | AOF_PREFIX);\n    if (nb > 0) {\n        counts->nb_stripe_info += nb;\n        all += nb;\n    }\n\n    nb = filter2str(p_mgr, where, p_filter, T_STRIPE_ITEMS,\n                    (all > 0 ? 
AOF_LEADING_SEP : 0) |\n                    (flags & AOF_LEADING_SEP) | AOF_PREFIX);\n    if (nb > 0) {\n        counts->nb_stripe_items += nb;\n        all += nb;\n    }\n#endif\n\n    return all;\n}\n\n/** helper to lighten filter_from function */\nstatic inline void append_from_clause(table_enum tab, GString *from,\n                                      table_enum *first_table)\n{\n    const char *tname = table2name(tab);\n\n    if (*first_table == T_NONE) {\n        *first_table = tab;\n        g_string_append(from, tname);\n    } else\n        /* XXX LEFT JOIN or INNER JOIN? */\n        /* XXX INNER join if there is a criteria on right table? */\n        g_string_append_printf(from, \" LEFT JOIN %s ON %s.id=%s.id\", tname,\n                               table2name(*first_table), tname);\n}\n\n/** Helper to build a 'from' clause (table junction) depending on filter counts\n * It must be called only if filter is non empty.\n * @param[in] counts filter counts filled-in by filter_where() function.\n * @param[in,out] from initialized empty GString.\n * @param[in, out] first_table the first table in the junction.\n * @param[out] select_distinct_id indicate if the request must select distinct\n *             ids.\n * @param[in] flags or'ed AOF_LEADING_SEP if there is a previous table,\n *                  AOF_SKIP_NAME to skip name field.\n */\nvoid filter_from(lmgr_t *p_mgr, const struct field_count *counts,\n                 GString *from, table_enum *first_table,\n                 bool *select_distinct_id, attrset_op_flag_e flags)\n{\n    /* no separator means no previous table */\n    if ((flags & AOF_LEADING_SEP) == 0)\n        *first_table = T_NONE;\n\n    if (counts->nb_main)\n        append_from_clause(T_MAIN, from, first_table);\n    if (counts->nb_annex)\n        append_from_clause(T_ANNEX, from, first_table);\n    if (counts->nb_names && !(flags & AOF_SKIP_NAME)) {\n        *select_distinct_id = true;\n        append_from_clause(T_DNAMES, from, 
first_table);\n    }\n    if (counts->nb_stripe_info)\n        append_from_clause(T_STRIPE_INFO, from, first_table);\n    if (counts->nb_stripe_items) {\n        *select_distinct_id = true;\n        append_from_clause(T_STRIPE_ITEMS, from, first_table);\n    }\n}\n\n/* special masks values for id2pk and pk2id */\n#define MASK_ID2PK  0\n#define MASK_PK2ID  1\n\nvoid entry_id2pk(const entry_id_t *p_id, PK_PARG_T p_pk)\n{\n#ifndef FID_PK\n    snprintf(p_pk, PK_LEN, \"%\" PRI_DT \":%LX\", p_id->fs_key,\n             (unsigned long long)p_id->inode);\n#else /* FID_PK */\n    snprintf(p_pk, DB_FID_LEN, DFID_NOBRACE, PFID(p_id));\n#endif\n}\n\nint pk2entry_id(lmgr_t *p_mgr, PK_ARG_T pk, entry_id_t *p_id)\n{\n#ifndef FID_PK\n    unsigned long long tmp_ino;\n\n    if (sscanf(pk, \"%\" PRI_DT \":%LX\", &p_id->fs_key, &tmp_ino) != FID_SCAN_CNT)\n        return DB_INVALID_ARG;\n    else {\n        p_id->inode = tmp_ino;\n        return DB_SUCCESS;\n    }\n#else /* FID_PK */\n    if (sscanf(pk, SFID, RFID(p_id)) != FID_SCAN_CNT)\n        return DB_INVALID_ARG;\n    else\n        return DB_SUCCESS;\n#endif\n}\n\nvoid append_size_range_fields(GString *str, bool leading_comma,\n                              const char *prefix)\n{\n    unsigned int i;\n\n    for (i = 0; i < SZ_PROFIL_COUNT; i++)\n        g_string_append_printf(str, \"%s%s%s\", leading_comma\n                               || (i > 0) ? 
\",\" : \"\", prefix, sz_field[i]);\n}\n\n/* those functions are used for begin/commit/rollback */\nint _lmgr_begin(lmgr_t *p_mgr, int behavior)\n{\n    if (behavior == 0)\n        /* autocommit */\n        return DB_SUCCESS;\n    else if (behavior == 1)\n        /* commit every transaction */\n        return db_exec_sql(&p_mgr->conn, \"BEGIN\", NULL);\n    else {\n        int rc = DB_SUCCESS;\n\n        /* if last operation was committed, issue a begin statement */\n        if (p_mgr->last_commit == 0) {\n            rc = db_exec_sql(&p_mgr->conn, \"BEGIN\", NULL);\n            if (rc)\n                return rc;\n        }\n\n        /* increment current op */\n        p_mgr->last_commit++;\n        return DB_SUCCESS;\n    }\n}\n\nvoid _lmgr_rollback(lmgr_t *p_mgr, int behavior)\n{\n    if (behavior == 0)\n        return;\n    else {\n        /* we must rollback all operations since the last commit,\n         * to keep database into persistent state */\n        db_exec_sql(&p_mgr->conn, \"ROLLBACK\", NULL);\n\n        p_mgr->last_commit = 0;\n    }\n}\n\nint _lmgr_commit(lmgr_t *p_mgr, int behavior)\n{\n    if (behavior == 0)\n        return DB_SUCCESS;\n    else if (behavior == 1)\n        return db_exec_sql(&p_mgr->conn, \"COMMIT\", NULL);\n    else {\n        /* if the transaction count is reached:\n         * commit operations and result transaction count\n         */\n        if ((p_mgr->last_commit % behavior == 0) || p_mgr->force_commit) {\n            int rc;\n            rc = db_exec_sql(&p_mgr->conn, \"COMMIT\", NULL);\n            if (rc)\n                return rc;\n\n            p_mgr->last_commit = 0;\n        }\n    }\n    return DB_SUCCESS;\n}\n\n/** Set force commit behavior */\nvoid ListMgr_ForceCommitFlag(lmgr_t *p_mgr, bool force_commit)\n{\n    p_mgr->force_commit = force_commit;\n}\n\n/** check if the last operation was really committed\n * @return TRUE if the last operation has been committed,\n * @return FALSE if commit is deferred.\n 
*/\nbool ListMgr_GetCommitStatus(lmgr_t *p_mgr)\n{\n    /* operation was not committed if period > 1 and last_commit is not reset\n     * yet */\n    if ((lmgr_config.commit_behavior > 1) && (p_mgr->last_commit != 0))\n        return false;\n    else\n        return true;\n}\n\nint _lmgr_flush_commit(lmgr_t *p_mgr, int behavior)\n{\n    int rc;\n    if ((behavior > 1) && (p_mgr->last_commit != 0)) {\n        rc = db_exec_sql(&p_mgr->conn, \"COMMIT\", NULL);\n        if (rc)\n            return rc;\n\n        p_mgr->last_commit = 0;\n        return DB_SUCCESS;\n    } else\n        return DB_SUCCESS;\n}\n\nint lmgr_table_count(db_conn_t *pconn, const char *table, uint64_t *count)\n{\n    char *str_count = NULL;\n    result_handle_t result = NULL;\n    char *sql;\n    int rc;\n\n    if (asprintf(&sql, \"SELECT COUNT(*) FROM %s\", table) == -1)\n        return DB_NO_MEMORY;\n\n    /* execute the request */\n    rc = db_exec_sql(pconn, sql, &result);\n    if (rc)\n        goto out_free;\n\n    rc = db_next_record(pconn, &result, &str_count, 1);\n    if (rc)\n        goto out_free;\n\n    if (sscanf(str_count, \"%\" SCNu64, count) != 1)\n        rc = DB_REQUEST_FAILED;\n\n out_free:\n    if (result != NULL)\n        db_result_free(pconn, &result);\n    free(sql);\n    return rc;\n}\n\n/**\n * If p_target_attrset attributes are unset,\n * retrieve them from p_source_attrset.\n */\nvoid ListMgr_MergeAttrSets(attr_set_t *p_target_attrset,\n                           const attr_set_t *p_source_attrset, bool update)\n{\n    int i, cookie;\n    db_type_u typeu;\n\n    cookie = -1;\n    while ((i = attr_index_iter(0, &cookie)) != -1) {\n        if ((update || !attr_mask_test_index(&p_target_attrset->attr_mask, i))\n            && attr_mask_test_index(&p_source_attrset->attr_mask, i)) {\n            /* status attr */\n            if (is_status_field(i)) {\n                unsigned int status_idx = attr2status_index(i);\n\n                if 
(p_source_attrset->attr_values.sm_status == NULL)\n                    RBH_BUG(\"status flag is set but status array is not \"\n                            \"allocated\");\n\n                sm_status_ensure_alloc(&p_target_attrset->attr_values.\n                                       sm_status);\n                /* copy the values as-is (static const strings, not\n                 * allocated) */\n                p_target_attrset->attr_values.sm_status[status_idx]\n                    = p_source_attrset->attr_values.sm_status[status_idx];\n            }\n            /* SM specific info */\n            else if (is_sm_info_field(i)) {\n                unsigned int idx = attr2sminfo_index(i);\n\n                if (p_source_attrset->attr_values.sm_info == NULL)\n                    RBH_BUG(\"specific info flag is set but info array is not \"\n                            \"allocated\");\n\n                sm_info_ensure_alloc(&p_target_attrset->attr_values.sm_info);\n\n                /* free the previous value, if any */\n                if (p_target_attrset->attr_values.sm_info[idx] != NULL)\n                    free(p_target_attrset->attr_values.sm_info[idx]);\n\n                /* duplicate the field according to its type */\n                assign_union(&typeu, field_type(i),\n                             p_source_attrset->attr_values.sm_info[idx]);\n\n                /* duplicate the value to target */\n                p_target_attrset->attr_values.sm_info[idx] =\n                    dup_value(field_type(i), typeu);\n            } else if (!is_stripe_field(i)) {\n                assign_union(&typeu, field_infos[i].db_type,\n                             attr_address_const(p_source_attrset, i));\n                union_get_value(attr_address(p_target_attrset, i),\n                                field_infos[i].db_type, &typeu);\n            }\n#ifdef _LUSTRE\n            else if (field_infos[i].db_type == DB_STRIPE_ITEMS) {\n                /* free previous value 
if set */\n                if (attr_mask_test_index(&p_target_attrset->attr_mask, i))\n                    free_stripe_items((stripe_items_t *)\n                                      attr_address(p_target_attrset, i));\n\n                dup_stripe_items((stripe_items_t *)\n                                 attr_address(p_target_attrset, i),\n                                 (stripe_items_t *)\n                                 attr_address_const(p_source_attrset, i));\n            } else if (field_infos[i].db_type == DB_STRIPE_INFO) {\n                memcpy(attr_address(p_target_attrset, i),\n                       attr_address_const(p_source_attrset, i),\n                       sizeof(stripe_info_t));\n            }\n#endif\n\n            attr_mask_set_index(&p_target_attrset->attr_mask, i);\n        }\n    }\n    return;\n}\n\nvoid ListMgr_FreeAttrs(attr_set_t *p_set)\n{\n    if (p_set == NULL)\n        return;\n\n#ifdef _LUSTRE\n    int i;\n\n    /* Free stripe count attributes (in std attrs) */\n    for (i = 0; i < ATTR_COUNT; i++) {\n        if ((field_infos[i].db_type == DB_STRIPE_ITEMS)\n            && attr_mask_test_index(&p_set->attr_mask, i)) {\n            free_stripe_items((stripe_items_t *) attr_address(p_set, i));\n        }\n    }\n#endif\n    sm_status_free(&p_set->attr_values.sm_status);\n    sm_info_free(&p_set->attr_values.sm_info);\n}\n\n/** return the mask of attributes that differ */\nattr_mask_t ListMgr_WhatDiff(const attr_set_t *p_tgt, const attr_set_t *p_src)\n{\n    int i, cookie;\n    attr_mask_t common_mask\n        = attr_mask_and(&p_tgt->attr_mask, &p_src->attr_mask);\n    attr_mask_t diff_mask = null_mask;\n\n    cookie = -1;\n    while ((i = attr_index_iter(0, &cookie)) != -1) {\n        if (attr_mask_test_index(&common_mask, i)) {\n            bool is_diff = false;\n\n            /* status attr */\n            if (is_status_field(i)) {\n                unsigned int status_idx = attr2status_index(i);\n\n                /** array is 
allocated as we check the common mask */\n                if (p_tgt->attr_values.sm_status[status_idx]\n                    != p_src->attr_values.sm_status[status_idx])\n                    attr_mask_set_index(&diff_mask, i);\n            }\n            /* SM specific info */\n            else if (is_sm_info_field(i)) {\n                unsigned int idx = attr2sminfo_index(i);\n\n                is_diff =\n                    diff_union(field_type(i), p_src->attr_values.sm_info[idx],\n                               p_tgt->attr_values.sm_info[idx]);\n                if (is_diff)\n                    attr_mask_set_index(&diff_mask, i);\n            } else if (!is_stripe_field(i)) {\n                /* diff the values */\n                is_diff = diff_union(field_infos[i].db_type,\n                                     attr_address_const(p_src, i),\n                                     attr_address_const(p_tgt, i));\n                if (is_diff)\n                    attr_mask_set_index(&diff_mask, i);\n            }\n#ifdef _LUSTRE\n            else if (field_infos[i].db_type == DB_STRIPE_INFO) {\n                if ((ATTR(p_tgt, stripe_info).stripe_size\n                     != ATTR(p_src, stripe_info).stripe_size)\n                    || (ATTR(p_tgt, stripe_info).stripe_count\n                        != ATTR(p_src, stripe_info).stripe_count)\n#ifdef HAVE_LLAPI_FSWAP_LAYOUTS\n                    || (ATTR(p_tgt, stripe_info).validator\n                        != ATTR(p_src, stripe_info).validator)\n#endif\n                    || (strcmp(ATTR(p_tgt, stripe_info).pool_name,\n                               ATTR(p_src, stripe_info).pool_name) != 0)) {\n                    attr_mask_set_index(&diff_mask, i);\n                }\n            } else if (field_infos[i].db_type == DB_STRIPE_ITEMS) {\n                if (ATTR(p_tgt, stripe_items).count\n                    != ATTR(p_src, stripe_items).count)\n                    is_diff = true;\n                else {\n          
          int i;\n                    for (i = 0; i < ATTR(p_tgt, stripe_items).count; i++) {\n                        if ((ATTR(p_tgt, stripe_items).stripe[i].ost_idx !=\n                             ATTR(p_src, stripe_items).stripe[i].ost_idx)\n                            ||\n                            (ATTR(p_tgt, stripe_items).stripe[i].ost_gen !=\n                             ATTR(p_src, stripe_items).stripe[i].ost_gen)\n                            ||\n                            (ATTR(p_tgt, stripe_items).stripe[i].obj_id !=\n                             ATTR(p_src, stripe_items).stripe[i].obj_id)\n                            ||\n                            (ATTR(p_tgt, stripe_items).stripe[i].obj_seq !=\n                             ATTR(p_src, stripe_items).stripe[i].obj_seq)) {\n                            is_diff = true;\n                            break;\n                        }\n                    }\n                }\n                if (is_diff)\n                    attr_mask_set_index(&diff_mask, i);\n            }\n#endif\n        }\n    }\n    return diff_mask;\n}\n\n/** Convert a set notation (eg. 
\"3,5-8,12\") to a list of values\n * \\param type[in] the type of output array (DB_INT, DB_UINT, ...)\n * \\param p_list[out] list of values (the function allocates a buffer for\n *                    p_list->values)\n */\nint lmgr_range2list(const char *set, db_type_e type, value_list_t *p_list)\n{\n    char *curr, *next;\n    char buffer[1024];\n\n    /* check args */\n    if (!p_list)\n        return -1;\n    /* only uint supported */\n    if (type != DB_UINT)\n        return -1;\n\n    /* local copy for strtok */\n    rh_strncpy(buffer, set, 1024);\n\n    /* inialize list */\n    p_list->count = 0;\n    p_list->values = NULL;\n\n    /* tokenize by ',' */\n    curr = strtok_r(buffer, \",\", &next);\n    while (curr) {\n        /* check for range notation */\n        char *dash = strchr(curr, '-');\n        if (!dash) {\n            /* single value */\n            int tmpval;\n            tmpval = str2int(curr);\n            if (tmpval == -1)\n                goto out_free;\n            p_list->values =\n                MemRealloc(p_list->values,\n                           (1 + p_list->count) * sizeof(*(p_list->values)));\n            if (!p_list->values)\n                goto out_free;\n            p_list->values[p_list->count].val_uint = tmpval;\n            p_list->count++;\n        } else {\n            /* range */\n            int val_start, val_end, i;\n            unsigned int j;\n            *dash = '\\0';   /* tokenize at '-' */\n            dash++; /*  points to end value */\n            val_start = str2int(curr);\n            val_end = str2int(dash);\n            if (val_start == -1 || val_end == -1 || val_end < val_start)\n                goto out_free;\n\n            p_list->values =\n                MemRealloc(p_list->values,\n                           (val_end - val_start + 1 +\n                            p_list->count) * sizeof(*(p_list->values)));\n            if (!p_list->values)\n                goto out_free;\n            for (i = 0, j = 
val_start; j <= val_end; i++, j++) {\n                p_list->values[p_list->count + i].val_uint = j;\n            }\n            p_list->count += val_end - val_start + 1;\n        }\n\n        curr = strtok_r(NULL, \",\", &next);\n    }\n    return 0;\n\n out_free:\n    if (p_list->values)\n        MemFree(p_list->values);\n    p_list->values = NULL;\n    p_list->count = 0;\n    return -1;\n}\n\n/** manage delayed retry of retryable errors\n * \\return 1 if the transaction must be restarted\n * \\return 2 if transaction must be cancelled\n */\nint _lmgr_delayed_retry(lmgr_t *lmgr, int errcode, const char *func, int line)\n{\n    if (!db_is_retryable(errcode)) {\n        /* if a retry was pending, display a success message */\n        if (lmgr->retry_delay != 0) {\n            struct timeval diff, now;\n            timerclear(&diff);\n            gettimeofday(&now, NULL);\n            timersub(&now, &lmgr->first_error, &diff);\n\n            /* Only notify success if the succeeded function\n             * is the same as the last error.\n             */\n            if ((lmgr->last_err_func == func) && (lmgr->last_err_line == line)\n                && errcode == DB_SUCCESS) {\n                DisplayLog(LVL_EVENT, LISTMGR_TAG,\n                           \"DB operation succeeded after %u retries (%ld.%03ld sec)\",\n                           lmgr->retry_count, diff.tv_sec, diff.tv_usec / 1000);\n            }\n\n            /* reset retry delay if no error occurred,\n             * or if the error is not retryable */\n            lmgr->retry_delay = 0;\n            lmgr->retry_count = 0;\n            timerclear(&lmgr->first_error);\n        }\n        return 0;\n    }\n\n    /* Got TERM signal, must stop transactions and exit. 
*/\n    if (lmgr_cancel_retry)\n        return 2;\n\n    /* transaction is about to be restarted,\n     * sleep for a given time */\n    if (lmgr->retry_delay == 0) {\n        /* first error, first sleep */\n        gettimeofday(&lmgr->first_error, NULL);\n        lmgr->retry_delay = lmgr_config.connect_retry_min;\n    } else {\n        lmgr->retry_delay *= 2;\n        if (lmgr->retry_delay > lmgr_config.connect_retry_max)\n            lmgr->retry_delay = lmgr_config.connect_retry_max;\n    }\n    lmgr->last_err_func = func;\n    lmgr->last_err_line = line;\n    if (lmgr->retry_count == 0)\n        DisplayLog(LVL_EVENT, LISTMGR_TAG,\n                   \"Retryable DB error in %s l.%u. Retrying...\", func, line);\n    else    /* only display for debug level */\n        DisplayLog(LVL_DEBUG, LISTMGR_TAG,\n                   \"Retryable DB error in %s l.%u. Restarting transaction in %u sec...\",\n                   func, line, lmgr->retry_delay);\n\n    rh_sleep(lmgr->retry_delay);\n    lmgr->retry_count++;\n    return 1;\n}\n\n/** check attribute mask compatibility for a given table */\nstatic inline bool table_mask_compat(attr_mask_t m1, attr_mask_t m2)\n{\n    /* attrs in a given table must be the same or 0 */\n    if (attr_mask_is_null(m1) || attr_mask_is_null(m2))\n        return true;\n    else\n        return attr_mask_equal(&m1, &m2);\n}\n\n/** Check mask compatibility for request batching. 
*/\nbool lmgr_batch_compat(attr_mask_t m1, attr_mask_t m2)\n{\n    if (!table_mask_compat(attr_mask_and(&m1, &main_attr_set),\n                           attr_mask_and(&m2, &main_attr_set)))\n        return false;\n    if (!table_mask_compat(attr_mask_and(&m1, &names_attr_set),\n                           attr_mask_and(&m2, &names_attr_set)))\n        return false;\n    if (!table_mask_compat(attr_mask_and(&m1, &annex_attr_set),\n                           attr_mask_and(&m2, &annex_attr_set)))\n        return false;\n    if (!table_mask_compat(attr_mask_and(&m1, &stripe_attr_set),\n                           attr_mask_and(&m2, &stripe_attr_set)))\n        return false;\n\n    return true;\n}\n\nint parse_entry_id(lmgr_t *p_mgr, const char *str, PK_PARG_T p_pk,\n                   entry_id_t *p_id)\n{\n    int rc;\n\n    if (sscanf(str, SPK, p_pk) != 1) {\n        DisplayLog(LVL_MAJOR, LISTMGR_TAG,\n                   \"Unexpected format for database key: '%s'\", str);\n        return DB_INVALID_ARG;\n    }\n\n    rc = pk2entry_id(p_mgr, p_pk, p_id);\n    if (rc)\n        DisplayLog(LVL_MAJOR, LISTMGR_TAG,\n                   \"Unexpected format for database key: \" DPK, p_pk);\n    return rc;\n}\n\nint attr_index_iter(unsigned int init, int *cookie)\n{\n    assert(cookie != NULL);\n\n    if (*cookie == -1)\n        /* initial step */\n        *cookie = init;\n    else\n        (*cookie)++;\n\n    if (*cookie == ATTR_COUNT) {\n        /* reached the end of std attrs */\n        if (sm_inst_count > 0)\n            /* go to first status index */\n            *cookie = ATTR_INDEX_FLG_STATUS;\n        else if (sm_attr_count > 0)\n            /* go to first sminfo index */\n            *cookie = ATTR_INDEX_FLG_SMINFO;\n        else\n            return -1;\n    } else if (*cookie == (ATTR_INDEX_FLG_STATUS | sm_inst_count)) {\n        /* reached the end of status attrs */\n        if (sm_attr_count > 0)\n            /* go to first sminfo index */\n            *cookie = 
ATTR_INDEX_FLG_SMINFO;\n        else\n            return -1;\n    } else if (*cookie == (ATTR_INDEX_FLG_SMINFO | sm_attr_count)) {\n        /* the end of everything */\n        return -1;\n    }\n\n    return *cookie;\n}\n\n/** unset read-only attributes from mask */\nvoid attr_mask_unset_readonly(attr_mask_t *mask)\n{\n    *mask = attr_mask_and_not(mask, &readonly_attr_set);\n}\n\nint str2sort_order(const char *str)\n{\n    if (!strcasecmp(str, \"ASC\"))\n        return SORT_ASC;\n    if (!strcasecmp(str, \"DESC\"))\n        return SORT_DESC;\n    return -1;\n}\n"
  },
  {
    "path": "src/list_mgr/listmgr_common.h",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2008-2016 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n#ifndef _LIST_MGR_COMMON_H\n#define _LIST_MGR_COMMON_H\n\n#include \"list_mgr.h\"\n#include \"listmgr_internal.h\"\n#include \"database.h\"\n#include \"status_manager.h\"\n#include \"rbh_logs.h\"\n#include \"rbh_misc.h\"\n#include <stdint.h>\n#include <glib.h>\n\nstatic inline void assign_union(db_type_u *const tgt, db_type_e type,\n                                const void *src)\n{\n    switch(type) {\n    case DB_ID:\n        tgt->val_id = *(entry_id_t *)src;\n        break;\n    case DB_ENUM_FTYPE:\n    case DB_TEXT:\n        tgt->val_str = (char *)src;\n        break;\n    case DB_UIDGID:\n        if (global_config.uid_gid_as_numbers)\n            tgt->val_int = ((uidgid_u *)src)->num;\n        else\n            tgt->val_str = ((uidgid_u *)src)->txt;\n        break;\n    case DB_INT:\n        tgt->val_int = *(int *)src;\n        break;\n    case DB_UINT:\n        tgt->val_uint = *(unsigned int *)src;\n        break;\n    case DB_SHORT:\n        tgt->val_short = *(short *)src;\n        break;\n    case DB_USHORT:\n        tgt->val_ushort = *(unsigned short *)src;\n        break;\n    case DB_BIGINT:\n        tgt->val_bigint = *(long long *)src;\n        break;\n    case DB_BIGUINT:\n        tgt->val_biguint = *(unsigned long long *)src;\n        break;\n    case DB_BOOL:\n        tgt->val_bool = *(bool *)src;\n        break;\n    case DB_STRIPE_INFO:\n    case DB_STRIPE_ITEMS:\n        RBH_BUG(\"Unsupported DB type\");\n        break;\n    }\n}\n\nstatic inline void union_get_value(void *tgt, db_type_e 
type,\n                                   const db_type_u *src)\n{\n    switch(type) {\n    case DB_ID:\n        *(entry_id_t *)tgt = src->val_id;\n        break;\n    case DB_TEXT:\n    case DB_ENUM_FTYPE:\n        strcpy(tgt, src->val_str);\n        break;\n    case DB_UIDGID:\n        if (global_config.uid_gid_as_numbers)\n            ((uidgid_u *)tgt)->num = src->val_int;\n        else\n            strcpy(((uidgid_u *)tgt)->txt, src->val_str);\n        break;\n    case DB_INT:\n        *(int *)tgt =  src->val_int;\n        break;\n    case DB_UINT:\n        *(unsigned int *)tgt = src->val_uint;\n        break;\n    case DB_SHORT:\n        *(short *)tgt = src->val_short;\n        break;\n    case DB_USHORT:\n        *(unsigned short *)tgt = src->val_ushort;\n        break;\n    case DB_BIGINT:\n        *(long long *)tgt = src->val_bigint;\n        break;\n    case DB_BIGUINT:\n        *(unsigned long long *)tgt = src->val_biguint;\n        break;\n    case DB_BOOL:\n        *(bool *)tgt = src->val_bool;\n        break;\n    case DB_STRIPE_INFO:\n    case DB_STRIPE_ITEMS:\n        RBH_BUG(\"Unsupported DB type\");\n        break;\n    }\n}\n\nstatic inline int diff_union(db_type_e type, const void *addr1,\n                             const void *addr2)\n{\n    switch(type) {\n    case DB_ID:\n        return !entry_id_equal((const entry_id_t *)addr1,\n                               (const entry_id_t *)addr2);\n    case DB_ENUM_FTYPE:\n    case DB_TEXT:\n        return strcmp((char *)addr1, (char *)addr2);\n    case DB_UIDGID:\n        if (global_config.uid_gid_as_numbers)\n            return ((uidgid_u *)addr1)->num != ((uidgid_u *)addr2)->num;\n        else\n            return strcmp(((uidgid_u *)addr1)->txt, ((uidgid_u *)addr2)->txt);\n    case DB_INT:\n        return *(int *)addr1 != *(int *)addr2;\n    case DB_UINT:\n        return *(unsigned int *)addr1 != *(unsigned int *)addr2;\n    case DB_SHORT:\n        return *(short *)addr1 != *(short *)addr2;\n    
case DB_USHORT:\n        return *(unsigned short *)addr1 != *(unsigned short *)addr2;\n    case DB_BIGINT:\n        return *(long long *)addr1 != *(long long *)addr2;\n    case DB_BIGUINT:\n        return *(ull_t *)addr1 != *(ull_t *)addr2;\n    case DB_BOOL:\n        return *(bool *)addr1 != *(bool *)addr2;\n    case DB_STRIPE_INFO:\n    case DB_STRIPE_ITEMS:\n        RBH_BUG(\"Unsupported DB type\");\n        break;\n    }\n    UNREACHED();\n}\n\n/** duplicate a value of the given C type */\n#define TYPE_DUP(_t, _tgt, _src)                \\\n    do {                                        \\\n        _t *__ptr;                              \\\n        __ptr = calloc(1, sizeof(_t));          \\\n        if (__ptr == NULL)                      \\\n            return NULL;                        \\\n        memcpy(__ptr, (_src), sizeof(_t));      \\\n        (_tgt) = __ptr;                         \\\n    } while(0)\n\n/** duplicate a value of the given DB type */\nstatic inline void *dup_value(db_type_e db_type, db_type_u uval)\n{\n    void *ptr = NULL;\n\n    switch(db_type) {\n    case DB_ID:\n        TYPE_DUP(entry_id_t, ptr, &uval.val_id);\n        break;\n    case DB_ENUM_FTYPE:\n    case DB_TEXT:\n        ptr = strdup(uval.val_str);\n        break;\n    case DB_UIDGID:\n        TYPE_DUP(uidgid_u, ptr, uval.val_str);\n        ptr = calloc(1, sizeof(uidgid_u));\n        if (ptr == NULL)\n            return NULL;\n        snprintf(((uidgid_u *)ptr)->txt, MEMBER_SIZE(uidgid_u, txt), \"%s\",\n                 uval.val_str);\n        break;\n    case DB_INT:\n        TYPE_DUP(int, ptr, &uval.val_int);\n        break;\n    case DB_UINT:\n        TYPE_DUP(unsigned int, ptr, &uval.val_uint);\n        break;\n    case DB_SHORT:\n        TYPE_DUP(short, ptr, &uval.val_short);\n        break;\n    case DB_USHORT:\n        TYPE_DUP(unsigned short, ptr, &uval.val_ushort);\n        break;\n    case DB_BIGINT:\n        TYPE_DUP(long long, ptr, &uval.val_bigint);\n        
break;\n    case DB_BIGUINT:\n        TYPE_DUP(unsigned long long, ptr, &uval.val_biguint);\n        break;\n    case DB_BOOL:\n        TYPE_DUP(bool, ptr, &uval.val_bool);\n        break;\n    case DB_STRIPE_INFO:\n    case DB_STRIPE_ITEMS:\n        RBH_BUG(\"Unsupported DB type\");\n        break;\n    }\n    return ptr;\n}\n\n/** precomputed masks for testing attr sets efficiently.\n */\nextern attr_mask_t main_attr_set;\nextern attr_mask_t names_attr_set;\nextern attr_mask_t annex_attr_set;\nextern attr_mask_t gen_attr_set;\nextern attr_mask_t stripe_attr_set;\nextern attr_mask_t dir_attr_set;\nextern attr_mask_t slink_attr_set;\nextern attr_mask_t acct_attr_set;\nextern attr_mask_t acct_pk_attr_set;\nextern attr_mask_t softrm_attr_set;\nextern attr_mask_t readonly_attr_set;\nextern attr_mask_t func_attr_set;\n\nvoid init_attrset_masks(const lmgr_config_t *lmgr_config);\n\n/** indicate if there are main fields in attr_mask */\nstatic inline bool main_fields(attr_mask_t attr_mask)\n{\n    return !attr_mask_is_null(attr_mask_and(&attr_mask, &main_attr_set));\n}\n\n/** indicate if there name fields in attr_mask */\nstatic inline bool names_fields(attr_mask_t attr_mask)\n{\n    return !attr_mask_is_null(attr_mask_and(&attr_mask, &names_attr_set));\n}\n\n/** indicate if there are name field annex fields in attr_mask */\nstatic inline bool annex_fields(attr_mask_t attr_mask)\n{\n    return !attr_mask_is_null(attr_mask_and(&attr_mask, &annex_attr_set));\n}\n\n/** return the sub mask of generated fields in attr_mask */\nstatic inline attr_mask_t gen_fields(attr_mask_t attr_mask)\n{\n    return attr_mask_and(&attr_mask, &gen_attr_set);\n}\n\n/** indicate if there are  stripe fields in attr_mask */\nstatic inline bool stripe_fields(attr_mask_t attr_mask)\n{\n    return !attr_mask_is_null(attr_mask_and(&attr_mask, &stripe_attr_set));\n}\n\n/** indicate if there are readonly fields in attr_mask */\nstatic inline bool readonly_fields(attr_mask_t attr_mask)\n{\n    return 
!attr_mask_is_null(attr_mask_and(&attr_mask, &readonly_attr_set));\n}\n\n/** indicate if there are directory specific attributes in attr_mask */\nstatic inline bool dirattr_fields(attr_mask_t attr_mask)\n{\n    return !attr_mask_is_null(attr_mask_and(&attr_mask, &dir_attr_set));\n}\n\n/** return the sub mask of symlink specific attributes in attr_mask */\nstatic inline attr_mask_t slinkattr_fields(attr_mask_t attr_mask)\n{\n    return attr_mask_and(&attr_mask, &slink_attr_set);\n}\n\n/** indicate if there are generated fields in attr_mask */\nstatic inline bool generated_fields(attr_mask_t attr_mask)\n{\n    return !attr_mask_is_null(attr_mask_and(&attr_mask, &gen_attr_set));\n}\n\n/** indicate if there are function generated fields in attr_mask */\nstatic inline bool funcattr_fields(attr_mask_t attr_mask)\n{\n    return !attr_mask_is_null(attr_mask_and(&attr_mask, &func_attr_set));\n}\n\n/**\n * indicate if the field is in ACCT_STAT table\n * /!\\ Can only be used after init_attrset_masks() has been called\n */\nstatic inline bool is_acct_field(unsigned int attr_index)\n{\n    return attr_mask_test_index(&acct_attr_set, attr_index);\n}\n\n/**\n * indicate if the field is part of the ACCT_STAT primary key\n * /!\\ Can only be used after init_attrset_masks() has been called\n */\nstatic inline bool is_acct_pk(unsigned int attr_index)\n{\n    return attr_mask_test_index(&acct_pk_attr_set, attr_index);\n}\n\n/**\n * indicate if the field is part of the SOFTRM table\n * /!\\ Can only be used after init_attrset_masks() has been called\n */\nstatic inline bool is_softrm_field(unsigned int attr_index)\n{\n    return attr_mask_test_index(&softrm_attr_set, attr_index);\n}\n\n/* ------------ */\n\n/** indicate if the attribute is a status field */\nstatic inline bool is_status_field(unsigned int attr_index)\n{\n    return attr_index & ATTR_INDEX_FLG_STATUS;\n}\n\nstatic inline bool is_sm_info_field(unsigned int attr_index)\n{\n    return attr_index & 
ATTR_INDEX_FLG_SMINFO;\n}\n\n/** check if one of the given flags is set for the given field */\nstatic inline bool test_field_flag(unsigned int attr_index, int flags)\n{\n    if (attr_index >= ATTR_COUNT)\n        return false;\n\n    return field_infos[attr_index].flags & flags;\n}\n\n/** indicate if the field is read only */\nstatic inline bool is_read_only_field(unsigned int attr_index)\n{\n    return attr_index < ATTR_COUNT\n           && test_field_flag(attr_index,\n                              GENERATED | DIR_ATTR | REMOVED | FUNC_ATTR);\n}\n\n/** indicate if the field is stripe information */\nstatic inline bool is_stripe_field(unsigned int attr_index)\n{\n    return attr_index < ATTR_COUNT\n           && (field_infos[attr_index].db_type == DB_STRIPE_INFO\n               || field_infos[attr_index].db_type == DB_STRIPE_ITEMS);\n}\n\n/** indicate if the field is in NAMES table */\nstatic inline bool is_names_field(unsigned int attr_index)\n{\n    return attr_index < ATTR_COUNT && test_field_flag(attr_index, DNAMES);\n}\n\n/** indicate if the field is in main table */\nstatic inline bool is_main_field(unsigned int attr_index)\n{\n    return is_status_field(attr_index)\n           || is_sm_info_field(attr_index)\n           || (attr_index < ATTR_COUNT\n               && test_field_flag(attr_index, FREQ_ACCESS)\n               && !is_stripe_field(attr_index)\n               && !is_read_only_field(attr_index)\n               && !is_names_field(attr_index));\n}\n\nstatic inline bool is_gen_field(unsigned int attr_index)\n{\n    return attr_index < ATTR_COUNT && test_field_flag(attr_index, GENERATED);\n}\n\nstatic inline bool is_indexed_field(unsigned int attr_index)\n{\n    return attr_index < ATTR_COUNT && test_field_flag(attr_index, INDEXED);\n}\n\nstatic inline bool is_annex_field(unsigned int attr_index)\n{\n    return attr_index < ATTR_COUNT\n           && test_field_flag(attr_index, ANNEX_INFO | INIT_ONLY)\n           && !is_stripe_field(attr_index)\n        
   && !is_read_only_field(attr_index)\n           && !is_names_field(attr_index);\n}\n\nstatic inline bool is_funcattr(unsigned int attr_index)\n{\n    return attr_index < ATTR_COUNT && test_field_flag(attr_index, FUNC_ATTR);\n}\n\nstatic inline bool is_dirattr(unsigned int attr_index)\n{\n    return attr_index < ATTR_COUNT && test_field_flag(attr_index, DIR_ATTR);\n}\n\nstatic inline bool is_slinkattr(unsigned int attr_index)\n{\n    return attr_index < ATTR_COUNT && test_field_flag(attr_index, SLINK_ATTR);\n}\n\nstatic inline bool is_sepdlist(unsigned int attr_index)\n{\n    return attr_index < ATTR_COUNT && test_field_flag(attr_index, SEPD_LIST);\n}\n\nstatic inline bool is_recov_field(unsigned int attr_index)\n{\n#if 0 /** TODO implement recovery in RBHv3 */\n    /* needed fields for disaster recovery */\n    return ((1LL << attr_index) & RECOV_ATTR_MASK);\n#else\n    return false;\n#endif\n}\n\n/** printing a value to a DB request */\nvoid printdbtype(db_conn_t *pconn, GString *str, db_type_e type,\n                 const db_type_u *value_ptr);\n\n/** parse a value from DB */\nint parsedbtype(char *instr, db_type_e type, db_type_u *value_out);\n\ntypedef enum {\n    T_NONE = 0,         /* not set */\n    T_MAIN,             /* fields in main table */\n    T_DNAMES,           /* files in dir names table */\n    T_ANNEX,            /* fields in annex table */\n    T_STRIPE_INFO,      /* field in stripe info table */\n    T_STRIPE_ITEMS,     /* field in stripe items table */\n    T_ACCT,             /* fields in accounting table */\n    T_ACCT_PK,          /* PK fields of ACCT table */\n    T_ACCT_VAL,         /* Other fileds of ACCT table */\n    T_SOFTRM,           /* fields in softrm table (backup and HSM flavors only) */\n    T_TMP_SOFTRM,       /* temporary table for filling SOFTRM */\n    T_RECOV             /* fields in recov table (HSM flavors only) */\n} table_enum;\n\nstatic inline const char *table2name(table_enum table)\n{\n    switch(table) {\n    
case T_NONE:         return NULL;\n    case T_MAIN:         return MAIN_TABLE;\n    case T_DNAMES:       return DNAMES_TABLE;\n    case T_ANNEX:        return ANNEX_TABLE;\n    case T_STRIPE_INFO:  return STRIPE_INFO_TABLE;\n    case T_STRIPE_ITEMS: return STRIPE_ITEMS_TABLE;\n    case T_ACCT:      /* return ACCT_TABLE; */\n    case T_ACCT_PK:   /* return ACCT_TABLE; */\n    case T_ACCT_VAL:     return ACCT_TABLE;\n    case T_SOFTRM:       return SOFT_RM_TABLE;\n    case T_TMP_SOFTRM:   return \"TMP_TABLE_*\";\n    case T_RECOV:        return RECOV_TABLE;\n    default:             return NULL;\n    }\n}\n\ntypedef enum {\n    OT_ADD,\n    OT_SUBTRACT\n} operation_type;\n\n/**\n * Add needed source fields to build generated fields.\n * @param[in,out] std_mask   Mask of attributes where the source fields bits are\n *                           added.\n */\nvoid add_source_fields_for_gen(uint32_t *std_mask);\n\n/**\n * Generate fields in the given attr_set.\n * @param[in,out] p_set  Source fields are read from this attribute set, and\n *                       generated fields are written to it.\n */\nvoid generate_fields(attr_set_t *p_set);\n\nint parse_entry_id(lmgr_t *p_mgr, const char *str, PK_PARG_T p_pk,\n                   entry_id_t *p_id);\n\ntypedef enum {\n    AOF_LEADING_SEP = (1 << 0), /* add a separator at the beginning of the\n                                 * output */\n    AOF_GENERIC_VAL = (1 << 1), /* use field name in values (e.g. 
for a\n                                   \"on duplicate key ...\" statement) */\n    AOF_PREFIX      = (1 << 2), /* prefix field name with table name */\n    AOF_SKIP_NAME   = (1 << 3), /* skip name record */\n} attrset_op_flag_e;\n\nint attrmask2fieldlist(GString *str, attr_mask_t attr_mask, table_enum table,\n                       const char *prefix, const char *suffix,\n                       attrset_op_flag_e flags);\n\nint attrmask2fieldcomparison(GString *str, attr_mask_t attr_mask,\n                             table_enum table, const char *left_prefix,\n                             const char *right_prefix, const char *comparator,\n                             const char *separator);\n\nint attrmask2fieldoperation(GString *str, attr_mask_t attr_mask,\n                            table_enum table, const char *prefix,\n                            operation_type operation);\n\nint attrset2valuelist(lmgr_t *p_mgr, GString *str, const attr_set_t *p_set,\n                      table_enum table, attrset_op_flag_e flags);\n\nint attrset2updatelist(lmgr_t *p_mgr, GString *str, const attr_set_t *p_set,\n                       table_enum table, attrset_op_flag_e flags);\n\nchar *compar2str(filter_comparator_t compar);\n\nint filter2str(lmgr_t *p_mgr, GString *str, const lmgr_filter_t *p_filter,\n               table_enum table, attrset_op_flag_e flags);\n\nint func_filter(lmgr_t *p_mgr, GString *filter_str,\n                const lmgr_filter_t *p_filter, table_enum table,\n                attrset_op_flag_e flags);\n\nstruct field_count {\n    unsigned int nb_main;\n    unsigned int nb_annex;\n    unsigned int nb_names;\n    unsigned int nb_stripe_info;\n    unsigned int nb_stripe_items;\n};\n\nint filter_where(lmgr_t *p_mgr, const lmgr_filter_t *p_filter,\n                 struct field_count *counts, GString *where,\n                 attrset_op_flag_e flags);\nvoid filter_from(lmgr_t *p_mgr, const struct field_count *counts, GString *from,\n                 
table_enum *first_table, bool *select_distinct_id,\n                 attrset_op_flag_e flags);\n\n/* return the number of filter tables */\nstatic inline unsigned int nb_field_tables(const struct field_count *counts)\n{\n   return (counts->nb_main ? 1 : 0) + (counts->nb_annex ? 1 : 0)\n          + (counts->nb_stripe_items ? 1 : 0) + (counts->nb_names ? 1 : 0)\n          + (counts->nb_stripe_info ? 1 : 0);\n}\n\n\ntypedef enum {\n    FILTERDIR_NONE = 0,    /* no dir filter */\n    FILTERDIR_EMPTY,       /* empty dir filter */\n    FILTERDIR_OTHER,       /* other condition on directory attribute */\n} filter_dir_e;\n\nfilter_dir_e dir_filter(lmgr_t *p_mgr, GString *filter_str,\n                        const lmgr_filter_t *p_filter,\n                        unsigned int *dir_attr_index, const char *prefix);\n\nvoid append_size_range_fields(GString *str, bool leading_comma,\n                              const char *prefix);\n\nint result2attrset(table_enum table, char **result_tab, unsigned int res_count,\n                   attr_set_t *p_set );\n\n/* return the attr string for a dirattr */\nconst char *dirattr2str(unsigned int attr_index);\n\nvoid entry_id2pk(const entry_id_t *p_id, PK_PARG_T p_pk);\nint pk2entry_id( lmgr_t *p_mgr, PK_ARG_T pk, entry_id_t *p_id );\n\n/* those functions are used for begin/commit/rollback */\nint _lmgr_begin(lmgr_t *p_mgr, int behavior);\nvoid _lmgr_rollback(lmgr_t *p_mgr, int behavior);\nint _lmgr_commit(lmgr_t *p_mgr, int behavior);\nint _lmgr_flush_commit(lmgr_t *p_mgr, int behavior);\n\nstatic inline int lmgr_begin(lmgr_t *p_mgr)\n{\n    return _lmgr_begin(p_mgr, lmgr_config.commit_behavior);\n}\nstatic inline void lmgr_rollback(lmgr_t *p_mgr)\n{\n    _lmgr_rollback(p_mgr, lmgr_config.commit_behavior);\n}\nstatic inline int lmgr_commit(lmgr_t *p_mgr)\n{\n    return _lmgr_commit(p_mgr, lmgr_config.commit_behavior);\n}\n\n/* to be called before closing a connection */\nstatic inline int lmgr_flush_commit(lmgr_t *p_mgr)\n{\n    return 
_lmgr_flush_commit(p_mgr, lmgr_config.commit_behavior);\n}\n\n/** manage delayed retry of retryable errors\n * \\return != 0 if the transaction must be restarted\n */\n#define lmgr_delayed_retry(_l, _e) _lmgr_delayed_retry(_l, _e, __func__, \\\n                                                       __LINE__)\nint _lmgr_delayed_retry(lmgr_t *lmgr, int errcode, const char *func, int line);\n\n/* get/set variable in DB */\nint lmgr_get_var(db_conn_t *pconn, const char *varname, char *value,\n                 int bufsize);\nint lmgr_set_var(db_conn_t *pconn, const char *varname, const char *value);\n\nint fullpath_attr2db(const char *attr, char *db);\nvoid fullpath_db2attr(const char *db, char *attr);\n\nstatic inline attr_mask_t sum_masks(attr_set_t **p_attrs, unsigned int count,\n                                    attr_mask_t t_mask)\n{\n    attr_mask_t sum = {0};\n    unsigned int i;\n\n    for (i = 0; i < count; i++) {\n        attr_mask_t filtered = attr_mask_and(&p_attrs[i]->attr_mask, &t_mask);\n\n        sum = attr_mask_or(&sum, &filtered);\n    }\n    return sum;\n}\n\nvoid separated_db2list_inplace(char *list);\n\nstatic inline const char *field_name(unsigned int index)\n{\n    if (is_std_attr(index)) {\n        assert(index < ATTR_COUNT);\n        return field_infos[index].field_name;\n    }\n\n    if (is_status(index))\n        return get_sm_instance(attr2status_index(index))->db_field;\n\n    if (is_sm_info(index))\n        return sm_attr_info[attr2sminfo_index(index)].db_attr_name;\n\n    return NULL;\n}\n\nstatic inline db_type_e field_type(unsigned int index)\n{\n    if (is_std_attr(index)) { /* ensure index < ATTR_COUNT */\n        assert(index < ATTR_COUNT);\n        return field_infos[index].db_type;\n    }\n\n    if (is_status(index))\n        return DB_TEXT;\n\n    if (is_sm_info(index))\n        return sm_attr_info[attr2sminfo_index(index)].def->db_type;\n\n    RBH_BUG(\"Unexpected field type\");\n}\n\n/** helper to check empty filter */\nstatic 
inline bool no_filter(const lmgr_filter_t *p_filter)\n{\n    return p_filter == NULL\n           || (p_filter->filter_type == FILTER_SIMPLE\n               && p_filter->filter_simple.filter_count == 0)\n           || (p_filter->filter_type == FILTER_BOOLEXPR\n               && p_filter->filter_boolexpr == NULL);\n}\n\nbool match_table(table_enum t, unsigned int attr_index);\n\nint lmgr_table_count(db_conn_t *pconn, const char *table, uint64_t *count);\n\n#endif\n"
  },
  {
    "path": "src/list_mgr/listmgr_config.c",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2009, 2010 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n#ifdef HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n\n#include \"list_mgr.h\"\n#include \"rbh_cfg_helpers.h\"\n#include \"rbh_misc.h\"\n#include \"rbh_logs.h\"\n#include <errno.h>\n\n#define LMGR_CONFIG_BLOCK \"ListManager\"\n\n#define MYSQL_CONFIG_BLOCK \"MySQL\"\n#define SQLITE_CONFIG_BLOCK \"SQLite\"\n\n/* tag for logging */\n#define TAG \"LmgrConfig\"\n\n/** exported variable available for list_mgr internals */\nlmgr_config_t lmgr_config;\n\nstatic void lmgr_cfg_set_default(void *module_config)\n{\n    lmgr_config_t *conf = (lmgr_config_t *) module_config;\n\n    conf->commit_behavior = 1;  /* transaction */\n    conf->connect_retry_min = 1;\n    conf->connect_retry_max = 30;\n\n#ifdef _MYSQL\n    strcpy(conf->db_config.server, \"localhost\");\n    conf->db_config.db[0] = '\\0';\n    strcpy(conf->db_config.user, \"robinhood\");\n    conf->db_config.password[0] = '\\0';\n    conf->db_config.port = 0;\n    conf->db_config.socket[0] = '\\0';\n    strcpy(conf->db_config.engine, \"InnoDB\");\n\n    /* Depending on the MariaDB version, the TokuDB compression\n     * default can be either no compression or zlib compression. See\n     * https://mariadb.com/kb/en/mariadb/tokudb-differences. Lets pick\n     * no compression, as zlib compression appears to slow database\n     * inserts when used by robinhood. 
*/\n    strcpy(conf->db_config.tokudb_compression, \"tokudb_uncompressed\");\n#elif defined(_SQLITE)\n    strcpy(conf->db_config.filepath, \"/var/robinhood/robinhood_sqlite_db\");\n    conf->db_config.retry_delay_microsec = 1000;    /* 1ms */\n#endif\n\n    conf->acct = true;\n}\n\nstatic void lmgr_cfg_write_default(FILE *output)\n{\n    print_begin_block(output, 0, LMGR_CONFIG_BLOCK, NULL);\n    print_line(output, 1, \"commit_behavior             : transaction\");\n    print_line(output, 1, \"connect_retry_interval_min  : 1s\");\n    print_line(output, 1, \"connect_retry_interval_max  : 30s\");\n    print_line(output, 1, \"accounting  : enabled\");\n    fprintf(output, \"\\n\");\n\n#ifdef _MYSQL\n    print_begin_block(output, 1, MYSQL_CONFIG_BLOCK, NULL);\n    print_line(output, 2, \"server  :   localhost\");\n    print_line(output, 2, \"db      :   [MANDATORY]\");\n    print_line(output, 2, \"user    :   robinhood\");\n    print_line(output, 2, \"password|password_file : [MANDATORY]\");\n    print_line(output, 2, \"port    :   (MySQL default)\");\n    print_line(output, 2, \"socket  :   NONE\");\n    print_line(output, 2, \"engine  :   InnoDB\");\n    print_end_block(output, 1);\n#elif defined(_SQLITE)\n    print_begin_block(output, 1, SQLITE_CONFIG_BLOCK, NULL);\n    print_line(output, 2,\n               \"db_file              :  \\\"/var/robinhood/robinhood_sqlite_db\\\"\");\n    print_line(output, 2, \"retry_delay_microsec :  1000 (1 millisec)\");\n    print_end_block(output, 1);\n#endif\n\n    print_end_block(output, 0);\n}\n\nstatic int lmgr_cfg_read(config_file_t config, void *module_config,\n                         char *msg_out)\n{\n    int rc;\n    bool bval;\n    lmgr_config_t *conf = (lmgr_config_t *) module_config;\n    char **options = NULL;\n    unsigned int nb_options = 0;\n    char tmpstr[1024];\n    config_item_t lmgr_block;\n    config_item_t db_block;\n\n    static const char *lmgr_allowed[] = {\n        \"commit_behavior\", 
\"connect_retry_interval_min\",\n        \"connect_retry_interval_max\", \"accounting\",\n        MYSQL_CONFIG_BLOCK, SQLITE_CONFIG_BLOCK,\n        \"user_acct\", \"group_acct\",  /* deprecated => accounting */\n        NULL\n    };\n\n    const cfg_param_t cfg_params[] = {\n        {\"connect_retry_interval_min\", PT_DURATION, PFLG_POSITIVE |\n         PFLG_NOT_NULL, &conf->connect_retry_min, 0},\n        {\"connect_retry_interval_max\", PT_DURATION, PFLG_POSITIVE |\n         PFLG_NOT_NULL, &conf->connect_retry_max, 0},\n        {\"accounting\", PT_BOOL, 0, &conf->acct, 0},\n        END_OF_PARAMS\n    };\n\n#ifdef _MYSQL\n    static const char *db_allowed[] = {\n        \"server\", \"db\", \"user\", \"password\", \"password_file\", \"port\", \"socket\",\n        \"engine\", \"tokudb_compression\", NULL\n    };\n\n    const cfg_param_t db_params[] = {\n        {\"server\", PT_STRING, PFLG_NO_WILDCARDS,\n         conf->db_config.server, sizeof(conf->db_config.server)}\n        ,\n        {\"db\", PT_STRING, PFLG_MANDATORY | PFLG_NO_WILDCARDS,\n         conf->db_config.db, sizeof(conf->db_config.db)}\n        ,\n        {\"user\", PT_STRING, PFLG_NO_WILDCARDS, conf->db_config.user,\n         sizeof(conf->db_config.user)}\n        ,\n        {\"port\", PT_INT, PFLG_POSITIVE | PFLG_NOT_NULL,\n         (int *)&conf->db_config.port, 0},\n        {\"socket\", PT_STRING, PFLG_NO_WILDCARDS | PFLG_ABSOLUTE_PATH,\n         conf->db_config.socket, sizeof(conf->db_config.socket)}\n        ,\n        {\"engine\", PT_STRING, PFLG_NO_WILDCARDS | PFLG_NOT_EMPTY,\n         conf->db_config.engine, sizeof(conf->db_config.engine)}\n        ,\n        {\"tokudb_compression\", PT_STRING, PFLG_NO_WILDCARDS,\n         conf->db_config.tokudb_compression,\n         sizeof(conf->db_config.tokudb_compression)}\n        ,\n        END_OF_PARAMS\n    };\n#elif defined(_SQLITE)\n    static const char *db_allowed[] = {\n        \"db_file\", \"retry_delay_microsec\",\n        NULL\n    };\n    
const cfg_param_t db_params[] = {\n        {\"db_file\", PT_STRING, PFLG_ABSOLUTE_PATH | PFLG_NO_WILDCARDS,\n         conf->db_config.filepath, sizeof(conf->db_config.filepath)}\n        ,\n        {\"retry_delay_microsec\", PT_INT, PFLG_POSITIVE | PFLG_NOT_NULL,\n         (int *)&conf->db_config.retry_delay_microsec, 0},\n        END_OF_PARAMS\n    };\n#endif\n\n    /* get ListManager block */\n    rc = get_cfg_block(config, LMGR_CONFIG_BLOCK, &lmgr_block, msg_out);\n    if (rc)\n        return rc;\n\n    /* retrieve std parameters */\n    rc = read_scalar_params(lmgr_block, LMGR_CONFIG_BLOCK, cfg_params, msg_out);\n    if (rc)\n        return rc;\n\n    /* commit_behavior */\n    rc = GetStringParam(lmgr_block, LMGR_CONFIG_BLOCK, \"commit_behavior\",\n                        PFLG_NO_WILDCARDS, tmpstr, sizeof(tmpstr), &options,\n                        &nb_options, msg_out);\n    if ((rc != 0) && (rc != ENOENT))\n        return rc;\n    else if (rc != ENOENT) {\n        if (!strcasecmp(tmpstr, \"autocommit\"))\n            conf->commit_behavior = 0;\n        else if (!strcasecmp(tmpstr, \"transaction\"))\n            conf->commit_behavior = 1;\n        else if (!strcasecmp(tmpstr, \"periodic\")) {\n            if ((nb_options != 1) || !options || !options[0]) {\n                strcpy(msg_out,\n                       \"A single argument is expected for periodic commit behavior. Eg: commit_behavior = periodic(1000)\");\n                return EINVAL;\n            }\n\n            conf->commit_behavior = atoi(options[0]);\n            if (conf->commit_behavior == 0) {\n                strcpy(msg_out,\n                       \"The argument for \\\"\" LMGR_CONFIG_BLOCK\n                       \"::commit_behavior = periodic\\\" must be a positive integer. 
Eg: commit_behavior = periodic(1000)\");\n                return EINVAL;\n            }\n        } else {\n            sprintf(msg_out,\n                    \"Invalid commit behavior '%s' (expected: autocommit, \"\n                    \"transaction, periodic(<count>))\", tmpstr);\n            return EINVAL;\n        }\n    }\n\n    /* manage deprecated parameters */\n    rc = GetBoolParam(lmgr_block, LMGR_CONFIG_BLOCK, \"user_acct\", 0, &bval,\n                      NULL, NULL, msg_out);\n    if (rc == 0) {\n        DisplayLog(LVL_CRIT, TAG,\n                   \"WARNING: parameter %s::%s' is deprecated. Specify 'accounting = yes/no' instead.\",\n                   LMGR_CONFIG_BLOCK, \"user_acct\");\n        DisplayLog(LVL_MAJOR, TAG,\n                   \"Setting 'accounting = %s' for compatibility.\",\n                   bool2str(bval));\n        conf->acct = bval;\n    }\n\n    rc = GetBoolParam(lmgr_block, LMGR_CONFIG_BLOCK, \"group_acct\", 0, &bval,\n                      NULL, NULL, msg_out);\n    if (rc == 0) {\n        DisplayLog(LVL_CRIT, TAG,\n                   \"WARNING: parameter %s::%s' is deprecated. 
Specify 'accounting = yes/no' instead.\",\n                   LMGR_CONFIG_BLOCK, \"group_acct\");\n        DisplayLog(LVL_MAJOR, TAG,\n                   \"Setting 'accounting = %s' for compatibility.\",\n                   bool2str(bval));\n        conf->acct = bval;\n    }\n\n    CheckUnknownParameters(lmgr_block, LMGR_CONFIG_BLOCK, lmgr_allowed);\n\n    /* Database parameters */\n#ifdef _MYSQL\n    /* get MySQL block */\n    rc = get_cfg_block(config, LMGR_CONFIG_BLOCK \"::\" MYSQL_CONFIG_BLOCK,\n                       &db_block, msg_out);\n    if (rc)\n        return rc;\n\n    /* DB std params */\n    rc = read_scalar_params(db_block, MYSQL_CONFIG_BLOCK, db_params, msg_out);\n    if (rc)\n        return rc;\n\n    /* DB params with specific type */\n    rc = GetStringParam(db_block, MYSQL_CONFIG_BLOCK, \"password\",\n                        0, conf->db_config.password, 256, NULL, NULL, msg_out);\n    if ((rc != 0) && (rc != ENOENT))\n        return rc;\n    else if (rc == ENOENT) {\n        FILE *passfile;\n        char errstr[1024];\n\n        rc = GetStringParam(db_block, MYSQL_CONFIG_BLOCK,\n                            \"password_file\",\n                            PFLG_ABSOLUTE_PATH | PFLG_NO_WILDCARDS,\n                            tmpstr, sizeof(tmpstr), NULL, NULL, msg_out);\n        if ((rc != 0) && (rc != ENOENT))\n            return rc;\n        else if (rc == ENOENT) {\n            strcpy(msg_out,\n                   MYSQL_CONFIG_BLOCK \"::password or \"\n                   MYSQL_CONFIG_BLOCK \"::password_file must be provided\");\n            return ENOENT;\n        }\n\n        /* read password file and @TODO check its rights */\n        passfile = fopen(tmpstr, \"r\");\n        if (!passfile) {\n            rc = errno;\n            sprintf(msg_out, \"Error opening password file %s : %s\", tmpstr,\n                    strerror(errno));\n            return rc;\n        }\n        rc = fscanf(passfile, \"%1023s\", tmpstr);\n        if 
(ferror(passfile) || rc < 1) {\n            rc = errno;\n            if (strerror_r(rc, errstr, sizeof(errstr))) {\n                snprintf(errstr, sizeof(errstr), \"%d\", rc);\n            }\n            sprintf(msg_out, \"Error reading password file %s : %s\", tmpstr,\n                    errstr);\n            return rc;\n        }\n        fclose(passfile);\n        rh_strncpy(conf->db_config.password, tmpstr, 256);\n    }\n\n    CheckUnknownParameters(db_block, MYSQL_CONFIG_BLOCK, db_allowed);\n\n#elif defined(_SQLITE)\n    /* get SQLite block */\n    rc = get_cfg_block(config, LMGR_CONFIG_BLOCK \"::\" SQLITE_CONFIG_BLOCK,\n                       &db_block, msg_out);\n    if (rc)\n        return rc;\n\n    rc = read_scalar_params(db_block, SQLITE_CONFIG_BLOCK, db_params, msg_out);\n    if (rc)\n        return rc;\n\n    CheckUnknownParameters(db_block, SQLITE_CONFIG_BLOCK, db_allowed);\n#endif\n\n    return 0;\n}\n\nstatic int lmgr_cfg_reload(lmgr_config_t *conf)\n{\n    if (conf->commit_behavior != lmgr_config.commit_behavior)\n        DisplayLog(LVL_MAJOR, TAG,\n                   LMGR_CONFIG_BLOCK\n                   \"::commit_behavior changed in config file, but cannot be modified dynamically\");\n\n    if (conf->acct != lmgr_config.acct)\n        DisplayLog(LVL_MAJOR, TAG,\n                   LMGR_CONFIG_BLOCK\n                   \"::accounting changed in config file, but cannot be modified dynamically\");\n\n    if (conf->connect_retry_min != lmgr_config.connect_retry_min) {\n        DisplayLog(LVL_EVENT, TAG,\n                   LMGR_CONFIG_BLOCK\n                   \"::connect_retry_interval_min updated: %ld->%ld\",\n                   lmgr_config.connect_retry_min, conf->connect_retry_min);\n        lmgr_config.connect_retry_min = conf->connect_retry_min;\n    }\n\n    if (conf->connect_retry_max != lmgr_config.connect_retry_max) {\n        DisplayLog(LVL_EVENT, TAG,\n                   LMGR_CONFIG_BLOCK\n                   
\"::connect_retry_interval_max updated: %ld->%ld\",\n                   lmgr_config.connect_retry_max, conf->connect_retry_max);\n        lmgr_config.connect_retry_max = conf->connect_retry_max;\n    }\n#ifdef _MYSQL\n\n    if (strcmp(conf->db_config.server, lmgr_config.db_config.server))\n        DisplayLog(LVL_MAJOR, TAG,\n                   MYSQL_CONFIG_BLOCK\n                   \"::server changed in config file, but cannot be modified dynamically\");\n    if (strcmp(conf->db_config.db, lmgr_config.db_config.db))\n        DisplayLog(LVL_MAJOR, TAG,\n                   MYSQL_CONFIG_BLOCK\n                   \"::db changed in config file, but cannot be modified dynamically\");\n    if (strcmp(conf->db_config.user, lmgr_config.db_config.user))\n        DisplayLog(LVL_MAJOR, TAG,\n                   MYSQL_CONFIG_BLOCK\n                   \"::user changed in config file, but cannot be modified dynamically\");\n    if (strcmp(conf->db_config.password, lmgr_config.db_config.password))\n        DisplayLog(LVL_MAJOR, TAG,\n                   MYSQL_CONFIG_BLOCK\n                   \"::password changed in config file, but cannot be modified dynamically\");\n#elif defined(_SQLITE)\n    if (strcmp(conf->db_config.filepath, lmgr_config.db_config.filepath))\n        DisplayLog(LVL_MAJOR, TAG,\n                   SQLITE_CONFIG_BLOCK\n                   \"::db_file changed in config file, but cannot be modified dynamically\");\n\n    if (conf->db_config.retry_delay_microsec !=\n        lmgr_config.db_config.retry_delay_microsec) {\n        DisplayLog(LVL_EVENT, TAG,\n                   SQLITE_CONFIG_BLOCK \"::retry_delay_microsec updated: %u->%u\",\n                   lmgr_config.db_config.retry_delay_microsec,\n                   conf->db_config.retry_delay_microsec);\n        lmgr_config.db_config.retry_delay_microsec =\n            conf->db_config.retry_delay_microsec;\n    }\n#endif\n\n    return 0;\n}\n\nstatic int lmgr_cfg_set(void *cfg, bool reload)\n{\n    lmgr_config_t 
*conf = (lmgr_config_t *) cfg;\n\n    if (reload)\n        return lmgr_cfg_reload(conf);\n\n    lmgr_config = *conf;\n    return 0;\n}\n\nstatic void lmgr_cfg_write_template(FILE *output)\n{\n    print_begin_block(output, 0, LMGR_CONFIG_BLOCK, NULL);\n\n    print_line(output, 1, \"# Method for committing information to database.\");\n    print_line(output, 1, \"# Possible values are:\");\n    print_line(output, 1,\n               \"# - \\\"autocommit\\\": weak transactions (more efficient, but database inconsistencies may occur)\");\n    print_line(output, 1,\n               \"# - \\\"transaction\\\": manage operations in transactions (best consistency, lower performance)\");\n    print_line(output, 1,\n               \"# - \\\"periodic(<nb_transaction>)\\\": periodically commit (every <n> transactions).\");\n    print_line(output, 1, \"commit_behavior = transaction ;\");\n    fprintf(output, \"\\n\");\n    print_line(output, 1,\n               \"# Minimum time (in seconds) to wait before trying to reestablish a lost connection.\");\n    print_line(output, 1,\n               \"# Then this time is multiplied by 2 until reaching connect_retry_interval_max\");\n    print_line(output, 1, \"connect_retry_interval_min = 1 ;\");\n    print_line(output, 1, \"connect_retry_interval_max = 30 ;\");\n\n    print_line(output, 1,\n               \"# disable the following options if you are not interested in\");\n    print_line(output, 1, \"# user or group stats (to speed up scan)\");\n    print_line(output, 1, \"accounting  = enabled ;\");\n    fprintf(output, \"\\n\");\n#ifdef _MYSQL\n    print_begin_block(output, 1, MYSQL_CONFIG_BLOCK, NULL);\n    print_line(output, 2, \"server = \\\"localhost\\\" ;\");\n    print_line(output, 2, \"db     = \\\"robinhood_db\\\" ;\");\n    print_line(output, 2, \"user   = \\\"robinhood\\\" ;\");\n    print_line(output, 2, \"password_file = \\\"/etc/robinhood.d/.dbpassword\\\" ;\");\n    print_line(output, 2, \"# port   = 3306 ;\");\n    
print_line(output, 2, \"# socket = \\\"/tmp/mysql.sock\\\" ;\");\n    print_line(output, 2, \"engine = InnoDB ;\");\n    print_end_block(output, 1);\n#elif defined(_SQLITE)\n    print_begin_block(output, 1, SQLITE_CONFIG_BLOCK, NULL);\n    print_line(output, 2, \"db_file = \\\"/var/robinhood/robinhood_sqlite_db\\\" ;\");\n    print_line(output, 2, \"retry_delay_microsec = 1000 ;\");\n    print_end_block(output, 1);\n#endif\n\n    print_end_block(output, 0);\n}\n\nstatic void *lmgr_cfg_new(void)\n{\n    return calloc(1, sizeof(lmgr_config_t));\n}\n\nstatic void lmgr_cfg_free(void *cfg)\n{\n    if (cfg != NULL)\n        free(cfg);\n}\n\nmod_cfg_funcs_t lmgr_cfg_hdlr = {\n    .module_name = \"list manager\",\n    .new = lmgr_cfg_new,\n    .free = lmgr_cfg_free,\n    .set_default = lmgr_cfg_set_default,\n    .read = lmgr_cfg_read,\n    .set_config = lmgr_cfg_set,\n    .write_default = lmgr_cfg_write_default,\n    .write_template = lmgr_cfg_write_template\n};\n\nbool lmgr_parallel_batches(void)\n{\n    return !lmgr_config.acct;\n}\n"
  },
  {
    "path": "src/list_mgr/listmgr_filters.c",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2009 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n#ifdef HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n\n#include \"list_mgr.h\"\n#include \"Memory.h\"\n#include \"rbh_logs.h\"\n#include \"rbh_misc.h\"\n#include \"listmgr_common.h\"\n#include <stdlib.h>\n\n#define FILTER_PREALLOC_INIT 2\n\nint lmgr_simple_filter_init(lmgr_filter_t *p_filter)\n{\n    lmgr_simple_filter_t *sf = &p_filter->filter_simple;\n\n    p_filter->filter_type = FILTER_SIMPLE;\n\n    sf->filter_count = 0;\n    sf->filter_flags = MemCalloc(FILTER_PREALLOC_INIT, sizeof(int));\n    sf->filter_index = MemCalloc(FILTER_PREALLOC_INIT, sizeof(unsigned int));\n    sf->filter_compar = MemCalloc(FILTER_PREALLOC_INIT,\n                                  sizeof(filter_comparator_t));\n    sf->filter_value = MemCalloc(FILTER_PREALLOC_INIT, sizeof(filter_value_t));\n\n    if (sf->filter_flags == NULL || sf->filter_index == NULL\n        || sf->filter_compar == NULL || sf->filter_value == NULL)\n        return DB_NO_MEMORY;\n\n    sf->prealloc = FILTER_PREALLOC_INIT;\n    return 0;\n}\n\nstatic int convert_regexp(const char *in_string, char *db_string)\n{\n    char *p_wild;\n\n    strcpy(db_string, in_string);\n\n    /* replace classes [] with _ */\n\n    while ((p_wild = strchr(db_string, '[')) != NULL) {\n        char *p_end = strchr(p_wild, ']');\n\n        if (p_end == NULL) {\n            DisplayLog(LVL_CRIT, LISTMGR_TAG,\n                       \"Error unmatched '[' in regexp '%s'.\", in_string);\n            return DB_INVALID_ARG;\n        }\n        /* copy from character after ']' to the caracter 
after '['\n         * and replace '[' with '_'\n         */\n        strcpy(p_wild + 1, p_end + 1);\n        *p_wild = '_';\n    }\n\n    while ((p_wild = strchr(db_string, '*')) != NULL)\n        *p_wild = '%';\n\n    while ((p_wild = strchr(db_string, '?')) != NULL)\n        *p_wild = '_';\n\n    return 0;\n}\n\nstatic int lmgr_simple_filter_dup_buffers(lmgr_filter_t *p_filter,\n                                          unsigned int index)\n{\n    lmgr_simple_filter_t *sf = &p_filter->filter_simple;\n    filter_comparator_t comparator = sf->filter_compar[index];\n    filter_value_t *p_value = &sf->filter_value[index];\n    int flag = sf->filter_flags[index];\n\n    /* @TODO support lists of strings (with both FILTER_FLAG_ALLOC_STR\n     * and FILTER_FLAG_ALLOC_LIST */\n\n    if ((comparator == LIKE) || (comparator == UNLIKE) || (comparator == RLIKE)\n        || (comparator == ILIKE) || (comparator == IUNLIKE)) {\n        int rc;\n        char *newstr = MemAlloc(strlen(p_value->value.val_str) + 1);\n\n        if (comparator != RLIKE) {\n            /* value is a perl regexp, don't convert it */\n            rc = convert_regexp(p_value->value.val_str, newstr);\n            if (rc) {\n                MemFree(newstr);\n                return rc;\n            }\n        } else\n            strcpy(newstr, p_value->value.val_str);\n\n        /* free the previous string */\n        if (flag & FILTER_FLAG_ALLOC_STR)\n            MemFree((char *)p_value->value.val_str);\n\n        /* mark the new string as releasable */\n        sf->filter_flags[index] |= FILTER_FLAG_ALLOC_STR;\n        sf->filter_value[index].value.val_str = newstr;\n    } else if ((comparator == IN) || (comparator == NOTIN)) {\n        /* allocate and copy the list */\n        db_type_u *values =\n            (db_type_u *) MemAlloc(p_value->list.count * sizeof(db_type_u));\n        memcpy(values, p_value->list.values,\n               p_value->list.count * sizeof(db_type_u));\n\n        /* free the 
previous list */\n        if (flag & FILTER_FLAG_ALLOC_LIST)\n            MemFree((char *)p_value->list.values);\n\n        sf->filter_flags[index] |= FILTER_FLAG_ALLOC_LIST;\n        sf->filter_value[index].list.values = values;\n    }\n\n    return 0;\n}\n\nstatic void lmgr_simple_filter_free_buffers(lmgr_filter_t *p_filter,\n                                            unsigned int index)\n{\n    lmgr_simple_filter_t *sf = &p_filter->filter_simple;\n\n    /* @TODO support lists of strings (with both FILTER_FLAG_ALLOC_STR\n       and FILTER_FLAG_ALLOC_LIST */\n\n    /* check if previous value must be released */\n    if ((sf->filter_flags[index] & FILTER_FLAG_ALLOC_STR)\n        && (sf->filter_value[index].value.val_str != NULL)) {\n        MemFree((char *)sf->filter_value[index].value.val_str);\n    } else if ((sf->filter_flags[index] & FILTER_FLAG_ALLOC_LIST)\n            && (sf->filter_value[index].list.values != NULL)) {\n        MemFree((char *)sf->filter_value[index].list.values);\n    }\n}\n\nint lmgr_simple_filter_add(lmgr_filter_t *p_filter, unsigned int attr_index,\n                           filter_comparator_t comparator, filter_value_t value,\n                           enum filter_flags flag)\n{\n    int rc;\n    lmgr_simple_filter_t *sf = &p_filter->filter_simple;\n\n    if (p_filter->filter_type != FILTER_SIMPLE)\n        return DB_INVALID_ARG;\n\n    if (sf->filter_count >= sf->prealloc) {\n        /* double the size of the buffers */\n        sf->prealloc *= 2;\n        sf->filter_flags = MemRealloc(sf->filter_flags,\n                                      sf->prealloc * sizeof(int));\n        sf->filter_index = MemRealloc(sf->filter_index,\n                                      sf->prealloc * sizeof(unsigned int));\n        sf->filter_compar = MemRealloc(sf->filter_compar,\n                                   sf->prealloc * sizeof(filter_comparator_t));\n        sf->filter_value = MemRealloc(sf->filter_value,\n                                   
sf->prealloc * sizeof(filter_value_t));\n\n        if (sf->filter_flags == NULL || sf->filter_index == NULL\n            || sf->filter_compar == NULL || sf->filter_value == NULL)\n            return DB_NO_MEMORY;\n    }\n\n    sf->filter_flags[sf->filter_count] = flag;\n    sf->filter_index[sf->filter_count] = attr_index;\n    sf->filter_compar[sf->filter_count] = comparator;\n    sf->filter_value[sf->filter_count] = value;\n\n    /* duplicate and copy buffers if needed */\n    rc = lmgr_simple_filter_dup_buffers(p_filter, sf->filter_count);\n    if (rc)\n        return rc;\n\n    sf->filter_count++;\n\n    return 0;\n}\n\n/* check if the given attribute is part of a filter */\nint lmgr_filter_check_field(const lmgr_filter_t *p_filter,\n                            unsigned int attr_index)\n{\n    unsigned int i;\n\n    if (p_filter->filter_type != FILTER_SIMPLE)\n        return DB_INVALID_ARG;\n\n    /* first check if there is already a filter on this argument */\n    for (i = 0; i < p_filter->filter_simple.filter_count; i++) {\n        if (p_filter->filter_simple.filter_index[i] == attr_index)\n            return 1;\n    }\n    return 0;\n}\n\nint lmgr_simple_filter_add_or_replace(lmgr_filter_t *p_filter,\n                                      unsigned int attr_index,\n                                      filter_comparator_t comparator,\n                                      filter_value_t value,\n                                      enum filter_flags flag)\n{\n    unsigned int i;\n    int rc;\n    lmgr_simple_filter_t *sf;\n\n    if (p_filter->filter_type != FILTER_SIMPLE)\n        return DB_INVALID_ARG;\n    sf = &p_filter->filter_simple;\n\n    /* first check if there is already a filter on this argument */\n    for (i = 0; i < sf->filter_count; i++) {\n        if (sf->filter_index[i] != attr_index)\n            continue;\n\n        int syntax_flags = sf->filter_flags[i]\n            & (FILTER_FLAG_BEGIN | FILTER_FLAG_END | FILTER_FLAG_OR);\n\n        /* 
check if previous value must be released */\n        lmgr_simple_filter_free_buffers(p_filter, i);\n\n        /* ensure parenthesing and 'OR' keywords are conserved */\n        sf->filter_flags[i] = flag | syntax_flags;\n        sf->filter_compar[i] = comparator;\n        sf->filter_value[i] = value;\n\n        /* duplicate and copy buffers if needed */\n        rc = lmgr_simple_filter_dup_buffers(p_filter, i);\n        if (rc)\n            return rc;\n\n        return 0;\n    }\n\n    /* not found: add it */\n    return lmgr_simple_filter_add(p_filter, attr_index, comparator, value,\n                                  flag);\n}\n\nint lmgr_simple_filter_add_if_not_exist(lmgr_filter_t *p_filter,\n                                        unsigned int attr_index,\n                                        filter_comparator_t comparator,\n                                        filter_value_t value,\n                                        enum filter_flags flag)\n{\n    unsigned int i;\n\n    if (p_filter->filter_type != FILTER_SIMPLE)\n        return DB_INVALID_ARG;\n\n    /* first check if there is already a filter on this argument */\n    for (i = 0; i < p_filter->filter_simple.filter_count; i++) {\n        if (p_filter->filter_simple.filter_index[i] == attr_index) {\n            return DB_ALREADY_EXISTS;\n        }\n    }\n\n    /* not found: add it */\n    return lmgr_simple_filter_add(p_filter, attr_index, comparator, value,\n                                  flag);\n\n}\n\nint lmgr_simple_filter_free(lmgr_filter_t *p_filter)\n{\n    int i;\n    lmgr_simple_filter_t *sf;\n\n    if (p_filter->filter_type != FILTER_SIMPLE)\n        return DB_INVALID_ARG;\n    sf = &p_filter->filter_simple;\n\n    /* free the values that must be released */\n    for (i = 0; i < sf->filter_count; i++)\n        lmgr_simple_filter_free_buffers(p_filter, i);\n\n    if (sf->filter_flags)\n        MemFree(sf->filter_flags);\n    if (sf->filter_index)\n        MemFree(sf->filter_index);\n    
if (sf->filter_compar)\n        MemFree(sf->filter_compar);\n    if (sf->filter_value)\n        MemFree(sf->filter_value);\n    memset(p_filter, 0, sizeof(lmgr_filter_t));\n    return 0;\n}\n\n/* Add begin or end block. */\nint lmgr_simple_filter_add_block(lmgr_filter_t *p_filter,\n                                  enum filter_flags flag)\n{\n    filter_value_t val;\n    memset(&val, 0, sizeof(filter_value_t));\n\n    return lmgr_simple_filter_add(p_filter, 0, 0, val, flag);\n}\n\n\n/* is it a simple 'AND' expression ? */\nstatic bool is_simple_expr(bool_node_t *boolexpr, int depth, bool_op_t op_ctx)\n{\n    switch (boolexpr->node_type) {\n    case NODE_UNARY_EXPR:\n        if (boolexpr->content_u.bool_expr.bool_op != BOOL_NOT) {\n            /* Error */\n            DisplayLog(LVL_CRIT, LISTMGR_TAG,\n                       \"Invalid unary operator %d in %s()\",\n                       boolexpr->content_u.bool_expr.bool_op, __FUNCTION__);\n            return false;\n        }\n        /* only accept 'NOT condition', but reject 'NOT (cond AND cond)' */\n        return (boolexpr->content_u.bool_expr.expr1->node_type ==\n                NODE_CONDITION);\n\n    case NODE_BINARY_EXPR:\n        if (depth > 2) {\n            DisplayLog(LVL_EVENT, LISTMGR_TAG,\n                       \"Too many levels of nested parenthesis in expression \"\n                       \"(%d levels). Consider simplifying it.\", depth);\n            return false;\n        }\n        if (boolexpr->content_u.bool_expr.bool_op != BOOL_AND\n                 && boolexpr->content_u.bool_expr.bool_op != BOOL_OR)\n            return false;\n\n        /* bool operation context unchanged? 
*/\n        if (boolexpr->content_u.bool_expr.bool_op == op_ctx)\n            return (is_simple_expr\n                    (boolexpr->content_u.bool_expr.expr1, depth, op_ctx)\n                    && is_simple_expr(boolexpr->content_u.bool_expr.expr2,\n                                      depth, op_ctx));\n        else\n            return (is_simple_expr\n                    (boolexpr->content_u.bool_expr.expr1, depth + 1,\n                     boolexpr->content_u.bool_expr.bool_op)\n                    && is_simple_expr(boolexpr->content_u.bool_expr.expr2,\n                                      depth + 1,\n                                      boolexpr->content_u.bool_expr.bool_op));\n\n    case NODE_CONDITION:\n        /* If attribute is in DB, it can be filtered\n         * If attribute is not in DB, we ignore it and get all entries\n         *  (~ AND true)\n         */\n        return true;\n\n    case NODE_CONSTANT:\n        return true;\n\n    default:\n        DisplayLog(LVL_CRIT, LISTMGR_TAG, \"Invalid boolean expression in %s()\",\n                   __FUNCTION__);\n        return false;\n    }\n}\n\nstatic bool allow_null(unsigned int attr_index,\n                       const filter_comparator_t *comp,\n                       const filter_value_t *val)\n{\n    /* don't add 'OR IS NULL' if NULL is explicitely matched */\n    if (*comp == ISNULL || *comp == NOTNULL)\n        return false;\n\n    /* allow NULL for strings if matching is:\n     * x != 'non empty val' (or x not like 'non empty')\n     * x == '' (or x like '')\n     * DON't allow NULL string if matching is:\n     * x == 'non empty'  (or x like 'non empty')\n     * x != '' (or x not like '')\n     */\n    if (field_type(attr_index) == DB_TEXT ||\n        field_type(attr_index) == DB_ENUM_FTYPE) {\n        if (*comp == EQUAL || *comp == LIKE || *comp == ILIKE)\n            /* allow NULL if matching against empty string */\n            return (val->value.val_str == NULL\n                    || 
EMPTY_STRING(val->value.val_str));\n        else if (*comp == NOTEQUAL || *comp == UNLIKE || *comp == IUNLIKE)\n            /* allow NULL if matching != non-empty string */\n            return !(val->value.val_str == NULL\n                     || EMPTY_STRING(val->value.val_str));\n        else {\n            /* unexpected case */\n            DisplayLog(LVL_CRIT, LISTMGR_TAG,\n                       \"Warning: unhandled case in %s(), line %u\", __func__,\n                       __LINE__);\n        }\n    }\n    return true;    /* allow, by default */\n}\n\nbool cond2sql_ok(bool_node_t *boolexpr,\n                      const sm_instance_t *smi,\n                      const time_modifier_t *time_mod)\n{\n    unsigned int index = ATTR_INDEX_FLG_UNSPEC;\n    int rc;\n    filter_comparator_t comp;\n    filter_value_t val;\n    bool must_free;\n    attr_mask_t tmp;\n\n    switch (boolexpr->node_type) {\n    case NODE_UNARY_EXPR:\n        return cond2sql_ok(boolexpr->content_u.bool_expr.expr1,\n                                smi, time_mod);\n        break;\n    case NODE_CONDITION:\n        rc = criteria2filter(boolexpr->content_u.condition,\n                             &index, &comp, &val, &must_free, smi, time_mod);\n\n        if (rc != 0 || (index & ATTR_INDEX_FLG_UNSPEC))\n            /* do nothing (equivalent to 'AND TRUE') */\n            return false;\n\n        // free allocated memory\n        if (must_free)\n            MemFree((char *)val.value.val_str);\n\n        /* test generated fields */\n        tmp = null_mask;\n        attr_mask_set_index(&tmp, index);\n\n        if (generated_fields(tmp) || dirattr_fields(tmp)\n            || funcattr_fields(tmp))\n            return false;\n        else\n            return true;\n        break;\n    case NODE_BINARY_EXPR:\n        return cond2sql_ok(boolexpr->content_u.bool_expr.expr1,\n                                 smi, time_mod)\n                || cond2sql_ok(boolexpr->content_u.bool_expr.expr2,\n             
                       smi, time_mod);\n        break;\n    case NODE_CONSTANT:\n        return false;\n        break;\n    }\n\n    // Should never arrive here\n    RBH_BUG(\"DB condition verification, found unknown case\");\n    return false;\n}\n\n/* Extract simple pieces of expressions and append them to filter.\n * The resulting filter is expected to return a larger set than the actual\n * condition.\n * Ignore conflicting criteria.\n * \\param expr_flag indicate if BEGIN/END parenthesing is needed\n * \\param depth indicate the current parenthesing depth\n * \\param op_ctx indicate the current operation context:\n *      e.g. AND for 'x and y and z'\n */\nstatic int append_simple_expr(bool_node_t *boolexpr, lmgr_filter_t *filter,\n                              const sm_instance_t *smi,\n                              const time_modifier_t *time_mod, int expr_flag,\n                              int depth, bool_op_t op_ctx)\n{\n    int rc, new_depth;\n    unsigned int index = ATTR_INDEX_FLG_UNSPEC;\n    int flag = 0;\n    filter_comparator_t comp;\n    filter_value_t val;\n    bool must_free;\n    /* keep original filter count to reset it on error */\n    int count_orig = filter->filter_simple.filter_count;\n\n    if (depth > 2) {\n        DisplayLog(LVL_EVENT, LISTMGR_TAG,\n                   \"Too many levels of nested parenthesis in expression \"\n                   \"(%d levels). 
Consider simplifying it.\", depth);\n        return DB_INVALID_ARG;\n    }\n\n    switch (boolexpr->node_type) {\n    case NODE_UNARY_EXPR:\n        if (boolexpr->content_u.bool_expr.bool_op != BOOL_NOT) {\n            DisplayLog(LVL_CRIT, LISTMGR_TAG,\n                       \"Invalid unary operator %d in %s()\",\n                       boolexpr->content_u.bool_expr.bool_op, __FUNCTION__);\n            return DB_INVALID_ARG;\n        }\n\n        return append_simple_expr(boolexpr->content_u.bool_expr.expr1,\n                                  filter, smi, time_mod,\n                                  expr_flag | FILTER_FLAG_NOT_BEGIN\n                                  | FILTER_FLAG_NOT_END, depth + 1, op_ctx);\n\n    case NODE_CONDITION:\n        /* If attribute is in DB, it can be filtered\n         * If attribute is not in DB, we ignore it and get all entries\n         * (~ AND TRUE)\n         */\n        // Return if this condition can't be translated to SQL statement\n        if (!cond2sql_ok(boolexpr, smi, time_mod))\n            return 0;\n\n        /* get info about condition */\n        rc = criteria2filter(boolexpr->content_u.condition,\n                             &index, &comp, &val, &must_free, smi, time_mod);\n\n        if ((expr_flag & FILTER_FLAG_ALLOW_NULL)\n            || (allow_null(index, &comp, &val)\n            /* Don't filter null value if we are in a negated block */\n                && !(expr_flag & (FILTER_FLAG_NOT_BEGIN|FILTER_FLAG_NOT_END))))\n            flag |= FILTER_FLAG_ALLOW_NULL;\n\n        if (must_free)\n            flag |= FILTER_FLAG_ALLOC_STR;\n\n        /* propagate parenthesing flag + OR (NOT?) 
*/\n        flag |= (expr_flag & (FILTER_FLAG_BEGIN | FILTER_FLAG_END\n                              | FILTER_FLAG_NOT_BEGIN | FILTER_FLAG_OR\n                              | FILTER_FLAG_NOT_END));\n\n        /* @TODO support FILTER_FLAG_ALLOC_LIST */\n\n        /* add condition to filter */\n        DisplayLog(LVL_FULL, LISTMGR_TAG,\n                   \"Appending filter on \\\"%s\\\", flags=%#X\", field_name(index),\n                   flag);\n\n        return lmgr_simple_filter_add(filter, index, comp, val, flag);\n\n    case NODE_BINARY_EXPR:\n        {\n            int flag1, flag2;\n            bool dbcond1, dbcond2;\n            bool begin_end = false;\n\n            /* only AND/OR binary operators supported */\n            if (boolexpr->content_u.bool_expr.bool_op != BOOL_AND\n                     && boolexpr->content_u.bool_expr.bool_op != BOOL_OR)\n                return DB_INVALID_ARG;\n\n            flag1 = (op_ctx == BOOL_OR) ? FILTER_FLAG_OR : 0;\n            /* x OR y? */\n            flag2 = (boolexpr->content_u.bool_expr.bool_op == BOOL_OR) ?\n                FILTER_FLAG_OR : 0;\n            if (boolexpr->content_u.bool_expr.bool_op == op_ctx) {\n                new_depth = depth;\n                /* propagate BEGIN/END flags */\n                flag1 |= expr_flag & (FILTER_FLAG_BEGIN\n                                      | FILTER_FLAG_NOT_BEGIN);\n                flag2 |= expr_flag & (FILTER_FLAG_END | FILTER_FLAG_NOT_END);\n            } else {\n                new_depth = depth + 1;\n                /* new level of parenthesing */\n                /* propagate NOT_BEGIN/NOT_END flags */\n                flag1 |=\n                    FILTER_FLAG_BEGIN | (expr_flag & FILTER_FLAG_NOT_BEGIN);\n                flag2 |= FILTER_FLAG_END | (expr_flag & FILTER_FLAG_NOT_END);\n\n                // don't create a new block if parent already has one\n                if (!(expr_flag\n                      & (FILTER_FLAG_BEGIN_BLOCK | 
FILTER_FLAG_NOT_BEGIN))) {\n                    // Append begin node\n                    flag1 |= FILTER_FLAG_BEGIN_BLOCK;\n                    flag1 &= ~(FILTER_FLAG_BEGIN | FILTER_FLAG_NOT_BEGIN);\n                    flag2 &= ~(FILTER_FLAG_END | FILTER_FLAG_NOT_END);\n                    begin_end = true;\n                    switch (op_ctx) {\n                        case BOOL_OR:\n                            lmgr_simple_filter_add_block(filter, FILTER_FLAG_OR\n                                                     | FILTER_FLAG_BEGIN_BLOCK);\n                            break;\n                        case BOOL_AND:\n                            lmgr_simple_filter_add_block(filter,\n                                                     FILTER_FLAG_BEGIN_BLOCK);\n                            break;\n                        default:\n                             DisplayLog(LVL_MAJOR, LISTMGR_TAG,\n                                              \"BOOLEXPR unhandled case\");\n                             break;\n                    }\n                }\n            }\n\n            // check if expr1 and expr2 have valid DB filter\n            // if not, do something about it\n            dbcond1 = cond2sql_ok(boolexpr->content_u.bool_expr.expr1,\n                                       smi, time_mod);\n            dbcond2 = cond2sql_ok(boolexpr->content_u.bool_expr.expr2,\n                                       smi, time_mod);\n\n            if (dbcond1 && dbcond2) {\n                rc = append_simple_expr(boolexpr->content_u.bool_expr.expr1,\n                                        filter, smi, time_mod, flag1, new_depth,\n                                        boolexpr->content_u.bool_expr.bool_op);\n                if (rc) {\n                    filter->filter_simple.filter_count = count_orig;\n                    return rc;\n                }\n                rc = append_simple_expr(boolexpr->content_u.bool_expr.expr2,\n                                        
filter, smi, time_mod, flag2, new_depth,\n                                        boolexpr->content_u.bool_expr.bool_op);\n                if (begin_end)\n                    lmgr_simple_filter_add_block(filter, FILTER_FLAG_END_BLOCK);\n\n            } else if (dbcond1 && !dbcond2)\n                rc = append_simple_expr(boolexpr->content_u.bool_expr.expr1,\n                                        filter, smi, time_mod,\n                                        flag1 | flag2, new_depth,\n                                        boolexpr->content_u.bool_expr.bool_op);\n            else if (!dbcond1 && dbcond2)\n                rc = append_simple_expr(boolexpr->content_u.bool_expr.expr2,\n                                        filter, smi, time_mod,\n                                        flag1 | flag2, new_depth,\n                                        boolexpr->content_u.bool_expr.bool_op);\n            else\n                // No valid conditions in exp1 and exp2\n                rc = 0;\n\n            if (rc)\n                filter->filter_simple.filter_count = count_orig;\n            return rc;\n        }\n\n    case NODE_CONSTANT:\n        if (boolexpr->content_u.constant) {\n            return 0;   /* 'and true' */\n        } else {\n            /* no sense, abort the query */\n            DisplayLog(LVL_MAJOR, LISTMGR_TAG,\n                       \"Building DB request which is always false?!\");\n            return DB_INVALID_ARG;\n        }\n\n    default:\n        DisplayLog(LVL_CRIT, LISTMGR_TAG,\n                   \"Invalid boolean expression %#x in %s()\",\n                   boolexpr->node_type, __FUNCTION__);\n        return DB_INVALID_ARG;\n    }\n}\n\n/** Convert simple expressions to ListMgr filter (append filter) */\nint convert_boolexpr_to_simple_filter(bool_node_t *boolexpr,\n                                      lmgr_filter_t *filter,\n                                      const sm_instance_t *smi,\n                                      
const time_modifier_t *time_mod,\n                                      enum filter_flags flags,\n                                      bool_op_t op_ctx)\n{\n    int rc;\n\n    if (!is_simple_expr(boolexpr, 0, op_ctx))\n        return DB_INVALID_ARG;\n\n    /* create a boolexpr as 'NOT ( <expr> )' */\n    if (flags & FILTER_FLAG_NOT) {\n        bool_node_t notexpr;\n        int prev_nb;\n\n        notexpr.node_type = NODE_UNARY_EXPR;\n        notexpr.content_u.bool_expr.bool_op = BOOL_NOT;\n        notexpr.content_u.bool_expr.expr1 = boolexpr;\n        notexpr.content_u.bool_expr.owner = 0;\n\n        /* add all or nothing => save filter count before */\n        prev_nb = filter->filter_simple.filter_count;\n\n        /* default filter context is op_ctx */\n        rc = append_simple_expr(&notexpr, filter, smi, time_mod,\n                                flags & ~FILTER_FLAG_NOT, 0, op_ctx);\n        if (rc)\n            filter->filter_simple.filter_count = prev_nb;\n\n        return rc;\n    }\n\n    /* default filter context is op_ctx */\n    rc = append_simple_expr(boolexpr, filter, smi, time_mod, flags,\n                              0, op_ctx);\n    return rc;\n}\n\n/** Set a complex filter structure */\nint lmgr_set_filter_expression(lmgr_filter_t *p_filter,\n                               struct bool_node_t *boolexpr)\n{\n    p_filter->filter_type = FILTER_BOOLEXPR;\n    p_filter->filter_u.boolean_expr = boolexpr;\n    return 0;\n}\n\n/** Check that all fields in filter are in the given mask of supported\n *  attributes\n * @param index if not NULL, it is set to the index of the unsupported filter.\n *              and -1 for other errors.\n */\nint lmgr_check_filter_fields(lmgr_filter_t *p_filter, attr_mask_t attr_mask,\n                             int *index)\n{\n    int i;\n\n    if (index)\n        *index = -1;\n\n    if (p_filter->filter_type != FILTER_SIMPLE)\n        return DB_INVALID_ARG;\n\n    for (i = 0; i < p_filter->filter_simple.filter_count; 
i++) {\n        /* Skip begin/end blocks as they are not related to an\n         * attribute */\n        if (p_filter->filter_simple.filter_flags[i] &\n            (FILTER_FLAG_BEGIN_BLOCK | FILTER_FLAG_END_BLOCK))\n            continue;\n\n        if (!attr_mask_test_index(&attr_mask,\n                                  p_filter->filter_simple.filter_index[i])) {\n            if (index)\n                *index = i;\n            return DB_NOT_SUPPORTED;\n        }\n    }\n\n    return DB_SUCCESS;\n}\n"
  },
  {
    "path": "src/list_mgr/listmgr_get.c",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2008, 2009 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n#ifdef HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n\n#include \"list_mgr.h\"\n#include \"listmgr_internal.h\"\n#include \"listmgr_common.h\"\n#include \"listmgr_stripe.h\"\n#include \"database.h\"\n#include \"rbh_logs.h\"\n#include \"rbh_misc.h\"\n\n#include <stdio.h>\n#include <stdlib.h>\n\n\nint ListMgr_Exists(lmgr_t *p_mgr, const entry_id_t *p_id)\n{\n    GString        *req;\n    int             rc;\n    result_handle_t result;\n    char           *str_count = NULL;\n    DEF_PK(pk);\n    int             retry_status;\n\n    /* retrieve primary key */\n    entry_id2pk(p_id, PTR_PK(pk));\n\n    /* verify it exists in main table */\n    req = g_string_new(\"SELECT id FROM \" MAIN_TABLE \" WHERE id=\");\n    g_string_append_printf(req, DPK, pk);\n\nretry:\n    /* execute the request (must return negative value on error) */\n    rc = -db_exec_sql(&p_mgr->conn, req->str, &result);\n    retry_status = lmgr_delayed_retry(p_mgr, -rc);\n    if (retry_status == 1)\n        goto retry;\n    else if (retry_status == 2) {\n        rc = -DB_RBH_SIG_SHUTDOWN;\n        goto free_str;\n    } else if (rc)\n        goto free_str;\n\n    rc = db_next_record(&p_mgr->conn, &result, &str_count, 1);\n    if (rc == 0)\n        rc = 1; /* return 1 if entry exists */\n    else if (rc != DB_END_OF_LIST)\n    {\n        retry_status = lmgr_delayed_retry(p_mgr, -rc);\n        if (retry_status == 1)\n            goto retry;\n        else if (retry_status == 2) {\n            rc = -DB_RBH_SIG_SHUTDOWN;\n            goto 
free_result;\n        }\n    }\n    else\n        rc = 0;\n\nfree_result:\n    db_result_free(&p_mgr->conn, &result);\n\nfree_str:\n    g_string_free(req, TRUE);\n    return rc;\n}\n\n/** retrieve directory attributes (nbr of entries, avg size of entries)*/\nint listmgr_get_dirattrs( lmgr_t * p_mgr, PK_ARG_T dir_pk, attr_set_t * p_attrs )\n{\n    GString         *req;\n    result_handle_t  result;\n    char            *str_info[1];\n    int              rc = 0;\n    int              tmp_val;\n    long long        tmp_long;\n\n    if (ATTR_MASK_TEST(p_attrs, type) &&\n        (strcmp(ATTR(p_attrs, type), STR_TYPE_DIR) != 0))\n    {\n        DisplayLog(LVL_FULL, LISTMGR_TAG,\n                   \"Type='%s' != 'dir' => unsetting dirattrs in attr mask\",\n                   ATTR(p_attrs, type));\n        p_attrs->attr_mask = attr_mask_and_not(&p_attrs->attr_mask, &dir_attr_set);\n        return 0;\n    }\n\n    req = g_string_new(NULL);\n\n    /* get child entry count from DNAMES_TABLE */\n    if (ATTR_MASK_TEST(p_attrs, dircount))\n    {\n        g_string_printf(req, \"SELECT %s FROM \"DNAMES_TABLE\" WHERE parent_id=\"DPK,\n                        dirattr2str(ATTR_INDEX_dircount), dir_pk);\n\n        rc = db_exec_sql(&p_mgr->conn, req->str, &result);\n        if (rc)\n            goto free_str;\n\n        rc = db_next_record(&p_mgr->conn, &result, str_info, 1);\n        if (rc == DB_END_OF_LIST)\n        {\n            ATTR_MASK_UNSET(p_attrs, dircount);\n            rc = DB_SUCCESS;\n        }\n        else if (rc == DB_SUCCESS)\n        {\n            if (str_info[0] == NULL)\n                /* count(*) should at least return 0 */\n                rc = DB_REQUEST_FAILED;\n            else\n            {\n                tmp_val = str2int(str_info[0]);\n                if (tmp_val != -1)\n                {\n                    ATTR_MASK_SET(p_attrs, dircount);\n                    ATTR(p_attrs, dircount) = tmp_val;\n                    rc = DB_SUCCESS;\n             
   }\n                else\n                    /* invalid output format */\n                    rc = DB_REQUEST_FAILED;\n            }\n        }\n        db_result_free(&p_mgr->conn, &result);\n        if (rc)\n            goto free_str;\n    }\n\n    /* get avgsize of child entries from MAIN_TABLE */\n    if (ATTR_MASK_TEST(p_attrs, avgsize))\n    {\n        g_string_printf(req, \"SELECT %s FROM \"MAIN_TABLE\" m, \"DNAMES_TABLE\" d\"\n                        \" WHERE m.id = d.id and type='file' and d.parent_id=\"DPK,\n                        dirattr2str(ATTR_INDEX_avgsize), dir_pk);\n\n        rc = db_exec_sql(&p_mgr->conn, req->str, &result);\n        if (rc)\n            goto free_str;\n\n        rc = db_next_record(&p_mgr->conn, &result, str_info, 1);\n        if (rc == DB_END_OF_LIST)\n            ATTR_MASK_UNSET(p_attrs, avgsize);\n        else if (rc == DB_SUCCESS)\n        {\n            if (str_info[0] == NULL)\n            {\n                /* NULL if no entry matches the criteria */\n                ATTR_MASK_UNSET(p_attrs, avgsize);\n                rc = DB_SUCCESS;\n            }\n            else\n            {\n                tmp_long = str2bigint(str_info[0]);\n                if (tmp_long != -1LL)\n                {\n                    ATTR_MASK_SET(p_attrs, avgsize);\n                    ATTR(p_attrs, avgsize) = tmp_long;\n                    rc = DB_SUCCESS;\n                }\n                else\n                    /* invalid output format */\n                    rc = DB_REQUEST_FAILED;\n            }\n        }\n        db_result_free(&p_mgr->conn, &result);\n    }\n\nfree_str:\n    g_string_free(req, TRUE);\n    return rc;\n}\n\n/** only keep supported bits in the given mask */\nstatic void supported_bits_only(attr_mask_t *p_mask)\n{\n    /* don't get fields that are not in main, names, annex, stripe...\n     * This allows the caller to set all bits 'on' to get everything.\n     */\n    p_mask->std &= (main_attr_set.std | 
names_attr_set.std\n                              | annex_attr_set.std | stripe_attr_set.std\n                              | dir_attr_set.std | slink_attr_set.std);\n\n    p_mask->status &= (main_attr_set.status | names_attr_set.status\n                              | annex_attr_set.status | stripe_attr_set.status\n                              | dir_attr_set.status | slink_attr_set.status);\n\n    p_mask->sm_info &= (main_attr_set.sm_info | names_attr_set.sm_info\n                              | annex_attr_set.sm_info | stripe_attr_set.sm_info\n                              | dir_attr_set.sm_info | slink_attr_set.sm_info);\n}\n\n/** clean bits of attributes in main, annex and names */\nstatic void clean_std_table_bits(attr_mask_t *p_mask)\n{\n    p_mask->std &= ~(main_attr_set.std | annex_attr_set.std\n                               | names_attr_set.std);\n    p_mask->status &= ~(main_attr_set.status | annex_attr_set.status\n                               | names_attr_set.status);\n    p_mask->sm_info &= ~(main_attr_set.sm_info | annex_attr_set.sm_info\n                               | names_attr_set.sm_info);\n}\n\n/**\n *  Retrieve entry attributes from its primary key\n */\nint listmgr_get_by_pk( lmgr_t * p_mgr, PK_ARG_T pk, attr_set_t * p_info )\n{\n    int             rc;\n    char           *first_table = NULL;\n    GString        *req, *from;\n    /* attribute count is up to 1 per bit (8 per byte).\n     * x2 for bullet proofing */\n    char           *result_tab[2*8*sizeof(p_info->attr_mask)];\n    result_handle_t result;\n    bool            checkmain   = true;\n    int             main_count  = 0,\n                    annex_count = 0,\n                    name_count  = 0;\n    attr_mask_t     gen = gen_fields(p_info->attr_mask);\n\n    if (p_info == NULL)\n        return 0;\n\n    /* init entry info */\n    memset(&p_info->attr_values, 0, sizeof(entry_info_t));\n    req = g_string_new(\"SELECT \");\n    from = g_string_new(\" FROM \");\n\n    /* 
retrieve source info for generated fields (only about std fields)*/\n    add_source_fields_for_gen(&p_info->attr_mask.std);\n\n    /* don't get fields that are not in main, names, annex, stripe...\n     * This allows the caller to set all bits 'on' to get everything.\n     * Note: this also clear generated fields. They will be restored after.\n     */\n    supported_bits_only(&p_info->attr_mask);\n\n    /* get info from main table (if asked) */\n    main_count = attrmask2fieldlist(req, p_info->attr_mask, T_MAIN, \"\", \"\", 0);\n    if (main_count < 0)\n    {\n        rc = -main_count;\n        goto free_str;\n    }\n    else if (main_count > 0)\n    {\n        checkmain = false;\n        first_table = MAIN_TABLE;\n        g_string_append(from, MAIN_TABLE);\n    }\n\n    annex_count = attrmask2fieldlist(req, p_info->attr_mask, T_ANNEX, \"\", \"\",\n                                     first_table != NULL ? AOF_LEADING_SEP : 0);\n    if (annex_count < 0)\n    {\n        rc = -annex_count;\n        goto free_str;\n    }\n    else if (annex_count > 0)\n    {\n        if (first_table != NULL)\n            g_string_append_printf(from, \" LEFT JOIN \"ANNEX_TABLE\" ON %s.id=\"\n                                   ANNEX_TABLE\".id\", first_table);\n        else\n        {\n            first_table = ANNEX_TABLE;\n            g_string_append(from, ANNEX_TABLE);\n        }\n    }\n\n    name_count = attrmask2fieldlist(req, p_info->attr_mask, T_DNAMES, \"\", \"\",\n                                    first_table != NULL ? AOF_LEADING_SEP : 0);\n    if (name_count < 0)\n    {\n        rc = -name_count;\n        goto free_str;\n    }\n    else if (name_count > 0)\n    {\n        if (first_table)\n            /* it's OK to JOIN with NAMES table here even if there are multiple paths,\n             * as we only take one result record. The important thing is to return\n             * consistent values for parent_id, name and fullpath. 
*/\n            g_string_append_printf(from, \" LEFT JOIN \"DNAMES_TABLE\" ON %s.id=\"\n                                   DNAMES_TABLE\".id\", first_table);\n        else\n        {\n            first_table = DNAMES_TABLE;\n            g_string_append(from, DNAMES_TABLE);\n        }\n    }\n\n    if (first_table != NULL)\n    {\n        int shift = 0;\n\n        g_string_append_printf(req, \"%s WHERE %s.id=\"DPK, from->str,\n                               first_table, pk);\n\n        rc = db_exec_sql(&p_mgr->conn, req->str, &result);\n        if (rc)\n            goto free_str;\n\n        rc = db_next_record(&p_mgr->conn, &result, result_tab,\n                            main_count + annex_count + name_count);\n        /* END_OF_LIST means it does not exist */\n        if (rc == DB_END_OF_LIST)\n        {\n            clean_std_table_bits(&p_info->attr_mask);\n\n            /* not found, but did not check MAIN yet */\n            if (checkmain)\n                goto next_table;\n\n            rc = DB_NOT_EXISTS;\n        }\n        if (rc)\n            goto free_res;\n\n        /* set info from result */\n        if (main_count)\n        {\n            rc = result2attrset(T_MAIN, result_tab + shift, main_count, p_info);\n            shift += main_count;\n            if (rc)\n                goto free_res;\n        }\n        if (annex_count)\n        {\n            rc = result2attrset(T_ANNEX, result_tab + shift, annex_count,\n                                p_info);\n            shift += annex_count;\n            if (rc)\n                goto free_res;\n        }\n        if (name_count)\n        {\n            rc = result2attrset(T_DNAMES, result_tab + shift, name_count,\n                                p_info);\n            shift += name_count;\n            if (rc)\n                goto free_res;\n        }\n\nnext_table:\n        db_result_free(&p_mgr->conn, &result);\n    }\n\n    /* remove stripe info if it is not a file */\n    if 
(stripe_fields(p_info->attr_mask) && ATTR_MASK_TEST(p_info, type)\n        && strcmp(ATTR(p_info, type), STR_TYPE_FILE) != 0)\n    {\n        p_info->attr_mask = attr_mask_and_not(&p_info->attr_mask, &stripe_attr_set);\n    }\n\n    /* get stripe info if asked */\n#ifdef _LUSTRE\n    if (stripe_fields(p_info->attr_mask))\n    {\n        rc = get_stripe_info(p_mgr, pk, &ATTR(p_info, stripe_info),\n                             ATTR_MASK_TEST(p_info, stripe_items)?\n                                &ATTR(p_info, stripe_items) : NULL);\n        if (rc == DB_ATTR_MISSING || rc == DB_NOT_EXISTS)\n        {\n            /* stripe info is in std mask */\n            p_info->attr_mask.std &= ~ATTR_MASK_stripe_info;\n\n            if (ATTR_MASK_TEST(p_info, stripe_items))\n                p_info->attr_mask.std &= ~ATTR_MASK_stripe_items;\n        }\n        else if (rc)\n            goto free_str;\n        else\n            checkmain = false; /* entry exists */\n    }\n#else\n    /* POSIX: always clean stripe bits */\n    p_info->attr_mask = attr_mask_and_not(&p_info->attr_mask, &stripe_attr_set);\n#endif\n\n    /* special field dircount */\n    if (dirattr_fields(p_info->attr_mask))\n    {\n        if (listmgr_get_dirattrs(p_mgr, pk, p_info))\n        {\n            DisplayLog(LVL_MAJOR, LISTMGR_TAG, \"listmgr_get_dirattrs failed for \"DPK, pk);\n            p_info->attr_mask = attr_mask_and_not(&p_info->attr_mask, &dir_attr_set);\n        }\n    }\n\n    if (checkmain)\n    {\n        /* verify it exists in main table */\n        g_string_printf(req, \"SELECT id FROM \" MAIN_TABLE \" WHERE id=\"DPK, pk);\n\n        /* execute the request */\n        rc = db_exec_sql(&p_mgr->conn, req->str, &result);\n        if (rc)\n            goto free_str;\n\n        rc = db_next_record(&p_mgr->conn, &result, result_tab, 1);\n        db_result_free(&p_mgr->conn, &result);\n        if (rc)\n        {\n            rc = DB_NOT_EXISTS;\n            goto free_str;\n        }\n    }\n\n    /* 
restore generated fields in attr mask */\n    p_info->attr_mask = attr_mask_or(&p_info->attr_mask, &gen);\n    /* generate them */\n    generate_fields(p_info);\n\n    /* update operation stats */\n    p_mgr->nbop[OPIDX_GET]++;\n\n    rc = DB_SUCCESS;\n    goto free_str;\n\n  free_res:\n    db_result_free(&p_mgr->conn, &result);\n  free_str:\n    g_string_free(req, TRUE);\n    g_string_free(from, TRUE);\n    return rc;\n} /* listmgr_get_by_pk */\n\n\n\nint ListMgr_Get( lmgr_t * p_mgr, const entry_id_t * p_id, attr_set_t * p_info )\n{\n    int rc;\n    DEF_PK(pk);\n    int retry_status;\n\n    entry_id2pk(p_id, PTR_PK(pk));\nretry:\n    rc = listmgr_get_by_pk(p_mgr, pk, p_info);\n    retry_status = lmgr_delayed_retry(p_mgr, rc);\n    if (retry_status == 1)\n        goto retry;\n    else if (retry_status == 2)\n        rc = DB_RBH_SIG_SHUTDOWN;\n    return rc;\n}\n\n\n/* Retrieve the FID from the database given the parent FID and the file name. */\nint ListMgr_Get_FID_from_Path( lmgr_t * p_mgr, const entry_id_t * parent_fid,\n                               const char *name, entry_id_t * fid)\n{\n    result_handle_t result;\n    GString        *req = NULL;\n    char            escaped[RBH_NAME_MAX*2+1];\n    DEF_PK(pk);\n    int rc;\n    char            *str_info[1];\n    int             retry_status;\n\n    entry_id2pk(parent_fid, PTR_PK(pk));\n\n    db_escape_string(&p_mgr->conn, escaped, sizeof(escaped), name);\n\n    req = g_string_new(\"SELECT id FROM \"DNAMES_TABLE\" WHERE pkn=\");\n    g_string_append_printf(req, HNAME_FMT, pk, escaped);\n\nretry:\n    rc = db_exec_sql(&p_mgr->conn, req->str, &result);\n    retry_status = lmgr_delayed_retry(p_mgr, rc);\n    if (retry_status == 1)\n        goto retry;\n    else if (retry_status == 2) {\n        rc = DB_RBH_SIG_SHUTDOWN;\n        goto free_str;\n    } else if (rc)\n        goto free_str;\n\n    rc = db_next_record(&p_mgr->conn, &result, str_info, 1);\n\n    retry_status = lmgr_delayed_retry(p_mgr, rc);\n    if 
(retry_status == 1)\n        goto retry;\n    else if (retry_status == 2) {\n        rc = DB_RBH_SIG_SHUTDOWN;\n        goto free_res;\n    } else if (rc != DB_SUCCESS)\n        goto free_res;\n\n    rc = pk2entry_id(p_mgr, str_info[0], fid);\n\nfree_res:\n    db_result_free(&p_mgr->conn, &result);\nfree_str:\n    g_string_free(req, TRUE);\n    return rc;\n}\n"
  },
  {
    "path": "src/list_mgr/listmgr_init.c",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2008, 2009, 2010 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n#ifdef HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n\n#include \"list_mgr.h\"\n#include \"listmgr_internal.h\"\n#include \"database.h\"\n#include \"listmgr_common.h\"\n#include \"rbh_logs.h\"\n#include \"rbh_misc.h\"\n#include <stdio.h>\n#include <pwd.h>\n#include <grp.h>\n\n/* global symbols */\nstatic const char *acct_info_table = NULL;\nstatic enum lmgr_init_flags init_flags;\n#define report_only (!!(init_flags & LIF_REPORT_ONLY))\n#define alter_db    (!!(init_flags & LIF_ALTER_DB))\n#define alter_no_display (!!(init_flags & LIF_ALTER_NODISP))\n\n#define MAX_DB_FIELDS 64\n\n/** append SQL type of a status field */\nstatic void append_status_sql_type(GString *str, const sm_instance_t *smi)\n{\n    int i;\n\n    g_string_append(str, \"ENUM('', \");\n    for (i = 0; i < smi->sm->status_count; i++) {\n        g_string_append_printf(str, \"%s'%s'\", (i == 0) ? \"\" : \",\",\n                               smi->sm->status_enum[i]);\n    }\n    /* end of \"ENUM (\" */\n    g_string_append_c(str, ')');\n}\n\n/** append SQL request to create a status field */\nstatic void append_status_def(const sm_instance_t *smi, GString *str,\n                              bool is_first)\n{\n    g_string_append_printf(str, \"%s%s_status \", is_first ? 
\"\" : \",\",\n                           smi->instance_name);\n    append_status_sql_type(str, smi);\n\n    /* default status is always '' */\n    g_string_append(str, \" DEFAULT ''\");\n}\n\nstatic void append_sql_type(GString *str, db_type_e type, unsigned int size)\n{\n    switch (type) {\n    case DB_STRIPE_INFO:   /* never in main table (ignored) */\n    case DB_STRIPE_ITEMS:\n        break;\n    case DB_TEXT:\n        {\n            /* VARBINARY length is limited. For larger strings, use TEXT. */\n            if (size <= MAX_VARBINARY)\n                g_string_append_printf(str, \"VARBINARY(%u)\", size);\n            else\n                g_string_append(str, \"TEXT\");\n        }\n        break;\n    case DB_INT:\n        g_string_append(str, \"INT\");\n        break;\n    case DB_UINT:\n        g_string_append(str, \"INT UNSIGNED\");\n        break;\n    case DB_SHORT:\n        g_string_append(str, \"SMALLINT\");\n        break;\n    case DB_USHORT:\n        g_string_append(str, \"SMALLINT UNSIGNED\");\n        break;\n    case DB_BIGINT:\n        g_string_append(str, \"BIGINT\");\n        break;\n    case DB_BIGUINT:\n        g_string_append(str, \"BIGINT UNSIGNED\");\n        break;\n    case DB_BOOL:\n        g_string_append(str, \"BOOLEAN\");\n        break;\n    case DB_ID:\n        g_string_append(str, PK_TYPE);\n        break;\n    case DB_ENUM_FTYPE:\n        g_string_append_printf(str,\n                               \"ENUM('%s', '%s', '%s', '%s', '%s', '%s', '%s')\",\n                               STR_TYPE_LINK, STR_TYPE_DIR, STR_TYPE_FILE,\n                               STR_TYPE_CHR, STR_TYPE_BLK, STR_TYPE_FIFO,\n                               STR_TYPE_SOCK);\n        break;\n    case DB_UIDGID:\n        {\n            if (global_config.uid_gid_as_numbers)\n                g_string_append(str, \"INT\");\n            else\n                append_sql_type(str, DB_TEXT, RBH_LOGIN_MAX - 1);\n        }\n        break;\n    }\n}\n\n/** builds [,] 
<field_name> <field_type> [DEFAULT <default_val>] */\nstatic void append_field(db_conn_t *pconn, GString *str, bool is_first,\n                         db_type_e type, unsigned int size, const char *name,\n                         const db_type_u *default_value)\n{\n    if (!is_first)\n        g_string_append(str, \", \");\n\n    g_string_append_printf(str, \"%s \", name);\n    append_sql_type(str, type, size);\n\n    if (default_value) {\n        g_string_append(str, \" DEFAULT \");\n        printdbtype(pconn, str, type, default_value);\n    }\n}\n\nstatic db_type_u default_uid = { .val_str = ACCT_DEFAULT_OWNER };\nstatic db_type_u default_gid = { .val_str = ACCT_DEFAULT_GROUP };\nstatic db_type_u default_projid = { .val_uint = ACCT_DEFAULT_PROJID };\nstatic db_type_u default_type = { .val_str = \"file\" };\nstatic db_type_u default_status = { .val_str = \"\" };\nstatic db_type_u default_zero = { 0 };\n\nstatic void init_default_field_values(void)\n{\n    if (global_config.uid_gid_as_numbers) {\n        char buff[4096];\n        struct passwd pw;\n        struct passwd *p_pw;\n        struct group gr;\n        struct group *p_gr;\n\n        if (getpwnam_r(\"nobody\", &pw, buff, sizeof(buff), &p_pw) != 0 ||\n            p_pw == NULL) {\n            DisplayLog(LVL_MAJOR, LISTMGR_TAG,\n                       \"Warning: couldn't resolve uid for user 'nobody'\");\n\n            /* nobody is 65534 on most Linux systems. */\n            default_uid.val_int = 65534;\n        } else {\n            default_uid.val_int = pw.pw_uid;\n        }\n\n        if ((getgrnam_r(\"nobody\", &gr, buff, sizeof(buff), &p_gr) != 0 ||\n             p_gr == NULL) &&\n            (getgrnam_r(\"nogroup\", &gr, buff, sizeof(buff), &p_gr) != 0 ||\n             p_gr == NULL)) {\n            DisplayLog(LVL_MAJOR, LISTMGR_TAG,\n                       \"Warning: couldn't resolve gid for group 'nogroup' or 'nobody'\");\n\n            /* nogroup is 65534 on Debian. 
nobody is 99 on RHEL and\n             * 65533 on SLES. */\n            default_gid.val_int = 65534;\n        } else {\n            default_gid.val_int = gr.gr_gid;\n        }\n    }\n}\n\nstatic const db_type_u *default_field_value(int attr_index)\n{\n    switch (attr_index) {\n    case ATTR_INDEX_type:\n        return &default_type;\n    case ATTR_INDEX_uid:\n        return &default_uid;\n    case ATTR_INDEX_gid:\n        return &default_gid;\n    case ATTR_INDEX_projid:\n        return &default_projid;\n    default:\n        if (is_status_field(attr_index)) {\n            return &default_status;\n        }\n\n        if (is_sm_info_field(attr_index)) {\n            int idx = attr2sminfo_index(attr_index);\n\n            /* return NULL if db_type is TEXT and val_str is NULL */\n            if (sm_attr_info[idx].def->db_type == DB_TEXT &&\n                sm_attr_info[idx].def->db_default.val_str == NULL)\n                return NULL;\n\n            return &sm_attr_info[idx].def->db_default;\n        }\n\n        /* accounting fields (except primary key) default to 0\n         * (must be able to sum) */\n        if (is_acct_field(attr_index))\n            return &default_zero;\n\n        return NULL;\n    }\n    UNREACHED();\n}\n\nstatic int field_size(int i)\n{\n    if (is_status_field(i))\n        return 0;   /* always enum, no varchar size */\n\n    if (is_sm_info_field(i)) {\n        int idx = attr2sminfo_index(i);\n        return sm_attr_info[idx].def->db_type_size;\n    }\n\n    return field_infos[i].db_type_size;\n}\n\nstatic void append_field_def(db_conn_t *pconn, int i, GString *str,\n                             bool is_first)\n{\n    unsigned int idx;\n\n    if (is_status_field(i)) {\n        idx = attr2status_index(i);\n        append_status_def(get_sm_instance(idx), str, is_first);\n        return;\n    }\n    if (is_sm_info_field(i)) {\n        idx = attr2sminfo_index(i);\n        append_field(pconn, str, is_first, sm_attr_info[idx].def->db_type,\n   
                  sm_attr_info[idx].def->db_type_size,\n                     sm_attr_info[idx].db_attr_name,\n                     &sm_attr_info[idx].def->db_default);\n        return;\n    }\n\n    append_field(pconn, str, is_first, field_infos[i].db_type,\n                 field_infos[i].db_type_size,\n                 field_infos[i].field_name, default_field_value(i));\n}\n\n/**\n * Check table fields.\n * @param i\n * @param curr_field_index [in,out] field index in currently checked schema\n * @return 0 on success\n * @return -1 on error\n */\nstatic int _check_field_name(const char *name, int *curr_field_index,\n                             const char *table, char **fieldtab)\n{\n    if ((*curr_field_index >= MAX_DB_FIELDS)\n        || (fieldtab[*curr_field_index] == NULL)) {\n        DisplayLog(LVL_CRIT, LISTMGR_TAG,\n                   \"Database schema: missing field '%s' in table %s.\",\n                   name, table);\n        return -1;\n    }\n    /* check that this is the expected field */\n    if (!strcmp(name, fieldtab[*curr_field_index])) {\n        DisplayLog(LVL_FULL, LISTMGR_TAG, \"%s.%s field name OK\", table, name);\n        return 0;\n    }\n\n    DisplayLog(LVL_DEBUG, LISTMGR_TAG, \"%s: '%s' expected, '%s' found\", table,\n               name, fieldtab[*curr_field_index]);\n\n    DisplayLog(LVL_MAJOR, LISTMGR_TAG,\n               \"Database schema: unexpected field '%s' in table %s: '%s' expected.\",\n               fieldtab[*curr_field_index], table, name);\n    return -1;\n}\n\nstatic int check_field_name(const char *name, int *curr_field_index,\n                            const char *table, char **fieldtab)\n{\n    if (_check_field_name(name, curr_field_index, table, fieldtab) == 0) {\n        (*curr_field_index)++;\n        return 0;\n    }\n    return -1;\n}\n\nstatic void drop_chars(char *str, int start_off, int end_off)\n{\n    /* drop len chars */\n    int len = (end_off - start_off + 1);\n    char *c;\n\n    for (c = str + 
start_off; *(c + len) != '\\0'; c++)\n        *c = *(c + len);\n\n    *c = '\\0';\n}\n\nstatic inline void drop_parenthesis_for(char *str, const char *pattern)\n{\n    char *w1, *w2;\n\n    /* remove parenthesis */\n    if ((w1 = strcasestr(str, pattern)) != NULL) {\n        /* move w1 to '(' */\n        w1 += 3;\n        w2 = strchr(w1, ')');\n        if (w2 != NULL)\n            drop_chars(str, (w1 - str), (w2 - str));\n    }\n}\n\nstatic int type_cmp(const char *db_type, const char *expected)\n{\n    char tmp[1024];\n    char expect_trunc[1024];\n\n    /* turn \"int(10)\" to \"INT\",\n     *      \"smallint(5)\" to \"SMALLINT\",\n     *      \"bigint(20)\" to \"BIGINT\" ...\n     */\n    rh_strncpy(tmp, db_type, sizeof(tmp));\n    rh_strncpy(expect_trunc, expected, sizeof(expect_trunc));\n\n    drop_parenthesis_for(tmp, \"INT(\");\n    drop_parenthesis_for(tmp, \"ENUM(\");\n    drop_parenthesis_for(expect_trunc, \"INT(\");\n    drop_parenthesis_for(expect_trunc, \"ENUM(\");\n\n    /* TINYINT may stand for BOOLEAN */\n    if (!strcasecmp(tmp, \"TINYINT\") && !strcmp(expect_trunc, \"BOOLEAN\"))\n        return 0;\n\n    return strcasecmp(tmp, expect_trunc);\n}\n\n/** Return the estimated time for a conversion operation.\n * @retval (time_t)-1 on error.\n */\nstatic time_t estimated_time(db_conn_t *pconn, const char *table,\n                             float avg_ent_per_sec)\n{\n    uint64_t record_count = 0;\n\n    /* prevent div by zero */\n    if (avg_ent_per_sec == 0.0)\n        return -1;\n\n    if (lmgr_table_count(pconn, table, &record_count) != DB_SUCCESS) {\n        DisplayLog(LVL_DEBUG, LISTMGR_TAG,\n                   \"Warning: lmgr_table_count(%s) failed\", table);\n        return -1;\n    }\n\n    return ((float)record_count) / avg_ent_per_sec;\n}\n\nstatic int convert_field_type(db_conn_t *pconn, const char *table,\n                              const char *field, const char *type)\n/* XXX may be unused if no field type is to be checked... 
*/\n    __attribute__ ((unused));\n\nstatic int convert_field_type(db_conn_t *pconn, const char *table,\n                              const char *field, const char *type)\n{\n    char query[1024];\n    char timestr[256] = \"\";\n    char t[128];\n    int rc;\n    time_t estimated = estimated_time(pconn, table, 100000);\n\n    if (estimated > 0)\n        snprintf(timestr, sizeof(timestr), \" (estim. duration: ~%s)\",\n                 FormatDurationFloat(t, sizeof(t), estimated));\n\n    snprintf(query, sizeof(query), \"ALTER TABLE %s MODIFY COLUMN %s %s\", table,\n             field, type);\n\n    DisplayLog(LVL_MAJOR, LISTMGR_TAG, \"Converting type of %s.%s to '%s'...%s\",\n               table, field, type, timestr);\n    rc = db_exec_sql(pconn, query, NULL);\n\n    if (rc) {\n        DisplayLog(LVL_CRIT, LISTMGR_TAG,\n                   \"Failed to run database conversion: Error: %s\",\n                   db_errmsg(pconn, query, sizeof(query)));\n        return rc;\n    }\n    DisplayLog(LVL_MAJOR, LISTMGR_TAG, \"%s.%s successfully converted\", table,\n               field);\n    return 0;\n}\n\nstatic int check_field_default(int attr_index, const char *val)\n{\n    GString *str;\n    int rc = 0;\n\n    /* get the expected default for this field */\n    const db_type_u *val_expect = default_field_value(attr_index);\n\n    /* NULL only matches NULL... 
*/\n    if (val == NULL) {\n        if (val_expect == NULL)\n            return 0;\n        DisplayLog(LVL_MAJOR, LISTMGR_TAG, \"Default value for field '%s' \"\n                   \"should not be NULL\", field_name(attr_index));\n        return DB_NEED_ALTER;\n    }\n    if (val_expect == NULL) {\n        DisplayLog(LVL_MAJOR, LISTMGR_TAG,\n                   \"NULL default value expected for '%s' (current is %s)\",\n                   field_name(attr_index), val);\n        return DB_NEED_ALTER;\n    }\n\n    /* print the values and compare them */\n    str = g_string_new(NULL);\n\n    /* string value reported by DB is not escaped:\n     * don't provide a connection (won't escape string). */\n    printdbtype(NULL, str, field_type(attr_index), val_expect);\n\n    /* string value reported by DB is not quoted:\n     * drop quotes.\n     */\n    if (str->str[0] == '\\'') {\n        g_string_erase(str, 0, 1);\n        g_string_truncate(str, str->len - 1);\n    }\n\n    if (strcmp(str->str, val)) {\n        rc = DB_NEED_ALTER;\n        DisplayLog(LVL_MAJOR, LISTMGR_TAG, \"Default value for field '%s' (%s) \"\n                   \"doesn't match expected value %s\", field_name(attr_index),\n                   val, str->str);\n    } else\n        DisplayLog(LVL_FULL, LISTMGR_TAG, \"%s field default OK\",\n                   field_name(attr_index));\n    g_string_free(str, TRUE);\n    return rc;\n}\n\nstatic int check_field_type(int attr_index, const char *val)\n{\n    GString *str;\n    int rc = 0;\n\n    assert(val != NULL);\n\n    /* get the type string and compare it */\n    str = g_string_new(NULL);\n\n    if (is_status(attr_index)) {\n        /* status are particular ENUMs */\n        int idx = attr2status_index(attr_index);\n\n        append_status_sql_type(str, get_sm_instance(idx));\n    } else\n        append_sql_type(str, field_type(attr_index), field_size(attr_index));\n\n    if (type_cmp(val, str->str)) {\n        rc = DB_NEED_ALTER;\n        
DisplayLog(LVL_MAJOR, LISTMGR_TAG, \"Type for field '%s' (%s) \"\n                   \"doesn't match expected type %s\", field_name(attr_index),\n                   val, str->str);\n    } else\n        DisplayLog(LVL_FULL, LISTMGR_TAG, \"%s field type OK\",\n                   field_name(attr_index));\n\n    g_string_free(str, TRUE);\n    return rc;\n}\n\n/** change field type and set its default */\nstatic int change_field_type(db_conn_t *pconn, table_enum table,\n                             int attr_index)\n{\n    const char *t_name = table2name(table);\n    const char *f_name = field_name(attr_index);\n    GString *query = g_string_new(NULL);\n    const db_type_u *default_val;\n    int rc = 0;\n    char timestr[256] = \"\";\n    char t[128];\n    time_t estimated;\n\n    estimated = estimated_time(pconn, t_name, 80000);\n    if (estimated > 0)\n        snprintf(timestr, sizeof(timestr), \" (estim. duration: ~%s)\",\n                 FormatDurationFloat(t, sizeof(t), estimated));\n\n    DisplayLog(LVL_MAJOR, LISTMGR_TAG, \"Converting type of '%s.%s'%s\",\n               t_name, f_name, timestr);\n\n    DisplayLog(LVL_MAJOR, LISTMGR_TAG, \"Converting type of '%s.%s'...\",\n               table2name(table), field_name(attr_index));\n\n    g_string_printf(query, \"ALTER TABLE %s MODIFY COLUMN %s \", t_name, f_name);\n\n    if (is_status(attr_index)) {\n        /* status are particular ENUMs */\n        int idx = attr2status_index(attr_index);\n\n        append_status_sql_type(query, get_sm_instance(idx));\n    } else\n        append_sql_type(query, field_type(attr_index), field_size(attr_index));\n\n    default_val = default_field_value(attr_index);\n    if (default_val) {\n        g_string_append(query, \" DEFAULT \");\n        printdbtype(pconn, query, field_type(attr_index), default_val);\n    }\n\n    rc = db_exec_sql(pconn, query->str, NULL);\n    g_string_free(query, TRUE);\n    if (rc) {\n        char buff[1024];\n\n        DisplayLog(LVL_CRIT, 
LISTMGR_TAG,\n                   \"Failed to run database conversion: Error: %s\",\n                   db_errmsg(pconn, buff, sizeof(buff)));\n        return rc;\n    }\n    DisplayLog(LVL_MAJOR, LISTMGR_TAG, \"%s.%s successfully converted\",\n               t_name, f_name);\n    return 0;\n}\n\n/** change the default value for a given field */\nstatic int change_field_default(db_conn_t *pconn, table_enum table,\n                                int attr_index, bool update_null)\n{\n    const char *t_name = table2name(table);\n    const char *f_name = field_name(attr_index);\n    GString *query = g_string_new(NULL);\n    const db_type_u *defval;\n    int rc = 0;\n\n    DisplayLog(LVL_MAJOR, LISTMGR_TAG, \"Changing default value of '%s.%s'...\",\n               t_name, f_name);\n\n    g_string_printf(query, \"ALTER TABLE %s ALTER COLUMN %s SET DEFAULT \",\n                    t_name, f_name);\n\n    /* get & print default value */\n    defval = default_field_value(attr_index);\n    if (defval)\n        printdbtype(pconn, query, field_type(attr_index), defval);\n    else\n        g_string_append(query, \"NULL\");\n\n    DisplayLog(LVL_VERB, LISTMGR_TAG, \"sql> %s\", query->str);\n    rc = db_exec_sql(pconn, query->str, NULL);\n    if (rc) {\n        char buff[1024];\n\n        DisplayLog(LVL_CRIT, LISTMGR_TAG,\n                   \"Failed to alter field '%s': Error: %s\",\n                   f_name, db_errmsg(pconn, buff, sizeof(buff)));\n        goto out_free;\n    }\n    DisplayLog(LVL_MAJOR, LISTMGR_TAG, \"%s.%s default successfully changed\",\n               t_name, f_name);\n\n    if (defval == NULL || !update_null)\n        goto out_free;\n\n    /* If the new value is not NULL, update previous records\n     * having NULL in this field */\n    g_string_printf(query, \"UPDATE %s SET %s=\", t_name, f_name);\n    printdbtype(pconn, query, field_type(attr_index), defval);\n    g_string_append_printf(query, \" WHERE %s is NULL\", f_name);\n\n    DisplayLog(LVL_MAJOR, 
LISTMGR_TAG,\n               \"Updating previous NULL values of '%s.%s'...\", t_name, f_name);\n\n    DisplayLog(LVL_VERB, LISTMGR_TAG, \"sql> %s\", query->str);\n    if (rc) {\n        char buff[1024];\n\n        DisplayLog(LVL_CRIT, LISTMGR_TAG,\n                   \"Failed to update field '%s': Error: %s\",\n                   f_name, db_errmsg(pconn, buff, sizeof(buff)));\n    }\n\n out_free:\n    g_string_free(query, TRUE);\n    return rc;\n}\n\n/** Rename field 'old_name' to 'new_name' */\nstatic int change_field_name(db_conn_t *pconn, const char *table,\n                             const char *old_name, const char *new_name,\n                             int field_index)\n{\n    /* syntax: ALTER TABLE <tablename>\n     *         CHANGE <OldColumnName> <NewColunmName> <DATATYPE>; */\n    GString *query = g_string_new(NULL);\n    int rc;\n    char timestr[256] = \"\";\n    char t[128];\n    time_t estimated;\n\n    estimated = estimated_time(pconn, table, 60000);\n    if (estimated > 0)\n        snprintf(timestr, sizeof(timestr), \" (estim. 
duration: ~%s)\",\n                 FormatDurationFloat(t, sizeof(t), estimated));\n\n    DisplayLog(LVL_MAJOR, LISTMGR_TAG, \"=> Renaming old field '%s.%s' \"\n               \"to '%s'%s\", table, old_name, new_name, timestr);\n\n    g_string_printf(query, \"ALTER TABLE %s CHANGE %s \", table, old_name);\n    append_field_def(pconn, field_index, query, true);\n\n    DisplayLog(LVL_VERB, LISTMGR_TAG, \"sql> %s\", query->str);\n\n    rc = db_exec_sql(pconn, query->str, NULL);\n    g_string_free(query, TRUE);\n\n    if (rc) {\n        char buff[1024];\n\n        DisplayLog(LVL_CRIT, LISTMGR_TAG,\n                   \"Failed to rename field '%s' to '%s': Error: %s\",\n                   old_name, new_name, db_errmsg(pconn, buff, sizeof(buff)));\n        return rc;\n    }\n    DisplayLog(LVL_MAJOR, LISTMGR_TAG, \"%s.%s successfully renamed to %s.%s\",\n               table, old_name, table, new_name);\n    return 0;\n}\n\nstatic int change_id_field(db_conn_t *pconn, const char *table,\n                           const char *old_name, const char *new_name)\n{\n    /* syntax: ALTER TABLE <tablename>\n               CHANGE <OldColumnName> <NewColunmName> <DATATYPE>; */\n    GString *query = g_string_new(NULL);\n    int rc;\n\n    g_string_printf(query, \"ALTER TABLE %s CHANGE %s \", table, old_name);\n    append_field(pconn, query, true, DB_ID, 0, new_name, NULL);\n\n    DisplayLog(LVL_VERB, LISTMGR_TAG, \"sql> %s\", query->str);\n\n    rc = db_exec_sql(pconn, query->str, NULL);\n    g_string_free(query, TRUE);\n\n    if (rc) {\n        char buff[1024];\n\n        DisplayLog(LVL_CRIT, LISTMGR_TAG,\n                   \"Failed to rename field '%s' to '%s': Error: %s\",\n                   old_name, new_name, db_errmsg(pconn, buff, sizeof(buff)));\n        return rc;\n    }\n    DisplayLog(LVL_MAJOR, LISTMGR_TAG, \"%s.%s successfully renamed to %s.%s\",\n               table, old_name, table, new_name);\n    return 0;\n}\n\n/** Insert field defined by 'def_index' after 
'prev_field'. */\nstatic int insert_field(db_conn_t *pconn, const char *table, int def_index,\n                        const char *prev_field)\n{\n    /* syntax: ALTER TABLE <tablename>\n               ADD <field_name> <field_type> AFTER <prev_field_name> */\n    GString *query;\n    int rc;\n    char timestr[256] = \"\";\n    char t[128];\n    time_t estimated;\n\n    if (!alter_db) {\n        if (!alter_no_display)\n            DisplayLog(LVL_CRIT, LISTMGR_TAG,\n                       \"DB schema change detected: field '%s.%s' must be added \"\n                       \" => Run 'robinhood --alter-db' to apply this change.\",\n                       table, field_name(def_index));\n        return DB_NEED_ALTER;\n    }\n\n    estimated = estimated_time(pconn, table, 68000);\n\n    if (estimated > 0)\n        snprintf(timestr, sizeof(timestr), \" (estim. duration: ~%s)\",\n                 FormatDurationFloat(t, sizeof(t), estimated));\n\n    if (prev_field)\n        DisplayLog(LVL_MAJOR, LISTMGR_TAG,\n                   \"=> Inserting field '%s' in table %s after '%s'%s\",\n                   field_name(def_index), table, prev_field, timestr);\n    else\n        DisplayLog(LVL_MAJOR, LISTMGR_TAG,\n                   \"=> Appending field '%s' in table %s%s\",\n                   field_name(def_index), table, timestr);\n\n    query = g_string_new(NULL);\n    g_string_printf(query, \"ALTER TABLE %s ADD \", table);\n    append_field_def(pconn, def_index, query, true);\n    if (prev_field != NULL)\n        g_string_append_printf(query, \" AFTER %s\", prev_field);\n\n    DisplayLog(LVL_VERB, LISTMGR_TAG, \"sql> %s\", query->str);\n\n    rc = db_exec_sql(pconn, query->str, NULL);\n    g_string_free(query, TRUE);\n\n    if (rc) {\n        char buff[1024];\n\n        DisplayLog(LVL_CRIT, LISTMGR_TAG,\n                   \"Failed to insert field '%s': Error: %s\",\n                   field_name(def_index), db_errmsg(pconn, buff, sizeof(buff)));\n        return rc;\n    }\n    
DisplayLog(LVL_MAJOR, LISTMGR_TAG, \"%s.%s successfully inserted\", table,\n               field_name(def_index));\n    return 0;\n}\n\nstatic int drop_field(db_conn_t *pconn, const char *table, const char *field)\n{\n    /* syntax: ALTER TABLE <tablename> DROP <field_name> */\n    GString *query;\n    int rc;\n    char timestr[256] = \"\";\n    char t[128];\n    time_t estimated;\n\n    if (!alter_db) {\n        if (!alter_no_display)\n            DisplayLog(LVL_CRIT, LISTMGR_TAG,\n                       \"DB schema change detected: field '%s.%s' must be DROPPED \"\n                       \" => Run 'robinhood --alter-db' to confirm this change.\",\n                       table, field);\n        return DB_NEED_ALTER;\n    }\n\n    estimated = estimated_time(pconn, table, 61000);\n    if (estimated > 0)\n        snprintf(timestr, sizeof(timestr), \" (estim. duration: ~%s)\",\n                 FormatDurationFloat(t, sizeof(t), estimated));\n\n    DisplayLog(LVL_MAJOR, LISTMGR_TAG, \"=> Dropping field '%s' from table %s%s\",\n               field, table, timestr);\n\n    query = g_string_new(NULL);\n    g_string_printf(query, \"ALTER TABLE %s DROP %s\", table, field);\n    DisplayLog(LVL_VERB, LISTMGR_TAG, \"sql> %s\", query->str);\n\n    rc = db_exec_sql(pconn, query->str, NULL);\n    g_string_free(query, TRUE);\n\n    if (rc) {\n        char buff[1024];\n\n        DisplayLog(LVL_CRIT, LISTMGR_TAG,\n                   \"Failed to drop field '%s': Error: %s\", field,\n                   db_errmsg(pconn, buff, sizeof(buff)));\n        return rc;\n    }\n    DisplayLog(LVL_MAJOR, LISTMGR_TAG, \"%s.%s successfully dropped.\", table,\n               field);\n    return 0;\n}\n\n/** check if the given DB field is in next expected ones */\nstatic bool is_next_expected(table_enum table, const char *db_field_name,\n                             int curr_field_def_index, bool allow_func_attr,\n                             int *found_index)\n{\n    int i, cookie;\n\n    if 
(found_index)\n        *found_index = -1;\n\n    /* end of table, must insert as last field */\n    if (db_field_name == NULL)\n        return false;\n\n    cookie = curr_field_def_index;\n    while ((i = attr_index_iter(0, &cookie)) != -1) {\n\n        if (match_table(table, i) && (allow_func_attr || !is_funcattr(i))\n            && !strcmp(db_field_name, field_name(i))) {\n            if (found_index)\n                *found_index = i;\n            return true;\n        }\n    }\n    return false;\n}\n\n/* describe name change comatibility */\nstruct name_compat {\n    const char *old_name;\n    const char *new_name;\n};\n\n/**\n * Handle compatibility with old field names.\n * @retval 1 if field has been converted/renamed.\n * @retval 0 id field matches no rename rule.\n * @retval < 0 on error.\n */\nstatic int check_renamed_db_field(db_conn_t *pconn, table_enum table,\n                                  int field_index, const char *curr_field_name,\n                                  const struct name_compat *compat_table,\n                                  bool allow_func_attr)\n{\n    int i, rc;\n    const char *tname = table2name(table);\n\n    /* no table (or end of table) => not a rename */\n    if (compat_table == NULL || curr_field_name == NULL)\n        return 0;\n\n    /* handle compatibility with old field names */\n    for (i = 0; compat_table[i].old_name != NULL; i++) {\n        /* does the DB field matches the old name? */\n        if (strcmp(curr_field_name, compat_table[i].old_name))\n            continue;\n\n        /* DB field matches */\n        DisplayLog(LVL_FULL, LISTMGR_TAG,\n                   \"DB field '%s' matches an old name for '%s'\",\n                   curr_field_name, compat_table[i].new_name);\n\n        /* does it match the currently expected one? 
*/\n        if (!strcmp(field_name(field_index), compat_table[i].new_name)) {\n            if (!alter_db) {\n                if (!alter_no_display)\n                    DisplayLog(LVL_CRIT, LISTMGR_TAG,\n                               \"DB schema change detected: \"\n                               \"field '%s.%s' renamed to '%s.%s' \"\n                               \" => Run 'robinhood --alter-db' to apply this change.\",\n                               tname, compat_table[i].old_name, tname,\n                               compat_table[i].new_name);\n                return -DB_NEED_ALTER;\n            }\n\n            rc = change_field_name(pconn, tname, compat_table[i].old_name,\n                                   compat_table[i].new_name, field_index);\n            if (rc)\n                /* db errors codes are > 0 */\n                return -rc;\n\n            return 1;\n        }\n        /* not an handled case */\n        DisplayLog(LVL_DEBUG, LISTMGR_TAG, \"Unhandled rename case\");\n        return 0;\n    }\n\n    return 0;\n}\n\n/** return the oldname for a new field */\nstatic const char *get_old_name(const struct name_compat *compat_table,\n                                const char *new_name)\n{\n    int i;\n\n    /* no compat table => no old name */\n    if (compat_table == NULL)\n        return NULL;\n\n    /* handle compatibility with old field names */\n    for (i = 0; compat_table[i].old_name != NULL; i++) {\n        /* does the entry matches the new name */\n        if (!strcmp(new_name, compat_table[i].new_name))\n            return compat_table[i].old_name;\n    }\n    return NULL;\n}\n\n/** check if the given field definition is in the next DB fields */\nstatic bool is_next_db_field(const char *field_def_name,\n                             char *const *curr_field, int *shift)\n{\n    *shift = 0;\n\n    curr_field++;   /* start from next field */\n    while (*curr_field != NULL) {\n        (*shift)++;\n        if (!strcmp(*curr_field, 
field_def_name))\n            return true;\n        curr_field++;\n    }\n    return false;\n}\n\nstatic inline void swap_db_fields(char **field_tab, int i1, int i2)\n{\n    char *tmp = field_tab[i1];\n\n    field_tab[i1] = field_tab[i2];\n    field_tab[i2] = tmp;\n}\n\n/* the following function is only used for checking stripe 'validator' */\n#ifdef _LUSTRE\n/** @return -1 on error, 0 if OK, 1 if conversion is required */\nstatic int check_field_name_type(const char *name, const char *type,\n                                 int *curr_field_index, const char *table,\n                                 char **fieldtab, char **typetab)\n{\n    if (_check_field_name(name, curr_field_index, table, fieldtab) != 0)\n        return -1;\n\n    if (type_cmp(typetab[*curr_field_index], type))\n        return 1;\n\n    (*curr_field_index)++;\n    return 0;\n}\n#endif\n\nstatic int check_field(int i, int *curr_field_index, const char *table,\n                       char **fieldtab, char **typetab, char **defaulttab)\n{\n    if (_check_field_name(field_name(i), curr_field_index, table, fieldtab) !=\n        0)\n        return -1;\n\n    if (typetab != NULL && check_field_type(i, typetab[*curr_field_index]) != 0)\n        return -1;\n\n    if (defaulttab != NULL &&\n        check_field_default(i, defaulttab[*curr_field_index]) != 0)\n        return -1;\n\n    (*curr_field_index)++;\n    return 0;\n}\n\n/* Return false if there is no extra field, else return true */\nstatic inline int has_extra_field(int curr_field_index, const char *table,\n                                  char **fieldtab, bool warn)\n{\n    if ((curr_field_index < MAX_DB_FIELDS)\n        && (fieldtab[curr_field_index] != NULL)) {\n        if (warn)\n            DisplayLog(LVL_CRIT, LISTMGR_TAG,\n                       \"Database schema: extra field '%s' in table %s.\",\n                       fieldtab[curr_field_index], table);\n        return true;\n    }\n    return false;\n}\n\n/** Check and fix field 
definition (type and default value) */\nstatic int check_and_fix_def(db_conn_t *pconn, table_enum table, int def_index,\n                             const char *db_type, const char *db_default)\n{\n    bool default_was_set = false;\n    int rc;\n\n    /* check field type */\n    if (db_type != NULL && check_field_type(def_index, db_type) != 0) {\n        if (!alter_db) {\n            if (!alter_no_display)\n                DisplayLog(LVL_CRIT, LISTMGR_TAG,\n                           \"DB schema change detected: type of field '%s.%s' must be changed \"\n                           \" => Run 'robinhood --alter-db' to confirm this change.\",\n                           table2name(table), field_name(def_index));\n            return DB_NEED_ALTER;\n        }\n\n        rc = change_field_type(pconn, table, def_index);\n        if (rc)\n            return rc;\n        /* change_field_type also set the default */\n        default_was_set = true;\n    }\n\n    /* don't care about default values for report */\n    if (report_only)\n        return 0;\n\n    /* check field default value */\n    if (!default_was_set && check_field_default(def_index, db_default) != 0) {\n        /* This is light: don't need 'alterdb' to change default value */\n        rc = change_field_default(pconn, table, def_index, db_default == NULL);\n        if (rc)\n            return rc;\n    }\n    return 0;\n}\n\n/** Check current field and fix the DB schema if 'alter_db' is specified */\nstatic int check_and_fix_field(db_conn_t *pconn,\n                               int def_index, int *db_index,\n                               table_enum table, char **fieldtab,\n                               char **typetab, char **defaulttab,\n                               const struct name_compat *compat_table,\n                               const char **last_field, bool allow_func_attr)\n{\n    int rc, shift;\n\n recheck:\n    if (check_field_name(field_name(def_index), db_index,\n                         
table2name(table), fieldtab) == 0) {\n        /* check_field_name should have increased db_index */\n        assert(*db_index > 0);\n\n        /* field is at the right place, now check its type and default */\n        rc = check_and_fix_def(pconn, table, def_index,\n                               typetab != NULL ? typetab[*db_index - 1] : NULL,\n                               defaulttab !=\n                               NULL ? defaulttab[*db_index - 1] : NULL);\n        if (rc)\n            return rc;\n\n        *last_field = fieldtab[*db_index - 1];\n\n        /* OK */\n        return 0;\n    }\n\n    /*  field appending case (end of table) */\n    if (fieldtab[*db_index] == NULL) {\n        rc = insert_field(pconn, table2name(table), def_index, NULL);\n        if (rc == DB_SUCCESS && rc == DB_NEED_ALTER)\n            /* NEED_ALTER: still update last_field to check other tables */\n            *last_field = field_name(def_index);\n\n        return rc;\n    }\n\n    DisplayLog(LVL_FULL, LISTMGR_TAG, \"Checking if '%s' is renamed\",\n               fieldtab[*db_index]);\n    /* convert renamed fields */\n    rc = check_renamed_db_field(pconn, table, def_index, fieldtab[*db_index],\n                                compat_table, allow_func_attr);\n    if (rc == 1 || rc == -DB_NEED_ALTER) {\n        /* NEED_ALTER: still update last_field to check next fields\n         * and tables */\n        *last_field = field_name(def_index);\n        (*db_index)++;\n        return rc == 1 ? 
0 : -rc;\n    } else if (rc < 0)\n        /* DB error */\n        return -rc;\n\n    /* The current DB field is one of the next expected ones */\n    if (is_next_expected(table, fieldtab[*db_index], def_index,\n                         allow_func_attr, NULL)) {\n        int shift;\n\n        /* 2 cases:\n         * 1) fields have been shuffled: in this case,\n         *    the expected field is one of the next DB fields.\n         * 2) the expected field must be inserted.\n         */\n        if (is_next_db_field\n            (field_name(def_index), fieldtab + *db_index, &shift)) {\n            DisplayLog(LVL_EVENT, LISTMGR_TAG,\n                       \"Shuffled DB fields: avoid changing \"\n                       \"the order of policy definitions \"\n                       \"to avoid this warning.\");\n            /* virtually swap the 2 field so that the related next field check\n             * will be OK */\n            swap_db_fields(fieldtab, *db_index, *db_index + shift);\n            swap_db_fields(typetab, *db_index, *db_index + shift);\n            swap_db_fields(defaulttab, *db_index, *db_index + shift);\n            /* current field is OK */\n            *last_field = fieldtab[*db_index];\n            (*db_index)++;\n            return 0;\n        }\n\n        rc = insert_field(pconn, table2name(table), def_index, *last_field);\n        if (rc == DB_SUCCESS && rc == DB_NEED_ALTER)\n            /* NEED_ALTER: still update last_field to check other tables */\n            *last_field = field_name(def_index);\n\n        return rc;  /* SUCCESS or NEED_ALTER */\n    }\n\n    /* If expected field in found later in the table, swap them\n     * and keep current DB field for later */\n    if (is_next_db_field(field_name(def_index), fieldtab + *db_index, &shift)) {\n        DisplayLog(LVL_FULL, LISTMGR_TAG, \"'%s' is in next DB fields\",\n                   field_name(def_index));\n        /* swap fields and keep current DB field for later */\n        
swap_db_fields(fieldtab, *db_index, *db_index + shift);\n        swap_db_fields(typetab, *db_index, *db_index + shift);\n        swap_db_fields(defaulttab, *db_index, *db_index + shift);\n        /* go and check type and default */\n        goto recheck;\n    }\n\n    /* does this field has an old name ? */\n    const char *old_name = get_old_name(compat_table, field_name(def_index));\n    if (old_name != NULL\n        && is_next_db_field(old_name, fieldtab + *db_index, &shift)) {\n        /* swap and recheck */\n        swap_db_fields(fieldtab, *db_index, *db_index + shift);\n        swap_db_fields(typetab, *db_index, *db_index + shift);\n        swap_db_fields(defaulttab, *db_index, *db_index + shift);\n        goto recheck;\n    }\n\n    /* Expected field is not in DB => insert it */\n    rc = insert_field(pconn, table2name(table), def_index, *last_field);\n    if (rc == 0 || rc == DB_NEED_ALTER)\n        *last_field = field_name(def_index);\n    return rc;\n}\n\n/** drop extra fields at the end of a table */\nstatic int drop_extra_fields(db_conn_t *pconn, int curr_field_index,\n                             table_enum table, char **fieldtab)\n{\n    bool need_alter = false;\n    int rc = 0;\n\n    /* is there any extra field ? */\n    if (!has_extra_field(curr_field_index, table2name(table), fieldtab,\n                         !report_only))\n        return 0;\n\n    /* This is allowed, in particular for read-only case, if the report\n     * command don't have all policies defined in its configuration file. */\n    if (report_only) {\n        DisplayLog(LVL_MAJOR, LISTMGR_TAG, \"Warning: extra fields found in \"\n                   \"table %s: '%s'\", table2name(table),\n                   fieldtab[curr_field_index]);\n        return need_alter ? 
DB_NEED_ALTER : 0;\n    }\n\n    while (fieldtab[curr_field_index] != NULL) {\n        rc = drop_field(pconn, table2name(table), fieldtab[curr_field_index]);\n        if (rc != 0 && rc != DB_NEED_ALTER)\n            return rc;\n\n        if (rc == DB_NEED_ALTER)\n            need_alter = true;\n\n        curr_field_index++;\n    }\n\n    return (rc == 0 && need_alter) ? DB_NEED_ALTER : rc;\n}\n\n/**\n * @param alias_val     Alias name for 'FLOOR(LOG2(<prefix>.size)/5)' (eg. local variable).\n */\nstatic void append_size_range_val(GString *request, bool leading_comma,\n                                  char *prefix, const char *alias_val)\n{\n    unsigned int i;\n    char value[128];\n\n    if (alias_val && alias_val[0])\n        rh_strncpy(value, alias_val, sizeof(value));\n    else\n        snprintf(value, sizeof(value), SZRANGE_FUNC \"(%ssize)\", prefix);\n\n    g_string_append_printf(request, \"%s%ssize=0\", leading_comma ? \",\" : \"\",\n                           prefix);\n\n    for (i = 1; i < SZ_PROFIL_COUNT - 1; i++) { /* 2nd to before the last */\n        g_string_append_printf(request, \",%s=%u\", value, i - 1);\n    }\n    /* last */\n    g_string_append_printf(request, \",%s>=%u\", value, i - 1);\n}\n\n/**\n * @param alias_val  Alias name for 'FLOOR(LOG2(<prefix>.size)/5)' (eg. local variable).\n */\nstatic void append_size_range_op(GString *request, bool leading_comma,\n                                 char *prefix, const char *alias_val,\n                                 operation_type optype)\n{\n    unsigned int i;\n    char value[128];\n    const char *op = (optype == OT_ADD) ? 
\"+\" : \"-\";\n\n    if (alias_val && alias_val[0])\n        rh_strncpy(value, alias_val, sizeof(value));\n    else\n        snprintf(value, sizeof(value), SZRANGE_FUNC \"(%ssize)\", prefix);\n\n    /* only CAST for subtract */\n    if (optype == OT_SUBTRACT) {\n        g_string_append_printf(request,\n                               \"%s%s=CAST(%s as SIGNED)%sCAST((%ssize=0) as SIGNED)\",\n                               leading_comma ? \",\" : \"\", sz_field[0],\n                               sz_field[0], op, prefix);\n\n        for (i = 1; i < SZ_PROFIL_COUNT - 1; i++) { /* 2nd to before the last */\n            g_string_append_printf(request,\n                                   \", %s=CAST(%s as SIGNED)%sCAST((%s=%u) as SIGNED)\",\n                                   sz_field[i], sz_field[i], op, value, i - 1);\n        }\n        /* last */\n        g_string_append_printf(request,\n                               \", %s=CAST(%s as SIGNED)%sCAST((%s>=%u) as SIGNED)\",\n                               sz_field[i], sz_field[i], op, value, i - 1);\n    } else {\n        /* keep the trigger code simple */\n        g_string_append_printf(request, \"%s%s=%s%s(%ssize=0)\",\n                               leading_comma ? 
\",\" : \"\", sz_field[0],\n                               sz_field[0], op, prefix);\n\n        for (i = 1; i < SZ_PROFIL_COUNT - 1; i++) { /* 2nd to before the last */\n            g_string_append_printf(request, \", %s=%s%s(%s=%u)\",\n                                   sz_field[i], sz_field[i], op, value, i - 1);\n        }\n        /* last */\n        g_string_append_printf(request, \", %s=%s%s(%s>=%u)\",\n                               sz_field[i], sz_field[i], op, value, i - 1);\n    }\n}\n\n/**\n * Check what tables are used as source for accounting.\n * Return the main source table for accounting.\n */\nstatic const char *acct_table(void)\n{\n    const char *src_table = NULL;\n    bool is_annex = false;\n    bool is_main = false;\n\n    if (lmgr_config.acct) {\n        int i, cookie;\n\n        cookie = -1;\n        while ((i = attr_index_iter(0, &cookie)) != -1) {\n            if (is_acct_field(i) || is_acct_pk(i)) {\n                if (is_annex_field(i))\n                    is_annex = true;\n                else if (is_main_field(i))\n                    is_main = true;\n                else\n                    /* BUG */\n                    RBH_BUG(\"Accounting field not in \" MAIN_TABLE\n                            \" or \" ANNEX_TABLE \" table\");\n            }\n        }\n\n        if (is_annex && is_main) {\n            RBH_BUG(\"Accounting info is on several tables (unsupported)\");\n        } else if (is_main)\n            src_table = MAIN_TABLE;\n        else if (is_annex)\n            src_table = ANNEX_TABLE;\n        else\n            /* BUG */\n            RBH_BUG(\"Accounting info is not in \" MAIN_TABLE\n                    \" or \" ANNEX_TABLE \" table\");\n    }\n    return src_table;\n}\n\n/** generic type for check_table/create_table functions */\ntypedef int (*check_create_tab_func_t) (db_conn_t *, bool *);\n\nstatic int check_table_vars(db_conn_t *pconn, bool *affects_trig)\n{\n    char strbuf[4096];\n    char 
*fieldtab[MAX_DB_FIELDS];\n\n    int rc = db_list_table_info(pconn, VAR_TABLE, fieldtab, NULL, NULL,\n                                MAX_DB_FIELDS, strbuf, sizeof(strbuf));\n    if (rc == DB_SUCCESS) {\n        int curr_index = 0;\n        /* check fields */\n        if (check_field_name(\"varname\", &curr_index, VAR_TABLE, fieldtab))\n            return DB_BAD_SCHEMA;\n        if (check_field_name(\"value\", &curr_index, VAR_TABLE, fieldtab))\n            return DB_BAD_SCHEMA;\n\n        if (has_extra_field(curr_index, VAR_TABLE, fieldtab, true))\n            return DB_BAD_SCHEMA;\n    } else if (rc != DB_NOT_EXISTS) {\n        DisplayLog(LVL_CRIT, LISTMGR_TAG,\n                   \"Error checking database schema: %s\",\n                   db_errmsg(pconn, strbuf, sizeof(strbuf)));\n    }\n    return rc;\n}\n\n/** wrapper for table creation request + display of log messages */\nstatic int run_create_table(db_conn_t *pconn, const char *table_name,\n                            const char *request)\n{\n    int rc;\n\n    DisplayLog(LVL_FULL, LISTMGR_TAG, \"Table creation request =\\n%s\", request);\n    rc = db_exec_sql(pconn, request, NULL);\n    if (rc != DB_SUCCESS) {\n        char errmsg[1024];\n        DisplayLog(LVL_CRIT, LISTMGR_TAG,\n                   \"Failed to create table %s: Error: %s\", table_name,\n                   db_errmsg(pconn, errmsg, sizeof(errmsg)));\n        return rc;\n    }\n    DisplayLog(LVL_VERB, LISTMGR_TAG, \"Table %s created successfully\",\n               table_name);\n    return DB_SUCCESS;\n}\n\n/** wrapper for index: creation request + display of log messages */\nstatic int run_create_index(db_conn_t *pconn, const char *table_name,\n                            const char *field, const char *request)\n{\n    int rc;\n\n    DisplayLog(LVL_FULL, LISTMGR_TAG, \"Index creation request =\\n%s\", request);\n    rc = db_exec_sql(pconn, request, NULL);\n    if (rc != DB_SUCCESS) {\n        char errmsg[1024];\n        DisplayLog(LVL_CRIT, 
LISTMGR_TAG,\n                   \"Failed to create index of %s(%s): Error: %s\", table_name,\n                   field, db_errmsg(pconn, errmsg, sizeof(errmsg)));\n        return rc;\n    }\n    DisplayLog(LVL_VERB, LISTMGR_TAG, \"Index on %s(%s) created successfully\",\n               table_name, field);\n    return DB_SUCCESS;\n}\n\nstatic void append_engine(GString *request)\n{\n#ifdef _MYSQL\n    g_string_append_printf(request, \" ENGINE=%s\", lmgr_config.db_config.engine);\n\n    if (strcasecmp(lmgr_config.db_config.engine, \"TokuDB\") == 0)\n        g_string_append_printf(request, \" COMPRESSION=%s\",\n                               lmgr_config.db_config.tokudb_compression);\n#endif\n}\n\nstatic int create_table_vars(db_conn_t *pconn, bool *affects_trig)\n{\n    int rc;\n    GString *request = g_string_new(\"CREATE TABLE \" VAR_TABLE \" (\"\n                                    \"varname VARCHAR(255) PRIMARY KEY, \"\n                                    \"value TEXT)\");\n    append_engine(request);\n    rc = run_create_table(pconn, VAR_TABLE, request->str);\n    g_string_free(request, TRUE);\n    return rc;\n}\n\nstatic struct name_compat main_name_compat[] = {\n    {\"owner\", \"uid\"},\n    {\"gr_name\", \"gid\"},\n/* Lustre/HSM fields */\n    {\"no_release\", \"lhsm_norels\"},\n    {\"no_archive\", \"lhsm_noarch\"},\n/* Soft-rm specific fields */\n    {\"soft_rm_time\", \"rm_time\"},\n\n    {NULL, NULL},\n};\n\n/* FIXME: these fields were in ANNEX_INFO */\n/*\n    {\"last_archive\", \"lhsm_lstarc\"},\n    {\"last_restore\", \"lhsm_lstrst\"},\n    {\"archive_id\",    \"lhsm_archid\"},\n*/\n\nstatic int check_table_main(db_conn_t *pconn, bool *affects_trig)\n{\n    char strbuf[4096];\n    char *fieldtab[MAX_DB_FIELDS];\n    char *typetab[MAX_DB_FIELDS];\n    char *defaulttab[MAX_DB_FIELDS];\n    bool need_alter = false;\n\n    int rc = db_list_table_info(pconn, MAIN_TABLE, fieldtab, typetab,\n                                defaulttab, MAX_DB_FIELDS,\n       
                         strbuf, sizeof(strbuf));\n    if (rc == DB_SUCCESS) {\n        int i, cookie;\n        int curr_field_index = 0;\n        const char *last = NULL;\n\n        /* check primary key */\n        if (check_field_name(\"id\", &curr_field_index, MAIN_TABLE, fieldtab))\n            return DB_BAD_SCHEMA;\n        last = \"id\";\n\n        /* std fields + SM status + SM specific info */\n        cookie = -1;\n        while ((i = attr_index_iter(0, &cookie)) != -1) {\n            /* is this field part of MAIN_TABLE? */\n            if (is_main_field(i) && !is_funcattr(i)) {\n                rc = check_and_fix_field(pconn, i, &curr_field_index, T_MAIN,\n                                         fieldtab, typetab, defaulttab,\n                                         main_name_compat, &last, false);\n                if (rc == DB_NEED_ALTER)\n                    need_alter = true;\n                /* don't return immediately, to report about other fields */\n                else if (rc)\n                    return rc;\n            }\n        }\n\n        rc = drop_extra_fields(pconn, curr_field_index, T_MAIN, fieldtab);\n        if (rc)\n            return rc;\n    } else if (rc != DB_NOT_EXISTS) {\n        DisplayLog(LVL_CRIT, LISTMGR_TAG,\n                   \"Error checking database schema: %s\",\n                   db_errmsg(pconn, strbuf, sizeof(strbuf)));\n    }\n    return (rc == 0 && need_alter) ? 
DB_NEED_ALTER : rc;\n}\n\nstatic int create_table_main(db_conn_t *pconn, bool *affects_trig)\n{\n    GString *request;\n    int i, rc, cookie;\n\n    request =\n        g_string_new(\"CREATE TABLE \" MAIN_TABLE \" (id \" PK_TYPE \" PRIMARY KEY\");\n\n    cookie = -1;\n    while ((i = attr_index_iter(0, &cookie)) != -1) {\n        if (is_main_field(i) && !is_funcattr(i))\n            append_field_def(pconn, i, request, 0);\n    }\n\n    /* end of field list (null terminated) */\n    g_string_append(request, \")\");\n    append_engine(request);\n\n    rc = run_create_table(pconn, MAIN_TABLE, request->str);\n    if (rc)\n        goto free_str;\n\n    /* create indexes on this table */\n    cookie = -1;\n    while ((i = attr_index_iter(0, &cookie)) != -1) {\n        if (is_main_field(i) && is_indexed_field(i)) {\n            g_string_printf(request,\n                            \"CREATE INDEX %s_index ON \" MAIN_TABLE \"(%s)\",\n                            field_name(i), field_name(i));\n            rc = run_create_index(pconn, MAIN_TABLE, field_name(i),\n                                  request->str);\n            if (rc)\n                goto free_str;\n        }\n    }\n    rc = DB_SUCCESS;\n\n free_str:\n    g_string_free(request, TRUE);\n    return rc;\n}\n\nstatic int check_table_dnames(db_conn_t *pconn, bool *affects_trig)\n{\n    char strbuf[4096];\n    char *fieldtab[MAX_DB_FIELDS];\n    int rc = db_list_table_info(pconn, DNAMES_TABLE, fieldtab, NULL, NULL,\n                                MAX_DB_FIELDS, strbuf, sizeof(strbuf));\n\n    if (rc == DB_SUCCESS) {\n        int i, cookie;\n        int curr_field_index = 0;\n\n        /* check first fields: id and pkn */\n        if (check_field_name(\"id\", &curr_field_index, DNAMES_TABLE, fieldtab))\n            return DB_BAD_SCHEMA;\n        if (check_field_name(\"pkn\", &curr_field_index, DNAMES_TABLE, fieldtab))\n            return DB_BAD_SCHEMA;\n\n        cookie = -1;\n        while ((i = attr_index_iter(0, 
&cookie)) != -1) {\n            if (is_names_field(i) && !is_funcattr(i)) {\n                if (check_field(i, &curr_field_index, DNAMES_TABLE,\n                                fieldtab, NULL, NULL))\n                    return DB_BAD_SCHEMA;\n            }\n        }\n        /* is there any extra field ? */\n        if (has_extra_field(curr_field_index, DNAMES_TABLE, fieldtab, true))\n            return DB_BAD_SCHEMA;\n    } else if (rc != DB_NOT_EXISTS) {\n        DisplayLog(LVL_CRIT, LISTMGR_TAG,\n                   \"Error checking database schema: %s\",\n                   db_errmsg(pconn, strbuf, sizeof(strbuf)));\n    }\n    return rc;\n}\n\nstatic int create_table_dnames(db_conn_t *pconn, bool *affects_trig)\n{\n    GString *request;\n    int i, rc, cookie;\n\n    request = g_string_new(\"CREATE TABLE \" DNAMES_TABLE \" (id \" PK_TYPE \", \"\n                           \"pkn VARBINARY(40) PRIMARY KEY\");\n\n    cookie = -1;\n    while ((i = attr_index_iter(0, &cookie)) != -1) {\n        if (is_names_field(i) && !is_funcattr(i)) {\n            append_field_def(pconn, i, request, 0);\n        }\n    }\n    g_string_append(request, \")\");\n    append_engine(request);\n\n    rc = run_create_table(pconn, DNAMES_TABLE, request->str);\n    if (rc)\n        goto free_str;\n\n    /* create indexes on this table */\n    cookie = -1;\n    while ((i = attr_index_iter(0, &cookie)) != -1) {\n        if (is_names_field(i) && is_indexed_field(i)) {\n            g_string_printf(request,\n                            \"CREATE INDEX %s_index ON \" DNAMES_TABLE \"(%s)\",\n                            field_name(i), field_name(i));\n            rc = run_create_index(pconn, DNAMES_TABLE, field_name(i),\n                                  request->str);\n            if (rc)\n                goto free_str;\n        }\n    }\n\n    /* this index is needed to build the fullpath of entries */\n    rc = run_create_index(pconn, DNAMES_TABLE, \"id\",\n                          \"CREATE 
INDEX id_index ON \" DNAMES_TABLE \"(id)\");\n free_str:\n    g_string_free(request, TRUE);\n    return rc;\n}\n\nstatic int check_table_annex(db_conn_t *pconn, bool *affects_trig)\n{\n    int rc, i, cookie;\n    char strbuf[4096];\n    char *fieldtab[MAX_DB_FIELDS];\n    char *typetab[MAX_DB_FIELDS];\n    char *defaulttab[MAX_DB_FIELDS];\n    const char *last = NULL;\n    bool need_alter = false;\n\n    rc = db_list_table_info(pconn, ANNEX_TABLE, fieldtab, typetab, defaulttab,\n                            MAX_DB_FIELDS, strbuf, sizeof(strbuf));\n\n    if (rc == DB_SUCCESS) {\n        int curr_field_index = 0;\n\n        /* check primary key */\n        if (check_field_name(\"id\", &curr_field_index, ANNEX_TABLE, fieldtab))\n            return DB_BAD_SCHEMA;\n        last = \"id\";\n\n        cookie = -1;\n        while ((i = attr_index_iter(0, &cookie)) != -1) {\n            if (is_annex_field(i) && !is_funcattr(i)) {\n                rc = check_and_fix_field(pconn, i, &curr_field_index, T_ANNEX,\n                                         fieldtab, typetab, defaulttab, NULL,\n                                         &last, false);\n                if (rc)\n                    return rc;\n            }\n        }\n\n        rc = drop_extra_fields(pconn, curr_field_index, T_ANNEX, fieldtab);\n        if (rc)\n            return rc;\n    } else if (rc != DB_NOT_EXISTS) {\n        DisplayLog(LVL_CRIT, LISTMGR_TAG,\n                   \"Error checking database schema: %s\",\n                   db_errmsg(pconn, strbuf, sizeof(strbuf)));\n    }\n    return (rc == 0 && need_alter) ? 
DB_NEED_ALTER : rc;\n}\n\nstatic int create_table_annex(db_conn_t *pconn, bool *affects_trig)\n{\n    GString *request;\n    int i, rc, cookie;\n\n    request =\n        g_string_new(\"CREATE TABLE \" ANNEX_TABLE \" (id \" PK_TYPE\n                     \" PRIMARY KEY\");\n\n    cookie = -1;\n    while ((i = attr_index_iter(0, &cookie)) != -1) {\n        if (is_annex_field(i) && !is_funcattr(i)) {\n            append_field_def(pconn, i, request, 0);\n        }\n    }\n    g_string_append(request, \")\");\n    append_engine(request);\n\n    rc = run_create_table(pconn, ANNEX_TABLE, request->str);\n    if (rc)\n        goto free_str;\n\n    /* create indexes on this table */\n    cookie = -1;\n    while ((i = attr_index_iter(0, &cookie)) != -1) {\n        if (is_annex_field(i) && is_indexed_field(i)) {\n            g_string_printf(request,\n                            \"CREATE INDEX %s_index ON \" ANNEX_TABLE \"(%s)\",\n                            field_name(i), field_name(i));\n            rc = run_create_index(pconn, ANNEX_TABLE, field_name(i),\n                                  request->str);\n            if (rc)\n                goto free_str;\n        }\n    }\n    rc = DB_SUCCESS;\n\n free_str:\n    g_string_free(request, TRUE);\n    return rc;\n}\n\n#ifdef _LUSTRE\nstatic int check_table_stripe_info(db_conn_t *pconn, bool *affects_trig)\n{\n    int rc;\n    char strbuf[4096];\n    char *fieldtab[MAX_DB_FIELDS];\n    char *typetab[MAX_DB_FIELDS];\n\n    rc = db_list_table_info(pconn, STRIPE_INFO_TABLE, fieldtab, typetab, NULL,\n                            MAX_DB_FIELDS, strbuf, sizeof(strbuf));\n    if (rc == DB_SUCCESS) {\n        int curr_field_index = 0;\n\n        /* check primary key */\n        if (check_field_name\n            (\"id\", &curr_field_index, STRIPE_INFO_TABLE, fieldtab))\n            return DB_BAD_SCHEMA;\n        /* compat with 2.5.3- */\n        switch (check_field_name_type\n                (\"validator\", \"INT\", &curr_field_index, 
STRIPE_INFO_TABLE,\n                 fieldtab, typetab)) {\n        case -1:\n            return DB_BAD_SCHEMA;\n        case 1:\n            /* only run conversion for other programs than reporting commands */\n            if (report_only) {\n                DisplayLog(LVL_CRIT, LISTMGR_TAG, \"Incompatible DB type for \"\n                           STRIPE_INFO_TABLE \".validator\");\n                return DB_BAD_SCHEMA;\n            }\n\n            DisplayLog(LVL_MAJOR, LISTMGR_TAG, \"Detected type change for \"\n                       STRIPE_INFO_TABLE\n                       \".validator (<= 2.5.3): running conversion\");\n            /* run type conversion */\n            rc = convert_field_type(pconn, STRIPE_INFO_TABLE, \"validator\",\n                                    \"INT\");\n            if (rc)\n                return rc;\n            curr_field_index++;\n            break;\n        case 0:    /* OK */\n            break;\n        }\n        if (check_field_name\n            (\"stripe_count\", &curr_field_index, STRIPE_INFO_TABLE, fieldtab))\n            return DB_BAD_SCHEMA;\n        if (check_field_name\n            (\"stripe_size\", &curr_field_index, STRIPE_INFO_TABLE, fieldtab))\n            return DB_BAD_SCHEMA;\n        if (check_field_name\n            (\"pool_name\", &curr_field_index, STRIPE_INFO_TABLE, fieldtab))\n            return DB_BAD_SCHEMA;\n        /* is there any extra field ? 
*/\n        if (has_extra_field\n            (curr_field_index, STRIPE_INFO_TABLE, fieldtab, true))\n            return DB_BAD_SCHEMA;\n    } else if (rc != DB_NOT_EXISTS) {\n        DisplayLog(LVL_CRIT, LISTMGR_TAG,\n                   \"Error checking database schema: %s\",\n                   db_errmsg(pconn, strbuf, sizeof(strbuf)));\n    }\n    return rc;\n}\n\nstatic int create_table_stripe_info(db_conn_t *pconn, bool *affects_trig)\n{\n    GString *request;\n    int rc;\n\n    request = g_string_new(NULL);\n    g_string_printf(request, \"CREATE TABLE \" STRIPE_INFO_TABLE\n                    \" (id \" PK_TYPE \" PRIMARY KEY, validator INT, \"\n                    \"stripe_count INT UNSIGNED, stripe_size INT UNSIGNED, \"\n                    \"pool_name VARBINARY(%u))\", MAX_POOL_LEN - 1);\n    append_engine(request);\n\n    rc = run_create_table(pconn, STRIPE_INFO_TABLE, request->str);\n    g_string_free(request, TRUE);\n    return rc;\n}\n\nstatic int check_table_stripe_items(db_conn_t *pconn, bool *affects_trig)\n{\n    int rc;\n    char strbuf[4096];\n    char *fieldtab[MAX_DB_FIELDS];\n\n    rc = db_list_table_info(pconn, STRIPE_ITEMS_TABLE, fieldtab, NULL, NULL,\n                            MAX_DB_FIELDS, strbuf, sizeof(strbuf));\n\n    if (rc == DB_SUCCESS) {\n        int curr_field_index = 0;\n\n        /* check index */\n        if (check_field_name\n            (\"id\", &curr_field_index, STRIPE_ITEMS_TABLE, fieldtab))\n            return DB_BAD_SCHEMA;\n        if (check_field_name\n            (\"stripe_index\", &curr_field_index, STRIPE_ITEMS_TABLE, fieldtab))\n            return DB_BAD_SCHEMA;\n        if (check_field_name\n            (\"ostidx\", &curr_field_index, STRIPE_ITEMS_TABLE, fieldtab))\n            return DB_BAD_SCHEMA;\n        if (check_field_name\n            (\"details\", &curr_field_index, STRIPE_ITEMS_TABLE, fieldtab))\n            return DB_BAD_SCHEMA;\n\n        /* is there any extra field ? 
*/\n        if (has_extra_field\n            (curr_field_index, STRIPE_ITEMS_TABLE, fieldtab, true))\n            return DB_BAD_SCHEMA;\n    } else if (rc != DB_NOT_EXISTS) {\n        DisplayLog(LVL_CRIT, LISTMGR_TAG,\n                   \"Error checking database schema: %s\",\n                   db_errmsg(pconn, strbuf, sizeof(strbuf)));\n    }\n    return rc;\n}\n\nstatic int create_table_stripe_items(db_conn_t *pconn, bool *affects_trig)\n{\n    GString *request;\n    int rc;\n\n    request = g_string_new(NULL);\n    g_string_printf(request, \"CREATE TABLE \" STRIPE_ITEMS_TABLE\n                    \" (id \" PK_TYPE \", stripe_index INT UNSIGNED, \"\n                    \"ostidx INT UNSIGNED, details BINARY(%u))\",\n                    STRIPE_DETAIL_SZ);\n    append_engine(request);\n\n    rc = run_create_table(pconn, STRIPE_ITEMS_TABLE, request->str);\n    g_string_free(request, TRUE);\n    if (rc)\n        return rc;\n\n    rc = run_create_index(pconn, STRIPE_ITEMS_TABLE, \"id\",\n                          \"CREATE INDEX id_index ON \" STRIPE_ITEMS_TABLE\n                          \"(id)\");\n    if (rc)\n        return rc;\n\n    rc = run_create_index(pconn, STRIPE_ITEMS_TABLE, \"ostidx\",\n                          \"CREATE INDEX ost_index ON \" STRIPE_ITEMS_TABLE\n                          \"(ostidx)\");\n    return rc;\n}\n#endif\n\nstatic void disable_acct(void)\n{\n    lmgr_config.acct = false;\n    /* reset acct masks */\n    acct_pk_attr_set = null_mask;\n    acct_attr_set = null_mask;\n}\n\nstatic int acct_drop_or_warn(db_conn_t *pconn, bool *affects_trig)\n{\n    char strbuf[4096];\n    int rc;\n\n    if (!alter_db) {\n        DisplayLog(LVL_CRIT, LISTMGR_TAG, \"DB schema change detected: \"\n                   \"modification in \" ACCT_TABLE\n                   \" requires to drop and repopulate the table\"\n                   \" => Run 'robinhood --alter-db' to apply this change.\");\n        return DB_NEED_ALTER;\n    }\n    DisplayLog(LVL_CRIT, 
LISTMGR_TAG, \"DB schema change detected:\"\n               \" dropping and repopulating table \" ACCT_TABLE);\n    rc = db_drop_component(pconn, DBOBJ_TABLE, ACCT_TABLE);\n    if (rc == DB_SUCCESS || rc == DB_NOT_EXISTS) {\n        DisplayLog(LVL_CRIT, LISTMGR_TAG, \"Primary key of \" ACCT_TABLE\n                   \" table changed. Triggers will be updated.\");\n        *affects_trig = true;\n\n        /* always return DB_NOT_EXISTS to re-create the table */\n        return DB_NOT_EXISTS;\n    }\n\n    DisplayLog(LVL_CRIT, LISTMGR_TAG,\n               \"Failed to drop table: Error: %s\",\n               db_errmsg(pconn, strbuf, sizeof(strbuf)));\n    return rc;\n}\n\nstatic int check_table_acct(db_conn_t *pconn, bool *affects_trig)\n{\n    int i, rc;\n    char strbuf[4096];\n    char *fieldtab[MAX_DB_FIELDS];\n    char *typetab[MAX_DB_FIELDS];\n    char *defaulttab[MAX_DB_FIELDS];\n\n    rc = db_list_table_info(pconn, ACCT_TABLE, fieldtab, typetab, defaulttab,\n                            MAX_DB_FIELDS, strbuf, sizeof(strbuf));\n    if (rc == DB_SUCCESS) {\n        int cookie;\n        int curr_field_index = 0;\n        const char *last = NULL;\n\n        /* When running daemon mode with accounting disabled: drop ACCT table,\n         * else it may become inconsistent. 
*/\n        if (!lmgr_config.acct && !report_only) {\n            DisplayLog(LVL_MAJOR, LISTMGR_TAG,\n                       \"Accounting is disabled: dropping table \" ACCT_TABLE);\n\n            rc = db_drop_component(pconn, DBOBJ_TABLE, ACCT_TABLE);\n            if (rc != DB_SUCCESS)\n                DisplayLog(LVL_CRIT, LISTMGR_TAG,\n                           \"Failed to drop table: Error: %s\",\n                           db_errmsg(pconn, strbuf, sizeof(strbuf)));\n\n            return rc;\n        }\n\n        /* check primary key */\n        cookie = -1;\n        while ((i = attr_index_iter(0, &cookie)) != -1) {\n            if (is_acct_pk(i)) {\n                enum lmgr_init_flags save_flags = init_flags;\n\n                /* only shuffling is allowed in PK, no insert/drop */\n                /* => force no alter_db */\n                init_flags &= ~LIF_ALTER_DB;\n                /* Also, don't ask to run --alter-db */\n                init_flags |= LIF_ALTER_NODISP;\n                rc = check_and_fix_field(pconn, i, &curr_field_index, T_ACCT_PK,\n                                         fieldtab, typetab, defaulttab,\n                                         main_name_compat, &last, false);\n                init_flags = save_flags;\n                if (rc != 0 && rc != DB_NEED_ALTER)\n                    return rc;\n\n                if (rc == DB_NEED_ALTER)\n                    return acct_drop_or_warn(pconn, affects_trig);\n            }\n        }\n        /* check other fields */\n        cookie = -1;\n        while ((i = attr_index_iter(0, &cookie)) != -1) {\n            if (is_acct_field(i)) {\n                rc = check_and_fix_field(pconn, i, &curr_field_index,\n                                         T_ACCT_VAL, fieldtab, typetab,\n                                         defaulttab, main_name_compat, &last,\n                                         false);\n                if (rc)\n                    return rc;\n            }\n        }\n  
      /* check count field */\n        if (check_field_name\n            (ACCT_FIELD_COUNT, &curr_field_index, ACCT_TABLE, fieldtab))\n            return acct_drop_or_warn(pconn, affects_trig);\n\n        /* check size range fields */\n        /* based on log2(size/32) => 0 1 32 1K 32K 1M 32M 1G 32G 1T */\n        for (i = 0; i < SZ_PROFIL_COUNT; i++) {\n            if (check_field_name\n                (sz_field[i], &curr_field_index, ACCT_TABLE, fieldtab))\n                return DB_BAD_SCHEMA;\n        }\n\n        rc = drop_extra_fields(pconn, curr_field_index, T_ACCT, fieldtab);\n        if (rc)\n            return rc;\n    } else if (rc == DB_NOT_EXISTS) {\n        if (report_only) {\n            /* report only: remember there is no ACCT table and don't warn */\n            DisplayLog(LVL_VERB, LISTMGR_TAG, \"Accounting stats not available\");\n            disable_acct();\n            return DB_SUCCESS;\n        }\n    } else {\n        DisplayLog(LVL_CRIT, LISTMGR_TAG,\n                   \"Error checking database schema: %s\",\n                   db_errmsg(pconn, strbuf, sizeof(strbuf)));\n    }\n    return rc;\n}\n\nstatic int populate_acct_table(db_conn_t *pconn)\n{\n    int i, rc;\n    GString *request = NULL;\n    char err_buf[1024];\n    char timestr[256] = \"\";\n    char t[128];\n    time_t estimated;\n\n    if (acct_info_table == NULL)\n        RBH_BUG(\"Can't populate \" ACCT_TABLE \" with no source table\");\n\n    estimated = estimated_time(pconn, MAIN_TABLE, 25000);\n\n    if (estimated > 0)\n        snprintf(timestr, sizeof(timestr), \" (estim. duration: ~%s)\",\n                 FormatDurationFloat(t, sizeof(t), estimated));\n\n    DisplayLog(LVL_MAJOR, LISTMGR_TAG,\n               \"Populating accounting table from existing DB contents.\"\n               \" This can take a while...%s\", timestr);\n    FlushLogs();\n\n    /* Initial table population for already existing entries */\n    /* INSERT <fields>... 
*/\n    request = g_string_new(\"INSERT INTO \" ACCT_TABLE \"(\");\n    attrmask2fieldlist(request, acct_pk_attr_set, T_ACCT, \"\", \"\", 0);\n    attrmask2fieldlist(request, acct_attr_set, T_ACCT, \"\", \"\", AOF_LEADING_SEP);\n    g_string_append(request, \", \" ACCT_FIELD_COUNT);\n    append_size_range_fields(request, true, \"\");\n\n    /* ...SELECT <fields>... */\n    g_string_append(request, \") SELECT \");\n    attrmask2fieldlist(request, acct_pk_attr_set, T_ACCT, \"\", \"\", 0);\n    attrmask2fieldlist(request, acct_attr_set, T_ACCT, \"SUM(\", \")\",\n                       AOF_LEADING_SEP);\n    g_string_append(request, \",COUNT(id),SUM(size=0)\");\n    for (i = 1; i < SZ_PROFIL_COUNT - 1; i++)   /* 1 to 8 */\n        g_string_append_printf(request, \",SUM(\" SZRANGE_FUNC \"(size)=%u)\",\n                               i - 1);\n    g_string_append_printf(request, \",SUM(\" SZRANGE_FUNC \"(size)>=%u)\", i - 1);\n\n    /* FROM ... GROUP BY ... */\n    g_string_append_printf(request, \" FROM %s  GROUP BY \", acct_info_table);\n    attrmask2fieldlist(request, acct_pk_attr_set, T_ACCT, \"\", \"\", 0);\n\n    rc = db_exec_sql(pconn, request->str, NULL);\n    g_string_free(request, TRUE);\n    if (rc) {\n        DisplayLog(LVL_CRIT, LISTMGR_TAG,\n                   \"Failed to populate accounting table: Error: %s\",\n                   db_errmsg(pconn, err_buf, sizeof(err_buf)));\n\n        /* drop this table, to leave the db in a consistent state\n         * (if ACCT_TABLE exists, it must be populated) */\n        if (db_drop_component(pconn, DBOBJ_TABLE, ACCT_TABLE))\n            DisplayLog(LVL_CRIT, LISTMGR_TAG,\n                       \"Failed to drop table: Error: %s\",\n                       db_errmsg(pconn, err_buf, sizeof(err_buf)));\n    }\n    return rc;\n}\n\nstatic int create_table_acct(db_conn_t *pconn, bool *affects_trig)\n{\n    GString *request;\n    int i, rc, cookie;\n    bool first_acct_pk = true;\n    bool is_first_acct_field = true;\n\n    
if (!lmgr_config.acct)\n        return DB_SUCCESS;\n\n    request = g_string_new(\"CREATE TABLE \" ACCT_TABLE \" (\");\n\n    cookie = -1;\n    while ((i = attr_index_iter(0, &cookie)) != -1) {\n        if (is_acct_pk(i)) {\n            append_field_def(pconn, i, request, is_first_acct_field);\n            is_first_acct_field = false;\n        }\n    }\n\n    cookie = -1;\n    while ((i = attr_index_iter(0, &cookie)) != -1) {\n        if (is_acct_field(i))\n            append_field_def(pconn, i, request, is_first_acct_field);\n    }\n\n    /* count field */\n    g_string_append(request,\n                    \", \" ACCT_FIELD_COUNT \" BIGINT UNSIGNED DEFAULT 0\");\n\n    /* size range fields */\n    for (i = 0; i < SZ_PROFIL_COUNT; i++) {\n        g_string_append_printf(request, \", %s BIGINT UNSIGNED DEFAULT 0\",\n                               sz_field[i]);\n    }\n\n    /* PK definition */\n    g_string_append(request, \", PRIMARY KEY ( \");\n\n    cookie = -1;\n    while ((i = attr_index_iter(0, &cookie)) != -1) {\n        if (is_acct_pk(i)) {\n            if (!first_acct_pk)\n                g_string_append_printf(request, \", %s\", field_name(i));\n            else {\n                g_string_append_printf(request, \"%s\", field_name(i));\n                first_acct_pk = false;\n            }\n        }\n    }\n    g_string_append(request, \"))\");\n    append_engine(request);\n\n    rc = run_create_table(pconn, ACCT_TABLE, request->str);\n    if (rc)\n        goto free_str;\n\n    /* now populate it */\n    rc = populate_acct_table(pconn);\n\n free_str:\n    g_string_free(request, TRUE);\n    return rc;\n}\n\nstatic int check_table_softrm(db_conn_t *pconn, bool *affects_trig)\n{\n    int rc, cookie;\n    char strbuf[4096];\n    char *fieldtab[MAX_DB_FIELDS];\n    char *typetab[MAX_DB_FIELDS];\n    char *defaulttab[MAX_DB_FIELDS];\n    bool need_alter = false;\n\n    rc = db_list_table_info(pconn, SOFT_RM_TABLE, fieldtab, typetab, defaulttab,\n                 
           MAX_DB_FIELDS, strbuf, sizeof(strbuf));\n    if (rc == DB_SUCCESS) {\n        const char *last = NULL;\n        int curr_index = 0;\n        int i;\n\n        /* check primary key */\n        if (check_field_name(\"id\", &curr_index, SOFT_RM_TABLE, fieldtab)) {\n            /* check old name 'fid' */\n            if (!strcmp(\"fid\", fieldtab[0])) {\n                if (!alter_db) {\n                    DisplayLog(LVL_CRIT, LISTMGR_TAG,\n                               \"DB schema change detected: \"\n                               \"field '%s.%s' renamed to '%s.%s' \"\n                               \" => Run 'robinhood --alter-db' to apply this change.\",\n                               SOFT_RM_TABLE, \"fid\", SOFT_RM_TABLE, \"id\");\n                    need_alter = true;\n                } else {\n                    rc = change_id_field(pconn, SOFT_RM_TABLE, \"fid\", \"id\");\n                    if (rc)\n                        return rc;\n                    curr_index++;\n                }\n            } else\n                return DB_BAD_SCHEMA;\n        }\n        last = \"id\";\n\n        cookie = -1;\n        while ((i = attr_index_iter(0, &cookie)) != -1) {\n            if (is_softrm_field(i)) {\n                rc = check_and_fix_field(pconn, i, &curr_index, T_SOFTRM,\n                                         fieldtab, typetab, defaulttab,\n                                         main_name_compat, &last, true);\n                if (rc == DB_NEED_ALTER)\n                    need_alter = true;\n                /* don't return immediately, to report about other fields */\n                else if (rc)\n                    return rc;\n            }\n        }\n\n        rc = drop_extra_fields(pconn, curr_index, T_SOFTRM, fieldtab);\n        if (rc)\n            return rc;\n    } else if (rc != DB_NOT_EXISTS) {\n        DisplayLog(LVL_CRIT, LISTMGR_TAG,\n                   \"Error checking database schema: %s\",\n                   
db_errmsg(pconn, strbuf, sizeof(strbuf)));\n    }\n    return (rc == 0 && need_alter) ? DB_NEED_ALTER : rc;\n}\n\nstatic int create_table_softrm(db_conn_t *pconn, bool *affects_trig)\n{\n    GString *request;\n    int rc, i, cookie;\n\n    request =\n        g_string_new(\"CREATE TABLE \" SOFT_RM_TABLE \" (id \" PK_TYPE\n                     \" PRIMARY KEY\");\n\n    cookie = -1;\n    while ((i = attr_index_iter(0, &cookie)) != -1) {\n        if (is_softrm_field(i))\n            append_field_def(pconn, i, request, 0);\n    }\n    g_string_append(request, \")\");\n    append_engine(request);\n\n    rc = run_create_table(pconn, SOFT_RM_TABLE, request->str);\n    if (rc)\n        goto free_str;\n\n    /* create indexes on this table */\n    cookie = -1;\n    while ((i = attr_index_iter(0, &cookie)) != -1) {\n        if (is_softrm_field(i) && is_indexed_field(i)) {\n            g_string_printf(request,\n                            \"CREATE INDEX %s_index ON \" SOFT_RM_TABLE \"(%s)\",\n                            field_name(i), field_name(i));\n\n            rc = run_create_index(pconn, SOFT_RM_TABLE, field_name(i),\n                                  request->str);\n            if (rc)\n                goto free_str;\n        }\n    }\n    rc = DB_SUCCESS;\n\n free_str:\n    g_string_free(request, TRUE);\n    return rc;\n}\n\n#define VERSION_VAR_FUNC    \"VersionFunctionSet\"\n#define VERSION_VAR_TRIG    \"VersionTriggerSet\"\n\n#define FUNCTIONSET_VERSION    \"1.6\"\n#define TRIGGERSET_VERSION     \"1.6\"\n\nstatic int check_functions_version(db_conn_t *conn)\n{\n    int rc;\n    char val[1024];\n\n    /* check the functions version */\n    rc = lmgr_get_var(conn, VERSION_VAR_FUNC, val, sizeof(val));\n    if (rc == DB_SUCCESS) {\n        if (strcmp(val, FUNCTIONSET_VERSION)) {\n            DisplayLog(LVL_MAJOR, LISTMGR_TAG,\n                       \"Wrong functions version (in DB: %s, expected: %s). 
%s.\",\n                       val, FUNCTIONSET_VERSION,\n                       report_only ? \"Reports output might be incorrect\" :\n                       \"Existing functions will be dropped and re-created\");\n\n            return DB_BAD_SCHEMA;\n        } else {\n            DisplayLog(LVL_FULL, LISTMGR_TAG,\n                       \"Functions version is up-to-date (%s)\", val);\n            return DB_SUCCESS;\n        }\n    } else if (rc == DB_NOT_EXISTS) {\n        DisplayLog(LVL_MAJOR, LISTMGR_TAG,\n                   \"No function versioning (expected: %s). %s.\",\n                   FUNCTIONSET_VERSION,\n                   report_only ? \"Reports output might be incorrect\" :\n                   \"Existing functions will be dropped and re-created\");\n        return DB_BAD_SCHEMA;\n    } else {\n        DisplayLog(LVL_CRIT, LISTMGR_TAG, \"Error getting functions version: %s\",\n                   db_errmsg(conn, val, 1024));\n        return rc;\n    }\n}\n\nstatic int set_functions_version(db_conn_t *conn)\n{\n    /* set new functions version */\n    int rc = lmgr_set_var(conn, VERSION_VAR_FUNC, FUNCTIONSET_VERSION);\n    if (rc) {\n        char msgbuf[1024];\n        DisplayLog(LVL_CRIT, LISTMGR_TAG, \"Failed to set triggers version: %s\",\n                   db_errmsg(conn, msgbuf, sizeof(msgbuf)));\n    }\n    return rc;\n}\n\nstatic int check_triggers_version(db_conn_t *pconn, bool *affects_trig)\n{\n    int rc;\n    char val[1024];\n\n    /* no accounting or report_only: don't check triggers */\n    if (!lmgr_config.acct && !report_only) {\n        DisplayLog(LVL_VERB, LISTMGR_TAG,\n                   \"Accounting is disabled: all triggers will be dropped.\");\n        return DB_SUCCESS;\n    } else if (report_only)\n        return DB_SUCCESS;  /* don't care about triggers */\n\n    /* check the triggers version */\n    rc = lmgr_get_var(pconn, VERSION_VAR_TRIG, val, sizeof(val));\n    if (rc == DB_SUCCESS) {\n        if (strcmp(val, 
TRIGGERSET_VERSION)) {\n            DisplayLog(LVL_MAJOR, LISTMGR_TAG,\n                       \"Wrong triggers version (in DB: %s, expected: %s). \"\n                       \"Existing triggers will be dropped and re-created.\", val,\n                       TRIGGERSET_VERSION);\n            return DB_BAD_SCHEMA;\n        } else {\n            DisplayLog(LVL_FULL, LISTMGR_TAG,\n                       \"Triggers version is up-to-date (%s)\", val);\n            return DB_SUCCESS;\n        }\n    } else if (rc == DB_NOT_EXISTS) {\n        DisplayLog(LVL_MAJOR, LISTMGR_TAG,\n                   \"No trigger versioning (expected: %s). \"\n                   \"Existing triggers will be dropped and re-created.\",\n                   TRIGGERSET_VERSION);\n        return DB_BAD_SCHEMA;\n    } else {\n        DisplayLog(LVL_CRIT, LISTMGR_TAG, \"Error getting trigger version: %s\",\n                   db_errmsg(pconn, val, sizeof(val)));\n        return rc;\n    }\n}\n\nstatic int set_triggers_version(db_conn_t *pconn, bool *affects_trig)\n{\n    /* set new triggers version */\n    int rc = lmgr_set_var(pconn, VERSION_VAR_TRIG, TRIGGERSET_VERSION);\n    if (rc) {\n        char msgbuf[1024];\n        DisplayLog(LVL_CRIT, LISTMGR_TAG, \"Failed to set triggers version: %s\",\n                   db_errmsg(pconn, msgbuf, sizeof(msgbuf)));\n    }\n    return rc;\n}\n\nstatic int check_trig_acct_insert(db_conn_t *pconn, bool *affects_trig)\n{\n    int rc;\n    char strbuf[4096];\n\n    if (!lmgr_config.acct) {\n        /* no acct: must delete trigger */\n        if (!report_only) {\n            DisplayLog(LVL_DEBUG, LISTMGR_TAG, \"Dropping trigger %s\",\n                       ACCT_TRIGGER_INSERT);\n            rc = db_drop_component(pconn, DBOBJ_TRIGGER, ACCT_TRIGGER_INSERT);\n            if (rc == DB_NOT_SUPPORTED) {\n                DisplayLog(LVL_MAJOR, LISTMGR_TAG,\n                           \"Triggers are not supported with this database. 
\"\n                           \"Not a big issue (wanted to disable it)\");\n            } else if (rc != DB_SUCCESS && rc != DB_TRG_NOT_EXISTS) {\n                DisplayLog(LVL_CRIT, LISTMGR_TAG,\n                           \"Failed to drop \" ACCT_TRIGGER_INSERT\n                           \"trigger: Error: %s\", db_errmsg(pconn, strbuf,\n                                                           sizeof(strbuf)));\n                return rc;\n            }\n        }\n        return DB_SUCCESS;\n    }\n\n    return db_check_component(pconn, DBOBJ_TRIGGER, ACCT_TRIGGER_INSERT,\n                              acct_info_table);\n}\n\nstatic int check_trig_acct_delete(db_conn_t *pconn, bool *affects_trig)\n{\n    int rc;\n    char strbuf[4096];\n    if (!lmgr_config.acct) {\n        /* no acct: must delete trigger */\n        if (!report_only) {\n            DisplayLog(LVL_DEBUG, LISTMGR_TAG, \"Dropping trigger %s\",\n                       ACCT_TRIGGER_DELETE);\n            rc = db_drop_component(pconn, DBOBJ_TRIGGER, ACCT_TRIGGER_DELETE);\n            if (rc == DB_NOT_SUPPORTED) {\n                DisplayLog(LVL_MAJOR, LISTMGR_TAG,\n                           \"Triggers are not supported with this database. 
\"\n                           \"Not a big issue (wanted to disable it)\");\n            } else if (rc != DB_SUCCESS && rc != DB_TRG_NOT_EXISTS) {\n                DisplayLog(LVL_CRIT, LISTMGR_TAG,\n                           \"Failed to drop \" ACCT_TRIGGER_DELETE\n                           \"trigger: Error: %s\", db_errmsg(pconn, strbuf,\n                                                           sizeof(strbuf)));\n                return rc;\n            }\n        }\n        return DB_SUCCESS;\n    }\n\n    return db_check_component(pconn, DBOBJ_TRIGGER, ACCT_TRIGGER_DELETE,\n                              acct_info_table);\n}\n\nstatic int check_trig_acct_update(db_conn_t *pconn, bool *affects_trig)\n{\n    int rc;\n    char strbuf[4096];\n    if (!lmgr_config.acct) {\n        /* no acct: must delete trigger */\n        if (!report_only) {\n            DisplayLog(LVL_DEBUG, LISTMGR_TAG, \"Dropping trigger %s\",\n                       ACCT_TRIGGER_UPDATE);\n            rc = db_drop_component(pconn, DBOBJ_TRIGGER, ACCT_TRIGGER_UPDATE);\n            if (rc == DB_NOT_SUPPORTED) {\n                DisplayLog(LVL_MAJOR, LISTMGR_TAG,\n                           \"Triggers are not supported with this database. 
\"\n                           \"Not a big issue (wanted to disable it)\");\n            } else if (rc != DB_SUCCESS && rc != DB_TRG_NOT_EXISTS) {\n                DisplayLog(LVL_CRIT, LISTMGR_TAG,\n                           \"Failed to drop \" ACCT_TRIGGER_UPDATE\n                           \"trigger: Error: %s\", db_errmsg(pconn, strbuf,\n                                                           sizeof(strbuf)));\n                return rc;\n            }\n        }\n        return DB_SUCCESS;\n    }\n\n    return db_check_component(pconn, DBOBJ_TRIGGER, ACCT_TRIGGER_UPDATE,\n                              acct_info_table);\n}\n\nstatic int create_trig_acct_insert(db_conn_t *pconn, bool *affects_trig)\n{\n    int rc;\n    GString *request;\n    char errbuf[1024];\n\n    /* Trigger on insert */\n    request = g_string_new(\"DECLARE val INT;\"\n                           \"SET val=\" SZRANGE_FUNC \"(NEW.size);\"\n                           \"INSERT INTO \" ACCT_TABLE \"(\");\n    /* INSERT(list of fields... */\n    attrmask2fieldlist(request, acct_pk_attr_set, T_ACCT, \"\", \"\", 0);\n    attrmask2fieldlist(request, acct_attr_set, T_ACCT, \"\", \"\", AOF_LEADING_SEP);\n    g_string_append(request, \", \" ACCT_FIELD_COUNT);\n    append_size_range_fields(request, true, \"\");\n\n    /* ... ) VALUES (... */\n    g_string_append(request, \") VALUES (\");\n    attrmask2fieldlist(request, acct_pk_attr_set, T_ACCT, \"NEW.\", \"\", 0);\n    attrmask2fieldlist(request, acct_attr_set, T_ACCT, \"NEW.\", \"\",\n                       AOF_LEADING_SEP);\n    g_string_append(request, \",1\");\n    append_size_range_val(request, true, \"NEW.\", \"val\");\n    g_string_append(request, \") ON DUPLICATE KEY UPDATE \");\n\n    /* on duplicate key update... 
*/\n    attrmask2fieldoperation(request, acct_attr_set, T_ACCT, \"NEW.\", OT_ADD);\n    g_string_append(request, \", \" ACCT_FIELD_COUNT \"=\" ACCT_FIELD_COUNT \"+1\");\n    append_size_range_op(request, true, \"NEW.\", \"val\", OT_ADD);\n    g_string_append(request, \";\");\n\n    rc = db_drop_component(pconn, DBOBJ_TRIGGER, ACCT_TRIGGER_INSERT);\n    if (rc != DB_SUCCESS && rc != DB_TRG_NOT_EXISTS) {\n        DisplayLog(LVL_CRIT, LISTMGR_TAG,\n                   \"Failed to drop \" ACCT_TRIGGER_INSERT \" trigger: Error: %s\",\n                   db_errmsg(pconn, errbuf, sizeof(errbuf)));\n        goto free_str;\n    }\n\n    rc = db_create_trigger(pconn, ACCT_TRIGGER_INSERT, \"AFTER INSERT\",\n                           acct_info_table, request->str);\n    if (rc) {\n        DisplayLog(LVL_CRIT, LISTMGR_TAG,\n                   \"Failed to create \" ACCT_TRIGGER_INSERT\n                   \" trigger: Error: %s\", db_errmsg(pconn, errbuf,\n                                                    sizeof(errbuf)));\n        goto free_str;\n    }\n    rc = DB_SUCCESS;\n    DisplayLog(LVL_VERB, LISTMGR_TAG,\n               \"Trigger \" ACCT_TRIGGER_INSERT \" created successfully\");\n\n free_str:\n    g_string_free(request, TRUE);\n    return rc;\n}\n\nstatic int create_trig_acct_delete(db_conn_t *pconn, bool *affects_trig)\n{\n    int rc;\n    GString *request;\n    char err_buf[1024];\n\n    /* Trigger on delete */\n    request = g_string_new(\"DECLARE val INT;\"\n                           \"SET val=\" SZRANGE_FUNC \"(OLD.size);\"\n                           \"UPDATE \" ACCT_TABLE \" SET \");\n    /* update ACCT_TABLE SET ... */\n    attrmask2fieldoperation(request, acct_attr_set, T_ACCT, \"OLD.\", OT_SUBTRACT);\n    g_string_append(request, \", \" ACCT_FIELD_COUNT \"=\" ACCT_FIELD_COUNT \"-1\");\n    append_size_range_op(request, true, \"OLD.\", \"val\", OT_SUBTRACT);\n\n    /* ... WHERE ... 
*/\n    g_string_append(request, \" WHERE \");\n    attrmask2fieldcomparison(request, acct_pk_attr_set, T_ACCT, \"\", \"OLD.\", \"=\",\n                             \"AND\");\n    g_string_append(request, \";\");\n\n    rc = db_drop_component(pconn, DBOBJ_TRIGGER, ACCT_TRIGGER_DELETE);\n    if (rc != DB_SUCCESS && rc != DB_TRG_NOT_EXISTS) {\n        DisplayLog(LVL_CRIT, LISTMGR_TAG,\n                   \"Failed to drop \" ACCT_TRIGGER_DELETE \" trigger: Error: %s\",\n                   db_errmsg(pconn, err_buf, sizeof(err_buf)));\n        goto free_str;\n    }\n\n    rc = db_create_trigger(pconn, ACCT_TRIGGER_DELETE, \"BEFORE DELETE\",\n                           acct_info_table, request->str);\n    if (rc) {\n        DisplayLog(LVL_CRIT, LISTMGR_TAG,\n                   \"Failed to create \" ACCT_TRIGGER_DELETE\n                   \" trigger: Error: %s\", db_errmsg(pconn, err_buf,\n                                                    sizeof(err_buf)));\n        goto free_str;\n    }\n    DisplayLog(LVL_VERB, LISTMGR_TAG,\n               \"Trigger \" ACCT_TRIGGER_DELETE \" created successfully\");\n    rc = DB_SUCCESS;\n\n free_str:\n    g_string_free(request, TRUE);\n    return rc;\n}\n\nstatic int create_trig_acct_update(db_conn_t *pconn, bool *affects_trig)\n{\n    int rc, i, cookie;\n    bool is_first_field = true;\n    GString *request;\n    char err_buf[1024];\n\n    /* Trigger on update */\n\n    /* In case the owner or group changes, we\n     * must subtract old information in previous raw\n     * and add new information to the new raw.\n     */\n    /* Simple case: owner and group are still the same */\n    request = g_string_new(\"DECLARE val_old,val_new INT;\"\n                           \"SET val_old=\" SZRANGE_FUNC \"(OLD.size);\"\n                           \"SET val_new=\" SZRANGE_FUNC \"(NEW.size);\" \"IF \");\n    /* generate comparison like NEW.uid=OLD.uid AND NEW.gid=OLD.gid */\n    attrmask2fieldcomparison(request, acct_pk_attr_set, T_ACCT, 
\"NEW.\", \"OLD.\",\n                             \"=\", \"AND\");\n    g_string_append(request, \"THEN \\n\\t IF \");\n    /* if one of the attribute value has changed: update the acct table */\n    /* generate comparison like NEW.size<>=OLD.size OR NEW.blocks<>OLD.blocks */\n    attrmask2fieldcomparison(request, acct_attr_set, T_ACCT, \"NEW.\", \"OLD.\",\n                             \"<>\", \"OR\");\n    g_string_append(request, \"THEN \\n\\t\\t UPDATE \" ACCT_TABLE \" SET \");\n\n    cookie = -1;\n    while ((i = attr_index_iter(0, &cookie)) != -1) {\n        if (is_acct_field(i)) {\n            if (!is_first_field)\n                g_string_append_printf(request,\n                                       \",%s=CAST(%s as SIGNED)+CAST(NEW.%s as SIGNED)-CAST(OLD.%s as SIGNED)\",\n                                       field_name(i), field_name(i),\n                                       field_name(i), field_name(i));\n            else {\n                g_string_append_printf(request,\n                                       \"%s=CAST(%s as SIGNED)+CAST(NEW.%s as SIGNED)-CAST(OLD.%s as SIGNED)\",\n                                       field_name(i), field_name(i),\n                                       field_name(i), field_name(i));\n                is_first_field = false;\n            }\n        }\n    }\n\n    /* update size range values */\n    g_string_append_printf(request,\n                           \"%s%s=CAST(%s as SIGNED)-CAST((OLD.size=0) as SIGNED)+CAST((NEW.size=0) as SIGNED)\",\n                           is_first_field ? 
\"\" : \",\", sz_field[0], sz_field[0]);\n    is_first_field = false;\n    for (i = 1; i < SZ_PROFIL_COUNT - 1; i++) { /* 2nd to before the last */\n        g_string_append_printf(request,\n                               \",%s=CAST(%s as SIGNED)-CAST((val_old=%u) as SIGNED)+CAST((val_new=%u) as SIGNED)\",\n                               sz_field[i], sz_field[i], i - 1, i - 1);\n    }\n    /* last */\n    g_string_append_printf(request,\n                           \",%s=CAST(%s as SIGNED)-CAST((val_old>=%u) as SIGNED)+CAST((val_new>=%u) as SIGNED)\",\n                           sz_field[i], sz_field[i], i - 1, i - 1);\n    g_string_append(request, \" WHERE \");\n    /* generate comparison as follows: owner=NEW.uid AND gid=NEW.gid */\n    attrmask2fieldcomparison(request, acct_pk_attr_set, T_ACCT, \"\", \"NEW.\", \"=\",\n                             \"AND\");\n    g_string_append(request, \"; \\n\\t END IF; \\nELSEIF \");\n\n    /* tricky case: owner and/or group changed */\n\n    attrmask2fieldcomparison(request, acct_pk_attr_set, T_ACCT, \"NEW.\", \"OLD.\",\n                             \"<>\", \"OR\");\n    g_string_append(request, \"THEN \\n\\tINSERT INTO \" ACCT_TABLE \"(\");\n    /* generate fields as follows: owner, gid */\n    attrmask2fieldlist(request, acct_pk_attr_set, T_ACCT, \"\", \"\", 0);\n    /* generate fields as follows: , size, blocks */\n    attrmask2fieldlist(request, acct_attr_set, T_ACCT, \"\", \"\", AOF_LEADING_SEP);\n    g_string_append(request, \", \" ACCT_FIELD_COUNT);\n    append_size_range_fields(request, true, \"\");\n    g_string_append(request, \") VALUES (\");\n    /* generate fields as follows: NEW.uid, NEW.gid */\n    attrmask2fieldlist(request, acct_pk_attr_set, T_ACCT, \"NEW.\", \"\", 0);\n    attrmask2fieldlist(request, acct_attr_set, T_ACCT, \"NEW.\", \"\",\n                       AOF_LEADING_SEP);\n    g_string_append(request, \",1\");\n    append_size_range_val(request, true, \"NEW.\", \"val_new\");\n\n    
g_string_append(request, \") \\n\\tON DUPLICATE KEY UPDATE \");\n    /* generate operations as follows:\n     * size=size+New.size, blocks=blocks+NEW.blocks */\n    attrmask2fieldoperation(request, acct_attr_set, T_ACCT, \"NEW.\", OT_ADD);\n    g_string_append(request, \", \" ACCT_FIELD_COUNT \"=\" ACCT_FIELD_COUNT \"+1\");\n    /* update size range values */\n    append_size_range_op(request, true, \"NEW.\", \"val_new\", OT_ADD);\n    g_string_append(request, \";\\n\" \"\\tUPDATE \" ACCT_TABLE \" SET \");\n\n    /* generate operations as follows:\n     * size=size-Old.size, blocks=blocks-Old.blocks */\n    attrmask2fieldoperation(request, acct_attr_set, T_ACCT, \"OLD.\", OT_SUBTRACT);\n    g_string_append(request, \", \" ACCT_FIELD_COUNT \"=\" ACCT_FIELD_COUNT \"-1 \");\n    append_size_range_op(request, true, \"OLD.\", \"val_old\", OT_SUBTRACT);\n    g_string_append(request, \" WHERE \");\n    attrmask2fieldcomparison(request, acct_pk_attr_set, T_ACCT, \"\", \"OLD.\", \"=\",\n                             \"AND\");\n    g_string_append(request, \";\\nEND IF;\\n\");\n\n    rc = db_drop_component(pconn, DBOBJ_TRIGGER, ACCT_TRIGGER_UPDATE);\n    if (rc != DB_SUCCESS && rc != DB_TRG_NOT_EXISTS) {\n        DisplayLog(LVL_CRIT, LISTMGR_TAG,\n                   \"Failed to drop \" ACCT_TRIGGER_UPDATE \" trigger: Error: %s\",\n                   db_errmsg(pconn, err_buf, sizeof(err_buf)));\n        goto free_str;\n    }\n\n    rc = db_create_trigger(pconn, ACCT_TRIGGER_UPDATE, \"AFTER UPDATE\",\n                           acct_info_table, request->str);\n    if (rc) {\n        DisplayLog(LVL_CRIT, LISTMGR_TAG,\n                   \"Failed to create \" ACCT_TRIGGER_UPDATE\n                   \" trigger: Error: %s\", db_errmsg(pconn, err_buf,\n                                                    sizeof(err_buf)));\n        goto free_str;\n    }\n    DisplayLog(LVL_VERB, LISTMGR_TAG,\n               \"Trigger \" ACCT_TRIGGER_UPDATE \" created successfully\");\n    rc = 
DB_SUCCESS;\n\n free_str:\n    g_string_free(request, TRUE);\n    return rc;\n}\n\nstatic int check_func_szrange(db_conn_t *pconn, bool *affects_trig)\n{\n    /* XXX /!\\ do not modify the code of DB functions\n     * without changing FUNCTIONSET_VERSION!!!!\n     */\n    return db_check_component(pconn, DBOBJ_FUNCTION, SZRANGE_FUNC, NULL);\n}\n\nstatic int create_func_szrange(db_conn_t *pconn, bool *affects_trig)\n{\n    int rc;\n    char err_buf[1024];\n    /* XXX /!\\ do not modify the code of DB functions\n     * without changing FUNCTIONSET_VERSION!!!!\n     */\n    const char *request = \"CREATE FUNCTION \" SZRANGE_FUNC \"(sz BIGINT UNSIGNED)\"\n        \" RETURNS INT DETERMINISTIC\"\n        \" BEGIN\" \"     RETURN IF(sz=0,-1,FLOOR(LOG2(sz)/5));\" \" END\";\n\n    rc = db_drop_component(pconn, DBOBJ_FUNCTION, SZRANGE_FUNC);\n    if (rc != DB_SUCCESS && rc != DB_NOT_EXISTS) {\n        DisplayLog(LVL_CRIT, LISTMGR_TAG,\n                   \"Failed to drop function '\" SZRANGE_FUNC \"': Error: %s\",\n                   db_errmsg(pconn, err_buf, sizeof(err_buf)));\n        return rc;\n    }\n    rc = db_exec_sql(pconn, request, NULL);\n    if (rc)\n        DisplayLog(LVL_CRIT, LISTMGR_TAG,\n                   \"Failed to create function '\" SZRANGE_FUNC \"': Error: %s\",\n                   db_errmsg(pconn, err_buf, sizeof(err_buf)));\n\n    return rc;\n}\n\nstatic int check_func_onepath(db_conn_t *pconn, bool *affects_trig)\n{\n    /* XXX /!\\ do not modify the code of DB functions\n     * without changing FUNCTIONSET_VERSION!!!!\n     */\n    return db_check_component(pconn, DBOBJ_FUNCTION, ONE_PATH_FUNC, NULL);\n}\n\nstatic int create_func_onepath(db_conn_t *pconn, bool *affects_trig)\n{\n    int rc;\n    GString *request;\n    char err_buf[1024];\n\n    /* XXX /!\\ do not modify the code of DB functions\n     * without changing FUNCTIONSET_VERSION!!!!\n     */\n    rc = db_drop_component(pconn, DBOBJ_FUNCTION, ONE_PATH_FUNC);\n    if (rc != DB_SUCCESS && rc 
!= DB_NOT_EXISTS) {\n        DisplayLog(LVL_CRIT, LISTMGR_TAG,\n                   \"Failed to drop function '\" ONE_PATH_FUNC \"': Error: %s\",\n                   db_errmsg(pconn, err_buf, sizeof(err_buf)));\n        return rc;\n    }\n    /* creating function to get one path for a file */\n    /* Note: use \"DETERMINISTIC\" assuming that it returns the same path for\n     * the same id in a given request */\n    request = g_string_new(NULL);\n    g_string_printf(request,\n                    \"CREATE FUNCTION \" ONE_PATH_FUNC \"(param \" PK_TYPE \")\"\n                    \" RETURNS VARBINARY(%u) DETERMINISTIC READS SQL DATA\"\n                    \" BEGIN\" \" DECLARE p VARBINARY(%u) DEFAULT NULL;\"\n                    \" DECLARE pid \" PK_TYPE \" DEFAULT NULL;\"\n                    \" DECLARE n VARBINARY(%u) DEFAULT NULL;\"\n                    /* returns path when parent is not found\n                       (NULL if id is not found) */\n                    \" DECLARE EXIT HANDLER FOR NOT FOUND RETURN CONCAT(pid,'/',p);\"\n                    \" SELECT parent_id, name INTO pid, p from NAMES WHERE id=param\"\n                    /* limit result to 1 path only */\n                    \" LIMIT 1;\"\n                    \" LOOP\"\n                    \" SELECT parent_id, name INTO pid, n from NAMES WHERE id=pid\"\n                    \" LIMIT 1;\"\n                    \" SELECT CONCAT( n, '/', p) INTO p;\" \" END LOOP;\" \" END\",\n                    /* size of fullpath */\n                    field_infos[ATTR_INDEX_fullpath].db_type_size,\n                    /* size of fullpath */\n                    field_infos[ATTR_INDEX_fullpath].db_type_size,\n                    /* size of name */\n                    field_infos[ATTR_INDEX_name].db_type_size);\n\n    rc = db_exec_sql(pconn, request->str, NULL);\n    if (rc)\n        DisplayLog(LVL_CRIT, LISTMGR_TAG,\n                   \"Failed to create function '\" ONE_PATH_FUNC \"': Error: %s\",\n                   
db_errmsg(pconn, err_buf, sizeof(err_buf)));\n\n    g_string_free(request, TRUE);\n    return rc;\n}\n\nstatic int check_func_thispath(db_conn_t *pconn, bool *affects_trig)\n{\n    return db_check_component(pconn, DBOBJ_FUNCTION, THIS_PATH_FUNC, NULL);\n}\n\nstatic int create_func_thispath(db_conn_t *pconn, bool *affects_trig)\n{\n    int rc;\n    GString *request;\n    char err_buf[1024];\n    /* XXX /!\\ do not modify the code of DB functions\n     * without changing FUNCTIONSET_VERSION!!!!\n     */\n\n    /* drop previous versions of the function */\n    rc = db_drop_component(pconn, DBOBJ_FUNCTION, THIS_PATH_FUNC);\n    if (rc != DB_SUCCESS && rc != DB_NOT_EXISTS) {\n        DisplayLog(LVL_CRIT, LISTMGR_TAG,\n                   \"Failed to drop function '\" THIS_PATH_FUNC \"': Error: %s\",\n                   db_errmsg(pconn, err_buf, sizeof(err_buf)));\n        return rc;\n    }\n\n    /* creating function to get a path for a file, for the given parent\n     * and name  */\n    /* Note: use \"DETERMINISTIC\" assuming that it returns the same path for\n     * the same parent+name in a given request */\n    request = g_string_new(NULL);\n    g_string_printf(request,\n                \"CREATE FUNCTION \" THIS_PATH_FUNC \"(pid_arg \" PK_TYPE\n                \", n_arg VARBINARY(%u))\"\n                \" RETURNS VARBINARY(%u) DETERMINISTIC READS SQL DATA\"\n                \" BEGIN\" \" DECLARE p VARBINARY(%u) DEFAULT NULL;\"\n                \" DECLARE pid \" PK_TYPE \" DEFAULT NULL;\"\n                \" DECLARE n VARBINARY(%u) DEFAULT NULL;\"\n                /* Returns path when parent is not found (NULL if id is\n                 * not found) */\n                \" DECLARE EXIT HANDLER FOR NOT FOUND RETURN CONCAT(pid,'/',p);\"\n                \" SET pid=pid_arg;\"\n                \" SET p=n_arg;\"\n                \" LOOP\"\n                \" SELECT parent_id, name INTO pid, n from NAMES WHERE id=pid\"\n                /* limit result to 1 path only */\n   
             \" LIMIT 1;\"\n                \" SELECT CONCAT( n, '/', p) INTO p;\" \" END LOOP;\" \" END\",\n                /* size of name */\n                field_infos[ATTR_INDEX_name].db_type_size,\n                /* size of fullpath */\n                field_infos[ATTR_INDEX_fullpath].db_type_size,\n                /* size of fullpath */\n                field_infos[ATTR_INDEX_fullpath].db_type_size,\n                /* size of name */\n                field_infos[ATTR_INDEX_name].db_type_size);\n\n    rc = db_exec_sql(pconn, request->str, NULL);\n    if (rc)\n        DisplayLog(LVL_CRIT, LISTMGR_TAG,\n                   \"Failed to create function '\" THIS_PATH_FUNC \"': Error: %s\",\n                   db_errmsg(pconn, err_buf, sizeof(err_buf)));\n\n    g_string_free(request, TRUE);\n    return rc;\n}\n\ntypedef struct dbobj_descr {\n    db_object_e o_type;\n    const char *o_name;\n    check_create_tab_func_t o_check;\n    check_create_tab_func_t o_create;\n} dbobj_descr_t;\n\n/** list of tables and their check/create functions. 
*/\nstatic const dbobj_descr_t o_list[] = {\n    /* tables */\n    {DBOBJ_TABLE, VAR_TABLE, check_table_vars, create_table_vars},\n    {DBOBJ_TABLE, MAIN_TABLE, check_table_main, create_table_main},\n    {DBOBJ_TABLE, DNAMES_TABLE, check_table_dnames, create_table_dnames},\n    {DBOBJ_TABLE, ANNEX_TABLE, check_table_annex, create_table_annex},\n\n    /* this function is needed to populate acct table (and for triggers) */\n    {DBOBJ_FUNCTION, SZRANGE_FUNC, check_func_szrange, create_func_szrange},\n\n    {DBOBJ_TABLE, ACCT_TABLE, check_table_acct, create_table_acct},\n#ifdef _LUSTRE\n    {DBOBJ_TABLE, STRIPE_INFO_TABLE, check_table_stripe_info,\n     create_table_stripe_info},\n    {DBOBJ_TABLE, STRIPE_ITEMS_TABLE, check_table_stripe_items,\n     create_table_stripe_items},\n#endif\n    {DBOBJ_TABLE, SOFT_RM_TABLE, check_table_softrm, create_table_softrm},\n\n    /* triggers */\n    {DBOBJ_TRIGGER, ACCT_TRIGGER_INSERT, check_trig_acct_insert,\n     create_trig_acct_insert},\n    {DBOBJ_TRIGGER, ACCT_TRIGGER_DELETE, check_trig_acct_delete,\n     create_trig_acct_delete},\n    {DBOBJ_TRIGGER, ACCT_TRIGGER_UPDATE, check_trig_acct_update,\n     create_trig_acct_update},\n\n    /* other functions */\n    {DBOBJ_FUNCTION, ONE_PATH_FUNC, check_func_onepath, create_func_onepath},\n    {DBOBJ_FUNCTION, THIS_PATH_FUNC, check_func_thispath, create_func_thispath},\n\n    {0, NULL, NULL, NULL}   /* STOP item */\n};\n\n/**\n * Initialize the database access module and\n * check and create the schema.\n */\nint ListMgr_Init(enum lmgr_init_flags flags)\n{\n    int rc;\n    db_conn_t conn;\n    const dbobj_descr_t *o;\n    bool create_all_functions = false;\n    bool create_all_triggers = false;\n    bool dummy;\n\n    /* store the parameter as a global variable */\n    init_flags = flags;\n\n    /* initialize attr masks for each table */\n    init_attrset_masks(&lmgr_config);\n\n    init_default_field_values();\n\n    /* determine source tables for accounting */\n    
acct_info_table = acct_table();\n\n    /* create a database access */\n    rc = db_connect(&conn);\n    if (rc)\n        return rc;\n\n    /* check if tables exist, and check their schema */\n    DisplayLog(LVL_DEBUG, LISTMGR_TAG, \"Checking database schema\");\n\n    /* check function and trigger version: if wrong, drop and re-create\n     * them all */\n    if (check_functions_version(&conn) != DB_SUCCESS)\n        create_all_functions = true;\n    if (check_triggers_version(&conn, &dummy) != DB_SUCCESS)\n        create_all_triggers = true;\n\n    for (o = o_list; o->o_name != NULL; o++) {\n        /* don't care about triggers for report-only */\n        if (report_only && (o->o_type == DBOBJ_TRIGGER))\n            continue;\n\n        /* force re-creating triggers and functions, if needed */\n        if ((o->o_type == DBOBJ_TRIGGER) && create_all_triggers)\n            rc = DB_NOT_EXISTS;\n        else if ((o->o_type == DBOBJ_FUNCTION) && create_all_functions)\n            rc = DB_NOT_EXISTS;\n        else\n            rc = o->o_check(&conn, &create_all_triggers);\n\n        switch (rc) {\n        case DB_SUCCESS:   /* OK */\n            break;\n        case DB_NOT_EXISTS:\n            if (report_only)\n                DisplayLog(LVL_MAJOR, LISTMGR_TAG, \"WARNING: %s %s\"\n                           \" does not exist\", dbobj2str(o->o_type), o->o_name);\n            else {\n                DisplayLog(LVL_EVENT, LISTMGR_TAG,\n                           \"%s %s does not exist (or wrong version):\"\n                           \" creating it.\", dbobj2str(o->o_type), o->o_name);\n                rc = o->o_create(&conn, &create_all_triggers);\n                if (rc != DB_SUCCESS)\n                    goto close_conn;\n            }\n            break;\n\n        case DB_NEED_ALTER:\n            if (report_only)\n                DisplayLog(LVL_MAJOR, LISTMGR_TAG,\n                           \"WARNING: ALTER required on %s %s\",\n                           
dbobj2str(o->o_type), o->o_name);\n            else\n                goto close_conn;\n            break;\n\n        default:   /* error */\n            goto close_conn;\n        }\n    }\n\n    if (create_all_triggers && !report_only) {\n        rc = set_triggers_version(&conn, &dummy);\n        if (rc)\n            goto close_conn;\n    }\n\n    if (create_all_functions && !report_only) {\n        rc = set_functions_version(&conn);\n        if (rc)\n            goto close_conn;\n    }\n\n    rc = DB_SUCCESS;\n\n close_conn:\n    /* close the connection in any case */\n    db_close_conn(&conn);\n    return rc;\n}   /* ListMgr_Init */\n\nint ListMgr_InitAccess(lmgr_t *p_mgr)\n{\n    int rc, i;\n\n    rc = db_connect(&p_mgr->conn);\n\n    if (rc)\n        return rc;\n\n    /* set READ COMMITTED isolation level to avoid locking issues and\n     * performance drop. */\n    rc = db_transaction_level(&p_mgr->conn, TRANS_SESSION, TXL_READ_COMMITTED);\n    if (rc) {\n        char errmsg_buf[1024];\n\n        DisplayLog(LVL_CRIT, LISTMGR_TAG,\n                   \"Failed to set READ_COMMITTED isolation level: Error: %s\",\n                   db_errmsg(&p_mgr->conn, errmsg_buf, sizeof(errmsg_buf)));\n        return rc;\n    }\n\n    p_mgr->last_commit = 0;\n    p_mgr->force_commit = false;\n    p_mgr->retry_delay = 0;\n    p_mgr->retry_count = 0;\n    timerclear(&p_mgr->first_error);\n\n    for (i = 0; i < OPCOUNT; i++)\n        p_mgr->nbop[i] = 0;\n\n    return 0;\n}\n\nint ListMgr_CloseAccess(lmgr_t *p_mgr)\n{\n    int rc;\n\n    /* force to commit queued requests */\n    rc = lmgr_flush_commit(p_mgr);\n\n    /* close connexion */\n    db_close_conn(&p_mgr->conn);\n\n    return rc;\n}\n"
  },
  {
    "path": "src/list_mgr/listmgr_insert.c",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2008, 2009 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n#ifdef HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n\n#include \"list_mgr.h\"\n#include \"database.h\"\n#include \"listmgr_common.h\"\n#include \"listmgr_stripe.h\"\n#include \"rbh_logs.h\"\n#include \"rbh_misc.h\"\n#include \"Memory.h\"\n#include <stdio.h>\n#include <stdlib.h>\n\nstatic void no_name_warning(const PK_PARG_T pk, const attr_set_t *p_attrs,\n                            unsigned int count)\n{\n    DEF_PK(ppk);\n    char msg[256];\n\n    entry_id2pk(&ATTR(p_attrs, parent_id), PTR_PK(ppk));\n\n    if (count > 1)\n        snprintf(msg, sizeof(msg), \"%u entries\", count);\n    else\n        rh_strncpy(msg, \"entry\", sizeof(msg));\n\n    DisplayLog(LVL_MAJOR, LISTMGR_TAG, \"WARNING: %s created without\"\n               \" name or parent information: pk=\"DPK\", name='%s', parent='%s'\",\n               msg, pk, ATTR_MASK_TEST(p_attrs, name) ? ATTR(p_attrs, name) : \"\",\n               ATTR_MASK_TEST(p_attrs, parent_id) ? 
ppk : \"\");\n}\n\n\n/**\n * Check if a given entry must be inserted to a given table.\n * May display a warning in some cases.\n * @retval false if the entry must be skipped for the given table.\n * @retval true  if the entry must be inserted to the given table.\n */\nstatic bool entry_filter(table_enum table, bool update,\n                         const PK_PARG_T pk, const attr_set_t *p_attrs)\n{\n    switch(table)\n    {\n        case T_MAIN:\n            /* don't set this entry if no attribute is in the table */\n            if (!main_fields(p_attrs->attr_mask))\n                return false;\n            return true;\n        case T_DNAMES:\n            /* don't set this entry if parent or name is missing */\n            if (!ATTR_MASK_TEST(p_attrs, name) || !ATTR_MASK_TEST(p_attrs, parent_id))\n            {\n                /* warn for create operations without name information */\n                if (!update)\n                    no_name_warning(pk, p_attrs, 1);\n                return false;\n            }\n            return true;\n        case T_ANNEX:\n            if (!annex_fields(p_attrs->attr_mask))\n                return false;\n            return true;\n        default:\n            return true;\n    }\n}\n\n/**\n * Build and execute a batch insert request for the given table.\n * @param full_mask     the sum of all entries attribute masks\n * @param match_mask    insert an entry in the table if it matches attrs in\n *                      this mask.\n * @param mandatory_mask don't insert an entry in the table if it misses an\n *                       attribute in this mask.\n */\nstatic int run_batch_insert(lmgr_t *p_mgr,\n                            attr_mask_t full_mask,\n                            pktype *const pklist,\n                            attr_set_t **p_attrs, unsigned int count,\n                            table_enum table,\n                            bool update, bool id_is_pk,\n                            const char* 
extra_field_name,\n                            const char* extra_field_value)\n{\n    GString    *req = NULL;\n    int         rc = DB_SUCCESS;\n    int         i;\n    bool        first;\n\n    if (unlikely(extra_field_name != NULL && extra_field_value == NULL))\n        return DB_INVALID_ARG;\n\n    /* build batch request for the table */\n    req = g_string_new(\"INSERT INTO \");\n    g_string_append_printf(req, \"%s(id\", table2name(table));\n\n    /* do nothing if no field is to be set */\n    if ((attrmask2fieldlist(req, full_mask, table, \"\", \"\", AOF_LEADING_SEP) <= 0)\n        && (extra_field_name == NULL))\n        goto free_str;\n\n    if (extra_field_name != NULL)\n        g_string_append_printf(req,\",%s) VALUES \", extra_field_name);\n    else\n        g_string_append(req, \") VALUES \");\n\n    first = true;\n    /* append \",(id,values)\" to the query */\n    for (i = 0; i < count; i++)\n    {\n        if (!entry_filter(table, update, pklist[i], p_attrs[i]))\n            continue;\n\n        g_string_append_printf(req, \"%s(\"DPK, first ? 
\"\" : \",\", pklist[i]);\n        attrset2valuelist(p_mgr, req, p_attrs[i], table, AOF_LEADING_SEP);\n\n        if (extra_field_value != NULL)\n            g_string_append_printf(req,\",%s)\", extra_field_value);\n        else\n            g_string_append(req,\")\");\n        first = false;\n    }\n\n    if (update)\n    {\n        /* fake attribute struct, to write \"field=VALUES(field)\"\n         * based on full_mask attr mask */\n        attr_set_t  fake_attrs = *(p_attrs[0]);\n\n        g_string_append(req, \" ON DUPLICATE KEY UPDATE \");\n        /* explicitely update the id if it is not part of the pk */\n        if (!id_is_pk)\n            g_string_append(req, \"id=VALUES(id),\");\n\n        /* append x=VALUES(x) for all values */\n        fake_attrs.attr_mask = full_mask;\n        attrset2updatelist(p_mgr, req, &fake_attrs, table, AOF_GENERIC_VAL);\n    }\n\n    rc = db_exec_sql(&p_mgr->conn, req->str, NULL);\n\nfree_str:\n    g_string_free(req, TRUE);\n    return rc;\n}\n\nint listmgr_batch_insert_no_tx(lmgr_t * p_mgr, entry_id_t **p_ids,\n                               attr_set_t **p_attrs,\n                               unsigned int count,\n                               bool update_if_exists)\n{\n    int            rc = 0;\n    int            i;\n    attr_mask_t       full_mask;\n    attr_mask_t       all_bits_on = {.std = ~0, .status = ~0, .sm_info = ~0LL};\n    pktype        *pklist = NULL;\n\n    full_mask = sum_masks(p_attrs, count, all_bits_on);\n    pklist = (pktype *)MemCalloc(count, sizeof(pktype));\n    if (pklist == NULL)\n        return DB_NO_MEMORY;\n\n    for (i = 0; i < count; i++)\n    {\n        /* check attr mask */\n        if (!lmgr_batch_compat(full_mask, p_attrs[i]->attr_mask))\n        {\n            DisplayLog(LVL_MAJOR, LISTMGR_TAG, \"Incompatible attr mask \"\n                       \"in batched operation: \"DMASK\" vs. 
\"DMASK,\n                       PMASK(&p_attrs[i]->attr_mask), PMASK(&full_mask));\n            rc = DB_INVALID_ARG;\n            goto out_free;\n        }\n        /* fill pk array */\n        entry_id2pk(p_ids[i], PTR_PK(pklist[i])); /* The same for all tables? */\n    }\n\n    rc = run_batch_insert(p_mgr, full_mask, pklist, p_attrs,\n                          count, T_MAIN, update_if_exists,\n                          true, NULL, NULL);\n    if (rc)\n        goto out_free;\n\n    /* allow inserting entries in MAIN_TABLE, without name information */\n\n    /* both parent and name are defined */\n    if (attr_mask_test_index(&full_mask, ATTR_INDEX_name)\n        &&  attr_mask_test_index(&full_mask, ATTR_INDEX_parent_id))\n    {\n        rc = run_batch_insert(p_mgr, full_mask, pklist, p_attrs, count,\n                              T_DNAMES, true, false, \"pkn\", HNAME_DEF);\n        if (rc)\n            goto out_free;\n    }\n    else if (!update_if_exists) /* warn for create operations without name information */\n    {\n        /* if we get here, the name information is missing in all fields.\n         * use entry[0] as example for the warning message. 
*/\n        no_name_warning(pklist[0], p_attrs[0], count);\n    }\n\n    /* insert info in annex table */\n    /* Always update as having the entry in the main table\n     * is the reference to know if we knew the entry */\n\n    /* append \"on duplicate key ...\" */\n    rc = run_batch_insert(p_mgr, full_mask, pklist, p_attrs, count, T_ANNEX,\n                          true, true, NULL, NULL);\n    if (rc)\n        goto out_free;\n\n#ifdef _LUSTRE\n    /* batch insert of striping info */\n    if (stripe_fields(full_mask))\n    {\n        /* create validator list */\n        int *validators = (int*)MemCalloc(count, sizeof(int));\n        if (!validators)\n        {\n            rc =  DB_NO_MEMORY;\n            goto out_free;\n        }\n        for (i = 0; i < count; i++)\n#ifdef HAVE_LLAPI_FSWAP_LAYOUTS\n            validators[i] = ATTR_MASK_TEST(p_attrs[i], stripe_info)?\n                                ATTR(p_attrs[i],stripe_info).validator:VALID_NOSTRIPE;\n#else\n            validators[i] = VALID(p_ids[i]);\n#endif\n\n        rc = batch_insert_stripe_info(p_mgr, pklist, validators, p_attrs,\n                                      count, true);\n        MemFree(validators);\n        if (rc)\n            goto out_free;\n    }\n#endif\n\nout_free:\n    MemFree(pklist);\n    return rc;\n}\n\n\nint ListMgr_Insert(lmgr_t *p_mgr, entry_id_t *p_id, attr_set_t *p_info,\n                   bool update_if_exists)\n{\n    int rc;\n    char err_buff[4096];\n    int retry_status;\n\n    /* retry the whole transaction when the error is retryable */\nretry:\n    rc = lmgr_begin(p_mgr);\n    retry_status = lmgr_delayed_retry(p_mgr, rc);\n    if (retry_status == 1)\n        goto retry;\n    else if (retry_status == 2)\n        return DB_RBH_SIG_SHUTDOWN;\n    else if (rc)\n        return rc;\n\n    rc = listmgr_batch_insert_no_tx(p_mgr, &p_id, &p_info, 1, update_if_exists);\n    retry_status = lmgr_delayed_retry(p_mgr, rc);\n    if (retry_status == 1)\n        goto retry;\n    
else if (rc || retry_status == 2)\n    {\n        lmgr_rollback(p_mgr);\n        if (retry_status == 2)\n            rc = DB_RBH_SIG_SHUTDOWN;\n        DisplayLog(LVL_CRIT, LISTMGR_TAG,\n                   \"DB query failed in %s line %d: code=%d: %s\",\n                   __FUNCTION__, __LINE__, rc,\n                   db_errmsg(&p_mgr->conn, err_buff, sizeof(err_buff)));\n        return rc;\n    }\n    rc = lmgr_commit(p_mgr);\n    retry_status = lmgr_delayed_retry(p_mgr, rc);\n    if (retry_status == 1)\n        goto retry;\n\n    /* success, count it */\n    if (!rc)\n        p_mgr->nbop[OPIDX_INSERT]++;\n    return rc;\n}\n\n/**\n * Insert a batch of entries into the database.\n * All entries must have the same attr mask.\n */\nint            ListMgr_BatchInsert(lmgr_t * p_mgr, entry_id_t ** p_ids,\n                                   attr_set_t ** p_attrs,\n                                   unsigned int count,\n                                   bool update_if_exists)\n{\n    int rc;\n    int retry_status;\n    char err_buff[4096];\n\n    if (count == 0)\n        return DB_SUCCESS;\n    else if (p_ids == NULL || p_attrs == NULL)\n        RBH_BUG(\"NULL pointer argument\");\n\n    /* read only fields in info mask? 
*/\n    if (readonly_fields(p_attrs[0]->attr_mask))\n    {\n        attr_mask_t and = attr_mask_and(&p_attrs[0]->attr_mask, &readonly_attr_set);\n        DisplayLog(LVL_MAJOR, LISTMGR_TAG,\n                   \"Error: trying to insert read only values: attr_mask=\"\n                    DMASK, PMASK(&and));\n        return DB_INVALID_ARG;\n    }\n\n    /* retry the whole transaction when the error is retryable */\nretry:\n    /* We want insert operation set to be atomic */\n    rc = lmgr_begin(p_mgr);\n    retry_status = lmgr_delayed_retry(p_mgr, rc);\n    if (retry_status == 1)\n        goto retry;\n    else if (retry_status == 2)\n        return DB_RBH_SIG_SHUTDOWN;\n    else if (rc)\n        return rc;\n\n    rc = listmgr_batch_insert_no_tx(p_mgr, p_ids, p_attrs, count, update_if_exists);\n\n    retry_status = lmgr_delayed_retry(p_mgr, rc);\n    if (retry_status == 1)\n        goto retry;\n    else if (rc || retry_status == 2)\n    {\n        lmgr_rollback(p_mgr);\n        if (retry_status == 2)\n            rc = DB_RBH_SIG_SHUTDOWN;\n        DisplayLog(LVL_CRIT, LISTMGR_TAG,\n                   \"DB query failed in %s line %d: code=%d: %s\",\n                   __FUNCTION__, __LINE__, rc,\n                   db_errmsg(&p_mgr->conn, err_buff, sizeof(err_buff)));\n        return rc;\n    }\n\n    rc = lmgr_commit(p_mgr);\n    if (lmgr_delayed_retry(p_mgr, rc) == 1)\n        goto retry;\n    /* success, count it */\n    if (!rc)\n    {\n        if (update_if_exists)\n            p_mgr->nbop[OPIDX_UPDATE] += count;\n        else\n            p_mgr->nbop[OPIDX_INSERT] += count;\n    }\n    return rc;\n}\n"
  },
  {
    "path": "src/list_mgr/listmgr_internal.h",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2008, 2009 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n#ifndef _LMGR_INTERNAL_H\n#define _LMGR_INTERNAL_H\n\n#include \"list_mgr.h\"\n\n#define STRINGIFY(_x) #_x\n#define TOSTRING(_x) STRINGIFY(_x)\n/* example:\n * #define FOO 10\n * STRINGIFY(FOO)   => \"FOO\"\n * TOSTRING(FOO)    => \"10\"\n */\n\n#ifdef _MYSQL\n/* The length can be specified as a value from 0 to 255 before MySQL 5.0.3,\n*  and 0 to 65,535 in 5.0.3 and later versions */\n#if MYSQL_VERSION_ID >= 50003\n#define MAX_VARBINARY 65535\n#else\n#define MAX_VARBINARY 255\n#endif\n#else\n#define MAX_VARBINARY 255\n#endif\n\n/* primary key utils */\n#ifndef FID_PK\n\n#define PK_LEN 64\n#define PK_ARG_T  char *\n#define PK_PARG_T char *\n#define PTR_PK(_p) (_p)\n#define DEF_PK(_p) char _p[PK_LEN]\ntypedef DEF_PK(pktype);\n#define PK_DB_TYPE DB_TEXT\n#define DPK      \"'%s'\"\n#define SPK      \"%s\"\n#define VALID(_p) ((_p)->validator)\n#define PK_TYPE   \"VARBINARY(\" TOSTRING(PK_LEN) \")\"\n\n#else\n#define DB_FID_LEN 64\n#ifdef FID_LEN\n#if FID_LEN > DB_FID_LEN\n#error \"Lustre FID are bigger than expected, fix DB_FID_LEN\"\n#endif\n#endif\n\n#define PK_LEN DB_FID_LEN\n#define PK_ARG_T char *\n#define PK_PARG_T char *\n#define PTR_PK(_p) (_p)\n#define DEF_PK(_p) char _p[DB_FID_LEN]\ntypedef DEF_PK(pktype);\n#define PK_DB_TYPE DB_TEXT\n#define DPK      \"'%s'\"\n#define SPK      \"%s\"\n#define VALID(_p) (0)\n#define PK_TYPE   \"VARBINARY(\" TOSTRING(DB_FID_LEN) \")\"\n\n#endif\n\n#define HNAME_DEF  \"sha1(CONCAT(parent_id,'/',name))\"\n#define HNAME_FMT   \"sha1(CONCAT(\"DPK\",'/','%s'))\"\n\nint 
listmgr_get_by_pk(lmgr_t *p_mgr, PK_ARG_T pk, attr_set_t *p_info);\nint listmgr_get_dirattrs(lmgr_t *p_mgr, PK_ARG_T dir_pk, attr_set_t *p_attrs);\nint listmgr_get_funcattrs(lmgr_t *p_mgr, PK_ARG_T pk, attr_set_t *p_attrs);\n\nint listmgr_batch_insert_no_tx(lmgr_t *p_mgr, entry_id_t **p_ids,\n                               attr_set_t **p_attrs, unsigned int count,\n                               bool update_if_exists);\n\nint listmgr_remove_no_tx(lmgr_t *p_mgr, const entry_id_t *p_id,\n                         const attr_set_t *p_attr_set, bool last);\n\ntypedef struct lmgr_iterator_t {\n    lmgr_t          *p_mgr;\n    lmgr_iter_opt_t  opt;\n    result_handle_t  select_result;\n    unsigned int     opt_is_set:1;\n} lmgr_iterator_t;\n\n#ifdef _LUSTRE\n/* see stripe_item_t structure in list_mgr.h */\n#define OSTGEN_SZ   4\n#define OBJID_SZ    8\n#define OBJSEQ_SZ   8\n#define STRIPE_DETAIL_SZ (OBJID_SZ+OBJSEQ_SZ+OSTGEN_SZ)\n#endif\n\nstatic inline int buf2hex(char *out, size_t out_sz, const unsigned char *in,\n                          size_t in_sz)\n{\n    /* Convert the input buffer into an hex */\n    int i;\n    const unsigned char *src = in;\n    char *dst = out;\n\n    if (out_sz < 2 * in_sz + 1)\n        return -1;\n\n    for (i = 0; i < in_sz; i++) {\n        dst += sprintf(dst, \"%02x\", *src);\n        src++;\n    }\n    *dst = '\\0';\n    return (int)(dst - out);\n}\n\n#endif\n"
  },
  {
    "path": "src/list_mgr/listmgr_iterators.c",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2008, 2009 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n#ifdef HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n\n#include \"list_mgr.h\"\n#include \"listmgr_common.h\"\n#include \"listmgr_stripe.h\"\n#include \"listmgr_internal.h\"\n#include \"database.h\"\n#include \"Memory.h\"\n#include \"rbh_logs.h\"\n#include \"rbh_misc.h\"\n#include <stdio.h>\n#include <stdlib.h>\n\n/* generate a select query that defines the given dirattr with the given name.\n * (for FILTERDIR_OTHER types)\n * e.g. SELECT parent_id, COUNT(*) as dirattr... FROM ...\n * \\return number of bytes written (-1 on error)\n */\nstatic int append_dirattr_select(GString *str, unsigned int dirattr_index,\n                                 const char *attrname)\n{\n    if (dirattr_index == ATTR_INDEX_dircount) {\n        /* group parent and count their children */\n        g_string_append_printf(str, \"SELECT parent_id, %s as %s \"\n                               \"FROM \" DNAMES_TABLE \" GROUP BY parent_id\",\n                               dirattr2str(ATTR_INDEX_dircount), attrname);\n        return 0;\n    } else if (dirattr_index == ATTR_INDEX_avgsize) {\n        /* join all entries and avg their size when grouping by parent */\n        g_string_append_printf(str,\n                               \"SELECT parent_id, %s as %s from \" DNAMES_TABLE\n                               \" d,\" MAIN_TABLE\n                               \" m WHERE d.id=m.id and m.type='file' GROUP BY parent_id\",\n                               dirattr2str(ATTR_INDEX_avgsize), attrname);\n        return 0;\n    }\n    
return -1;\n}\n\n#define append_dir_filter(_a1, _a2, _a3, _a4, _a5, _a6) \\\n        append_dir_req(_a1, _a2, _a3, ATTR_INDEX_FLG_UNSPEC, _a4, _a5, _a6)\n\n/**\n * Append a directory condition (sort or filter on dirattr) to an iterator request.\n */\nstatic void append_dir_req(GString *from, GString *where,\n                           unsigned int sort_attr_index,\n                           filter_dir_e filter_dir,  /* type of dir filter */\n                           unsigned int filter_dir_index, /* index of filter\n                                                             dirattr */\n                           const char *filter_dir_str)\n{   /* looks like dirattr >= X */\n    if (sort_attr_index & ATTR_INDEX_FLG_UNSPEC) {\n        switch (filter_dir) {\n        case FILTERDIR_NONE:\n            break;\n\n        case FILTERDIR_EMPTY:\n            /* only empty dir filter is to be appended */\n            g_string_append_printf(where, \" AND %s\", filter_dir_str);\n            break;\n\n        case FILTERDIR_OTHER:\n\n            /* join dir entry attributes from main table with special dir\n             * attrs */\n            g_string_append(from, \" INNER JOIN (\");\n            append_dirattr_select(from, filter_dir_index, \"dirattr\");\n            g_string_append_printf(from, \") AS da ON id=da.parent_id\");\n            g_string_append_printf(where, \" AND %s\", filter_dir_str);\n            break;\n\n        default:\n            DisplayLog(LVL_CRIT, LISTMGR_TAG,\n                       \"Unexpected filter on directory attribute in %s(): %#x\",\n                       __func__, filter_dir);\n            /* ignore dir filter */\n            break;\n        }\n    } else {    /* sorting on dirattr */\n\n        /* sort on 1 dirattr, possibly needs a second for filter */\n        switch (filter_dir) {\n        case FILTERDIR_NONE:\n            /* implicit filter on 'type == dir' */\n            /* @TODO optim: directly perform request on parent_id if 
no\n             * table_filter? */\n            g_string_append(from, \" INNER JOIN (\");\n            append_dirattr_select(from, sort_attr_index, \"dirattr_sort\");\n            g_string_append(from, \") AS ds ON id=ds.parent_id\");\n            break;\n\n        case FILTERDIR_EMPTY:\n            /* join with empty dir filter + dirattr_sort */\n            g_string_append(from, \" INNER JOIN (\");\n            append_dirattr_select(from, sort_attr_index, \"dirattr_sort\");\n            g_string_append(from, \") AS ds ON id=ds.parent_id\");\n            g_string_append(where, filter_dir_str); /* XXX add AND? */\n            break;\n\n        case FILTERDIR_OTHER:\n            /* left join with dirattr_sort + right join on filter */\n            g_string_append(from, \"  LEFT JOIN (\");\n            append_dirattr_select(from, sort_attr_index, \"dirattr_sort\");\n            g_string_append(from, \") ds ON id=ds.parent_id RIGHT JOIN (\");\n            append_dirattr_select(from, filter_dir_index, \"dirattr\");\n            g_string_append(from, \") da ON id=da.parent_id\");\n            g_string_append(where, filter_dir_str); /* XXX add AND? */\n            break;\n\n        default:\n            DisplayLog(LVL_CRIT, LISTMGR_TAG,\n                       \"Unexpected filter on directory attribute in %s(): %#x\",\n                       __func__, filter_dir);\n            /* ignore dir filter */\n            break;\n        }\n    }\n}\n\n/** Determine sort operation to be done.\n * Set t_sort or sort_dirattr depending on sort type.\n */\nstatic inline void check_sort(const lmgr_sort_type_t *p_sort_type,\n                              table_enum *t_sort, unsigned int *sort_dirattr,\n                              bool *distinct)\n{\n    /* initialize outputs */\n    *t_sort = T_NONE;\n    *sort_dirattr = ATTR_INDEX_FLG_UNSPEC;\n\n    /* is there a sort order ? 
*/\n    if (p_sort_type == NULL || p_sort_type->order == SORT_NONE\n        || ((p_sort_type->attr_index & ATTR_INDEX_FLG_UNSPEC) != 0))\n        return;\n\n    /* check sort order */\n    if (is_main_field(p_sort_type->attr_index))\n        *t_sort = T_MAIN;\n    else if (is_annex_field(p_sort_type->attr_index))\n        *t_sort = T_ANNEX;\n    else if (field_infos[p_sort_type->attr_index].db_type == DB_STRIPE_INFO)\n        *t_sort = T_STRIPE_INFO;\n    else if (field_infos[p_sort_type->attr_index].db_type == DB_STRIPE_ITEMS) {\n        *t_sort = T_STRIPE_ITEMS;\n        *distinct = true;\n    } else if (is_dirattr(p_sort_type->attr_index))\n        *sort_dirattr = p_sort_type->attr_index;\n    else\n        DisplayLog(LVL_CRIT, LISTMGR_TAG,\n                   \"Invalid field for sort order (index=%d)\",\n                   p_sort_type->attr_index);\n}\n\n/** Indicate if a sort operation is to be done depending on the variables\n * set by check_sort(). */\nstatic inline bool do_sort(table_enum t_sort, unsigned int sort_dirattr)\n{\n    return (t_sort != T_NONE) || ((sort_dirattr & ATTR_INDEX_FLG_UNSPEC) == 0);\n}\n\nstatic int select_all_request(lmgr_t *p_mgr, GString *req,\n                              table_enum sort_table, unsigned int sort_dirattr,\n                              bool distinct)\n{\n    if (!do_sort(sort_table, sort_dirattr)) {\n        DisplayLog(LVL_FULL, LISTMGR_TAG,\n                   \"Empty filter: all records will be selected\");\n        g_string_assign(req, \"SELECT id FROM \" MAIN_TABLE);\n    } else if (sort_table != T_NONE) {\n        g_string_printf(req, \"SELECT %s FROM %s\",\n                        distinct ? 
\"DISTINCT(id)\" : \"id\",\n                        table2name(sort_table));\n    } else if ((sort_dirattr & ATTR_INDEX_FLG_UNSPEC) == 0) {\n        append_dirattr_select(req, sort_dirattr, \"dirattr_sort\");\n    } else {\n        DisplayLog(LVL_CRIT, LISTMGR_TAG, \"Unsupported sort table\");\n        return DB_NOT_SUPPORTED;\n    }\n    return DB_SUCCESS;\n}\n\n/** get an iterator on a list of entries */\nstruct lmgr_iterator_t *ListMgr_Iterator(lmgr_t *p_mgr,\n                                         const lmgr_filter_t *p_filter,\n                                         const lmgr_sort_type_t *p_sort_type,\n                                         const lmgr_iter_opt_t *p_opt)\n{\n    int rc;\n    lmgr_iterator_t *it;\n    filter_dir_e filter_dir_type = FILTERDIR_NONE;\n    unsigned int filter_dir_index = 0;\n    table_enum sort_table = T_NONE;\n    unsigned int sort_dirattr = ATTR_INDEX_FLG_UNSPEC;\n    struct field_count fcnt = { 0 };\n    bool distinct = false;\n    table_enum query_tab = T_NONE;\n\n    GString *from = NULL;\n    GString *where = NULL;\n    GString *req = NULL;\n    GString *filter_dir = NULL;\n\n    /* Iterator only select a sorted list of ids.\n     * Entry attributes are retrieved afterward in ListMgr_GetNext() call.\n     */\n\n    /* is there a sort order? 
*/\n    check_sort(p_sort_type, &sort_table, &sort_dirattr, &distinct);\n\n    /* initialize the request */\n    req = g_string_new(NULL);\n\n    if (no_filter(p_filter)) {\n        /* no filter is specified: build a select request with no criteria */\n        rc = select_all_request(p_mgr, req, sort_table, sort_dirattr, distinct);\n        if (rc)\n            goto free_str;\n    } else {    /* analyse filter contents */\n\n        unsigned int nbft;\n\n        /* check condition on directory */\n        filter_dir = g_string_new(NULL);\n        filter_dir_type =\n            dir_filter(p_mgr, filter_dir, p_filter, &filter_dir_index,\n                       (sort_table !=\n                        T_NONE) ? table2name(sort_table) : MAIN_TABLE);\n        /* XXX is sort dirattr the same as filter dirattr? */\n\n        where = g_string_new(NULL);\n        filter_where(p_mgr, p_filter, &fcnt, where, 0);\n\n        nbft = nb_field_tables(&fcnt);\n\n        /* finally, there was no filter */\n        if (nbft == 0 && filter_dir_type == FILTERDIR_NONE) {\n            rc = select_all_request(p_mgr, req, sort_table, sort_dirattr,\n                                    distinct);\n            if (rc)\n                goto free_str;\n        } else {\n            /* build the FROM clause */\n            from = g_string_new(NULL);\n            filter_from(p_mgr, &fcnt, from, &query_tab, &distinct, 0);\n\n            /* If there is a single table: use the filter as is.\n             * Else, build the filter a more ordered way */\n            if (nbft > 1) {\n                /* rebuild the contents of \"where\", a more ordered way */\n                g_string_assign(where, \"\");\n                if (unlikely(filter2str(p_mgr, where, p_filter,\n                                        T_NONE, AOF_PREFIX) <= 0))\n                    RBH_BUG(\"Inconsistent case: more than 1 filter table, \"\n                            \"but no filter???\");\n            }\n\n            
append_dir_req(from, where, sort_dirattr, filter_dir_type,\n                           filter_dir_index, filter_dir->str);\n\n            /* build the whole request */\n            if (distinct)\n                g_string_printf(req, \"SELECT DISTINCT(%s.id) AS id\",\n                                table2name(query_tab));\n            else\n                g_string_printf(req, \"SELECT %s.id AS id\",\n                                table2name(query_tab));\n\n            g_string_append_printf(req, \" FROM %s WHERE %s\", from->str,\n                                   where->str);\n        }\n    }\n\n#define SORT_ATTR_OPTIM (ATTR_INDEX_FLG_UNSPEC | 0x2)\n#if 0 /** @TODO RBHv3 to be reimplemnted */\n    /* both filter and sort order */\n    /* @TODO optim if both filter and sort order are on the same field */\n    if (filter_dir_index == sort_dirattr) {\n        query_end =\n            query + append_dirattr_select(query, filter_dir_index, \"dirattr\");\n        query_end += sprintf(query_end, \" HAVING %s\", filter_dir_str);\n        /* special value for this optim */\n        sort_dirattr = SORT_ATTR_OPTIM;\n    } else {\n        query_end = query + sprintf(query, \"SELECT da.parent_id FROM (\");\n        query_end +=\n            append_dirattr_select(query_end, filter_dir_index, \"dirattr\");\n        query_end +=\n            sprintf(query_end, \" HAVING %s) da LEFT JOIN (\", filter_dir_str);\n        query_end +=\n            append_dirattr_select(query_end, sort_dirattr, \"dirattr_sort\");\n        query_end += sprintf(query_end, \") ds ON ds.parent_id = da.parent_id\");\n    }\n#endif\n\n    /* sort order */\n    if (do_sort(sort_table, sort_dirattr)) {\n        /* special cases: stripe info stands for pool_name,\n         * stripe items for ost_idx */\n        if (sort_table == T_STRIPE_INFO)\n            g_string_append(req, \" ORDER BY \" STRIPE_INFO_TABLE \".pool_name \");\n        else if (sort_table == T_STRIPE_ITEMS)\n            g_string_append(req, 
\" ORDER BY \" STRIPE_ITEMS_TABLE \".ostidx \");\n        else if (sort_table != T_NONE)\n            g_string_append_printf(req, \" ORDER BY %s.%s \",\n                                   table2name(sort_table),\n                                   field_name(p_sort_type->attr_index));\n        else if (sort_dirattr == SORT_ATTR_OPTIM)\n            g_string_append(req, \" ORDER BY dirattr \");\n        else if ((sort_dirattr & ATTR_INDEX_FLG_UNSPEC) == 0)\n            g_string_append(req, \" ORDER BY dirattr_sort \");\n\n        if (p_sort_type->order == SORT_ASC)\n            g_string_append(req, \"ASC\");\n        else\n            g_string_append(req, \"DESC\");\n    }\n\n    /* iterator opt */\n    if (p_opt && (p_opt->list_count_max > 0))\n        g_string_append_printf(req, \" LIMIT %u\", p_opt->list_count_max);\n\n    /* allocate a new iterator */\n    it = (lmgr_iterator_t *) MemAlloc(sizeof(lmgr_iterator_t));\n    it->p_mgr = p_mgr;\n    if (p_opt) {\n        it->opt = *p_opt;\n        it->opt_is_set = 1;\n    } else {\n        it->opt_is_set = 0;\n    }\n\n    /* execute request */\n    rc = db_exec_sql(&p_mgr->conn, req->str, &it->select_result);\n    if (rc)\n        goto free_it;\n\n    if (filter_dir != NULL)\n        g_string_free(filter_dir, TRUE);\n    if (from != NULL)\n        g_string_free(from, TRUE);\n    if (where != NULL)\n        g_string_free(where, TRUE);\n    if (req != NULL)\n        g_string_free(req, TRUE);\n\n    return it;\n\n free_it:\n    if (it != NULL)\n        MemFree(it);\n free_str:\n    if (filter_dir != NULL)\n        g_string_free(filter_dir, TRUE);\n    if (from != NULL)\n        g_string_free(from, TRUE);\n    if (where != NULL)\n        g_string_free(where, TRUE);\n    if (req != NULL)\n        g_string_free(req, TRUE);\n    return NULL;\n}\n\nint ListMgr_GetNext(struct lmgr_iterator_t *p_iter, entry_id_t *p_id,\n                    attr_set_t *p_info)\n{\n    int rc = 0;\n    /* can contain id+dirattr+dirattr_sort in 
case of directory listing */\n    char *idstr[3];\n    DEF_PK(pk);\n\n    bool entry_disappeared = false;\n\n    do {\n        entry_disappeared = false;\n\n        idstr[0] = idstr[1] = idstr[2] = NULL;\n        rc = db_next_record(&p_iter->p_mgr->conn, &p_iter->select_result, idstr,\n                            3);\n\n        if (rc)\n            return rc;\n        if (idstr[0] == NULL)\n            return DB_REQUEST_FAILED;\n\n        rc = parse_entry_id(p_iter->p_mgr, idstr[0], PTR_PK(pk), p_id);\n        if (rc == DB_NOT_EXISTS)\n            entry_disappeared = true;\n        else if (rc)\n            return rc;\n\n        rc = listmgr_get_by_pk(p_iter->p_mgr, pk, p_info);\n\n        if (rc == DB_NOT_EXISTS) {\n            if (p_iter->opt_is_set && p_iter->opt.allow_no_attr) {\n                /* clear missing fields */\n                p_info->attr_mask =\n                    attr_mask_and(&p_info->attr_mask, &dir_attr_set);\n                /* special field dircount */\n                if (dirattr_fields(p_info->attr_mask)) {\n                    if (listmgr_get_dirattrs(p_iter->p_mgr, pk, p_info)) {\n                        DisplayLog(LVL_MAJOR, LISTMGR_TAG,\n                                   \"listmgr_get_dirattr failed for \" DPK, pk);\n                        p_info->attr_mask =\n                            attr_mask_and_not(&p_info->attr_mask,\n                                              &dir_attr_set);\n                    }\n                }\n\n                /* compute generated fields if asked */\n                generate_fields(p_info);\n\n                rc = 0;\n            } else\n                entry_disappeared = true;\n        }\n    }\n    while (entry_disappeared);  /* goto next record if entry desappered */\n\n    return rc;\n\n}\n\nvoid ListMgr_CloseIterator(struct lmgr_iterator_t *p_iter)\n{\n    db_result_free(&p_iter->p_mgr->conn, &p_iter->select_result);\n    MemFree(p_iter);\n}\n"
  },
  {
    "path": "src/list_mgr/listmgr_ns.c",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2008, 2009 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n/**\n * Namespace related functions\n */\n#ifdef HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n\n#include \"list_mgr.h\"\n#include \"listmgr_common.h\"\n#include \"listmgr_internal.h\"\n#include \"listmgr_stripe.h\"\n#include \"database.h\"\n#include \"Memory.h\"\n#include \"rbh_logs.h\"\n#include \"rbh_misc.h\"\n#include <stdio.h>\n#include <stdlib.h>\n\n/**\n * print parent condition depending on parent list count:\n *      parent_id == xxx or parent_id IN ( xxx, yyy, zzz )\n * \\return db error code\n */\nstatic int append_parent_cond(lmgr_t *p_mgr, GString *str, const wagon_t *parent_list,\n                              unsigned int parent_count, const char *prefix)\n{\n    DEF_PK(pk);\n\n    if (unlikely(parent_count == 0))\n    {\n        DisplayLog( LVL_MAJOR, LISTMGR_TAG, \"Warning: parent list is empty in %s()\", __func__ );\n        return DB_INVALID_ARG;\n    }\n\n    if (likely(parent_count == 1)) /* the only expected for now */\n    {\n        entry_id2pk(&parent_list[0].id, PTR_PK(pk));\n        g_string_append_printf(str, \"%sparent_id=\"DPK, prefix ? prefix : \"\", pk);\n    }\n    else\n    {\n        int i;\n\n        g_string_append_printf(str, \"%sparent_id IN (\", prefix ? prefix : \"\");\n        for (i = 0; i < parent_count; i++)\n        {\n            entry_id2pk(&parent_list[i].id, PTR_PK(pk));\n            g_string_append_printf(str, \"%s\"DPK, (i == 0)? 
\"\":\",\", pk);\n        }\n        g_string_append(str,\")\");\n    }\n    return DB_SUCCESS;\n}\n\n/**\n * Get the list of children of a given parent (or list of parents).\n * \\param parent_list       [in]  list of parents to get the child of\n * \\param parent_count      [in]  number of ids in parent list\n * \\param attr_mask         [in]  required attributes for children\n * \\param child_id_list     [out] ptr to array of child ids\n * \\param child_attr_list   [out] ptr to array of child attrs\n * \\param child_count       [out] number of returned children\n */\nint ListMgr_GetChild(lmgr_t *p_mgr, const lmgr_filter_t *p_filter,\n                     const wagon_t *parent_list, unsigned int parent_count,\n                     attr_mask_t attr_mask,\n                     wagon_t **child_id_list, attr_set_t **child_attr_list,\n                     unsigned int *child_count)\n{\n    result_handle_t result;\n    char *path = NULL;\n    int path_len;\n    int                rc, i;\n    GString           *req = NULL;\n    GString           *fields = NULL;\n    GString           *from = NULL;\n    GString           *where = NULL;\n    struct field_count field_cnt = {0};\n    struct field_count filter_cnt = {0};\n    table_enum         query_tab = T_DNAMES;\n    bool               distinct = false;\n    int                retry_status;\n\n    /* XXX: querying children from several parent cannot work, since\n     * we need to get the paths of the children. Or we could do a\n     * lookup into parent_list to find the right one. In the meantime,\n     * try not to mess up the code. 
*/\n    if (unlikely(parent_count != 1))\n        RBH_BUG(\"cannot get children for several parent simultaneously\");\n\n    /* always request for name to build fullpath in wagon */\n    attr_mask_set_index(&attr_mask, ATTR_INDEX_name);\n\n    fields = g_string_new(NULL);\n\n    /* append fields for all tables */\n    if (!attr_mask_is_null(attr_mask))\n    {\n        /* retrieve source info for generated fields */\n        add_source_fields_for_gen(&attr_mask.std);\n\n        field_cnt.nb_names = attrmask2fieldlist(fields, attr_mask, T_DNAMES,\n                                                DNAMES_TABLE\".\", \"\",\n                                                AOF_LEADING_SEP);\n\n        field_cnt.nb_main = attrmask2fieldlist(fields, attr_mask, T_MAIN,\n                                               MAIN_TABLE\".\", \"\",\n                                               AOF_LEADING_SEP);\n\n        field_cnt.nb_annex = attrmask2fieldlist(fields, attr_mask, T_ANNEX,\n                                                ANNEX_TABLE\".\", \"\",\n                                                AOF_LEADING_SEP);\n    }\n    else\n    {\n        /* no returned attrs */\n        if (child_attr_list != NULL)\n            *child_attr_list = NULL;\n    }\n\n    where = g_string_new(NULL);\n\n    /* starts with condition on parent */\n    rc = append_parent_cond(p_mgr, where, parent_list, parent_count, DNAMES_TABLE\".\");\n    if (rc != DB_SUCCESS)\n        goto free_str;\n\n    /* check filters on other tables */\n    if (!no_filter(p_filter))\n    {\n        if (unlikely(dir_filter(p_mgr, NULL, p_filter, NULL, NULL) != FILTERDIR_NONE))\n        {\n            DisplayLog(LVL_MAJOR, LISTMGR_TAG, \"Directory filter not supported in %s()\", __func__);\n            rc = DB_NOT_SUPPORTED;\n            goto free_str;\n        }\n        else if (unlikely(func_filter(p_mgr, NULL, p_filter, T_MAIN, 0)))\n        {\n            DisplayLog(LVL_MAJOR, LISTMGR_TAG, \"Function filter not 
supported in %s()\", __func__);\n            rc = DB_NOT_SUPPORTED;\n            goto free_str;\n        }\n\n        /* There is always a filter on T_DNAMES, which is the parent condition.\n         * Look for optional filters.\n         */\n        filter_where(p_mgr, p_filter, &filter_cnt, where,\n                     AOF_LEADING_SEP | AOF_SKIP_NAME);\n        /** @FIXME process other filters on NAMES */\n    }\n\n    from = g_string_new(DNAMES_TABLE);\n\n    /* add filter_count + field_count to build the FROM clause.\n     * Preserve field count which is needed to interpret the result.\n     */\n    filter_cnt.nb_main += field_cnt.nb_main;\n    filter_cnt.nb_annex += field_cnt.nb_annex;\n    filter_cnt.nb_names += field_cnt.nb_names;\n    /* query tab is DNAMES, skip_name=true, is_first_tab=T_DNAMES */\n    filter_from(p_mgr, &filter_cnt, from, &query_tab, &distinct,\n                AOF_LEADING_SEP | AOF_SKIP_NAME);\n\n    /* request is always on the DNAMES table (which contains [parent_id, id] relationship */\n    if (distinct)\n        req = g_string_new(\"SELECT DISTINCT(\"DNAMES_TABLE\".id) as id\");\n    else\n        req = g_string_new(\"SELECT \"DNAMES_TABLE\".id as id\");\n\n    /* build the whole request */\n    g_string_append_printf(req, \"%s FROM %s WHERE %s\", fields->str, from->str, where->str);\n\nretry:\n    rc = db_exec_sql(&p_mgr->conn, req->str, &result);\n    retry_status = lmgr_delayed_retry(p_mgr, rc);\n    if (retry_status == 1)\n        goto retry;\n    else if (retry_status == 2) {\n        rc = DB_RBH_SIG_SHUTDOWN;\n        goto free_str;\n    } else if (rc)\n        goto free_str;\n\n    /* copy result to output structures */\n    *child_count = db_result_nb_records(&p_mgr->conn, &result);\n\n    /* allocate entry_id array */\n    *child_id_list = MemCalloc(*child_count, sizeof(wagon_t));\n    if (*child_id_list == NULL)\n    {\n        rc = DB_NO_MEMORY;\n        goto free_str;\n    }\n\n    if (child_attr_list)\n    {\n        
*child_attr_list = MemCalloc(*child_count, sizeof(attr_set_t));\n        if (*child_attr_list == NULL)\n        {\n            rc = DB_NO_MEMORY;\n            goto array_free;\n        }\n    }\n\n    /* Allocate a string long enough to contain the parent path and a\n     * child name. */\n    path_len = strlen(parent_list[0].fullname) + RBH_NAME_MAX + 2;\n    path = malloc(path_len);\n    if (!path) {\n        DisplayLog(LVL_MAJOR, LISTMGR_TAG, \"Can't alloc enough memory (%d bytes)\",\n                    path_len);\n        rc = DB_NO_MEMORY;\n        goto array_free;\n    }\n\n    for (i = 0; i < *child_count; i++)\n    {\n        char *res[128]; /* 128 fields per record is large enough */\n\n        rc = db_next_record(&p_mgr->conn, &result, res, sizeof(res)/sizeof(*res));\n        if (rc)\n            goto array_free;\n\n        /* copy id to array */\n        pk2entry_id(p_mgr, res[0], &((*child_id_list)[i].id));\n\n        /* copy attributes to array */\n        if (child_attr_list)\n        {\n            unsigned int shift = 1; /* first was NAMES.id */\n\n            (*child_attr_list)[i].attr_mask = attr_mask;\n\n            /* first id, then dnames attrs, then main attrs, then annex attrs */\n            if (field_cnt.nb_names > 0)\n            {\n                /* shift of 1 for id */\n                rc = result2attrset(T_DNAMES, res + shift, field_cnt.nb_names, &((*child_attr_list)[i]));\n                if (rc)\n                    goto array_free;\n                shift += field_cnt.nb_names;\n            }\n\n            if (field_cnt.nb_main > 0)\n            {\n                /* first id, then main attrs, then annex attrs */\n                /* shift of 1 for id */\n                rc = result2attrset(T_MAIN, res + shift, field_cnt.nb_main, &((*child_attr_list)[i]));\n                if (rc)\n                    goto array_free;\n                shift += field_cnt.nb_main;\n            }\n\n            if (field_cnt.nb_annex > 0)\n            
{\n                /* shift of main_attrs count */\n                rc = result2attrset(T_ANNEX, res + shift, field_cnt.nb_annex,\n                                     &((*child_attr_list)[i]));\n                if (rc)\n                    goto array_free;\n                shift += field_cnt.nb_annex;\n            }\n\n#ifdef _LUSTRE\n            if (stripe_fields(attr_mask))\n            {\n                if (get_stripe_info(p_mgr, res[0], &ATTR(&(*child_attr_list)[i], stripe_info),\n                                     &ATTR(&(*child_attr_list)[i], stripe_items)))\n                {\n                    ATTR_MASK_UNSET(&(*child_attr_list)[i], stripe_info);\n                    ATTR_MASK_UNSET(&(*child_attr_list)[i], stripe_items);\n                }\n            }\n#endif\n\n            generate_fields(&((*child_attr_list)[i]));\n\n            /* Note: path is properly sized already to not overflow. */\n            snprintf(path, path_len, \"%s/%s\", parent_list[0].fullname,\n                     (*child_attr_list)[i].attr_values.name);\n            (*child_id_list)[i].fullname = strdup(path);\n        }\n    }\n\n    if (path)\n        free(path);\n\n    db_result_free(&p_mgr->conn, &result);\n    g_string_free(req, TRUE);\n    g_string_free(fields, TRUE);\n    g_string_free(from, TRUE);\n    g_string_free(where, TRUE);\n    return 0;\n\narray_free:\n    if (path)\n        free(path);\n    if (child_attr_list && *child_attr_list)\n    {\n        MemFree(*child_attr_list);\n        *child_attr_list = NULL;\n    }\n    MemFree(*child_id_list);\n    *child_id_list = NULL;\nfree_str:\n    g_string_free(req, TRUE);\n    g_string_free(fields, TRUE);\n    g_string_free(from, TRUE);\n    g_string_free(where, TRUE);\n    return rc;\n}\n"
  },
  {
    "path": "src/list_mgr/listmgr_recov.c",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2008, 2009 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n/**\n * Special database function for disaster revocery (backup flavor)\n */\n\n#ifdef HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n\n#include \"database.h\"\n#include \"listmgr_common.h\"\n#include \"Memory.h\"\n#include \"rbh_logs.h\"\n#include \"rbh_misc.h\"\n#include <stdio.h>\n#include <stdlib.h>\n\n#if 0 /** @TODO reimplement in rbh v3 */\n\n#define MAX_DB_FIELDS 64\n\n/* table: id+... */\n/* TODO: generate this list automatically */\n/* /!\\ it must be in the same order as in MAIN, ANNEX, ... */\n#define BUILD_RECOV_LIST_FIELDS_NAMES THIS_PATH_FUNC\"(NAMES.parent_id, NAMES.name) as relpath,owner,gr_name,size,last_mod,type,mode,status,stripe_count,stripe_size,pool_name,backendpath,link\"\n#define BUILD_RECOV_LIST_FIELDS ONE_PATH_FUNC\"(\"MAIN_TABLE\".id) as relpath,owner,gr_name,size,last_mod,type,mode,status,stripe_count,stripe_size,pool_name,backendpath,link\"\n#define GET_RECOV_LIST_FIELDS \"relpath,owner,gr_name,size,last_mod,type,mode,status,stripe_count,stripe_size,pool_name,backendpath,link\"\n#define RECOV_FIELD_COUNT 13\n\n/**\n * \\retval DB_NOT_EXISTS if the recovery table does not exist\n */\nstatic int expected_recov_status(lmgr_t *p_mgr, lmgr_recov_stat_t *p_stats)\n{\n    int rc, i;\n    result_handle_t result;\n    char *status[5];\n\n    /* test if a RECOVERY table already exist, and contains entries */\n    rc = db_exec_sql_quiet(&p_mgr->conn,\n                           \"SELECT status,type,COUNT(*),(size=0) as empty,SUM(size) FROM \"\n                           RECOV_TABLE \" 
GROUP BY status,type,empty\", &result);\n    if (rc)\n        return rc;\n\n    /* @TODO manage dirs and symlinks differently */\n\n    p_stats->total = 0;\n    for (i = 0; i < RS_COUNT; i++) {\n        p_stats->status_count[i] = 0;\n        p_stats->status_size[i] = 0;\n    }\n\n    while ((rc = db_next_record(&p_mgr->conn, &result, status, 5))\n           != DB_END_OF_LIST) {\n        long long cnt;\n        uint64_t sz;\n        int isempty;\n\n        if (rc)\n            return rc;\n\n        cnt = str2bigint(status[2]);\n        if (cnt == -1LL)\n            return DB_INVALID_ARG;\n\n        isempty = str2int(status[3]);\n        if (isempty == -1)\n            return DB_INVALID_ARG;\n\n        sz = str2size(status[4]);\n        if (sz == -1LL)\n            return DB_INVALID_ARG;\n\n        p_stats->total += cnt;\n\n        if (status[0] != NULL) {\n            int st = str2int(status[0]);\n\n            /* archived entries: file and (optionally) symlinks  */\n            if (!strcasecmp(status[1], STR_TYPE_FILE)) {\n                if (isempty) {\n                    p_stats->status_count[RS_FILE_EMPTY] += cnt;\n                    p_stats->status_size[RS_FILE_EMPTY] += sz;\n                } else {\n                    switch (st) {\n                    case STATUS_NEW:\n                        p_stats->status_count[RS_NOBACKUP] += cnt;\n                        p_stats->status_size[RS_NOBACKUP] += sz;\n                        break;\n                    case STATUS_MODIFIED:\n                    case STATUS_ARCHIVE_RUNNING:\n                        p_stats->status_count[RS_FILE_DELTA] += cnt;\n                        p_stats->status_size[RS_FILE_DELTA] += sz;\n                        break;\n                    case STATUS_SYNCHRO:\n                    case STATUS_RELEASED:\n                        p_stats->status_count[RS_FILE_OK] += cnt;\n                        p_stats->status_size[RS_FILE_OK] += sz;\n                        break;\n                    
}\n                }\n            } else if (!strcasecmp(status[1], STR_TYPE_LINK)\n                       || !strcasecmp(status[1], STR_TYPE_DIR)) {\n                /* symlinks and dirs always recoverable from DB */\n                p_stats->status_count[RS_NON_FILE] += cnt;\n                p_stats->status_size[RS_NON_FILE] += sz;\n            } else {\n                /* non recoverable : special entry like fifo, blk, ... */\n                p_stats->status_count[RS_NOBACKUP] += cnt;\n                p_stats->status_size[RS_NOBACKUP] += sz;\n            }\n        }\n    }\n\n    db_result_free(&p_mgr->conn, &result);\n    return 0;\n}\n\n/**\n * \\retval DB_NOT_EXISTS if the recovery table does not exist\n */\nint ListMgr_RecovStatus(lmgr_t *p_mgr, lmgr_recov_stat_t *p_stats)\n{\n    int rc, i;\n    result_handle_t result;\n    char *status[3];\n\n    /* test if a RECOVERY table already exist, and contains entries */\n    rc = db_exec_sql_quiet(&p_mgr->conn,\n                           \"SELECT recov_status,COUNT(*),SUM(size) FROM \"\n                           RECOV_TABLE \" GROUP BY recov_status\", &result);\n    if (rc)\n        return rc;\n\n    /* table exists, fill status tab */\n    p_stats->total = 0;\n    for (i = 0; i < RS_COUNT; i++) {\n        p_stats->status_count[i] = 0;\n        p_stats->status_size[i] = 0;\n    }\n\n    while ((rc = db_next_record(&p_mgr->conn, &result, status, 3))\n           != DB_END_OF_LIST) {\n        long long cnt;\n        uint64_t sz;\n        if (rc)\n            return rc;\n\n        cnt = str2bigint(status[1]);\n        if (cnt == -1LL)\n            return DB_INVALID_ARG;\n\n        sz = str2size(status[2]);\n        if (sz == -1LL)\n            return DB_INVALID_ARG;\n\n        p_stats->total += cnt;\n\n        if (status[0] != NULL) {\n            int idx = str2int(status[0]);\n            if ((idx >= RS_COUNT) || (idx == -1))\n                return DB_REQUEST_FAILED;\n            p_stats->status_count[idx] = 
cnt;\n            p_stats->status_size[idx] = sz;\n        }\n    }\n\n    db_result_free(&p_mgr->conn, &result);\n    return 0;\n}\n\n/**\n *  Initialize a recovery process.\n *  \\param p_filter[in] (optional) filter partial filesystem recovery\n *  \\retval DB_SUCCESS the recovery process successfully started;\n *          the stats indicate the recovery states we can expect.\n *  \\retval DB_ALREADY_EXISTS a recovery process already started\n *          and was not properly completed. stats indicate the current status.\n *  \\retval error   another error occurred.\n */\nint ListMgr_RecovInit(lmgr_t *p_mgr, const lmgr_filter_t *p_filter,\n                      lmgr_recov_stat_t *p_stats)\n{\n    int rc;\n    db_value_t report_val;\n    unsigned int nb;\n    struct lmgr_report_t *report;\n    report_field_descr_t report_count =\n        { -1, REPORT_COUNT, SORT_NONE, false, 0, FV_NULL };\n\n/** @TODO use glib strings */\n    char query[4096];\n    char filter_str[4096] = \"\";\n    char *filter_curr = filter_str;\n#define has_filters (filter_curr != filter_str)\n    int distinct = 0;\n\n    rc = ListMgr_RecovStatus(p_mgr, p_stats);\n    if (rc == 0) {\n        if (p_stats->total != 0)    /* RECOVERY table exists and is not empty */\n            return DB_ALREADY_EXISTS;\n    } else if (rc != DB_NOT_EXISTS) /* other error */\n        return rc;\n\n    if (rc == 0) {\n        DisplayLog(LVL_EVENT, LISTMGR_TAG,\n                   \"Dropping any previous \" RECOV_TABLE \" table\");\n        /* start from clean state (no table, no indexes, no addl field) */\n        rc = db_drop_component(&p_mgr->conn, DBOBJ_TABLE, RECOV_TABLE);\n        if (rc)\n            return rc;\n    }\n\n    if (p_filter) {\n        /* dummy vars */\n        char filter_dir_str[512] = \"\";\n        unsigned int filter_dir_index = 0;\n\n        if (dir_filter(p_mgr, filter_dir_str, p_filter, &filter_dir_index, NULL)\n            != FILTERDIR_NONE) {\n            DisplayLog(LVL_CRIT, 
LISTMGR_TAG,\n                       \"Directory filter not supported for recovery\");\n            return DB_NOT_SUPPORTED;\n        }\n\n        if (filter2str(p_mgr, filter_curr, p_filter, T_MAIN, AOF_PREFIX) > 0)\n            filter_curr += strlen(filter_curr);\n\n        if (filter2str(p_mgr, filter_curr, p_filter, T_ANNEX,\n                       (has_filters ? AOF_LEADING_SEP : 0) | AOF_PREFIX) > 0)\n            filter_curr += strlen(filter_curr);\n\n        if (filter2str(p_mgr, filter_curr, p_filter, T_DNAMES,\n                       (has_filters ? AOF_LEADING_SEP : 0) | AOF_PREFIX) > 0) {\n            filter_curr += strlen(filter_curr);\n            distinct = 1;\n        }\n\n        if (filter2str(p_mgr, filter_curr, p_filter, T_STRIPE_INFO,\n                       (has_filters ? AOF_LEADING_SEP : 0) | AOF_PREFIX) > 0)\n            filter_curr += strlen(filter_curr);\n\n        if (filter2str(p_mgr, filter_curr, p_filter, T_STRIPE_ITEMS,\n                       (has_filters ? 
AOF_LEADING_SEP : 0) | AOF_PREFIX) > 0) {\n            filter_curr += strlen(filter_curr);\n            distinct = 1;\n        }\n    }\n\n    DisplayLog(LVL_EVENT, LISTMGR_TAG,\n               \"Populating \" RECOV_TABLE\n               \" table (this can take a few minutes)...\");\n\n    /* create the recovery table */\n    if (distinct) {\n        /* need to select only 1 instance of each object when joining with\n         * STRIPE_ITEMS or NAMES */\n        strcpy(query, \"CREATE TABLE \" RECOV_TABLE\n               \" SELECT DISTINCT(\" MAIN_TABLE \".id),\"\n               BUILD_RECOV_LIST_FIELDS_NAMES \" FROM \" MAIN_TABLE \" LEFT JOIN \"\n               ANNEX_TABLE \" ON \" \"(\" MAIN_TABLE \".id = \" ANNEX_TABLE \".id)\"\n               \" LEFT JOIN \" DNAMES_TABLE \" ON \" \"(\" MAIN_TABLE \".id = \"\n               DNAMES_TABLE \".id)\" \" LEFT JOIN \" STRIPE_INFO_TABLE \" ON \" \"(\"\n               MAIN_TABLE \".id = \" STRIPE_INFO_TABLE \".id)\" \" LEFT JOIN \"\n               STRIPE_ITEMS_TABLE \" ON \" \"(\" MAIN_TABLE \".id = \"\n               STRIPE_ITEMS_TABLE \".id)\");\n    } else {\n        strcpy(query, \"CREATE TABLE \" RECOV_TABLE\n               \" SELECT \" MAIN_TABLE \".id,\" BUILD_RECOV_LIST_FIELDS\n               \" FROM \" MAIN_TABLE \" LEFT JOIN \" ANNEX_TABLE \" ON \"\n               \"(\" MAIN_TABLE \".id = \" ANNEX_TABLE \".id)\"\n               \" LEFT JOIN \" STRIPE_INFO_TABLE \" ON \"\n               \"(\" MAIN_TABLE \".id = \" STRIPE_INFO_TABLE \".id)\");\n    }\n\n    if (has_filters) {\n        strcat(query, \" WHERE \");\n        strcat(query, filter_str);\n    }\n\n    /* the whole function is not atomic as we try to preserve the progress\n     * in case of DB engine failure. 
So we retry each step independently.\n     */\n retry1:\n    rc = db_exec_sql(&p_mgr->conn, query, NULL);\n    if (lmgr_delayed_retry(p_mgr, rc))\n        goto retry1;\n    else if (rc)\n        return rc;\n\n    DisplayLog(LVL_EVENT, LISTMGR_TAG,\n               \"Building indexes on \" RECOV_TABLE \" table...\");\n\n    /* create pk */\n retry2:\n    rc = db_exec_sql(&p_mgr->conn,\n                     \"ALTER TABLE \" RECOV_TABLE \" ADD PRIMARY KEY (id)\", NULL);\n    if (lmgr_delayed_retry(p_mgr, rc))\n        goto retry2;\n    else if (rc)\n        return rc;\n\n    /* add recov_status column */\n retry3:\n    rc = db_exec_sql(&p_mgr->conn,\n                     \"ALTER TABLE \" RECOV_TABLE\n                     \" ADD COLUMN recov_status INTEGER\", NULL);\n    if (lmgr_delayed_retry(p_mgr, rc))\n        goto retry3;\n    else if (rc)\n        return rc;\n\n    /* add index on status */\n retry4:\n    rc = db_exec_sql(&p_mgr->conn,\n                     \"CREATE INDEX recov_st_index ON \" RECOV_TABLE\n                     \"(recov_status)\", NULL);\n    if (lmgr_delayed_retry(p_mgr, rc))\n        goto retry4;\n    else if (rc)\n        return rc;\n\n    /* count entries of each status */\n    expected_recov_status(p_mgr, p_stats);\n\n    /* if there is a filter on OSTs, report distinct ids */\n    if (distinct)\n        report_count.report_type = REPORT_COUNT_DISTINCT;\n\n    /* double check entry count before deleting entries */\n    report = ListMgr_Report(p_mgr, &report_count, 1, NULL, p_filter, NULL);\n    if (report == NULL)\n        return DB_REQUEST_FAILED;\n\n    nb = 1;\n    rc = ListMgr_GetNextReportItem(report, &report_val, &nb, NULL);\n    ListMgr_CloseReport(report);\n\n    if (rc)\n        return rc;\n\n    if (nb == 0)\n        return DB_REQUEST_FAILED;\n\n    if (report_val.value_u.val_biguint != p_stats->total) {\n        DisplayLog(LVL_CRIT, LISTMGR_TAG,\n                   \"ERROR: recovery count (%llu) is different from entry count in main 
table (%lld): preserving entries\",\n                   p_stats->total, report_val.value_u.val_biguint);\n        return DB_REQUEST_FAILED;\n    }\n\n    /* clean previous DB content */\n\n    return ListMgr_MassRemove(p_mgr, p_filter, NULL);\n}\n\n/**\n * Clear the recovery table.\n * /!\\ all previously unrecovered entries will be lost\n */\nint ListMgr_RecovReset(lmgr_t *p_mgr)\n{\n    DisplayLog(LVL_EVENT, LISTMGR_TAG,\n               \"Dropping any previous \" RECOV_TABLE \" table\");\n    return db_drop_component(&p_mgr->conn, DBOBJ_TABLE, RECOV_TABLE);\n}\n\n/**\n *  Continue a recovery process (returns an iterator on entry list),\n *  possibly using the specified filter.\n *  \\param retry   (boolean) Also retry previously errorneous entries\n *  \\retval iterator must be release using ListMgr_CloseIterator()\n */\nstruct lmgr_iterator_t *ListMgr_RecovResume(lmgr_t *p_mgr, const char *dir_path,\n                                  int retry,\n                                  const lmgr_iter_opt_t *p_opt)\n{\n    char query[4096];\n    char *curr;\n    lmgr_iterator_t *it;\n    int rc;\n\n    strcpy(query,\n           \"SELECT id,recov_status,\" GET_RECOV_LIST_FIELDS \" FROM \" RECOV_TABLE\n           \" WHERE \");\n    curr = query + strlen(query);\n    if (retry)\n        curr += sprintf(curr, \"(recov_status IS NULL OR recov_status=%u)\",\n                        RS_ERROR);\n    else\n        curr += sprintf(curr, \"recov_status IS NULL\");\n\n    if (dir_path) {\n        char rel[RBH_PATH_MAX] = \"\";\n        /* Recovery table contains path from DB (<root_id>/<rel_path>),\n         * and dirpath is absolute. So convert it. */\n        if (fullpath_attr2db(dir_path, rel))\n            return NULL;\n#ifdef _MYSQL\n        /* MySQL is case insensitive.\n         * To force case-sensitivity, use BINARY keyword. 
*/\n        curr += sprintf(curr, \" AND relpath LIKE BINARY '%s/%%'\", rel);\n#else\n        curr += sprintf(curr, \" AND relpath LIKE '%s/%%'\", rel);\n#endif\n    }\n\n    /* allocate a new iterator */\n    it = (lmgr_iterator_t *) MemAlloc(sizeof(lmgr_iterator_t));\n    it->p_mgr = p_mgr;\n\n    /* execute request */\n    rc = db_exec_sql(&p_mgr->conn, query, &it->select_result);\n\n    if (rc) {\n        MemFree(it);\n        return NULL;\n    } else\n        return it;\n}\n\n/**\n *  List entries by recovery status.\n *  \\param st type of entries to be listed\n *  (done, failed, to be done, all)\n */\nstruct lmgr_iterator_t *ListMgr_RecovList(lmgr_t *p_mgr, recov_type_e st)\n{\n    char query[4096];\n    char *curr;\n    lmgr_iterator_t *it;\n    int rc;\n\n    strcpy(query,\n           \"SELECT id,recov_status,\" GET_RECOV_LIST_FIELDS \" FROM \"\n           RECOV_TABLE);\n    curr = query + strlen(query);\n    switch (st) {\n    case RT_ALL:\n        /* add no filter */\n        break;\n    case RT_TODO:\n        strcpy(curr, \" WHERE recov_status is NULL\");\n        break;\n    case RT_DONE:\n        sprintf(curr, \" WHERE recov_status in (%u, %u, %u, %u, %u)\",\n                RS_FILE_OK, RS_FILE_DELTA, RS_FILE_EMPTY, RS_NON_FILE,\n                RS_NOBACKUP);\n        break;\n    case RT_FAILED:\n        sprintf(curr, \" WHERE recov_status=%u\", RS_ERROR);\n        break;\n    }\n\n    /* allocate a new iterator */\n    it = (lmgr_iterator_t *) MemAlloc(sizeof(lmgr_iterator_t));\n    it->p_mgr = p_mgr;\n\n    /* execute request */\n    rc = db_exec_sql(&p_mgr->conn, query, &it->select_result);\n\n    if (rc) {\n        MemFree(it);\n        return NULL;\n    } else\n        return it;\n}\n\nint ListMgr_RecovGetNext(struct lmgr_iterator_t *p_iter,\n                         entry_id_t *p_id,\n                         attr_set_t *p_info, recov_status_t *last_status)\n{\n    int rc = 0;\n    char *result_tab[2 + RECOV_FIELD_COUNT];    /* +2 for id and 
recov_status */\n    DEF_PK(pk);\n    bool entry_disappeared = false;\n\n    do {\n        entry_disappeared = false;\n\n        rc = db_next_record(&p_iter->p_mgr->conn, &p_iter->select_result,\n                            result_tab, RECOV_FIELD_COUNT + 2);\n        if (rc)\n            return rc;\n        if (result_tab[0] == NULL)  /* no id? */\n            return DB_REQUEST_FAILED;\n\n        rc = parse_entry_id(p_iter->p_mgr, result_tab[0], PTR_PK(pk), p_id);\n        /* /!\\ If the entry disappeared from DB, we must go to next record */\n        if (rc == DB_NOT_EXISTS)\n            entry_disappeared = true;\n        else if (rc)\n            return rc;\n\n        if (result_tab[1] == NULL) {    /* no status */\n            if (last_status)\n                *last_status = -1;\n        } else if (last_status)\n            *last_status = str2int(result_tab[1]);\n    }\n    while (entry_disappeared);  /* goto next record if entry desappered */\n\n    return result2attrset(T_RECOV, result_tab + 2, RECOV_FIELD_COUNT, p_info);\n}\n\nint ListMgr_RecovComplete(lmgr_t *p_mgr, lmgr_recov_stat_t *p_stats)\n{\n    long long int diff;\n    int rc;\n\n    /* Check there is no more unprocessed entries */\n    rc = ListMgr_RecovStatus(p_mgr, p_stats);\n    if (rc)\n        return rc;\n\n    diff =\n        p_stats->total - p_stats->status_count[RS_FILE_OK] -\n        p_stats->status_count[RS_FILE_DELTA]\n        - p_stats->status_count[RS_FILE_EMPTY] -\n        p_stats->status_count[RS_NON_FILE]\n        - p_stats->status_count[RS_NOBACKUP] - p_stats->status_count[RS_ERROR];\n    if (diff > 0) {\n        DisplayLog(LVL_CRIT, LISTMGR_TAG,\n                   \"Cannot complete recovery: there are still %lld unprocessed files\",\n                   diff);\n        return DB_NOT_ALLOWED;\n    }\n    /* clear all */\n    return ListMgr_RecovReset(p_mgr);\n}\n\nint ListMgr_RecovSetState(lmgr_t *p_mgr, const entry_id_t *p_id,\n                          recov_status_t status)\n{\n  
  char query[4096];\n    DEF_PK(pk);\n\n    entry_id2pk(p_id, PTR_PK(pk));\n\n    sprintf(query, \"UPDATE \" RECOV_TABLE \" SET recov_status=%u WHERE id=\" DPK,\n            status, pk);\n\n    /* execute request */\n    return db_exec_sql(&p_mgr->conn, query, NULL);\n}\n\n#endif\n"
  },
  {
    "path": "src/list_mgr/listmgr_remove.c",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2008, 2009, 2010 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n#ifdef HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n\n#include \"list_mgr.h\"\n#include \"listmgr_common.h\"\n#include \"listmgr_stripe.h\"\n#include \"database.h\"\n#include \"rbh_logs.h\"\n#include \"rbh_misc.h\"\n#include \"Memory.h\"\n#include <stdio.h>\n#include <stdlib.h>\n#include <unistd.h>\n#include <pthread.h>\n\n\nstatic int clean_names(lmgr_t *p_mgr, const lmgr_filter_t *p_filter,\n                       unsigned int *nb_filter_names)\n{\n    int      rc = DB_SUCCESS;\n    GString *req;\n\n    req = g_string_new(\"DELETE FROM \"DNAMES_TABLE\" WHERE \");\n    *nb_filter_names = filter2str(p_mgr, req, p_filter, T_DNAMES, 0);\n\n    if (*nb_filter_names == 0)\n        goto out;\n\n    DisplayLog(LVL_DEBUG, LISTMGR_TAG, \"Direct deletion in \"DNAMES_TABLE\" table\");\n    rc = db_exec_sql(&p_mgr->conn, req->str, NULL);\nout:\n    g_string_free(req, TRUE);\n    return rc;\n}\n\n/** helper for listmgr_remove_single */\nstatic inline void append_table_join(GString *fields, GString *tables, GString *where,\n                                     const char *tname, const char *talias,\n                                     PK_ARG_T pk, const char **first_table)\n{\n    g_string_append_printf(fields, \"%s%s.*\", *first_table == NULL?\"\":\",\", talias);\n\n    if (GSTRING_EMPTY(tables))\n    {\n        *first_table = talias;\n        g_string_printf(tables, \"%s %s\", tname, talias);\n    }\n    else\n        g_string_append_printf(tables, \" LEFT JOIN %s %s ON %s.id = %s.id\",\n                    
           tname, talias, *first_table, talias);\n\n    if (GSTRING_EMPTY(where))\n        g_string_printf(where, \"%s.id=\"DPK, talias, pk);\n}\n\n/** removal of a single entry (no transaction management) */\nstatic int listmgr_remove_single(lmgr_t *p_mgr, PK_ARG_T pk, table_enum exclude_tab)\n{\n    const char *first_table = NULL;\n    GString *req, *tables, *where;\n    int rc;\n\n    req = g_string_new(\"DELETE \");\n    tables = g_string_new(NULL);\n    where = g_string_new(NULL);\n\n    if (exclude_tab != T_MAIN)\n        append_table_join(req, tables, where, MAIN_TABLE, \"M\", pk, &first_table);\n    if (exclude_tab != T_ANNEX)\n        append_table_join(req, tables, where, ANNEX_TABLE, \"A\", pk, &first_table);\n    if (exclude_tab != T_DNAMES)\n        append_table_join(req, tables, where, DNAMES_TABLE, \"N\", pk, &first_table);\n#ifdef _LUSTRE\n    if (exclude_tab != T_STRIPE_INFO)\n        append_table_join(req, tables, where, STRIPE_INFO_TABLE, \"I\", pk, &first_table);\n    if (exclude_tab != T_STRIPE_ITEMS)\n        append_table_join(req, tables, where, STRIPE_ITEMS_TABLE, \"S\", pk, &first_table);\n#endif\n\n    /* Doing this in a single request instead of 1 DELETE per table\n     * results in a huge speed up (246sec -> 59s).  
*/\n    /* - req already contains \"DELETE filed_list\"\n     * - using GSTRING_SAFE in case where or tables is still NULL */\n    g_string_append_printf(req, \" FROM %s WHERE %s\", GSTRING_SAFE(tables),\n                           GSTRING_SAFE(where));\n    rc = db_exec_sql(&p_mgr->conn, req->str, NULL);\n\n    g_string_free(req, TRUE);\n    g_string_free(tables, TRUE);\n    g_string_free(where, TRUE);\n\n    return rc;\n}\n\n\nint listmgr_remove_no_tx(lmgr_t *p_mgr, const entry_id_t *p_id,\n                         const attr_set_t *p_attr_set, bool last)\n{\n    GString *req;\n    int      rc = DB_SUCCESS;\n    DEF_PK(pk);\n    DEF_PK(ppk);\n\n    entry_id2pk(p_id, PTR_PK(pk));\n\n    req = g_string_new(NULL);\n\n    if (last)\n    {\n        /* remove from all tables except from NAMES (handled at the end of this function) */\n        rc = listmgr_remove_single(p_mgr, pk, T_DNAMES);\n        if (rc)\n            goto out;\n    }\n    else\n    {\n        /* XXX else update attributes according to attributes contents? */\n\n        /* Since we're removing one entry but not the file, decrement nlink. 
*/\n        g_string_printf(req, \"UPDATE \"MAIN_TABLE\" SET nlink=nlink-1 WHERE \"\n                        \"id=\"DPK\" AND nlink>0\", pk);\n        rc = db_exec_sql(&p_mgr->conn, req->str, NULL);\n        if (rc)\n            goto out;\n    }\n\n    /* Allow removing entry from MAIN_TABLE without removing it from NAMES */\n    if (p_attr_set && ATTR_MASK_TEST(p_attr_set, parent_id) && ATTR_MASK_TEST(p_attr_set, name))\n    {\n        char *escaped;\n        int   len;\n\n        entry_id2pk(&ATTR(p_attr_set, parent_id), PTR_PK(ppk));\n\n        /* according to MySQL documentation, escaped string can be up to 2*orig_len+1 */\n        len = 2 * strlen(ATTR(p_attr_set, name)) + 1;\n        escaped = MemAlloc(len);\n        if (escaped == NULL)\n        {\n            rc = DB_NO_MEMORY;\n            goto out;\n        }\n        db_escape_string(&p_mgr->conn, escaped, len, ATTR(p_attr_set, name));\n\n        g_string_printf(req, \"DELETE FROM \"DNAMES_TABLE\" WHERE pkn=\"HNAME_FMT\" AND id=\"DPK,\n                        ppk, escaped, pk);\n        MemFree(escaped);\n\n        rc = db_exec_sql(&p_mgr->conn, req->str, NULL);\n        if (rc)\n            return rc;\n    }\n    else if (!p_attr_set || !ATTR_MASK_TEST(p_attr_set, parent_id) || !ATTR_MASK_TEST(p_attr_set, name))\n    {\n        DisplayLog(LVL_MAJOR, LISTMGR_TAG, \"WARNING: missing attribute(s) to \"\n                    \"delete entry from \"DNAMES_TABLE\":%s%s%s\",\n                    !p_attr_set ? \" attrs=NULL\" : \"\",\n                    p_attr_set && !ATTR_MASK_TEST(p_attr_set, parent_id) ? \" parent\" : \"\",\n                    p_attr_set && !ATTR_MASK_TEST(p_attr_set, name) ? 
\" name\" : \"\");\n    }\n\nout:\n    g_string_free(req, TRUE);\n    return rc;\n}\n\n\nint ListMgr_Remove(lmgr_t *p_mgr, const entry_id_t *p_id,\n                   const attr_set_t *p_attr_set, bool last)\n{\n    int rc;\n    int retry_status;\n\n    /* We want the remove operation to be atomic */\nretry:\n    rc = lmgr_begin(p_mgr);\n    retry_status = lmgr_delayed_retry(p_mgr, rc);\n    if (retry_status == 1)\n        goto retry;\n    else if (retry_status == 2)\n        return DB_RBH_SIG_SHUTDOWN;\n    else if (rc)\n        return rc;\n\n    rc = listmgr_remove_no_tx(p_mgr, p_id, p_attr_set, last);\n    retry_status = lmgr_delayed_retry(p_mgr, rc);\n    if (retry_status == 1)\n        goto retry;\n    else if (rc || retry_status == 2)\n    {\n        lmgr_rollback(p_mgr);\n        return (retry_status == 2) ? DB_RBH_SIG_SHUTDOWN : rc;\n    }\n\n    rc = lmgr_commit(p_mgr);\n    retry_status = lmgr_delayed_retry(p_mgr, rc);\n    if (retry_status == 1)\n        goto retry;\n    if (!rc)\n         p_mgr->nbop[OPIDX_RM]++;\n    return rc;\n}\n\n/**\n * Insert all entries to soft rm table.\n * @TODO check how it behaves with millions/billion entries.\n */\nstatic int listmgr_softrm_all(lmgr_t *p_mgr, time_t rm_time)\n{\n    int rc;\n    GString *req, *annex_fields;\n    attr_mask_t mask_tmp = softrm_attr_set;\n\n    /* manage fullpath independently to make sure it is the first attribute\n     * as we will set it to \"one_path(id)\". 
*/\n    attr_mask_unset_index(&mask_tmp, ATTR_INDEX_fullpath);\n\n    req = g_string_new(\"INSERT IGNORE INTO \" SOFT_RM_TABLE \"(id,fullpath\");\n    attrmask2fieldlist(req, mask_tmp, T_SOFTRM, \"\", \"\", AOF_LEADING_SEP);\n\n    annex_fields = g_string_new(NULL);\n    attrmask2fieldlist(annex_fields, softrm_attr_set, T_ANNEX,\n                       ANNEX_TABLE\".\", \"\", AOF_LEADING_SEP);\n    rc = db_exec_sql(&p_mgr->conn, req->str, NULL);\n\n    g_string_free(annex_fields, TRUE);\n    g_string_free(req, TRUE);\n    return rc;\n}\n\n/**\n * delete all entries from all tables (except softrm\n */\nstatic int listmgr_rm_all(lmgr_t * p_mgr)\n{\n    int rc;\n\n    /* stripes are only managed for lustre filesystems */\n#ifdef _LUSTRE\n    rc = db_exec_sql(&p_mgr->conn, \"DELETE FROM \" STRIPE_ITEMS_TABLE, NULL);\n    if (rc)\n        return rc;\n\n    rc = db_exec_sql(&p_mgr->conn, \"DELETE FROM \" STRIPE_INFO_TABLE, NULL);\n    if (rc)\n        return rc;\n#endif\n\n    rc = db_exec_sql(&p_mgr->conn, \"DELETE FROM \" ANNEX_TABLE, NULL);\n    if (rc)\n        return rc;\n\n    rc = db_exec_sql(&p_mgr->conn, \"DELETE FROM \" MAIN_TABLE, NULL);\n    if (rc)\n        return rc;\n\n    rc = db_exec_sql(&p_mgr->conn, \"DELETE FROM \" DNAMES_TABLE, NULL);\n    if (rc)\n        return rc;\n\n    return DB_SUCCESS;\n}\n\n/** try to build the full path, if it is missing */\nstatic void set_fullpath(lmgr_t *p_mgr, attr_set_t *attrs)\n{\n    int rc;\n\n    /* if fullpath is not determined, try to build it */\n    if (!ATTR_MASK_TEST(attrs, fullpath)\n        && ATTR_MASK_TEST(attrs, parent_id)\n        && ATTR_MASK_TEST(attrs, name))\n    {\n        attr_set_t dir_attrs = ATTR_SET_INIT;\n\n        /* try to get parent path, so we can build <parent_path>/<name> */\n        ATTR_MASK_SET(&dir_attrs, fullpath);\n        if ((ListMgr_Get(p_mgr, &ATTR(attrs, parent_id), &dir_attrs) == DB_SUCCESS)\n            && ATTR_MASK_TEST(&dir_attrs, fullpath))\n        {\n            rc = 
snprintf(ATTR(attrs, fullpath), RBH_PATH_MAX, \"%s/%s\",\n                          ATTR(&dir_attrs, fullpath), ATTR(attrs, name));\n            if (rc > RBH_PATH_MAX) {\n                DisplayLog(LVL_EVENT, LISTMGR_TAG, \"path truncated: %s/%s\",\n                           ATTR(&dir_attrs, fullpath), ATTR(attrs, name));\n            }\n            ATTR_MASK_SET(attrs, fullpath);\n        }\n        else /* display fullpath as <parent_id>/<name>*/\n        {\n            char tmp[RBH_PATH_MAX];\n            DEF_PK(parent_pk);\n\n            /* prefix with parent id */\n            entry_id2pk(&ATTR(attrs, parent_id), PTR_PK(parent_pk));\n            snprintf(tmp, RBH_PATH_MAX, \"%s/%s\", parent_pk, ATTR(attrs, name));\n            fullpath_db2attr(tmp, ATTR(attrs, fullpath));\n            ATTR_MASK_SET(attrs, fullpath);\n        }\n    }\n}\n\n\n/**\n * Insert a single entry to soft rm table.\n * p_old_attrs must include rm_time\n */\nstatic int listmgr_softrm_single(lmgr_t *p_mgr, const entry_id_t *p_id,\n                                 attr_set_t *p_old_attrs)\n{\n    DEF_PK(pk);\n    int  rc;\n    char err_buf[1024];\n    GString *req;\n    attr_mask_t tmp_mask;\n\n    if (!ATTR_MASK_TEST(p_old_attrs, rm_time))\n    {\n        DisplayLog(LVL_CRIT, LISTMGR_TAG, \"Error: rm_time attr is supposed to be set in %s()\",\n                   __func__);\n    }\n\n    set_fullpath(p_mgr, p_old_attrs);\n\n    /* if fullpath is set, update it */\n    if (ATTR_MASK_TEST(p_old_attrs, fullpath))\n        req = g_string_new(\"INSERT INTO \" SOFT_RM_TABLE \"(id\");\n    else /* else, don't update */\n        req = g_string_new(\"INSERT IGNORE INTO \" SOFT_RM_TABLE \"(id\");\n\n    tmp_mask = attr_mask_and(&softrm_attr_set, &p_old_attrs->attr_mask);\n    attrmask2fieldlist(req, tmp_mask, T_SOFTRM, \"\", \"\", AOF_LEADING_SEP);\n    g_string_append(req, \") VALUES (\");\n\n    entry_id2pk(p_id, PTR_PK(pk));\n    g_string_append_printf(req, DPK, pk);\n\n    
attrset2valuelist(p_mgr, req, p_old_attrs, T_SOFTRM, AOF_LEADING_SEP);\n    g_string_append(req, \")\");\n\n    if (ATTR_MASK_TEST(p_old_attrs, fullpath))\n        g_string_append(req, \" ON DUPLICATE KEY UPDATE fullpath=VALUES(fullpath)\");\n\n    rc = db_exec_sql(&p_mgr->conn, req->str, NULL);\n    if (rc)\n        DisplayLog(LVL_CRIT, LISTMGR_TAG,\n                   \"DB query failed in %s line %d: query=\\\"%s\\\", code=%d: %s\",\n                   __FUNCTION__, __LINE__, req->str, rc,\n                   db_errmsg(&p_mgr->conn, err_buf, sizeof(err_buf)));\n    g_string_free(req, TRUE);\n    return rc;\n}\n\n/** create a temporary table with all entries to be deleted */\nstatic int create_tmp_table_rm(lmgr_t *p_mgr, const lmgr_filter_t *p_filter,\n                               const struct field_count *counts,\n                               const char *tmpname, bool soft_rm,\n                               table_enum query_tab, GString *from,\n                               GString *filter_names, GString *where,\n                               bool distinct)\n{\n    GString *req = NULL;\n    int      rc;\n\n    DisplayLog(LVL_DEBUG, LISTMGR_TAG, \"Creating temporary table\");\n\n    req = g_string_new(\"CREATE TEMPORARY TABLE\");\n    g_string_append_printf(req, \" %s AS \", tmpname);\n\n    if (soft_rm)\n    {\n        /* case A: full scan (no filter on fullpath), all non-updated entries are to be removed + all unseen names must be cleaned.\n         *          => filter_names + soft_rm\n         * case B: partial scan, we don't remove objects from ENTRIES (only from NAMES).\n         */\n        if (lmgr_filter_check_field(p_filter, ATTR_INDEX_fullpath)) /* partial scan with condition on NAMES + ENTRIES */\n        {\n                /* 1) get the entries to be removed (those with nb paths = removed paths)\n                 * 2) then we will clean the names\n                 */\n\n                /* To determine names whose we remove the last reference, 
we avoid huge JOIN like this one:\n                            (select id,COUNT(*) as rmd from NAMES LINKS WHERE <condition> GROUP BY id) rmname\n                            JOIN\n                            (select id, COUNT(*) as all FROM NAMES GROUP BY id) allname\n                            ON rmname.id=allname.id\n                            WHERE rmname.rmcount=paths.pathcount\n                 *\n                 * Instead we do:\n                 *  SELECT id,sum(this_path(parent_id,name) LIKE '%/foo' AND path_update < x) as rmcnt, count(*) as tot FROM NAMES GROUP BY id HAVING rmcnt=tot;\n                 *\n                 * BUT we must also get ENTRIES with no remaining name (no matching entry in NAMES)...\n                 * Finally we do:\n                 *  SELECT ENTRIES.id, this_path(parent_id, name) as fullpath, ...\n                    sum(path_update < 1377176998 and this_path(parent_id, name) like 'dir1/%') as rm, count(*) as tot\n                    FROM ENTRIES LEFT JOIN NAMES ON ENTRIES.id=NAMES.id GROUP BY ENTRIES.id HAVING s=tot or fullpath is NULL;\n                 */\n\n                g_string_append(req,\"SELECT \"MAIN_TABLE\".id\");\n                attrmask2fieldlist(req, softrm_attr_set, T_MAIN,\n                                   MAIN_TABLE\".\", \"\", AOF_LEADING_SEP);\n                attrmask2fieldlist(req, softrm_attr_set, T_ANNEX,\n                                   ANNEX_TABLE\".\", \"\", AOF_LEADING_SEP);\n                attrmask2fieldlist(req, softrm_attr_set, T_DNAMES,\n                                   DNAMES_TABLE\".\", \"\", AOF_LEADING_SEP);\n                g_string_append_printf(req, \",SUM(%s) AS rmcnt,COUNT(*) AS tot FROM \"MAIN_TABLE\n                                       \" LEFT JOIN \"DNAMES_TABLE\" ON \"MAIN_TABLE\".id=\"DNAMES_TABLE\".id\"\n                                       \" LEFT JOIN \"ANNEX_TABLE\" ON \"MAIN_TABLE\".id=\"ANNEX_TABLE\".id\"\n                                       \" WHERE %s GROUP 
BY \"MAIN_TABLE\".id\"\n                                       \" HAVING rmcnt=tot OR fullpath is NULL\",\n                                       GSTRING_SAFE(filter_names), GSTRING_SAFE(where));\n        }\n        else /* full scan */\n        {\n            g_string_append(req,\"SELECT \"MAIN_TABLE\".id,\" ONE_PATH_FUNC\"(\"MAIN_TABLE\".id) AS fullpath\");\n            attrmask2fieldlist(req, softrm_attr_set, T_MAIN,\n                               MAIN_TABLE\".\", \"\", AOF_LEADING_SEP);\n            attrmask2fieldlist(req, softrm_attr_set, T_ANNEX,\n                               ANNEX_TABLE\".\", \"\", AOF_LEADING_SEP);\n\n            g_string_append_printf(req, \" FROM \"MAIN_TABLE\n                                   \" LEFT JOIN \"ANNEX_TABLE\" ON \"MAIN_TABLE\".id=\"ANNEX_TABLE\".id\"\n                                   \" WHERE %s\", GSTRING_SAFE(where));\n        }\n    }\n    else\n    {\n        if (counts->nb_names > 0)\n        {\n            /* Only delete entries with no remaining name */\n            /* 2 requests were tested here, with a significant performance difference: use the fastest.\n             * (request time for 2.6M entries)\n             *  mysql> select * from ENTRIES WHERE id not in (select id from NAMES);\n             *  Empty set (7.06 sec)\n             *  mysql> select * from ENTRIES LEFT JOIN NAMES on ENTRIES.id=NAMES.id WHERE NAMES.id IS NULL;\n             *  Empty set (16.09 sec)\n             */\n            g_string_append_printf(where, \" AND %s.id NOT IN \"\n                                   \"(SELECT DISTINCT(id) FROM \"DNAMES_TABLE\")\",\n                                   table2name(query_tab));\n        }\n        if (distinct)\n            g_string_append_printf(req, \"SELECT DISTINCT(%s.id) FROM %s\"\n                                   \" WHERE %s\", table2name(query_tab),\n                                   GSTRING_SAFE(from), GSTRING_SAFE(where));\n        else\n            g_string_append_printf(req, \"SELECT 
%s.id FROM %s\" \" WHERE %s\",\n                                   table2name(query_tab), GSTRING_SAFE(from),\n                                   GSTRING_SAFE(where));\n    }\n\n    /* create the temporary table */\n    rc = db_exec_sql(&p_mgr->conn, req->str, NULL);\n    g_string_free(req, TRUE);\n    return rc;\n}\n\n#define MAX_SOFTRM_FIELDS 128 /* id + std attributes + status + sminfo */\n\n/** Perform removal or soft removal for all entries matching a filter\n * (no transaction management).\n */\nstatic int listmgr_mass_remove_no_tx(lmgr_t *p_mgr, const lmgr_filter_t *p_filter,\n                                     bool soft_rm, time_t rm_time, rm_cb_func_t cb_func,\n                                     unsigned int *rm_count)\n{\n    struct field_count counts = {0};\n    table_enum          query_tab;\n    bool                distinct = false;\n    bool                direct_del = false;\n    char                tmp_table_name[256];\n    char           *field_tab[MAX_SOFTRM_FIELDS];\n    result_handle_t result;\n    DEF_PK(pk);\n    GString        *filter_names = NULL;\n    GString        *from = NULL;\n    GString        *where = NULL;\n    GString        *req = NULL;\n    int             rc;\n    unsigned int    nb;\n    attr_mask_t mask_no_rmtime = softrm_attr_set;\n\n    attr_mask_unset_index(&mask_no_rmtime, ATTR_INDEX_rm_time);\n\n    if (no_filter(p_filter))\n    {\n        if (soft_rm)\n        {\n            rc = listmgr_softrm_all(p_mgr, rm_time);\n            if (rc)\n                return rc;\n        }\n\n        /* Remove all !!! 
*/\n        DisplayLog(LVL_EVENT, LISTMGR_TAG,\n                    \"No filter is specified: removing entries from all tables.\");\n        return listmgr_rm_all(p_mgr);\n    }\n\n    if (!soft_rm)\n    {\n        /* no soft_rm:\n         * 1) clean names if there is a filter on them.\n         * 2) clean related entries in other tables if there is no remaining path.\n         */\n        rc = clean_names(p_mgr, p_filter, &counts.nb_names);\n        if (rc)\n            return rc;\n    }\n    else\n    {\n        filter_names = g_string_new(NULL);\n        /* soft rm: just build the name filter for the later request */\n        counts.nb_names = filter2str(p_mgr, filter_names, p_filter, T_DNAMES, 0);\n    }\n\n    from = g_string_new(NULL);\n    where = g_string_new(NULL);\n\n    /* build the where clause */\n    if (filter_where(p_mgr, p_filter, &counts, where, AOF_SKIP_NAME) == 0)\n    {\n        if (unlikely(counts.nb_names == 0))\n        {\n            /* empty filter should have been detected earlier */\n            DisplayLog(LVL_CRIT, LISTMGR_TAG, \"How come empty filter has not been detected?!\");\n            rc = DB_REQUEST_FAILED;\n            goto free_str;\n        }\n\n        /* filter is only on names table */\n        if (soft_rm)\n            rc = clean_names(p_mgr, p_filter, &counts.nb_names);\n        /* else (no softrm): name cleaning has been done at the beginning of the function */\n        else\n            rc = 0;\n\n        goto free_str;\n    }\n\n    /* build the from clause */\n    filter_from(p_mgr, &counts, from, &query_tab, &distinct, AOF_SKIP_NAME);\n\n    /* sanity check */\n    if (unlikely(query_tab == T_NONE || GSTRING_EMPTY(from)))\n    {\n        DisplayLog(LVL_CRIT, LISTMGR_TAG,\n                   \"Error: unexpected case: filters= \"\n                   MAIN_TABLE \":%u, \" ANNEX_TABLE \":%u, \"DNAMES_TABLE\": %u,\"\n                   STRIPE_ITEMS_TABLE \":%u, \" STRIPE_INFO_TABLE \":%u\",\n                   
counts.nb_main, counts.nb_annex, counts.nb_names,\n                   counts.nb_stripe_items, counts.nb_stripe_info);\n        rc = DB_REQUEST_FAILED;\n        goto free_str;\n    }\n\n    snprintf(tmp_table_name, sizeof(tmp_table_name), \"TMP_TABLE_%u_%u\",\n        (unsigned int)getpid(), (unsigned int)pthread_self());\n\n    rc = create_tmp_table_rm(p_mgr, p_filter, &counts, tmp_table_name, soft_rm,\n                             query_tab, from, filter_names, where, distinct);\n    if (rc)\n        goto free_str;\n\n    req = g_string_new(NULL);\n\n    /* If the filter is only a single table, entries can be directly deleted in it. */\n    /* NOTE: can't delete directly in stripe_items with the select criteria. */\n    if ((nb_field_tables(&counts) == 1) && (query_tab != T_STRIPE_ITEMS))\n    {\n        DisplayLog(LVL_DEBUG, LISTMGR_TAG, \"Direct deletion in %s table\", table2name(query_tab));\n        direct_del = true;\n\n        /* if filter is on a single table, we can directly use filter in WHERE clause */\n        g_string_printf(req, \"DELETE FROM %s WHERE %s\", table2name(query_tab),\n                        GSTRING_SAFE(where));\n\n        rc = db_exec_sql(&p_mgr->conn, req->str, NULL);\n        if (rc)\n            goto free_str;\n    }\n\n    /* do the cleaning in other tables */\n    DisplayLog(LVL_DEBUG, LISTMGR_TAG, \"Starting indirect removal (soft_rm=%d)\",\n               soft_rm);\n\n    /* get all records from the tmp table to clean them from other tables */\n    nb = 1; /* at least 1 field for id */\n    if (soft_rm)\n    {\n        g_string_assign(req, \"SELECT id\");\n        nb += attrmask2fieldlist(req, mask_no_rmtime, T_TMP_SOFTRM,\n                                 \"\", \"\", AOF_LEADING_SEP);\n        g_string_append_printf(req, \" FROM %s\", tmp_table_name);\n        g_string_append_printf(req, \" ORDER BY CHAR_LENGTH(fullpath) DESC\");\n    }\n    else\n        g_string_printf(req, \"SELECT id FROM %s\", tmp_table_name);\n\n    rc = 
db_exec_sql(&p_mgr->conn, req->str, &result);\n    if (rc)\n        goto free_str;\n\n    DisplayLog(LVL_DEBUG, LISTMGR_TAG,\n               \"%d identifiers to be removed from all tables\",\n               db_result_nb_records(&p_mgr->conn, &result));\n\n    *rm_count = 0;\n\n    /* for each returned record from tmp table */\n    while ((rc = db_next_record(&p_mgr->conn, &result, field_tab, nb))\n                == DB_SUCCESS\n            && (field_tab[0] != NULL))\n    {\n        entry_id_t id;\n\n        rc = parse_entry_id(p_mgr, field_tab[0], PTR_PK(pk), &id);\n        if (rc)\n            goto free_res;\n\n        if (soft_rm)\n        {\n            attr_set_t old_attrs = ATTR_SET_INIT;\n\n            old_attrs.attr_mask = mask_no_rmtime;\n\n            /* parse result attributes + set rm_time for listmgr_softrm_single */\n            rc = result2attrset(T_TMP_SOFTRM, field_tab + 1,  nb - 1, &old_attrs);\n            if (rc)\n                goto free_res;\n\n            ATTR_MASK_SET(&old_attrs, rm_time);\n            ATTR(&old_attrs, rm_time) = rm_time;\n\n            /* insert into softrm table */\n            rc = listmgr_softrm_single(p_mgr, &id, &old_attrs);\n            ListMgr_FreeAttrs(&old_attrs);\n            if (rc)\n                goto free_res;\n        }\n\n        /* delete all entries related to this id (except from query table if we did\n         * a direct deletion in it) */\n        rc = listmgr_remove_single(p_mgr, pk, direct_del ? 
query_tab : T_NONE);\n        if (rc)\n            goto free_res;\n\n        if (cb_func)\n            cb_func(&id);\n\n        (*rm_count)++;\n    }\n\n    db_result_free(&p_mgr->conn, &result);\n\n    if ((rc != 0) && (rc != DB_END_OF_LIST))\n        goto free_str;\n\n    DisplayLog(LVL_DEBUG, LISTMGR_TAG,\n               \"End of indirect removal: %u identifiers removed\", *rm_count);\n\n    /* drop tmp table */\n    rc = db_drop_component(&p_mgr->conn, DBOBJ_TABLE, tmp_table_name);\n    if (rc)\n        return rc;\n\n    /* Condition on names only (partial scan cleans not found names). */\n    if (soft_rm && filter_names)\n        rc = clean_names(p_mgr, p_filter, &counts.nb_names);\n    /* else, it has been done at the beginning of the function */\n\n    goto free_str;\n\nfree_res:\n    db_result_free(&p_mgr->conn, &result);\n\nfree_str:\n    if (from != NULL)\n        g_string_free(from, TRUE);\n    if (filter_names != NULL)\n        g_string_free(filter_names, TRUE);\n    if (where != NULL)\n        g_string_free(where, TRUE);\n    if (req != NULL)\n        g_string_free(req, TRUE);\n\n    return rc;\n}\n\n\n/** handles a mass_remove transaction */\nstatic int listmgr_mass_remove(lmgr_t *p_mgr, const lmgr_filter_t *p_filter,\n                               bool soft_rm, time_t rm_time, rm_cb_func_t cb_func)\n{\n    int             rc;\n    unsigned int    rmcount = 0;\n\n    /* We want the remove operation to be atomic */\nretry:\n    rc = lmgr_begin(p_mgr);\n    if (lmgr_delayed_retry(p_mgr, rc))\n        goto retry;\n    else if (rc)\n        return rc;\n\n    rc = listmgr_mass_remove_no_tx(p_mgr, p_filter, soft_rm, rm_time, cb_func, &rmcount);\n\n    if (lmgr_delayed_retry(p_mgr, rc))\n        goto retry;\n    else if (rc)\n        goto rollback;\n\n    rc = lmgr_commit(p_mgr);\n    if (lmgr_delayed_retry(p_mgr, rc))\n        goto retry;\n\n    if (rc == DB_SUCCESS)\n        p_mgr->nbop[OPIDX_RM] += rmcount;\n\n    return rc;\n\nrollback:\n    
lmgr_rollback(p_mgr);\n    return rc;\n}\n\nint ListMgr_MassRemove(lmgr_t * p_mgr, const lmgr_filter_t * p_filter,\n                        rm_cb_func_t cb_func)\n{\n    /* not a soft rm */\n    return listmgr_mass_remove(p_mgr, p_filter, false, 0, cb_func);\n}\n\nint ListMgr_MassSoftRemove(lmgr_t *p_mgr, const lmgr_filter_t *p_filter,\n                           time_t rm_time, rm_cb_func_t cb_func)\n{\n    /* soft rm */\n    return listmgr_mass_remove(p_mgr, p_filter, true, rm_time, cb_func);\n}\n\n/**\n * Remove an entry from the main database, and insert it to secondary table\n * for delayed removal.\n * \\param real_remove_time time when the entry was removed.\n * \\param p_old_attrs contains old attributes, parent+name and backendpath must be set.\n *        rm_time must be set too.\n *        If NULL, it is retrieved from the database.\n */\nint            ListMgr_SoftRemove(lmgr_t *p_mgr, const entry_id_t *p_id,\n                                  attr_set_t *p_old_attrs)\n{\n    int        rc;\n    attr_set_t all_attrs = ATTR_SET_INIT;\n\n    /* get missing attributes for SOFT_RM table from DB */\n    all_attrs.attr_mask = softrm_attr_set;\n    /* ...except rm_time */\n    attr_mask_unset_index(&all_attrs.attr_mask, ATTR_INDEX_rm_time);\n    /* ...except attributes already in p_old_attrs */\n    all_attrs.attr_mask = attr_mask_and_not(&all_attrs.attr_mask, &p_old_attrs->attr_mask);\n\n    /* these are needed for remove function */\n    if (!ATTR_MASK_TEST(&all_attrs, parent_id)\n        || !ATTR_MASK_TEST(&all_attrs, name))\n    {\n        ATTR_MASK_SET(&all_attrs, parent_id);\n        ATTR_MASK_SET(&all_attrs, name);\n    }\n\n    if (!attr_mask_is_null(all_attrs.attr_mask)\n        && (ListMgr_Get(p_mgr, p_id, &all_attrs) != DB_SUCCESS))\n        ATTR_MASK_INIT(&all_attrs);\n\n    if (p_old_attrs != NULL)\n        ListMgr_MergeAttrSets(&all_attrs, p_old_attrs, true);\n\n    if (!ATTR_MASK_TEST(&all_attrs, rm_time))\n    {\n        
ATTR_MASK_SET(&all_attrs, rm_time);\n        ATTR(&all_attrs, rm_time) = time(NULL);\n    }\n\n    /* We want the removal sequence to be atomic */\nretry:\n    rc = lmgr_begin(p_mgr);\n    if (lmgr_delayed_retry(p_mgr, rc))\n        goto retry;\n    else if (rc)\n        goto out;\n\n    rc = listmgr_softrm_single(p_mgr, p_id, &all_attrs);\n\n    if (lmgr_delayed_retry(p_mgr, rc))\n        goto retry;\n    else if (rc)\n    {\n        lmgr_rollback(p_mgr);\n        goto out;\n    }\n\n    /* remove the entry from main tables, if it exists */\n    rc = listmgr_remove_no_tx(p_mgr, p_id, &all_attrs, true);\n    if (lmgr_delayed_retry(p_mgr, rc))\n        goto retry;\n    else if (rc != DB_SUCCESS && rc != DB_NOT_EXISTS)\n    {\n        lmgr_rollback(p_mgr);\n        goto out;\n    }\n\n    /* commit */\n    rc = lmgr_commit(p_mgr);\n    if (lmgr_delayed_retry(p_mgr, rc))\n        goto retry;\n    if (!rc)\n         p_mgr->nbop[OPIDX_RM]++;\n\nout:\n    ListMgr_FreeAttrs(&all_attrs);\n    return rc;\n}\n\ntypedef struct lmgr_rm_list_t\n{\n    lmgr_t        *p_mgr;\n    result_handle_t select_result;\n    unsigned int  result_len;\n} lmgr_rm_list_t;\n\n/* XXX selecting 'expired' entries is done using a rm_time criteria in p_filter */\nstruct lmgr_rm_list_t *ListMgr_RmList(lmgr_t *p_mgr, lmgr_filter_t *p_filter,\n                                      const lmgr_sort_type_t *p_sort_type)\n{\n    int             rc, nb;\n    lmgr_rm_list_t *p_list = MemAlloc(sizeof(lmgr_rm_list_t));\n    GString        *req;\n\n    if (!p_list)\n        return NULL;\n\n    req = g_string_new(\"SELECT id\");\n    nb = attrmask2fieldlist(req, softrm_attr_set, T_SOFTRM,\n                            \"\", \"\", AOF_LEADING_SEP);\n    g_string_append(req, \" FROM \"SOFT_RM_TABLE);\n\n    if (p_filter)\n    {\n        if (p_filter->filter_type != FILTER_SIMPLE)\n        {\n            DisplayLog(LVL_CRIT, LISTMGR_TAG,\n                       \"Unsupported filter in %s(): simple filter 
expected\",\n                       __FUNCTION__);\n            goto free_err;\n        }\n        /* are there unsuported fields in this filter? */\n        if (lmgr_check_filter_fields(p_filter, softrm_attr_set, &rc))\n        {\n            DisplayLog(LVL_CRIT, LISTMGR_TAG, \"Unsupported field in filter: %s (in %s())\",\n                       rc == -1 ? \"supported filter type\" :\n                       field_name(p_filter->filter_simple.filter_index[rc]), __func__);\n            goto free_err;\n        }\n        g_string_append(req, \" WHERE \");\n        if (filter2str(p_mgr, req, p_filter, T_SOFTRM, 0) <= 0)\n        {\n            DisplayLog(LVL_CRIT, LISTMGR_TAG, \"Error converting filter to SQL request\");\n            goto free_err;\n        }\n    }\n\n    /*\n     * Is there a sort order ? add default order only if not specified,\n     * do not add ORDER BY if lru_sort_attr is none\n     */\n\n    if (p_sort_type == NULL)\n    {\n        /* default is rm_time */\n        g_string_append(req, \" ORDER BY rm_time ASC\");\n    }\n    else if (p_sort_type->order == SORT_NONE) {\n        // do nothing\n        // required to avoid assert on next else if, as NONE is not a DB field\n    }\n    else if (!is_softrm_field(p_sort_type->attr_index))\n    {\n        DisplayLog(LVL_CRIT, LISTMGR_TAG, \"ERROR: attribute '%s' is not part of %s table\",\n                   field_name(p_sort_type->attr_index), SOFT_RM_TABLE);\n        goto free_err;\n    }\n    else\n    {\n        g_string_append_printf(req, \" ORDER BY %s %s\",\n                               field_name(p_sort_type->attr_index),\n                               p_sort_type->order == SORT_ASC ? 
\"ASC\" : \"DESC\");\n    }\n\n    p_list->p_mgr = p_mgr;\n    p_list->result_len = nb + 1; /* id + attrs */\n\n    /* execute request (retry on connexion error or deadlock) */\n    do {\n        rc = db_exec_sql(&p_mgr->conn, req->str, &p_list->select_result);\n    } while (lmgr_delayed_retry(p_mgr, rc));\n\n    if (rc)\n    {\n        char msg_buff[1024];\n\n        DisplayLog(LVL_CRIT, LISTMGR_TAG,\n                   \"DB query failed in %s line %d: query=\\\"%s\\\",code=%d, %s\",\n                   __FUNCTION__, __LINE__, req->str, rc,\n                   db_errmsg(&p_mgr->conn, msg_buff, sizeof(msg_buff)));\n        goto free_err;\n    }\n\n    /* success */\n    g_string_free(req, TRUE);\n    return p_list;\n\nfree_err: /* error */\n    g_string_free(req, TRUE);\n    MemFree(p_list);\n    return NULL;\n}\n\nint            ListMgr_GetNextRmEntry(struct lmgr_rm_list_t *p_iter,\n                                      entry_id_t *p_id, attr_set_t *p_attrs)\n{\n    int            rc = 0;\n    int i;\n    char *record[MAX_SOFTRM_FIELDS];\n\n    if (p_iter->result_len > MAX_SOFTRM_FIELDS)\n        RBH_BUG(\"unexpected result length > MAX_SOFTRM_FIELDS\");\n\n    if (!p_id || !p_attrs)\n        return DB_INVALID_ARG;\n\n    for (i=0; i < MAX_SOFTRM_FIELDS; i++)\n        record[i] = NULL;\n\n    rc = db_next_record(&p_iter->p_mgr->conn, &p_iter->select_result, record,\n                        p_iter->result_len);\n    /* what to do on connexion error? 
*/\n\n    if (rc)\n        return rc;\n    if (record[0] == NULL)\n        return DB_REQUEST_FAILED;\n\n    if (sscanf(record[0], SFID, RFID(p_id)) <= 0)\n        return DB_REQUEST_FAILED;\n\n    /* force fields of SOFTRM table */\n    p_attrs->attr_mask = softrm_attr_set;\n    rc = result2attrset(T_SOFTRM, record + 1, p_iter->result_len - 1, p_attrs);\n\n    return rc;\n}\n\n\nvoid           ListMgr_CloseRmList(struct lmgr_rm_list_t *p_iter)\n{\n    db_result_free(&p_iter->p_mgr->conn, &p_iter->select_result);\n    MemFree(p_iter);\n}\n\n/**\n * Get entry to be removed from its fid.\n */\nint     ListMgr_GetRmEntry(lmgr_t * p_mgr,\n                           const entry_id_t *p_id,\n                           attr_set_t *p_attrs)\n{\n    result_handle_t result;\n    int             rc, i, nb;\n    GString        *req;\n    char           *record[MAX_SOFTRM_FIELDS];\n\n    if (!p_id || !p_attrs)\n        return DB_INVALID_ARG;\n\n    /* only keep fields in SOFTRM table */\n    p_attrs->attr_mask = attr_mask_and(&p_attrs->attr_mask, &softrm_attr_set);\n\n    req = g_string_new(\"SELECT \");\n    nb = attrmask2fieldlist(req, p_attrs->attr_mask, T_SOFTRM, \"\", \"\", 0);\n    if (nb == 0)\n        g_string_append(req, \"id\");\n\n    g_string_append_printf(req, \" FROM \"SOFT_RM_TABLE\" WHERE id='\"DFID_NOBRACE\"'\",\n                           PFID(p_id));\n\n    /* execute request (retry on connexion error or timeout) */\n    do {\n        rc = db_exec_sql(&p_mgr->conn, req->str, &result);\n    } while (lmgr_delayed_retry(p_mgr, rc));\n\n    if (rc)\n    {\n        char msg_buff[1024];\n\n        DisplayLog(LVL_CRIT, LISTMGR_TAG,\n                   \"DB query failed in %s line %d: query=\\\"%s\\\",code=%d, %s\",\n                   __FUNCTION__, __LINE__, req->str, rc,\n                   db_errmsg(&p_mgr->conn, msg_buff, sizeof(msg_buff)));\n        goto free_str;\n    }\n\n    for (i=0; i < MAX_SOFTRM_FIELDS; i++)\n        record[i] = NULL;\n\n    rc = 
db_next_record(&p_mgr->conn, &result, record, nb);\n    if (rc == DB_END_OF_LIST)\n    {\n        rc = DB_NOT_EXISTS;\n        goto free_res;\n    } else if (rc)\n        goto free_str;\n\n    if (record[0] == NULL)\n        return DB_REQUEST_FAILED;\n\n    rc = result2attrset(T_SOFTRM, record, nb, p_attrs);\n\nfree_res:\n    db_result_free(&p_mgr->conn, &result);\nfree_str:\n    g_string_free(req, TRUE);\n    return rc;\n}\n\nint ListMgr_SoftRemove_Discard(lmgr_t * p_mgr, const entry_id_t * p_id)\n{\n    int      rc;\n    GString *req;\n\n    req = g_string_new(\"DELETE FROM \"SOFT_RM_TABLE\" WHERE id=\");\n    g_string_append_printf(req, \"'\"DFID_NOBRACE\"'\", PFID(p_id));\n\n    do {\n        rc = db_exec_sql(&p_mgr->conn, req->str, NULL);\n    } while(lmgr_delayed_retry(p_mgr, rc));\n\n    g_string_free(req, TRUE);\n    return rc;\n}\n"
  },
  {
    "path": "src/list_mgr/listmgr_reports.c",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2009 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n#ifdef HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n\n#include \"list_mgr.h\"\n#include \"listmgr_common.h\"\n#include \"listmgr_internal.h\"\n#include \"database.h\"\n#include \"Memory.h\"\n#include \"rbh_logs.h\"\n#include \"rbh_misc.h\"\n#include <stdio.h>\n#include <stdlib.h>\n#include <inttypes.h>\n\nstruct result {\n    db_type_e type;\n    int flags;\n};\n\ntypedef struct lmgr_report_t {\n    lmgr_t *p_mgr;\n    result_handle_t select_result;\n\n    /* expected result content */\n    struct result *result;\n    unsigned int result_count;  /* report + profile */\n    unsigned int profile_count; /* profile only */\n    unsigned int ratio_count;   /* nbr of ratio field */\n    unsigned int profile_attr;  /* profile attr (if profile_count > 0) */\n\n    char **str_tab;\n} lmgr_report_t;\n\n/* Return field string */\nstatic inline const char *field_str(unsigned int index)\n{\n    /* count(id) for special COUNT attribute */\n    if (index == ATTR_INDEX_FLG_COUNT)\n        return \"id\";\n    else\n        return field_name(index);\n}\n\n/* Return field flag */\nstatic inline int field_flag(unsigned int index)\n{\n    if (index == ATTR_INDEX_FLG_COUNT)\n        return 0;\n    else if (index < ATTR_COUNT)\n        return field_infos[index].flags;\n    else    /* status, sm_info */\n        return 0;\n}\n\nstatic inline void append_filter_cond(GString *str, lmgr_t *p_mgr,\n                                      const char *attrname,\n                                      const report_field_descr_t *desc,\n       
                               const struct result *res)\n{\n    if (!GSTRING_EMPTY(str))\n        g_string_append(str, \" AND \");\n\n    g_string_append_printf(str, \"(%s %s \", attrname,\n                           compar2str(desc->filter_compar));\n\n    /** TODO support list filters (IN NOT and IN) */\n    printdbtype(&p_mgr->conn, str, res->type, &desc->filter_value.value);\n\n    g_string_append(str, \")\");\n}\n\n/** append a filter on a given report field */\nstatic void listmgr_fieldfilter(const lmgr_report_t *p_report, lmgr_t *p_mgr,\n                                const report_field_descr_t *report_desc_array,\n                                const char *attrname,\n                                GString *having, GString *where, int i)\n{\n    /* is this field filtered ? */\n    if (report_desc_array[i].filter) {\n        if (report_desc_array[i].report_type != REPORT_GROUP_BY) {\n            /* sum, min, max, etc. are addressed by attr#n */\n            append_filter_cond(having, p_mgr, attrname, &report_desc_array[i],\n                               &p_report->result[i]);\n        } else {\n            /* this is a real db field, can be filtered in a 'where' clause */\n            /* sum, min, max, etc. 
are addressed by attr#n */\n            append_filter_cond(where, p_mgr,\n                               field_str(report_desc_array[i].attr_index),\n                               &report_desc_array[i], &p_report->result[i]);\n        }\n    }\n}\n\nstatic inline void coma_if_needed(GString *str)\n{\n    if (!GSTRING_EMPTY(str))\n        g_string_append(str, \",\");\n}\n\nstatic inline void append_sort_order(GString *order_by, const char *name,\n                                     sort_order_t sort)\n{\n    if (sort == SORT_ASC) {\n        coma_if_needed(order_by);\n        g_string_append_printf(order_by, \"%s ASC\", name);\n    } else if (sort == SORT_DESC) {\n        coma_if_needed(order_by);\n        g_string_append_printf(order_by, \"%s DESC\", name);\n    }\n}\n\nstatic void listmgr_optimizedstat(lmgr_report_t *p_report, lmgr_t *p_mgr,\n                                  unsigned int report_descr_count,\n                                  const report_field_descr_t *\n                                  report_desc_array,\n                                  const profile_field_descr_t *profile_descr,\n                                  GString *fields, GString *group_by,\n                                  GString *order_by, GString *having,\n                                  GString *where)\n{\n    int i;\n\n    /* sorting by ratio first */\n    if (profile_descr && profile_descr->range_ratio_len > 0) {\n        if (profile_descr->attr_index == ATTR_INDEX_size)\n            append_sort_order(order_by, \"sizeratio\",\n                              profile_descr->range_ratio_sort);\n    }\n\n    for (i = 0; i < report_descr_count; i++) {\n        char attrname[128];\n\n        snprintf(attrname, sizeof(attrname), \"attr%u\", i);\n\n        if ((report_desc_array[i].report_type == REPORT_COUNT)\n            || is_acct_pk(report_desc_array[i].attr_index)\n            || is_acct_field(report_desc_array[i].attr_index)) {\n            switch 
(report_desc_array[i].report_type) {\n            case REPORT_MIN:\n            case REPORT_MAX:\n                coma_if_needed(fields);\n                g_string_append_printf(fields, \"NULL as %s\", attrname);\n                p_report->result[i].type = DB_TEXT;\n                break;\n\n            case REPORT_AVG:\n                coma_if_needed(fields);\n                g_string_append_printf(fields,\n                                       \"ROUND(SUM(%s)/SUM(\" ACCT_FIELD_COUNT\n                                       \")) as %s\",\n                                       field_str(report_desc_array[i].\n                                                 attr_index), attrname);\n                p_report->result[i].type =\n                    field_type(report_desc_array[i].attr_index);\n                break;\n\n            case REPORT_SUM:\n                coma_if_needed(fields);\n                g_string_append_printf(fields, \"SUM(%s) as %s\",\n                                       field_str(report_desc_array[i].\n                                                 attr_index), attrname);\n                p_report->result[i].type =\n                    field_type(report_desc_array[i].attr_index);\n                break;\n\n            case REPORT_COUNT:\n                coma_if_needed(fields);\n                g_string_append_printf(fields,\n                                       \"SUM(\" ACCT_FIELD_COUNT \") as %s\",\n                                       attrname);\n                p_report->result[i].type = DB_BIGUINT;\n                break;\n\n            case REPORT_COUNT_DISTINCT:\n                coma_if_needed(fields);\n                g_string_append_printf(fields, \"COUNT(DISTINCT(%s)) as %s\",\n                                       field_str(report_desc_array[i].\n                                                 attr_index), attrname);\n                p_report->result[i].type = DB_BIGUINT;\n                break;\n\n            case 
REPORT_GROUP_BY:\n                coma_if_needed(fields);\n                g_string_append_printf(fields, \"%s as %s\",\n                                       field_str(report_desc_array[i].\n                                                 attr_index), attrname);\n                coma_if_needed(group_by);\n                g_string_append(group_by, attrname);\n                p_report->result[i].type =\n                    field_type(report_desc_array[i].attr_index);\n                break;\n            }\n\n            /* is this field sorted ? */\n            append_sort_order(order_by, attrname,\n                              report_desc_array[i].sort_flag);\n        } else {\n            coma_if_needed(fields);\n            g_string_append_printf(fields, \"NULL as %s\", attrname);\n            p_report->result[i].type = DB_TEXT;\n        }\n\n        listmgr_fieldfilter(p_report, p_mgr, report_desc_array, attrname,\n                            having, where, i);\n\n        p_report->result[i].flags = field_flag(report_desc_array[i].attr_index);\n    }\n\n    if (profile_descr) {\n        /* XXX only size profile in managed for now */\n        if (profile_descr->attr_index == ATTR_INDEX_size) {\n            for (i = 0; i < SZ_PROFIL_COUNT; i++) {\n                coma_if_needed(fields);\n                g_string_append_printf(fields, \"SUM(%s)\", sz_field[i]);\n                /* count */\n                p_report->result[i + report_descr_count].type = DB_BIGUINT;\n            }\n\n            if (profile_descr->range_ratio_len > 0) {\n                /* add ratio field and sort it */\n                coma_if_needed(fields);\n                /* case i == 0 */\n                g_string_append_printf(fields, \"SUM(%s\",\n                                       sz_field[profile_descr->\n                                                range_ratio_start]);\n                for (i = 1; i < profile_descr->range_ratio_len; i++) {\n                    
g_string_append_printf(fields, \"+%s\",\n                                           sz_field[profile_descr->\n                                                    range_ratio_start + i]);\n                }\n                g_string_append(fields,\n                                \")/SUM(\" ACCT_FIELD_COUNT \") as sizeratio\");\n            }\n        }\n    }\n}\n\n/** check if all fields and filters of a requested report and are in\n * ACCT_TABLE */\nstatic bool full_acct(const report_field_descr_t *report_desc_array,\n                      unsigned int report_descr_count,\n                      const lmgr_filter_t *p_filter)\n{\n    int i;\n\n    for (i = 0; i < report_descr_count; i++) {\n        if ((report_desc_array[i].report_type != REPORT_COUNT) &&\n            report_desc_array[i].attr_index != ATTR_INDEX_dircount &&\n            !is_acct_field(report_desc_array[i].attr_index) &&\n            !is_acct_pk(report_desc_array[i].attr_index))\n            return false;\n    }\n\n    if (!no_filter(p_filter)) {\n        if (p_filter->filter_type == FILTER_SIMPLE) {\n            for (i = 0; i < p_filter->filter_simple.filter_count; i++) {\n                if (!is_acct_pk(p_filter->filter_simple.filter_index[i]) &&\n                    !is_acct_field(p_filter->filter_simple.filter_index[i]))\n                    return false;\n            }\n        }\n    }\n    return true;\n}\n\n/**\n * Builds a report from database.\n */\nstruct lmgr_report_t *ListMgr_Report(lmgr_t *p_mgr,\n                                     const report_field_descr_t *\n                                     report_desc_array,\n                                     unsigned int report_descr_count,\n                                     const profile_field_descr_t *\n                                     profile_descr,\n                                     const lmgr_filter_t *p_filter,\n                                     const lmgr_iter_opt_t *p_opt)\n{\n    unsigned int i;\n    char 
attrname[128];\n    lmgr_report_t *p_report;\n    int rc;\n    table_enum query_tab;\n    /* supported report fields: ENTRIES, ANNEX_INFO or ACCT */\n    bool use_acct_table = false;\n    lmgr_iter_opt_t opt = { 0 };\n    unsigned int profile_len = 0;\n    unsigned int ratio = 0;\n    struct field_count fcnt = { 0 };\n    GString *req = NULL;\n    GString *fields = NULL;\n    GString *where = NULL;\n    GString *having = NULL;\n    GString *group_by = NULL;\n    GString *order_by = NULL;\n    GString *filter_name = NULL;\n\n    /* check profile argument and increase output array if needed */\n    if (profile_descr != NULL) {\n        if (profile_descr->attr_index != ATTR_INDEX_size) {\n            DisplayLog(LVL_CRIT, LISTMGR_TAG,\n                       \"Profile on attribute '%s' (index=%u) is not supported\",\n                       field_name(profile_descr->attr_index),\n                       profile_descr->attr_index);\n            return NULL;\n        }\n        profile_len = SZ_PROFIL_COUNT;\n        if (profile_descr->range_ratio_len > 0)\n            ratio = 1;\n    }\n\n    /* allocate a new report structure */\n    p_report = (lmgr_report_t *) MemAlloc(sizeof(lmgr_report_t));\n    if (!p_report)\n        return NULL;\n\n    p_report->p_mgr = p_mgr;\n\n    p_report->result = (struct result *)MemCalloc(report_descr_count\n                                                  + profile_len + ratio,\n                                                  sizeof(struct result));\n    if (!p_report->result)\n        goto free_report;\n\n    p_report->result_count = report_descr_count + profile_len + ratio;\n    p_report->profile_count = profile_len;\n    p_report->ratio_count = ratio;\n    if (profile_descr != NULL)\n        p_report->profile_attr = ATTR_INDEX_size;\n\n    /* initially, no char * tab allocated */\n    p_report->str_tab = NULL;\n\n    if (p_opt)\n        opt = *p_opt;\n\n    fields = g_string_new(NULL);\n    group_by = g_string_new(NULL);\n    
order_by = g_string_new(NULL);\n    having = g_string_new(NULL);\n    where = g_string_new(NULL);\n\n    if (full_acct(report_desc_array, report_descr_count, p_filter)\n        && !opt.force_no_acct) {\n        listmgr_optimizedstat(p_report, p_mgr, report_descr_count,\n                              report_desc_array, profile_descr, fields,\n                              group_by, order_by, having, where);\n        use_acct_table = true;\n    } else {    /* not only ACCT table */\n\n        /* sorting by ratio first */\n        if (profile_descr && profile_descr->range_ratio_len > 0) {\n            if (profile_descr->attr_index == ATTR_INDEX_size) {\n                coma_if_needed(order_by);\n                if (profile_descr->range_ratio_sort == SORT_ASC)\n                    g_string_append(order_by, \"sizeratio ASC\");\n                else\n                    g_string_append(order_by, \"sizeratio DESC\");\n            }\n        }\n\n        for (i = 0; i < report_descr_count; i++) {\n            /* no field for count or distinct count */\n            if (report_desc_array[i].report_type != REPORT_COUNT &&\n                report_desc_array[i].report_type != REPORT_COUNT_DISTINCT) {\n                /* in what table is this field ? 
*/\n                if (is_main_field(report_desc_array[i].attr_index))\n                    fcnt.nb_main++;\n                else if (is_annex_field(report_desc_array[i].attr_index))\n                    fcnt.nb_annex++;\n                else {\n                    /* Not supported yet */\n                    DisplayLog(LVL_CRIT, LISTMGR_TAG,\n                               \"Error: report on attribute '%s' (index=%u) is not supported (report item #%u).\",\n                               field_name(report_desc_array[i].attr_index),\n                               report_desc_array[i].attr_index, i);\n                    rc = DB_NOT_SUPPORTED;\n                    goto free_str;\n                }\n            }\n\n            sprintf(attrname, \"attr%u\", i);\n\n            /* what kind of stat on this field ? */\n            switch (report_desc_array[i].report_type) {\n            case REPORT_MIN:\n                coma_if_needed(fields);\n                g_string_append_printf(fields, \"MIN(%s) as %s\",\n                                       field_str(report_desc_array[i].\n                                                 attr_index), attrname);\n                p_report->result[i].type =\n                    field_type(report_desc_array[i].attr_index);\n                break;\n\n            case REPORT_MAX:\n                coma_if_needed(fields);\n                g_string_append_printf(fields, \"MAX(%s) as %s\",\n                                       field_str(report_desc_array[i].\n                                                 attr_index), attrname);\n                p_report->result[i].type =\n                    field_type(report_desc_array[i].attr_index);\n                break;\n\n            case REPORT_AVG:\n                coma_if_needed(fields);\n                g_string_append_printf(fields, \"ROUND(AVG(%s)) as %s\",\n                                       field_str(report_desc_array[i].\n                                                 
attr_index), attrname);\n                p_report->result[i].type =\n                    field_type(report_desc_array[i].attr_index);\n                break;\n\n            case REPORT_SUM:\n                coma_if_needed(fields);\n                g_string_append_printf(fields, \"SUM(%s) as %s\",\n                                       field_str(report_desc_array[i].\n                                                 attr_index), attrname);\n                p_report->result[i].type =\n                    field_type(report_desc_array[i].attr_index);\n                break;\n\n            case REPORT_COUNT:\n                coma_if_needed(fields);\n                g_string_append_printf(fields, \"COUNT(*) as %s\", attrname);\n                p_report->result[i].type = DB_BIGUINT;\n                break;\n\n            case REPORT_COUNT_DISTINCT:\n                coma_if_needed(fields);\n                g_string_append_printf(fields, \"COUNT(DISTINCT(%s)) as %s\",\n                                       field_str(report_desc_array[i].\n                                                 attr_index), attrname);\n                p_report->result[i].type = DB_BIGUINT;\n                break;\n\n            case REPORT_GROUP_BY:\n                coma_if_needed(fields);\n                g_string_append_printf(fields, \"%s as %s\",\n                                       field_str(report_desc_array[i].\n                                                 attr_index), attrname);\n                coma_if_needed(group_by);\n                g_string_append(group_by, attrname);\n                p_report->result[i].type =\n                    field_type(report_desc_array[i].attr_index);\n                break;\n            }\n\n            /* is this field sorted ? */\n            append_sort_order(order_by, attrname,\n                              report_desc_array[i].sort_flag);\n\n            /* is this field filtered ? 
*/\n            listmgr_fieldfilter(p_report, p_mgr, report_desc_array, attrname,\n                                having, where, i);\n\n            p_report->result[i].flags =\n                field_flag(report_desc_array[i].attr_index);\n        }\n\n        /* generate size profile */\n        if (profile_descr != NULL) {\n            if (profile_descr->attr_index == ATTR_INDEX_size) {\n                coma_if_needed(fields);\n                g_string_append(fields, \"SUM(size=0)\");\n\n                for (i = 1; i < SZ_PROFIL_COUNT - 1; i++)\n                    g_string_append_printf(fields,\n                                           \",SUM(\" SZRANGE_FUNC \"(size)=%u)\",\n                                           i - 1);\n\n                g_string_append_printf(fields,\n                                       \",SUM(\" SZRANGE_FUNC \"(size)>=%u)\",\n                                       SZ_PROFIL_COUNT - 1);\n\n                for (i = 0; i < SZ_PROFIL_COUNT; i++)\n                    p_report->result[i + report_descr_count].type = DB_BIGUINT;\n\n                if (profile_descr->range_ratio_len > 0) {\n                    /* add ratio field and sort it */\n                    coma_if_needed(fields);\n                    g_string_append_printf(fields, \"SUM(size>=%llu\",\n                                           SZ_MIN_BY_INDEX(profile_descr->\n                                                           range_ratio_start));\n\n                    /* is the last range = 1T->inf ? 
*/\n                    if (profile_descr->range_ratio_start +\n                        profile_descr->range_ratio_len >= SZ_PROFIL_COUNT)\n                        g_string_append(fields, \")\");\n                    else\n                        g_string_append_printf(fields, \" and size<%llu)\",\n                            SZ_MIN_BY_INDEX(profile_descr->range_ratio_start\n                                            + profile_descr->range_ratio_len));\n\n                    g_string_append(fields, \"/COUNT(*) as sizeratio\");\n                }\n            }\n        }\n    }\n\n    /* process filter */\n    if (!(no_filter(p_filter))) {\n        if (full_acct(report_desc_array, report_descr_count, p_filter)\n            && !opt.force_no_acct) {\n            int filter_acct;\n\n            /* filter on acct fields only */\n            filter_acct = filter2str(p_mgr, where, p_filter, T_ACCT,\n                               (!GSTRING_EMPTY(where) ? AOF_LEADING_SEP : 0)\n                               | AOF_PREFIX);\n            if (filter_acct > 0)\n                use_acct_table = true;\n        } else {\n            /* process NAMES filters apart, as with must then join with\n             * DISTINCT(id) */\n            filter_where(p_mgr, p_filter, &fcnt, where,\n                         (!GSTRING_EMPTY(where) ? 
AOF_LEADING_SEP : 0)\n                         | AOF_SKIP_NAME);\n\n            filter_name = g_string_new(NULL);\n            fcnt.nb_names =\n                filter2str(p_mgr, filter_name, p_filter, T_DNAMES, 0);\n        }\n    }\n\n    /* start building the whole request */\n    req = g_string_new(\"SELECT \");\n    g_string_append_printf(req, \"%s FROM \", fields->str);\n\n    /* FROM clause */\n    if (use_acct_table) {\n        g_string_append(req, ACCT_TABLE);\n        query_tab = T_ACCT;\n    } else {\n        bool distinct;\n\n        filter_from(p_mgr, &fcnt, req, &query_tab, &distinct, AOF_SKIP_NAME);\n\n        if (filter_name != NULL && !GSTRING_EMPTY(filter_name)) {\n            g_string_append_printf(req, \" INNER JOIN (SELECT DISTINCT(id)\"\n                                   \" FROM \" DNAMES_TABLE \" WHERE %s) N\"\n                                   \" ON %s.id=N.id\", filter_name->str,\n                                   table2name(query_tab));\n            /* FIXME: what if NAMES is the query tab? 
*/\n        }\n        /* FIXME: do the same for stripe items */\n    }\n\n    /* Build the request */\n    if (!GSTRING_EMPTY(where))\n        g_string_append_printf(req, \" WHERE %s\", where->str);\n\n    if (!GSTRING_EMPTY(group_by))\n        g_string_append_printf(req, \" GROUP BY %s\", group_by->str);\n\n    if (!GSTRING_EMPTY(having))\n        g_string_append_printf(req, \" HAVING %s\", having->str);\n\n    if (!GSTRING_EMPTY(order_by))\n        g_string_append_printf(req, \" ORDER BY %s\", order_by->str);\n\n    /* iterator opt */\n    if (opt.list_count_max > 0)\n        g_string_append_printf(req, \" LIMIT %u\", opt.list_count_max);\n\n retry:\n    /* execute request (expect that ACCT table does not exists) */\n    if (use_acct_table)\n        rc = db_exec_sql_quiet(&p_mgr->conn, req->str,\n                               &p_report->select_result);\n    else\n        rc = db_exec_sql(&p_mgr->conn, req->str, &p_report->select_result);\n\n    if (lmgr_delayed_retry(p_mgr, rc))\n        goto retry;\n\n    /* if the ACCT table does exist, switch to standard mode */\n    if (use_acct_table && (rc == DB_NOT_EXISTS)) {\n        lmgr_iter_opt_t new_opt;\n\n        if (p_opt != NULL)\n            new_opt = *p_opt;\n        else\n            new_opt.list_count_max = 0;\n\n        new_opt.force_no_acct = true;\n\n        DisplayLog(LVL_EVENT, LISTMGR_TAG,\n                   \"No accounting info: switching to standard query mode\");\n\n        g_string_free(req, TRUE);\n        g_string_free(fields, TRUE);\n        g_string_free(group_by, TRUE);\n        g_string_free(order_by, TRUE);\n        g_string_free(having, TRUE);\n        g_string_free(where, TRUE);\n        if (filter_name != NULL)\n            g_string_free(filter_name, TRUE);\n\n        return ListMgr_Report(p_mgr, report_desc_array, report_descr_count,\n                              profile_descr, p_filter, &new_opt);\n    }\n\n free_str:\n    /* these are always allocated */\n    g_string_free(fields, 
TRUE);\n    g_string_free(group_by, TRUE);\n    g_string_free(order_by, TRUE);\n    g_string_free(having, TRUE);\n    g_string_free(where, TRUE);\n    /* these may not be allocated */\n    if (req != NULL)\n        g_string_free(req, TRUE);\n    if (filter_name != NULL)\n        g_string_free(filter_name, TRUE);\n\n    if (rc == DB_SUCCESS)\n        return p_report;\n\n/* error */\n    MemFree(p_report->result);\n\n free_report:\n    MemFree(p_report);\n    return NULL;\n}   /* ListMgr_Report */\n\n/**\n * Get next report entry.\n * @param p_value_count is IN/OUT parameter. IN: size of output array. OUT: nbr of fields set in array.\n * @param p_profile OUT: output profile, if required.\n */\nint ListMgr_GetNextReportItem(struct lmgr_report_t *p_iter,\n                              db_value_t *p_value, unsigned int *p_value_count,\n                              profile_u *p_profile)\n{\n    int rc;\n    unsigned int i;\n\n    if (*p_value_count <\n        p_iter->result_count - p_iter->profile_count - p_iter->ratio_count)\n        return DB_BUFFER_TOO_SMALL;\n\n    if (p_iter->str_tab == NULL) {\n        p_iter->str_tab =\n            (char **)MemCalloc(p_iter->result_count, sizeof(char *));\n        if (!p_iter->str_tab)\n            return DB_NO_MEMORY;\n    }\n\n    rc = db_next_record(&p_iter->p_mgr->conn, &p_iter->select_result,\n                        p_iter->str_tab, p_iter->result_count);\n\n    if (rc)\n        return rc;\n\n    /* parse result values */\n    for (i = 0;\n         i < p_iter->result_count - p_iter->profile_count - p_iter->ratio_count;\n         i++) {\n        if (p_iter->str_tab[i] != NULL) {\n            p_value[i].type = p_iter->result[i].type;\n            if (parsedbtype(p_iter->str_tab[i], p_iter->result[i].type,\n                            &(p_value[i].value_u)) != 1) {\n                DisplayLog(LVL_CRIT, LISTMGR_TAG,\n                           \"Could not parse result field #%u: value='%s'\", i,\n                           
p_iter->str_tab[i]);\n                return DB_INVALID_ARG;\n            }\n            if (p_iter->result[i].flags & SEPD_LIST)\n                separated_db2list_inplace((char *)p_value[i].value_u.val_str);\n        } else {\n            p_value[i].type = DB_TEXT;\n            p_value[i].value_u.val_str = NULL;\n        }\n    }\n\n    /* fill profile structure */\n    if (p_profile && (p_iter->profile_count > 0)) {\n        if (p_iter->profile_attr == ATTR_INDEX_size) {\n            db_type_u dbval;\n            for (i = 0; i < p_iter->profile_count; i++) {\n                unsigned int idx = p_iter->result_count - p_iter->profile_count\n                    - p_iter->ratio_count + i;\n                if (p_iter->str_tab[idx] == NULL) {\n                    p_profile->size.file_count[i] = 0;\n                } else\n                    if (parsedbtype\n                        (p_iter->str_tab[idx], p_iter->result[idx].type,\n                         &dbval) == 1) {\n                    p_profile->size.file_count[i] = dbval.val_biguint;\n                } else {\n                    DisplayLog(LVL_CRIT, LISTMGR_TAG,\n                               \"Could not parse result field #%u: value='%s'\",\n                               idx, p_iter->str_tab[idx]);\n                    return DB_INVALID_ARG;\n                }\n            }\n        }\n    }\n\n    *p_value_count =\n        p_iter->result_count - p_iter->profile_count - p_iter->ratio_count;\n\n    return DB_SUCCESS;\n}\n\nvoid ListMgr_CloseReport(struct lmgr_report_t *p_iter)\n{\n    db_result_free(&p_iter->p_mgr->conn, &p_iter->select_result);\n\n    if (p_iter->str_tab != NULL)\n        MemFree(p_iter->str_tab);\n\n    MemFree(p_iter->result);\n    MemFree(p_iter);\n}\n\nint ListMgr_EntryCount(lmgr_t *p_mgr, uint64_t *count)\n{\n    int rc;\n\n    do {\n        rc = lmgr_table_count(&p_mgr->conn, MAIN_TABLE, count);\n    } while (rc != DB_SUCCESS && lmgr_delayed_retry(p_mgr, rc));\n\n    return rc;\n}\n"
  },
  {
    "path": "src/list_mgr/listmgr_stripe.c",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2008-2014 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n#ifdef HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n\n#include \"listmgr_stripe.h\"\n#include \"database.h\"\n#include \"listmgr_common.h\"\n#include \"Memory.h\"\n#include \"rbh_logs.h\"\n#include \"rbh_misc.h\"\n#include <stdio.h>\n#include <stdlib.h>\n\n#define STRIPE_INFO_FIELDS \"id,validator,stripe_count,stripe_size,pool_name\"\n#define STRIPE_INFO_SET_VALUES \"validator=VALUES(validator),\"       \\\n                               \"stripe_count=VALUES(stripe_count),\" \\\n                               \"stripe_size=VALUES(stripe_size),\"   \\\n                               \"pool_name=VALUES(pool_name)\"\n\n#define STRIPE_ITEMS_FIELDS \"id,stripe_index,ostidx,details\"\n\nint update_stripe_info(lmgr_t *p_mgr, PK_ARG_T pk, int validator,\n                       const stripe_info_t *p_stripe,\n                       const stripe_items_t *p_items, bool insert_if_absent)\n{\n    attr_set_t fake_attr;\n    attr_set_t *p_attr = &fake_attr;\n    pktype list[1];\n\n    rh_strncpy(list[0], pk, sizeof(*list));\n\n    ATTR_MASK_INIT(&fake_attr);\n    if (p_stripe) {\n        ATTR_MASK_SET(&fake_attr, stripe_info);\n        ATTR(&fake_attr, stripe_info) = *p_stripe;\n    }\n    if (p_items) {\n        ATTR_MASK_SET(&fake_attr, stripe_items);\n        ATTR(&fake_attr, stripe_items) = *p_items;\n    }\n\n    return batch_insert_stripe_info(p_mgr, list, &validator, &p_attr, 1, true);\n}\n\nint insert_stripe_info(lmgr_t *p_mgr, PK_ARG_T pk,\n                       int validator, const stripe_info_t 
*p_stripe,\n                       const stripe_items_t *p_items, bool update_if_exists)\n{\n    attr_set_t fake_attr;\n    attr_set_t *p_attr = &fake_attr;\n    pktype list[1];\n\n    rh_strncpy(list[0], pk, sizeof(*list));\n\n    ATTR_MASK_INIT(&fake_attr);\n    if (p_stripe) {\n        ATTR_MASK_SET(&fake_attr, stripe_info);\n        ATTR(&fake_attr, stripe_info) = *p_stripe;\n    }\n    if (p_items) {\n        ATTR_MASK_SET(&fake_attr, stripe_items);\n        ATTR(&fake_attr, stripe_items) = *p_items;\n    }\n\n    return batch_insert_stripe_info(p_mgr, list, &validator, &p_attr, 1,\n                                    update_if_exists);\n}\n\nint batch_insert_stripe_info(lmgr_t *p_mgr, pktype *pklist, int *validators,\n                             attr_set_t **p_attrs, unsigned int count,\n                             bool update_if_exists)\n{\n    bool first;\n    int i, rc = 0;\n    int total_si;\n    GString *req = g_string_new(\"\");\n    attr_mask_t tmp_mask = { ATTR_MASK_stripe_info, 0, 0LL };\n\n    if (!attr_mask_is_null(sum_masks(p_attrs, count, tmp_mask))) {\n        /* build batch request for STRIPE_INFO table */\n        g_string_assign(req, \"INSERT INTO \" STRIPE_INFO_TABLE \" (\"\n                        STRIPE_INFO_FIELDS \") VALUES \");\n\n        first = true;\n        for (i = 0; i < count; i++) {\n            /* no request if the entry has no stripe info */\n            if (!ATTR_MASK_TEST(p_attrs[i], stripe_info))\n                continue;\n\n            g_string_append_printf(req, \"%s(\" DPK \",%d,%u,%u,'%s')\",\n                                   first ? 
\"\" : \",\", pklist[i], validators[i],\n                                   ATTR(p_attrs[i], stripe_info).stripe_count,\n                                   (unsigned int)ATTR(p_attrs[i],\n                                                      stripe_info).stripe_size,\n                                   ATTR(p_attrs[i], stripe_info).pool_name);\n            first = false;\n        }\n\n        if (update_if_exists)\n            /* append \"on duplicate key ...\" */\n            g_string_append(req,\n                            \" ON DUPLICATE KEY UPDATE \" STRIPE_INFO_SET_VALUES);\n\n        if (!first) {   /* do nothing if no entry had stripe info */\n            rc = db_exec_sql(&p_mgr->conn, req->str, NULL);\n            if (rc)\n                goto out;\n        }\n        /* reset the string */\n        g_string_assign(req, \"\");\n    }\n\n    /* Stripe items more tricky because we want to delete previous items\n     * on update. */\n    /* If update_if_exists is false, insert them all as a batch.\n     * For the update case, remove previous items before bluk insert.\n     */\n    if (update_if_exists) {\n        for (i = 0; i < count; i++) {\n            /* no request if the entry has no stripe items */\n            if (!ATTR_MASK_TEST(p_attrs[i], stripe_items))\n                continue;\n\n            g_string_printf(req,\n                            \"DELETE FROM \" STRIPE_ITEMS_TABLE \" WHERE id=\" DPK,\n                            pklist[i]);\n\n            rc = db_exec_sql(&p_mgr->conn, req->str, NULL);\n            if (rc)\n                goto out;\n        }\n    }\n\n    /* bulk insert stripe items (if any is set) */\n    tmp_mask.std = ATTR_MASK_stripe_items;\n    if (attr_mask_is_null(sum_masks(p_attrs, count, tmp_mask)))\n        goto out;\n\n    total_si = 0;\n    first = true;\n    g_string_assign(req, \"INSERT INTO \" STRIPE_ITEMS_TABLE\n                    \" (\" STRIPE_ITEMS_FIELDS \") VALUES \");\n\n    /* loop on all entries and all 
stripe items */\n    for (i = 0; i < count; i++) {\n        int s;\n        const stripe_items_t *p_items;\n\n        /* skip the entry if it has no stripe items */\n        if (!ATTR_MASK_TEST(p_attrs[i], stripe_items))\n            continue;\n\n        p_items = &ATTR(p_attrs[i], stripe_items);\n        for (s = 0; s < p_items->count; s++) {\n            char buff[2 * STRIPE_DETAIL_SZ + 1];\n\n            total_si++;\n            if (buf2hex\n                (buff, sizeof(buff),\n                 (unsigned char *)(&p_items->stripe[s].ost_gen),\n                 STRIPE_DETAIL_SZ) < 0) {\n                DisplayLog(LVL_CRIT, LISTMGR_TAG,\n                           \"Buffer too small to store details stripe info\");\n                memset(buff, 0, sizeof(buff));\n            }\n            g_string_append_printf(req, \"%s(\" DPK \",%u,%u,x'%s')\",\n                                   first && (s == 0) ? \"\" : \",\", pklist[i],\n                                   s, p_items->stripe[s].ost_idx, buff);\n            first = false;\n        }\n    }\n\n    /* only execute it if there was some stripe items */\n    if (total_si > 0)\n        rc = db_exec_sql(&p_mgr->conn, req->str, NULL);\n\n out:\n    g_string_free(req, TRUE);\n    return rc;\n}\n\nint get_stripe_info(lmgr_t *p_mgr, PK_ARG_T pk, stripe_info_t *p_stripe_info,\n                    stripe_items_t *p_items)\n{\n/* stripe_count, stripe_size, pool_name, validator => 4 */\n#define STRIPE_INFO_COUNT 4\n    char *res[STRIPE_INFO_COUNT];\n    result_handle_t result;\n    int i;\n    int rc = DB_SUCCESS;\n    GString *req;\n\n    /* retrieve basic stripe info */\n    req =\n        g_string_new\n        (\"SELECT stripe_count, stripe_size, pool_name,validator FROM \"\n         STRIPE_INFO_TABLE \" WHERE id=\");\n    g_string_append_printf(req, DPK, pk);\n\n    rc = db_exec_sql(&p_mgr->conn, req->str, &result);\n    if (rc)\n        goto out;\n\n    rc = db_next_record(&p_mgr->conn, &result, res, 
STRIPE_INFO_COUNT);\n\n    if (rc == DB_END_OF_LIST)\n        rc = DB_NOT_EXISTS;\n    if (rc)\n        goto res_free;\n\n    for (i = 0; i < STRIPE_INFO_COUNT; i++) {\n        DisplayLog(LVL_FULL, LISTMGR_TAG, \"stripe_res[%u] = %s\", i,\n                   res[i] ? res[i] : \"<null>\");\n        if (res[i] == NULL) {\n            rc = DB_ATTR_MISSING;\n            goto res_free;\n        }\n    }\n\n    p_stripe_info->stripe_count = atoi(res[0]);\n    p_stripe_info->stripe_size = atoi(res[1]);\n    rh_strncpy(p_stripe_info->pool_name, res[2], MAX_POOL_LEN);\n#ifdef HAVE_LLAPI_FSWAP_LAYOUTS\n    p_stripe_info->validator = atoi(res[3]);\n#endif\n\n    db_result_free(&p_mgr->conn, &result);\n\n    if (p_items) {\n        /* retrieve stripe list */\n        g_string_printf(req, \"SELECT stripe_index,ostidx,details FROM \"\n                        STRIPE_ITEMS_TABLE \" WHERE id=\" DPK\n                        \" ORDER BY stripe_index ASC\", pk);\n\n        rc = db_exec_sql(&p_mgr->conn, req->str, &result);\n        if (rc)\n            goto out;\n\n#ifndef _LUSTRE_HSM\n        /* this is abnormal if LUSTRE/HSM feature is not present */\n        if (p_stripe_info->stripe_count !=\n            db_result_nb_records(&p_mgr->conn, &result)) {\n            DisplayLog(LVL_MAJOR, LISTMGR_TAG,\n                       \"Warning: the number of stripe items (%d) doesn't \"\n                       \"match stripe count (%u)! 
(Pk=\" DPK \")\",\n                       db_result_nb_records(&p_mgr->conn, &result),\n                       p_stripe_info->stripe_count, pk);\n        }\n#endif\n        p_items->count = db_result_nb_records(&p_mgr->conn, &result);\n\n        if (p_items->count > 0) {\n\n            /* allocate stripe array */\n            p_items->stripe = MemCalloc(p_items->count, sizeof(stripe_item_t));\n\n            if (!p_items->stripe) {\n                rc = DB_NO_MEMORY;\n                goto res_free;\n            }\n\n            /* fill stripe units */\n            for (i = 0; i < p_items->count; i++) {\n                rc = db_next_record(&p_mgr->conn, &result, res,\n                                    STRIPE_INFO_COUNT);\n                if (rc)\n                    goto stripe_free;\n\n                if (res[0] == NULL) {\n                    rc = DB_ATTR_MISSING;\n                    goto stripe_free;\n                }\n\n                if (i != atoi(res[0])) {\n                    DisplayLog(LVL_MAJOR, LISTMGR_TAG,\n                               \"Warning: inconsistent stripe order: \"\n                               \"stripe %s returned in position %u\",\n                               res[0], i);\n                }\n                p_items->stripe[i].ost_idx = atoi(res[1]);\n                /* raw copy of binary buffer (last 3 fields of stripe_item_t\n                 *                            = address of ost_gen field) */\n                memcpy(&p_items->stripe[i].ost_gen, res[2], STRIPE_DETAIL_SZ);\n            }\n        } else\n            p_items->stripe = NULL;\n\n        /* last query result must be freed */\n        rc = DB_SUCCESS;\n        goto res_free;\n    }\n\n    rc = DB_SUCCESS;\n    /* result is already freed */\n    goto out;\n\n stripe_free:\n    free_stripe_items(p_items);\n res_free:\n    db_result_free(&p_mgr->conn, &result);\n out:\n    g_string_free(req, TRUE);\n    return rc;\n}\n\n/** release stripe information */\nvoid 
free_stripe_items(stripe_items_t *p_stripe_items)\n{\n    if (p_stripe_items->stripe)\n        MemFree(p_stripe_items->stripe);\n    p_stripe_items->stripe = NULL;\n    p_stripe_items->count = 0;\n}\n\n/** duplicate stripe information */\nint dup_stripe_items(stripe_items_t *p_stripe_out,\n                     const stripe_items_t *p_stripe_in)\n{\n    if (p_stripe_in == NULL || p_stripe_out == NULL)\n        return DB_INVALID_ARG;\n\n    p_stripe_out->count = p_stripe_in->count;\n\n    if (p_stripe_in->stripe == NULL) {\n        p_stripe_out->stripe = NULL;\n        return 0;\n    }\n\n    p_stripe_out->stripe = MemAlloc(p_stripe_out->count\n                                    * sizeof(stripe_item_t));\n    if (p_stripe_out->stripe == NULL)\n        return DB_NO_MEMORY;\n\n    memcpy(p_stripe_out->stripe, p_stripe_in->stripe,\n           p_stripe_out->count * sizeof(stripe_item_t));\n    return 0;\n}\n\n/** check that validator is matching for a given entry */\nint ListMgr_CheckStripe(lmgr_t *p_mgr, const entry_id_t *p_id, int validator)\n{\n    char *res;\n    result_handle_t result;\n    int rc = DB_SUCCESS;\n    GString *req = NULL;\n    DEF_PK(pk);\n\n#ifndef HAVE_LLAPI_FSWAP_LAYOUTS\n    if (validator != VALID_EXISTS)\n        validator = VALID(p_id);\n#endif\n\n    entry_id2pk(p_id, PTR_PK(pk));\n\n    req = g_string_new(\"SELECT validator FROM \" STRIPE_INFO_TABLE \" WHERE id=\");\n    g_string_append_printf(req, DPK, pk);\n\n retry:\n    rc = db_exec_sql(&p_mgr->conn, req->str, &result);\n    if (lmgr_delayed_retry(p_mgr, rc))\n        goto retry;\n    else if (rc)\n        goto out;\n\n    rc = db_next_record(&p_mgr->conn, &result, &res, 1);\n    if (lmgr_delayed_retry(p_mgr, rc))\n        goto retry;\n\n    if (rc == DB_END_OF_LIST)\n        rc = DB_NOT_EXISTS;\n\n    if (rc)\n        goto res_free;\n\n    if (res == NULL) {\n        rc = DB_ATTR_MISSING;\n        goto res_free;\n    }\n\n    if (validator == VALID_EXISTS) {\n        DisplayLog(LVL_FULL, 
LISTMGR_TAG, DFID \": validator exists (%s): OK\",\n                   PFID(p_id), res);\n        /* just check it exists */\n        rc = DB_SUCCESS;\n    } else if (atoi(res) != validator) {\n        DisplayLog(LVL_FULL, LISTMGR_TAG,\n                   DFID \": stripe change detected: gen %s->%d\", PFID(p_id), res,\n                   validator);\n        rc = DB_OUT_OF_DATE;\n    } else {    /* validator matches */\n\n        DisplayLog(LVL_FULL, LISTMGR_TAG, DFID \": stripe gen is unchanged (%d)\",\n                   PFID(p_id), validator);\n        rc = DB_SUCCESS;\n    }\n\n res_free:\n    db_result_free(&p_mgr->conn, &result);\n out:\n    g_string_free(req, TRUE);\n    DisplayLog(LVL_FULL, LISTMGR_TAG, DFID \": %s returns with status=%d\",\n               PFID(p_id), __func__, rc);\n    return rc;\n}\n"
  },
  {
    "path": "src/list_mgr/listmgr_stripe.h",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2009 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n#include \"list_mgr.h\"\n#include \"listmgr_internal.h\"\n\n#ifndef _LISTMGR_STRIPE_H\n#define _LISTMGR_STRIPE_H\n\nint insert_stripe_info(lmgr_t *p_mgr, PK_ARG_T pk,\n                       int validator, const stripe_info_t *p_stripe,\n                       const stripe_items_t *p_items, bool update_if_exists);\nint update_stripe_info(lmgr_t *p_mgr, PK_ARG_T pk,\n                       int validator, const stripe_info_t *p_stripe,\n                       const stripe_items_t *p_items, bool insert_if_absent);\nint batch_insert_stripe_info(lmgr_t *p_mgr, pktype *pklist, int *validators,\n                             attr_set_t **p_attrs, unsigned int count,\n                             bool update_if_exists);\n\nint get_stripe_info(lmgr_t *p_mgr, PK_ARG_T pk, stripe_info_t *p_stripe,\n                    stripe_items_t *p_items);\n\n/** duplicate stripe information */\nint dup_stripe_items(stripe_items_t *p_stripe_out,\n                     const stripe_items_t *p_stripe_in);\n\nvoid free_stripe_items(stripe_items_t *p_stripe_items);\n\n#endif\n"
  },
  {
    "path": "src/list_mgr/listmgr_tags.c",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2008, 2009 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n#ifdef HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n\n#include \"list_mgr.h\"\n#include \"database.h\"\n#include \"listmgr_common.h\"\n#include \"listmgr_stripe.h\"\n#include \"rbh_logs.h\"\n#include \"Memory.h\"\n#include <stdio.h>\n#include <stdlib.h>\n\n/**\n * Create a (persitent) table to tag entries.\n * \\param filter indicate this applies to a restricted set of entries.\n * \\param reset indicate if the table is cleaned in case it already exists.\n */\nint ListMgr_CreateTag(lmgr_t * p_mgr, const char *tag_name,\n                      lmgr_filter_t * p_filter, bool reset)\n{\n    GString         *req = NULL;\n    GString         *from = NULL;\n    GString         *where = NULL;\n    table_enum       query_tab = T_NONE;\n    bool             distinct = false;\n    int              rc;\n    struct field_count fcnt = {0};\n\n    /* create table statement */\n    req = g_string_new(\"CREATE TABLE \");\n    g_string_append_printf(req, \"TAG_%s (id \"PK_TYPE\" PRIMARY KEY) AS \",\n                           tag_name);\n\n    /* now build the SELECT clause */\n    if (no_filter(p_filter))\n    {\n        /* no filter, create a table with all ids */\n        g_string_append(req, \"SELECT id FROM \"MAIN_TABLE);\n    }\n    else\n    {\n        where = g_string_new(NULL);\n        filter_where(p_mgr, p_filter, &fcnt, where, 0);\n\n        if (nb_field_tables(&fcnt) == 0)\n        {\n            /* finally, no filter */\n            g_string_append(req, \"SELECT id FROM \"MAIN_TABLE);\n        }\n        
else\n        {\n            /* build the FROM clause */\n            from = g_string_new(NULL);\n            filter_from(p_mgr, &fcnt, from, &query_tab, &distinct, 0);\n\n            if (distinct)\n                g_string_append_printf(req, \"SELECT DISTINCT(%s.id) AS id\",\n                                table2name(query_tab));\n            else\n                g_string_append_printf(req, \"SELECT %s.id AS id\",\n                                table2name(query_tab));\n\n            g_string_append_printf(req, \" FROM %s WHERE %s\", from->str, where->str);\n        }\n    }\n\nretry:\n    rc = lmgr_begin( p_mgr );\n    if (lmgr_delayed_retry(p_mgr, rc))\n        goto retry;\n    else if (rc)\n        goto free_str;\n\n    /* create the table */\n    rc = db_exec_sql(&p_mgr->conn, req->str, NULL);\n    if (lmgr_delayed_retry(p_mgr, rc))\n        goto retry;\n    else if (rc)\n        goto rollback;\n\n    /** TODO handle 'reset' option if table already exists */\n\n    rc = lmgr_commit(p_mgr);\n    if (lmgr_delayed_retry(p_mgr, rc))\n        goto retry;\n\n    goto free_str;\n\nrollback:\n    lmgr_rollback(p_mgr);\nfree_str:\n    if (req != NULL)\n        g_string_free(req, TRUE);\n    if (from != NULL)\n        g_string_free(from, TRUE);\n    if (where != NULL)\n        g_string_free(where, TRUE);\n    return rc;\n}\n\n/** destroy a tag */\nint ListMgr_DestroyTag(lmgr_t * p_mgr, const char *tag_name)\n{\n    char tabname[1024];\n    snprintf(tabname, 1024, \"TAG_%s\", tag_name);\n\n    return db_drop_component( &p_mgr->conn, DBOBJ_TABLE, tabname );\n}\n\n/**\n * Tag an entry (in the set specified by CreateTag filter)\n */\nint ListMgr_TagEntry(lmgr_t * p_mgr, const char *tag_name, const entry_id_t * p_id)\n{\n    char           request[1024];\n    int            rc;\n    DEF_PK(pk);\n\n    /* We want the remove operation to be atomic */\nretry:\n    rc = lmgr_begin(p_mgr);\n    if (lmgr_delayed_retry(p_mgr, rc))\n        goto retry;\n    else if (rc)\n        
return rc;\n\n    entry_id2pk(p_id, PTR_PK(pk));\n\n    /* Only keep untagged entries in the table, as the goal\n     * is to list untagged entries at the end. */\n    sprintf( request, \"DELETE FROM TAG_%s WHERE id=\"DPK, tag_name, pk);\n    rc = db_exec_sql( &p_mgr->conn, request, NULL );\n    if (lmgr_delayed_retry(p_mgr, rc))\n        goto retry;\n    else if (rc)\n        return rc;\n\n    rc = lmgr_commit(p_mgr);\n    if (lmgr_delayed_retry(p_mgr, rc))\n        goto retry;\n    return rc;\n}\n\n\n/**\n * Return an iterator on non-tagged entries (in the set specified by CreateTag filter)\n */\nstruct lmgr_iterator_t *ListMgr_ListUntagged( lmgr_t * p_mgr,\n                                          const char * tag_name,\n                                          const lmgr_iter_opt_t * p_opt )\n{\n    char query[1024];\n    char * query_end = query;\n    struct lmgr_iterator_t * it;\n    int rc;\n\n    query_end += sprintf(query_end, \"SELECT id FROM TAG_%s\", tag_name);\n\n    if (p_opt && (p_opt->list_count_max > 0))\n        query_end += sprintf(query_end, \" LIMIT %u\", p_opt->list_count_max);\n\n    /* allocate a new iterator */\n    it = (lmgr_iterator_t *) MemAlloc(sizeof(lmgr_iterator_t));\n    it->p_mgr = p_mgr;\n    if (p_opt)\n    {\n        it->opt = *p_opt;\n        it->opt_is_set = 1;\n    }\n    else\n    {\n        it->opt_is_set = 0;\n    }\n\n    /* execute request */\nretry:\n    rc = db_exec_sql( &p_mgr->conn, query, &it->select_result );\n    if (lmgr_delayed_retry(p_mgr, rc))\n        goto retry;\n    else if (rc)\n    {\n        MemFree( it );\n        return NULL;\n    }\n    else\n        return it;\n}\n"
  },
  {
    "path": "src/list_mgr/listmgr_update.c",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2008, 2009 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n#ifdef HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n\n#include \"list_mgr.h\"\n#include \"database.h\"\n#include \"listmgr_common.h\"\n#include \"listmgr_stripe.h\"\n#include \"rbh_logs.h\"\n#include <inttypes.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <unistd.h>\n#include <pthread.h>\n\nint ListMgr_Update(lmgr_t *p_mgr, const entry_id_t *p_id,\n                   const attr_set_t *p_update_set)\n{\n    int rc;\n    GString *req;\n    DEF_PK(pk);\n\n    /* read only fields in info mask? */\n    if (readonly_fields(p_update_set->attr_mask)) {\n        attr_mask_t and =\n            attr_mask_and(&p_update_set->attr_mask, &readonly_attr_set);\n        DisplayLog(LVL_MAJOR, LISTMGR_TAG,\n                   \"Error: trying to update read only \" \"values: attr_mask=\"\n                   DMASK, PMASK(&and));\n        return DB_INVALID_ARG;\n    }\n\n    entry_id2pk(p_id, PTR_PK(pk));\n\n    req = g_string_new(NULL);\n\n retry:\n    rc = lmgr_begin(p_mgr);\n    if (lmgr_delayed_retry(p_mgr, rc))\n        goto retry;\n\n    /* update fields in main table */\n    if (main_fields(p_update_set->attr_mask)) {\n        g_string_assign(req, \"UPDATE \" MAIN_TABLE \" SET \");\n\n        rc = attrset2updatelist(p_mgr, req, p_update_set, T_MAIN, 0);\n        if (rc < 0) {\n            rc = -rc;\n            goto free_str;\n        } else if (rc > 0) {\n            g_string_append_printf(req, \" WHERE id=\" DPK, pk);\n            rc = db_exec_sql(&p_mgr->conn, req->str, NULL);\n            if 
(lmgr_delayed_retry(p_mgr, rc))\n                goto retry;\n            else if (rc)\n                goto rollback;\n        }\n    }\n\n    /* update names table */\n    if (ATTR_MASK_TEST(p_update_set, name)\n        && ATTR_MASK_TEST(p_update_set, parent_id)) {\n        g_string_assign(req, \"INSERT INTO \" DNAMES_TABLE \"(id\");\n        attrmask2fieldlist(req, p_update_set->attr_mask, T_DNAMES, \"\", \"\",\n                           AOF_LEADING_SEP);\n        g_string_append_printf(req, \",pkn) VALUES (\" DPK, pk);\n        attrset2valuelist(p_mgr, req, p_update_set, T_DNAMES, AOF_LEADING_SEP);\n        g_string_append(req,\n                        \",\" HNAME_DEF\n                        \") ON DUPLICATE KEY UPDATE id=VALUES(id)\");\n        attrset2updatelist(p_mgr, req, p_update_set, T_DNAMES,\n                           AOF_LEADING_SEP | AOF_GENERIC_VAL);\n\n        rc = db_exec_sql(&p_mgr->conn, req->str, NULL);\n        if (lmgr_delayed_retry(p_mgr, rc))\n            goto retry;\n        else if (rc)\n            goto rollback;\n    } else if (ATTR_MASK_TEST(p_update_set, name)\n               || ATTR_MASK_TEST(p_update_set, parent_id)) {\n        DisplayLog(LVL_DEBUG, LISTMGR_TAG,\n                   \"WARNING: missing attribute to update name information\"\n                   \" (entry \" DPK \"): name %s, parent_id %s\", pk,\n                   ATTR_MASK_TEST(p_update_set, name) ? \"is set\" : \"is not set\",\n                   ATTR_MASK_TEST(p_update_set,\n                                  parent_id) ? 
\"is set\" : \"is not set\");\n    }\n\n    /* update annex table */\n    if (annex_fields(p_update_set->attr_mask)) {\n        g_string_assign(req, \"UPDATE \" ANNEX_TABLE \" SET \");\n        rc = attrset2updatelist(p_mgr, req, p_update_set, T_ANNEX, 0);\n        if (rc < 0) {\n            rc = -rc;\n            goto free_str;\n        } else if (rc > 0) {\n            g_string_append_printf(req, \" WHERE id=\" DPK, pk);\n            rc = db_exec_sql(&p_mgr->conn, req->str, NULL);\n            if (lmgr_delayed_retry(p_mgr, rc))\n                goto retry;\n            else if (rc)\n                goto rollback;\n        }\n    }\n#ifdef _LUSTRE\n    if (ATTR_MASK_TEST(p_update_set, stripe_info)) {\n#ifdef HAVE_LLAPI_FSWAP_LAYOUTS\n        int validator = ATTR(p_update_set, stripe_info).validator;\n#else\n        int validator = VALID(p_id);\n#endif\n\n        const stripe_items_t *p_items = NULL;\n\n        if (ATTR_MASK_TEST(p_update_set, stripe_items))\n            p_items = &ATTR(p_update_set, stripe_items);\n\n        rc = update_stripe_info(p_mgr, pk, validator,\n                                &ATTR(p_update_set, stripe_info), p_items,\n                                true);\n        if (lmgr_delayed_retry(p_mgr, rc))\n            goto retry;\n        else if (rc)\n            goto rollback;\n    }\n#endif\n\n    rc = lmgr_commit(p_mgr);\n    if (lmgr_delayed_retry(p_mgr, rc))\n        goto retry;\n    if (rc == DB_SUCCESS)\n        p_mgr->nbop[OPIDX_UPDATE]++;\n\n    goto free_str;\n\n rollback:\n    lmgr_rollback(p_mgr);\n free_str:\n    g_string_free(req, TRUE);\n    return rc;\n}\n\n/** XXX ListMgr_MassUpdate() is not used => dropped in v3.0 */\n\nint ListMgr_Replace(lmgr_t *p_mgr, entry_id_t *old_id, attr_set_t *old_attrs,\n                    entry_id_t *new_id, attr_set_t *new_attrs,\n                    bool src_is_last, bool update_target_if_exists)\n{\n    GString *req = NULL;\n    DEF_PK(oldpk);\n    DEF_PK(newpk);\n    int rc;\n\n retry:\n    
rc = lmgr_begin(p_mgr);\n    if (lmgr_delayed_retry(p_mgr, rc))\n        goto retry;\n    else if (rc)\n        return rc;\n\n    /* delete the old entry */\n    rc = listmgr_remove_no_tx(p_mgr, old_id, old_attrs, src_is_last);\n    if (lmgr_delayed_retry(p_mgr, rc))\n        goto retry;\n    else if (rc)\n        goto rollback;\n\n    /* create the new one */\n    rc = listmgr_batch_insert_no_tx(p_mgr, &new_id, &new_attrs, 1,\n                                    update_target_if_exists);\n    if (lmgr_delayed_retry(p_mgr, rc))\n        goto retry;\n    else if (rc)\n        goto rollback;\n\n    /* update parent ids in NAMES table */\n    entry_id2pk(old_id, PTR_PK(oldpk));\n    entry_id2pk(new_id, PTR_PK(newpk));\n\n    req = g_string_new(\"UPDATE \" DNAMES_TABLE);\n    g_string_append_printf(req, \" SET parent_id=\" DPK \" WHERE parent_id=\" DPK,\n                           newpk, oldpk);\n    rc = db_exec_sql(&p_mgr->conn, req->str, NULL);\n    if (lmgr_delayed_retry(p_mgr, rc))\n        goto retry;\n    else if (rc)\n        goto rollback;\n\n    rc = lmgr_commit(p_mgr);\n    if (lmgr_delayed_retry(p_mgr, rc))\n        goto retry;\n    else {\n        g_string_free(req, TRUE);\n        return rc;\n    }\n\n rollback:\n    lmgr_rollback(p_mgr);\n    if (req != NULL)\n        g_string_free(req, TRUE);\n    return rc;\n}\n"
  },
  {
    "path": "src/list_mgr/listmgr_vars.c",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2009, 2010 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n/**\n * Persistent variables management\n */\n\n#ifdef HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n\n#include \"list_mgr.h\"\n#include \"database.h\"\n#include \"listmgr_common.h\"\n#include \"rbh_logs.h\"\n#include <stdio.h>\n\nint lmgr_get_var(db_conn_t *pconn, const char *varname, char *value,\n                 int bufsize)\n{\n    int rc;\n    result_handle_t result;\n    char *str_val = NULL;\n    GString *req = NULL;\n\n    if (!varname || !value)\n        return DB_INVALID_ARG;\n\n    req = g_string_new(\"SELECT value FROM \" VAR_TABLE \" WHERE varname=\");\n    g_string_append_printf(req, \"'%s'\", varname);\n\n    /* execute the request */\n    rc = db_exec_sql(pconn, req->str, &result);\n    if (rc)\n        goto free_str;\n\n    rc = db_next_record(pconn, &result, &str_val, 1);\n\n    if (rc == DB_END_OF_LIST)\n        rc = DB_NOT_EXISTS;\n\n    if (rc)\n        goto free_res;\n\n    if (str_val == NULL) {\n        rc = DB_REQUEST_FAILED;\n        goto free_res;\n    }\n\n    /* copy the result */\n    if (strlen(str_val) >= bufsize) {\n        rc = DB_BUFFER_TOO_SMALL;\n    } else {\n        strcpy(value, str_val);\n        rc = DB_SUCCESS;\n    }\n\n free_res:\n    db_result_free(pconn, &result);\n free_str:\n    g_string_free(req, TRUE);\n    return rc;\n}\n\nint lmgr_set_var(db_conn_t *pconn, const char *varname, const char *value)\n{\n    GString *query;\n    int rc;\n    char escaped[1024];\n\n    /* delete var if value is NULL */\n    if (value == NULL) {\n        query = 
g_string_new(\"DELETE FROM \" VAR_TABLE \" WHERE varname =\");\n        g_string_append_printf(query, \"'%s'\", varname);\n\n        rc = db_exec_sql(pconn, query->str, NULL);\n        goto out;\n    } else\n        query = g_string_new(NULL);\n\n    /* escape special characters in value */\n    rc = db_escape_string(pconn, escaped, sizeof(escaped), value);\n    if (rc != DB_SUCCESS)\n        goto out;\n\n    g_string_printf(query,\n                    \"INSERT INTO \" VAR_TABLE\n                    \" (varname,value) VALUES ('%s','%s') \"\n                    \"ON DUPLICATE KEY UPDATE value='%s'\", varname, escaped,\n                    escaped);\n\n    rc = db_exec_sql(pconn, query->str, NULL);\n out:\n    g_string_free(query, TRUE);\n    return rc;\n}\n\n/**\n *  Get variable value.\n */\nint ListMgr_GetVar(lmgr_t *p_mgr, const char *varname, char *value,\n                   int bufsize)\n{\n    int rc;\n retry:\n    rc = lmgr_get_var(&p_mgr->conn, varname, value, bufsize);\n    if (lmgr_delayed_retry(p_mgr, rc))\n        goto retry;\n\n    return rc;\n}\n\n/**\n *  Set variable value.\n *  @param value size must not exceed 1024.\n */\nint ListMgr_SetVar(lmgr_t *p_mgr, const char *varname, const char *value)\n{\n    int rc;\n retry:\n    rc = lmgr_set_var(&p_mgr->conn, varname, value);\n    if (lmgr_delayed_retry(p_mgr, rc))\n        goto retry;\n    return rc;\n}\n"
  },
  {
    "path": "src/list_mgr/mysql_wrapper.c",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2008, 2009 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n#ifdef HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n\n#include \"list_mgr.h\"\n#include \"database.h\"\n#include \"rbh_logs.h\"\n#include \"rbh_misc.h\"\n#include \"Memory.h\"\n#include <stdio.h>\n#include <unistd.h>\n#include <glib.h>\n#include <time.h>\n/* mysql includes */\n#include <mysqld_error.h>\n#include <errmsg.h>\n\n#define _DEBUG_DB\n\nstatic int mysql_error_convert(int err, bool verb)\n{\n    switch (err) {\n    case 0:\n        return DB_SUCCESS;\n    case ER_NO_SUCH_TABLE:\n        return DB_NOT_EXISTS;\n    case ER_DUP_ENTRY:\n        return DB_ALREADY_EXISTS;\n#ifdef _MYSQL5\n    case ER_TRG_DOES_NOT_EXIST:\n        return DB_TRG_NOT_EXISTS;\n#endif\n    case ER_BAD_FIELD_ERROR:\n        if (verb)\n            DisplayLog(LVL_CRIT, LISTMGR_TAG, \"Invalid DB field\");\n        return DB_INVALID_ARG;\n    case ER_PARSE_ERROR:\n        if (verb)\n            DisplayLog(LVL_CRIT, LISTMGR_TAG, \"SQL request parse error\");\n        return DB_REQUEST_FAILED;\n    case ER_LOCK_DEADLOCK:\n        if (verb)\n            DisplayLog(LVL_EVENT, LISTMGR_TAG, \"DB deadlock detected\");\n        return DB_DEADLOCK;\n    case ER_LOCK_WAIT_TIMEOUT:\n        if (verb) {\n            if (!strcasecmp(lmgr_config.db_config.engine, \"InnoDB\"))\n                DisplayLog(LVL_MAJOR, LISTMGR_TAG, \"Lock timeout detected. 
\"\n                           \"Consider increasing \\\"innodb_lock_wait_timeout\\\" in MySQL config.\");\n            else\n                DisplayLog(LVL_MAJOR, LISTMGR_TAG, \"Lock timeout detected. \"\n                           \"Consider increasing \\\"lock_wait_timeout\\\" in MySQL config.\");\n        }\n        return DB_DEADLOCK;\n\n        /* connection relative errors */\n\n        /* In case of a deconnection, mysql_stmt_fetch returns this error\n         * CR_COMMANDS_OUT_OF_SYNC which is actually not very appropriate... */\n    case CR_COMMANDS_OUT_OF_SYNC:\n        /* when connection is lost, statements are no longer valid */\n    case ER_UNKNOWN_STMT_HANDLER:\n\n        /* It also returns ER_UNKNOWN_ERROR... In this case, we treat it as a\n         * disconnection anyway, to give a chance to the client to clean its\n         * internal state.\n         */\n    case ER_UNKNOWN_ERROR:\n\n        /* query may be interrupted for a connexion shutdown */\n    case ER_QUERY_INTERRUPTED:\n\n        /* These are really connection errors: */\n    case ER_SERVER_SHUTDOWN:\n    case CR_CONNECTION_ERROR:\n    case CR_SERVER_GONE_ERROR:\n    case CR_SERVER_LOST:\n    case CR_CONN_HOST_ERROR:\n        if (verb)\n            DisplayLog(LVL_CRIT, LISTMGR_TAG, \"DB connection error %d\", err);\n        return DB_CONNECT_FAILED;\n\n    default:\n        DisplayLog(verb ? 
LVL_MAJOR : LVL_DEBUG, LISTMGR_TAG,\n                   \"Unhandled error %d: default conversion to DB_REQUEST_FAILED\",\n                   err);\n        return DB_REQUEST_FAILED;\n    }\n}\n\nbool db_is_retryable(int db_err)\n{\n    switch (db_err) {\n    case DB_CONNECT_FAILED:\n    case DB_DEADLOCK:  /* Note: the whole transaction must be retryed */\n        return true;\n    default:\n        return false;\n    }\n}\n\n/* create client connection */\nint db_connect(db_conn_t *conn)\n{\n    my_bool reconnect = 1;\n    unsigned int retry = 0;\n\n    /* Connect to database */\n    if (mysql_init(conn) == NULL) {\n        DisplayLog(LVL_CRIT, LISTMGR_TAG,\n                   \"ERROR: failed to create MySQL client struct\");\n        return DB_CONNECT_FAILED;\n    }\n#if (MYSQL_VERSION_ID >= 50013)\n    /* set auto-reconnect option */\n    mysql_options(conn, MYSQL_OPT_RECONNECT, &reconnect);\n#else\n    /* older version */\n    conn->reconnect = 1;\n#endif\n\n    while (1) {\n        /* connect to server */\n        if (!mysql_real_connect\n            (conn, lmgr_config.db_config.server, lmgr_config.db_config.user,\n             lmgr_config.db_config.password, lmgr_config.db_config.db,\n             lmgr_config.db_config.port,\n             EMPTY_STRING(lmgr_config.db_config.socket) ?\n             NULL : lmgr_config.db_config.socket, 0)) {\n            /* connection error is retried at DB level */\n            if ((retry < 3)\n                && db_is_retryable(mysql_error_convert(mysql_errno(conn), 0))) {\n                DisplayLog(LVL_MAJOR, LISTMGR_TAG,\n                           \"Failed to connect to MySQL: Error: %s. Retrying...\",\n                           mysql_error(conn));\n                retry++;\n                sleep(1);\n            } else {\n                DisplayLog(LVL_CRIT, LISTMGR_TAG,\n                           \"Failed to connect to MySQL after %u retries: Error: %s. 
Aborting.\",\n                           retry, mysql_error(conn));\n                return DB_CONNECT_FAILED;\n            }\n        } else {\n            if (retry)\n                DisplayLog(LVL_CRIT, LISTMGR_TAG,\n                           \"Connection to MySQL server successful after %u retries.\",\n                           retry);\n            /* OK */\n            break;\n        }\n    }\n\n    /* Note [MySQL reference guide]: mysql_real_connect()  incorrectly reset\n     * the MYSQL_OPT_RECONNECT  option to its default value before MySQL 5.1.6.\n     * Therefore, prior to that version, if you want reconnect to be enabled for\n     * each connection, you must call mysql_options() with the\n     * MYSQL_OPT_RECONNECT option after each call to mysql_real_connect().\n     */\n#if (MYSQL_VERSION_ID >= 50013) && (MYSQL_VERSION_ID < 50106)\n    /* reset auto-reconnect option */\n    mysql_options(conn, MYSQL_OPT_RECONNECT, &reconnect);\n#endif\n\n    DisplayLog(LVL_FULL, LISTMGR_TAG, \"Logged on to database '%s' successfully\",\n               lmgr_config.db_config.db);\n    return DB_SUCCESS;\n}\n\nint db_close_conn(db_conn_t *conn)\n{\n    /* XXX Ensure there is no pending transactions? 
*/\n    mysql_close(conn);\n\n    DisplayLog(LVL_FULL, LISTMGR_TAG, \"Database connection closed\");\n\n    return DB_SUCCESS;\n}\n\n/* retrieve error message */\nchar *db_errmsg(db_conn_t *conn, char *errmsg, unsigned int buflen)\n{\n    if (strlen(mysql_error(conn)) + 1 > buflen)\n        rh_strncpy(errmsg, mysql_error(conn), buflen);\n    else\n        strcpy(errmsg, mysql_error(conn));\n\n    return errmsg;\n}\n\nstatic int _db_exec_sql(db_conn_t *conn, const char *query,\n                        result_handle_t *p_result, bool quiet)\n{\n    int rc;\n    int dberr;\n#ifdef _DEBUG_DB\n    DisplayLog(LVL_FULL, LISTMGR_TAG, \"SQL query: %s\", query);\n#endif\n\n    rc = mysql_real_query(conn, query, strlen(query));\n    dberr = mysql_errno(conn);\n    if (rc) {\n        rc = mysql_error_convert(dberr, quiet ? 0 : 1);\n        if (dberr == ER_DUP_ENTRY) {\n            DisplayLog(quiet ? LVL_DEBUG : LVL_EVENT, LISTMGR_TAG,\n                       \"A database record already exists for this entry: '%s' (%s)\",\n                       query, mysql_error(conn));\n        }\n#ifdef _MYSQL5\n        else if (dberr == ER_TRG_DOES_NOT_EXIST) {\n            DisplayLog(quiet ? LVL_DEBUG : LVL_EVENT, LISTMGR_TAG,\n                       \"Trigger does not exist: '%s' (%s)\",\n                       query, mysql_error(conn));\n        }\n#endif\n        else if (dberr == ER_NO_SUCH_TABLE)\n            DisplayLog(quiet ? 
LVL_DEBUG : LVL_EVENT, LISTMGR_TAG,\n                       \"Table does not exist: '%s' (%s)\",\n                       query, mysql_error(conn));\n        else if (!db_is_retryable(rc))\n            DisplayLog(LVL_MAJOR, LISTMGR_TAG,\n                       \"Error %d executing query '%s': %s\", rc, query,\n                       mysql_error(conn));\n\n        return rc;\n    } else {\n        /* fetch results to the client */\n        if (p_result) {\n            *p_result = mysql_store_result(conn);\n            if (*p_result == NULL)\n                return DB_NOT_EXISTS;\n        }\n\n        return DB_SUCCESS;\n    }\n}\n\nint db_exec_sql_quiet(db_conn_t *conn, const char *query,\n                      result_handle_t *p_result)\n{\n    return _db_exec_sql(conn, query, p_result, true);\n}\n\nint db_exec_sql(db_conn_t *conn, const char *query, result_handle_t *p_result)\n{\n    return _db_exec_sql(conn, query, p_result, false);\n}\n\n/* free result resources */\nint db_result_free(db_conn_t *conn, result_handle_t *p_result)\n{\n    if (*p_result)\n        mysql_free_result(*p_result);\n    return DB_SUCCESS;\n}\n\n/* get the next record from result */\nint db_next_record(db_conn_t *conn, result_handle_t *p_result,\n                   char *outtab[], unsigned int outtabsize)\n{\n    int i;\n    MYSQL_ROW row;\n    unsigned int nb_fields;\n\n    /* init ouput tab */\n    for (i = 0; i < outtabsize; i++)\n        outtab[i] = NULL;\n\n    if (!(row = mysql_fetch_row(*p_result)))\n        return DB_END_OF_LIST;\n\n    nb_fields = mysql_num_fields(*p_result);\n\n    for (i = 0; (i < outtabsize) && (i < nb_fields); i++)\n        outtab[i] = row[i];\n\n    if (nb_fields > outtabsize) {\n        DisplayLog(LVL_CRIT, LISTMGR_TAG,\n                   \"Output array too small: size = %u, num_fields = %u\",\n                   outtabsize, nb_fields);\n        return DB_BUFFER_TOO_SMALL;\n    }\n\n    return DB_SUCCESS;\n\n}\n\n/* retrieve number of records in result 
*/\nint db_result_nb_records(db_conn_t *conn, result_handle_t *p_result)\n{\n    return mysql_num_rows(*p_result);\n}\n\nint db_list_table_info(db_conn_t *conn, const char *table,\n                       char **field_tab, char **type_tab, char **default_tab,\n                       unsigned int outtabsize,\n                       char *inbuffer, unsigned int inbuffersize)\n{\n    char request[4096];\n    MYSQL_RES *result;\n    MYSQL_ROW row;\n    int i, rc, curr_output;\n    char *curr_ptr = inbuffer;\n\n    snprintf(request, sizeof(request), \"SHOW COLUMNS FROM %s\", table);\n    rc = db_exec_sql_quiet(conn, request, &result);\n\n    if (rc)\n        return rc;\n\n    if (!result) {\n        DisplayLog(LVL_DEBUG, LISTMGR_TAG, \"%s does not exist\", table);\n        return DB_NOT_EXISTS;\n    }\n\n    /* init ouput tabs */\n    for (i = 0; i < outtabsize; i++) {\n        field_tab[i] = NULL;\n        if (type_tab)\n            type_tab[i] = NULL;\n        if (default_tab)\n            default_tab[i] = NULL;\n    }\n\n    curr_output = 0;\n    while ((row = mysql_fetch_row(result))) {\n        strcpy(curr_ptr, row[0]);\n        field_tab[curr_output] = curr_ptr;\n        curr_ptr += strlen(curr_ptr) + 1;\n\n        if (type_tab) {\n            strcpy(curr_ptr, row[1]);\n            type_tab[curr_output] = curr_ptr;\n            curr_ptr += strlen(curr_ptr) + 1;\n        }\n\n        if (default_tab && row[4] != NULL) {\n            strcpy(curr_ptr, row[4]);\n            default_tab[curr_output] = curr_ptr;\n            curr_ptr += strlen(curr_ptr) + 1;\n        }\n\n        curr_output++;\n    }\n    mysql_free_result(result);\n\n    return DB_SUCCESS;\n}\n\nunsigned long long db_last_id(db_conn_t *conn)\n{\n    return mysql_insert_id(conn);\n}\n\n/* escape a string in a SQL request */\nint db_escape_string(db_conn_t *conn, char *str_out, size_t out_size,\n                     const char *str_in)\n{\n    int len_in = strlen(str_in);\n\n    /* output size must be at 
least 2 x instrlen + 1 for the worst case */\n    if (out_size < 2 * len_in + 1)\n        return DB_BUFFER_TOO_SMALL;\n\n    /* escape special characters in value */\n    mysql_real_escape_string(conn, str_out, str_in, len_in);\n    return DB_SUCCESS;\n}\n\n/* remove a database component (table, trigger, function, ...) */\nint db_drop_component(db_conn_t *conn, db_object_e obj_type, const char *name)\n{\n    const char *tname = \"\";\n    char query[1024];\n\n    switch (obj_type) {\n    case DBOBJ_TABLE:\n        tname = \"TABLE\";\n        break;\n    case DBOBJ_FUNCTION:\n        tname = \"FUNCTION\";\n        break;\n    case DBOBJ_PROC:\n        tname = \"PROCEDURE\";\n        break;\n    case DBOBJ_TRIGGER:\n        tname = \"TRIGGER\";\n        break;\n    default:\n        DisplayLog(LVL_CRIT, LISTMGR_TAG, \"Object type not supported in %s\",\n                   __func__);\n        return DB_NOT_SUPPORTED;\n    }\n\n#ifndef _MYSQL5 /* only tables are supported before MySQL 5 */\n    if (obj_type != DBOBJ_TABLE) {\n        DisplayLog(LVL_CRIT, LISTMGR_TAG,\n                   \"You should upgrade to MYSQL 5 or + to use %s\", tname);\n        return DB_NOT_SUPPORTED;\n    }\n#endif\n\n    if (mysql_get_server_version(conn) < 50032) {\n        sprintf(query, \"DROP %s %s \", tname, name);\n        return _db_exec_sql(conn, query, NULL, true);\n    } else {\n        sprintf(query, \"DROP %s IF EXISTS %s \", tname, name);\n        return _db_exec_sql(conn, query, NULL, false);\n    }\n}\n\n/**\n * check a component exists in the database\n * \\param arg depends on the object type: src table for triggers, NULL for\n *            others.\n */\nint db_check_component(db_conn_t *conn, db_object_e obj_type, const char *name,\n                       const char *arg)\n{\n    char query[1024];\n    MYSQL_RES *result;\n    MYSQL_ROW row;\n    int rc;\n\n    if (obj_type == DBOBJ_TRIGGER) {\n        sprintf(query,\n                \"SELECT EVENT_OBJECT_TABLE FROM 
INFORMATION_SCHEMA.TRIGGERS WHERE TRIGGER_SCHEMA='%s'\"\n                \"AND TRIGGER_NAME='%s'\", lmgr_config.db_config.db, name);\n\n        rc = _db_exec_sql(conn, query, &result, false);\n        if (rc)\n            return rc;\n\n        if (!result) {\n            DisplayLog(LVL_DEBUG, LISTMGR_TAG, \"%s does not exist\", name);\n            return DB_NOT_EXISTS;\n        }\n\n        row = mysql_fetch_row(result);\n        if (row) {\n            DisplayLog(LVL_FULL, LISTMGR_TAG,\n                       \"Trigger %s exists and is defined on %s\", name,\n                       row[0] ? row[0] : \"<null>\");\n            if (!arg) {\n                /* just check the row is set */\n                if (row[0] == NULL || row[0][0] == '\\0')\n                    rc = DB_BAD_SCHEMA;\n                else\n                    rc = DB_SUCCESS;\n            } else if (strcmp(arg, row[0])) {\n                DisplayLog(LVL_CRIT, LISTMGR_TAG,\n                           \"Trigger %s is on wrong table: expected %s, got %s\",\n                           name, arg, row[0]);\n                rc = DB_BAD_SCHEMA;\n            } else\n                rc = DB_SUCCESS;\n\n            if (mysql_fetch_row(result)) {\n                DisplayLog(LVL_CRIT, LISTMGR_TAG,\n                           \"Unexpected multiple definition of %s on %s\", name,\n                           row[0]);\n                rc = DB_BAD_SCHEMA;\n            }\n        } else\n            rc = DB_NOT_EXISTS;\n\n        mysql_free_result(result);\n        return rc;\n    } else if (obj_type == DBOBJ_FUNCTION) {\n        sprintf(query, \"SHOW FUNCTION STATUS WHERE DB='%s' AND NAME='%s'\",\n                lmgr_config.db_config.db, name);\n\n        rc = _db_exec_sql(conn, query, &result, false);\n        if (rc)\n            return rc;\n\n        if (!result) {\n            DisplayLog(LVL_DEBUG, LISTMGR_TAG, \"%s does not exist\", name);\n            return DB_NOT_EXISTS;\n        }\n\n        row = 
mysql_fetch_row(result);\n        if (row) {\n            DisplayLog(LVL_FULL, LISTMGR_TAG, \"Function %s exists\", name);\n            rc = DB_SUCCESS;\n\n            if (mysql_fetch_row(result)) {\n                DisplayLog(LVL_CRIT, LISTMGR_TAG,\n                           \"Unexpected multiple definition of %s\", name);\n                rc = DB_BAD_SCHEMA;\n            }\n        } else\n            rc = DB_NOT_EXISTS;\n\n        mysql_free_result(result);\n        return rc;\n    } else {\n        RBH_BUG(\"Only triggers are supported for now\");\n    }\n}\n\n/* create a trigger */\nint db_create_trigger(db_conn_t *conn, const char *name, const char *event,\n                      const char *table, const char *body)\n{\n#ifdef _MYSQL5\n    int rc;\n    GString *request = g_string_new(\"CREATE TRIGGER \");\n\n    g_string_append_printf(request, \"%s %s ON %s FOR EACH ROW \"\n                           \"BEGIN %s END\", name, event, table, body);\n    rc = _db_exec_sql(conn, request->str, NULL, false);\n    g_string_free(request, TRUE);\n    return rc;\n#else\n\n    DisplayLog(LVL_CRIT, LISTMGR_TAG, \"Trigger %s was not created: \"\n               \"you should upgrade to MYSQL 5 to use triggers\", name);\n    return DB_NOT_SUPPORTED;\n#endif\n}\n\nstatic inline const char *txlvl_str(tx_level_e lvl)\n{\n    switch (lvl) {\n    case TXL_SERIALIZABLE:\n        return \"SERIALIZABLE\";\n    case TXL_REPEATABLE_RD:\n        return \"REPEATABLE READ\";\n    case TXL_READ_COMMITTED:\n        return \"READ COMMITTED\";\n    case TXL_READ_UNCOMMITTED:\n        return \"READ UNCOMMITTED\";\n    default:\n        return \"\";\n    }\n}\n\n/** set transaction level (optimize performance or locking) */\nint db_transaction_level(db_conn_t *conn, what_trans_e what_tx,\n                         tx_level_e tx_level)\n{\n    char query[1024];\n    if (what_tx == TRANS_NEXT)\n        sprintf(query, \"SET TRANSACTION ISOLATION LEVEL %s\",\n                txlvl_str(tx_level));\n   
 else\n        sprintf(query, \"SET SESSION TRANSACTION ISOLATION LEVEL %s\",\n                txlvl_str(tx_level));\n\n    return _db_exec_sql(conn, query, NULL, false);\n}\n"
  },
  {
    "path": "src/list_mgr/sqlite_wrapper.c",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2008, 2009, 2010 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n#ifdef HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n\n#include \"list_mgr.h\"\n#include \"database.h\"\n#include \"rbh_logs.h\"\n#include <stdio.h>\n#include <unistd.h>\n\nstatic int sqlite_error_convert(int err)\n{\n    switch (err) {\n    case SQLITE_OK:\n        return DB_SUCCESS;\n    case SQLITE_NOTFOUND:\n        return DB_NOT_EXISTS;\n    case SQLITE_CONSTRAINT:    /* unique constraint violation */\n        return DB_ALREADY_EXISTS;\n    default:\n        DisplayLog(LVL_MAJOR, LISTMGR_TAG,\n                   \"Unhandled error %d: default conversion to DB_REQUEST_FAILED\",\n                   err);\n        return DB_REQUEST_FAILED;\n    }\n}\n\nstatic int db_is_busy_err(int rc)\n{\n    /* sometimes, SQLITE_CANTOPEN meens the db is busy (locked)... 
*/\n    return (rc == SQLITE_BUSY) || (rc == SQLITE_CANTOPEN);\n}\n\nstatic int set_cache_size(sqlite3 *conn)\n{\n    int rc;\n    char *errmsg;\n\n    rc = sqlite3_exec(conn, \"PRAGMA cache_size=1000000\", NULL, NULL, &errmsg);\n    if (rc != SQLITE_OK) {\n        DisplayLog(LVL_CRIT, LISTMGR_TAG, \"SQL error: %s\", errmsg);\n        sqlite3_free(errmsg);\n        return DB_REQUEST_FAILED;\n    }\n\n    return DB_SUCCESS;\n}\n\n/* create client connection */\nint db_connect(db_conn_t *conn)\n{\n    int rc;\n\n    /* Connect to database */\n    rc = sqlite3_open(lmgr_config.db_config.filepath, conn);\n    if (rc != 0) {\n        if (*conn) {\n            DisplayLog(LVL_CRIT, LISTMGR_TAG,\n                       \"Failed to connect to SQLite DB (file %s): Error: %s\",\n                       lmgr_config.db_config.filepath, sqlite3_errmsg(*conn));\n        } else {\n            DisplayLog(LVL_CRIT, LISTMGR_TAG,\n                       \"Failed to connect to SQLite DB (file %s): Error: %d\",\n                       lmgr_config.db_config.filepath, rc);\n        }\n        return DB_CONNECT_FAILED;\n    }\n\n    DisplayLog(LVL_FULL, LISTMGR_TAG, \"Logged on to database successfully\");\n\n    set_cache_size(*conn);\n\n    return DB_SUCCESS;\n}\n\n/* retrieve error message */\nchar *db_errmsg(db_conn_t *conn, char *errmsg, unsigned int buflen)\n{\n    if (*conn == NULL) {\n        strcpy(errmsg, \"Connection not initialized\");\n        return errmsg;\n    }\n\n    if (strlen(sqlite3_errmsg(*conn)) + 1 > buflen)\n        rh_strncpy(errmsg, sqlite3_errmsg(*conn), buflen);\n    else\n        strcpy(errmsg, sqlite3_errmsg(*conn));\n\n    return errmsg;\n}\n\nint db_exec_sql(db_conn_t *conn, const char *query, result_handle_t *p_result)\n{\n    int rc;\n    char *errmsg = NULL;\n\n#ifdef _DEBUG_DB\n    DisplayLog(LVL_FULL, LISTMGR_TAG, \"SQL query: %s\", query);\n#endif\n\n    if (!p_result) {\n        do {\n            rc = sqlite3_exec(*conn, query, NULL, NULL, 
&errmsg);\n\n            if (db_is_busy_err(rc))\n                usleep(lmgr_config.db_config.retry_delay_microsec);\n\n        }\n        while (db_is_busy_err(rc));\n\n        if (rc != SQLITE_OK) {\n            DisplayLog(LVL_DEBUG, LISTMGR_TAG,\n                       \"SQLite command failed (%d): %s: %s\", rc,\n                       errmsg ? errmsg : sqlite3_errmsg(*conn), query);\n            if (errmsg)\n                sqlite3_free(errmsg);\n            return sqlite_error_convert(rc);\n        }\n    } else {\n        p_result->curr_row = 0;\n        p_result->result_array = 0;\n\n        do {\n            rc = sqlite3_get_table(*conn, query, &p_result->result_array,\n                                   &p_result->nb_rows, &p_result->nb_cols,\n                                   &errmsg);\n\n            if (db_is_busy_err(rc))\n                usleep(lmgr_config.db_config.retry_delay_microsec);\n\n        }\n        while (db_is_busy_err(rc));\n\n        if (rc != SQLITE_OK) {\n            DisplayLog(LVL_DEBUG, LISTMGR_TAG,\n                       \"SQLite command failed (%d): %s: %s\", rc,\n                       errmsg ? 
errmsg : sqlite3_errmsg(*conn), query);\n            if (errmsg)\n                sqlite3_free(errmsg);\n            if (p_result->result_array)\n                sqlite3_free_table(p_result->result_array);\n            return sqlite_error_convert(rc);\n        }\n    }\n\n    return DB_SUCCESS;\n}\n\nint db_exec_sql_quiet(db_conn_t *conn, const char *query,\n                      result_handle_t *p_result)\n{\n    return db_exec_sql(conn, query, p_result);\n}\n\n/* get the next record from result */\nint db_next_record(db_conn_t *conn,\n                   result_handle_t *p_result, char *outtab[],\n                   unsigned int outtabsize)\n{\n    int i;\n\n    if (p_result->curr_row >= p_result->nb_rows)\n        return DB_END_OF_LIST;\n\n    if (p_result->nb_cols > outtabsize)\n        return DB_BUFFER_TOO_SMALL;\n\n    for (i = 0; i < p_result->nb_cols; i++) {\n        /* /!\\ in sqlite, the request retuns columns header as row 0 !!! */\n        outtab[i] =\n            p_result->result_array[(p_result->curr_row + 1) *\n                                   p_result->nb_cols + i];\n    }\n    p_result->curr_row++;\n\n    return DB_SUCCESS;\n}\n\nint db_result_free(db_conn_t *conn, result_handle_t *p_result)\n{\n    if (p_result->result_array)\n        sqlite3_free_table(p_result->result_array);\n    memset(p_result, 0, sizeof(result_handle_t));\n\n    return DB_SUCCESS;\n}\n\n/* retrieve number of records in result */\nint db_result_nb_records(db_conn_t *conn, result_handle_t *p_result)\n{\n    return p_result->nb_rows;\n}\n\nint db_close_conn(db_conn_t *conn)\n{\n    /* XXX Ensure there is no pending transactions? 
*/\n    sqlite3_close(*conn);\n    return DB_SUCCESS;\n}\n\nint db_list_table_fields(db_conn_t *conn, const char *table,\n                         char **outtab,\n                         unsigned int outtabsize, char *inbuffer,\n                         unsigned int inbuffersize)\n{\n    char request[4096];\n    char **result = NULL;\n    int rows, cols;\n    char *errmsg = NULL;\n    int i, rc, curr_output;\n    char *curr_ptr = inbuffer;\n\n    sprintf(request, \"PRAGMA table_info(%s)\", table);\n\n    rc = sqlite3_get_table(*conn, request, &result, &rows, &cols, &errmsg);\n\n    if (rc != SQLITE_OK) {\n        DisplayLog(LVL_DEBUG, LISTMGR_TAG, \"SQLite command failed (%d):  \"\n                   \"%s: %s\", rc, errmsg ? errmsg : sqlite3_errmsg(*conn),\n                   request);\n        if (errmsg)\n            sqlite3_free(errmsg);\n        if (result)\n            sqlite3_free_table(result);\n        return sqlite_error_convert(rc);\n    } else if (rows == 0) {\n        if (errmsg)\n            sqlite3_free(errmsg);\n        if (result)\n            sqlite3_free_table(result);\n        return DB_NOT_EXISTS;\n    }\n\n    /* init ouput tab */\n    for (i = 0; i < outtabsize; i++)\n        outtab[i] = NULL;\n\n    curr_output = 0;\n\n    /* starting at 1 because first raw contains headers */\n    for (i = 1; i < rows + 1; i++) {\n        strcpy(curr_ptr, result[1 + i * cols]);\n        outtab[curr_output] = curr_ptr;\n        curr_ptr += strlen(curr_ptr) + 1;\n        curr_output++;\n    }\n\n    sqlite3_free_table(result);\n\n    return DB_SUCCESS;\n\n}\n\nunsigned long long db_last_id(db_conn_t *conn)\n{\n    return sqlite3_last_insert_rowid(*conn);;\n}\n\n/* escape a string in a SQL request */\nvoid db_escape_string(db_conn_t *conn, char *str_out, size_t out_size,\n                      const char *str_in)\n{\n    /* using slqite3_snprintf with \"%q\" format, to escape strings */\n    sqlite3_snprintf(out_size, str_out, str_in);\n}\n"
  },
  {
    "path": "src/modules/Makefile.am",
    "content": "AM_CFLAGS= $(CC_OPT) $(DB_CFLAGS) $(PURPOSE_CFLAGS)\n\npkglib_LTLIBRARIES=\n\npkglib_LTLIBRARIES+=librbh_mod_common.la\nlibrbh_mod_common_la_SOURCES=common_actions.c common_sched.c sched_ratelimit.c \\\n\t\t\t     mod_internal.c\nlibrbh_mod_common_la_LDFLAGS=-version-info 0:0:0\nlibrbh_mod_common_la_LIBADD=-lz\n\npkglib_LTLIBRARIES+=librbh_mod_alerter.la\nlibrbh_mod_alerter_la_SOURCES=alerter.c mod_internal.c mod_internal.h\nlibrbh_mod_alerter_la_LDFLAGS=-version-info 0:0:0\n\npkglib_LTLIBRARIES+=librbh_mod_checker.la\nlibrbh_mod_checker_la_SOURCES=checker.c mod_internal.c mod_internal.h\nlibrbh_mod_checker_la_LDFLAGS=-version-info 0:0:0\n\npkglib_LTLIBRARIES+=librbh_mod_basic.la\nlibrbh_mod_basic_la_SOURCES=basic.c mod_internal.c mod_internal.h\nlibrbh_mod_basic_la_LDFLAGS=-version-info 0:0:0\n\npkglib_LTLIBRARIES+=librbh_mod_modeguard.la\nlibrbh_mod_modeguard_la_SOURCES=modeguard.c mod_internal.c mod_internal.h\nlibrbh_mod_modeguard_la_LDFLAGS=-version-info 0:0:0\n\npkglib_LTLIBRARIES+=librbh_mod_test.la\nlibrbh_mod_test_la_SOURCES=test_sched.c mod_internal.c mod_internal.h\nlibrbh_mod_test_la_LDFLAGS=-version-info 0:0:0\n\nif LUSTRE_HSM\npkglib_LTLIBRARIES+=librbh_mod_lhsm.la\nlibrbh_mod_lhsm_la_SOURCES=lhsm.c mod_internal.c mod_internal.h\nlibrbh_mod_lhsm_la_LDFLAGS=-version-info 0:0:0\nlibrbh_mod_lhsm_la_LIBADD=-llustreapi\nendif\nif HSM_LITE\npkglib_LTLIBRARIES+=librbh_mod_backup.la\nlibrbh_mod_backup_la_SOURCES=backup.c backup.h mod_internal.c mod_internal.h\nlibrbh_mod_backup_la_CFLAGS=$(AM_CFLAGS) -D_HSM_LITE\nlibrbh_mod_backup_la_LDFLAGS=-version-info 0:0:0\nendif\nif SHOOK\npkglib_LTLIBRARIES+=librbh_mod_shook.la\nlibrbh_mod_shook_la_SOURCES=shook.c backup.c backup.h mod_internal.c mod_internal.h\nlibrbh_mod_shook_la_CFLAGS=$(AM_CFLAGS) -DHAVE_SHOOK\nlibrbh_mod_shook_la_LDFLAGS=-version-info 0:0:0 -lshooksvr\nendif\n"
  },
  {
    "path": "src/modules/alerter.c",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2015,2016 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n/**\n * \\file alerter.c\n * \\brief manage alerts on filesystem entries\n */\n#ifdef HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n\n#include \"status_manager.h\"\n#include \"rbh_logs.h\"\n#include \"rbh_misc.h\"\n#include \"mod_internal.h\"\n\n#define TAG \"alerter\"\n\n/** set of managed status */\ntypedef enum {\n    STATUS_CLEAR,   /* checked, no alert raised */\n    STATUS_ALERT,   /* alert raised */\n\n    STATUS_COUNT,   /* number of possible file status */\n} alert_status_t;\n\nstatic const char *alerter_status_list[] = {\n    [STATUS_CLEAR] = \"clear\",\n    [STATUS_ALERT] = \"alert\",\n};\n\nstatic const char *alerter_status2str(alert_status_t st)\n{\n    switch (st) {\n    case STATUS_CLEAR:\n    case STATUS_ALERT:\n        return alerter_status_list[st];\n    default:\n        return NULL;\n    }\n}\n\n/** enum of specific attributes */\nenum alerter_info_e {\n    ATTR_LAST_CHECK = 0,\n    ATTR_LAST_ALERT,\n};\n\n/** size of specific info to be stored in DB:\n * last_check: unix epoch\n * last_alert: unix epoch\n */\nstatic sm_info_def_t alerter_info[] = {\n    [ATTR_LAST_CHECK] =\n        {\"last_check\", \"lstchk\", DB_UINT, 0, {.val_uint = 0}, PT_DURATION},\n    [ATTR_LAST_ALERT] =\n        {\"last_alert\", \"lstalrt\", DB_UINT, 0, {.val_uint = 0}, PT_DURATION},\n};\n\nstatic int alerter_executor(struct sm_instance *smi,\n                            const char *implements,\n                            const policy_action_t *action,\n                            /* arguments for the action : 
*/\n                            const entry_id_t *p_id, attr_set_t *p_attrs,\n                            const action_params_t *params,\n                            post_action_e *what_after, db_cb_func_t db_cb_fn,\n                            void *db_cb_arg)\n{\n    const char *val;\n    const char *status_str = NULL;\n    int rc = 0;\n    bool alert = false;\n\n    if (params == NULL) {\n        DisplayLog(LVL_MAJOR, TAG,\n                   \"Missing action parameters for 'alerter' status manager\");\n        return -EINVAL;\n    }\n\n    val = rbh_param_get(params, \"alert\");\n    if (val == NULL) {\n        DisplayLog(LVL_MAJOR, TAG,\n                   \"Missing action parameter 'alert = yes/clear' for 'alerter' status manager\");\n        return -EINVAL;\n    }\n\n    if (!strcasecmp(val, \"clear\")) {\n        /* if the action succeed new status will be: clear */\n        status_str = alerter_status2str(STATUS_CLEAR);\n    } else if (!strcasecmp(val, \"raise\")) {\n        /* if the action succeed new status will be: alert */\n        status_str = alerter_status2str(STATUS_ALERT);\n        alert = true;\n    } else {\n        DisplayLog(LVL_MAJOR, TAG,\n                   \"Invalid value for 'alert' action parameter: 'raise' or 'clear' expected\");\n        return -EINVAL;\n    }\n\n    /* set it now, at it may be modified by the specified function */\n    *what_after = PA_UPDATE;\n\n    rc = action_helper(action, \"alert\", p_id, p_attrs, params, smi,\n                       NULL, what_after, db_cb_fn, db_cb_arg);\n    if (rc)\n        return rc;\n\n    set_uint_info(smi, p_attrs, ATTR_LAST_CHECK, (unsigned int)time(NULL));\n    if (alert)\n        set_uint_info(smi, p_attrs, ATTR_LAST_ALERT, (unsigned int)time(NULL));\n\n    return set_status_attr(smi, p_attrs, status_str);\n}\n\nstatic int alerter_alert(const entry_id_t *p_entry_id, attr_set_t *p_attrs,\n                         const action_params_t *params, post_action_e *after,\n                    
     db_cb_func_t db_cb_fn, void *db_cb_arg)\n{\n    char *str_id = NULL;\n    bool str_is_alloc = false;\n    GString *gs = g_string_new(NULL);\n\n    /* build alert string */\n    if (ATTR_MASK_TEST(p_attrs, fullpath)) {\n        str_id = ATTR(p_attrs, fullpath);\n    } else {\n        if (asprintf(&str_id, DFID, PFID(p_entry_id)) < 0) {\n            DisplayLog(LVL_MAJOR, TAG,\n                       \"Could not allocate string for fid \"DFID\" path in %s\",\n                       PFID(p_entry_id), __func__);\n            return -ENOMEM;\n        };\n        str_is_alloc = true;\n    }\n\n    /** TODO build specific parameter that represents the alert rule */\n#if 0\n    rc = BoolExpr2str(&entry_proc_conf.alert_list[i].boolexpr, stralert,\n                      2 * RBH_PATH_MAX);\n    if (rc < 0)\n        strcpy(stralert, \"Error building alert string\");\n#endif\n\n    /** TODO build specific parameter that represents attr mask for the rule\n     * (alert mask) */\n    print_attrs(gs, p_attrs, null_mask, 0);\n\n    /* title: alert rule name */\n    RaiseEntryAlert(rbh_param_get(params, \"title\"), \"entry matches alert rule\",\n                    str_id, gs->str);\n\n    g_string_free(gs, TRUE);\n    if (str_is_alloc)\n        free(str_id);\n\n    return 0;\n}\n\n/** Status manager for alerts management */\nstatus_manager_t alerter_sm = {\n    .name = \"alerter\",\n    /* TODO can possibly raise alerts on file deletion? 
if so, must set\n     * a softrm_* fields */\n    .flags = 0,\n    .status_enum = alerter_status_list, /* initial state is empty(unset)\n                                           status */\n    .status_count = STATUS_COUNT,\n    .nb_info = G_N_ELEMENTS(alerter_info),\n    .info_types = alerter_info,\n\n    /* note: no get_status support */\n\n    .executor = alerter_executor,\n};\n\n/* ======= PUBLIC FUNCTIONS ======= */\nconst char *mod_get_name(void)\n{\n    return alerter_sm.name;\n}\n\nstatus_manager_t *mod_get_status_manager(void)\n{\n    return &alerter_sm;\n}\n\naction_func_t mod_get_action(const char *action_name)\n{\n    if (strcmp(action_name, \"alerter.alert\") == 0)\n        return alerter_alert;\n    else\n        return NULL;\n}\n"
  },
  {
    "path": "src/modules/backup.c",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2010-2014 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n#ifdef HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n\n#include \"mod_internal.h\"\n#include \"rbh_logs.h\"\n#include \"rbh_misc.h\"\n#include \"rbh_cfg_helpers.h\"\n#include \"rbh_modules.h\"\n#include \"xplatform_print.h\"\n#include \"Memory.h\"\n#include \"rbh_basename.h\"\n#include \"backup.h\"\n#include <stdlib.h>\n#include <sys/types.h>\n#include <sys/stat.h>\n#include <fcntl.h>\n#include <unistd.h>\n#include <time.h>\n#include <utime.h>\n#include <libgen.h>\n#include <pwd.h>\n#include <grp.h>\n#include <ctype.h>\n#include <fnmatch.h>\n#include <zlib.h>\n#include <sys/sendfile.h>\n\n#ifdef HAVE_SHOOK\n#include <shook_svr.h>\n#else\n#endif\n\n/* ---------- config parsing stuff ------------ */\n#define OLD_BACKUP_BLOCK \"Backend\"\n\n#ifdef HAVE_SHOOK\n#define MOD_NAME \"shook\"\n#define TAG \"Shook\"\n#define BACKUP_BLOCK \"shook_config\"\n#define BKL_TAG \"shook_cfg\"\n#else\n#define MOD_NAME \"backup\"\n#define TAG \"Backup\"\n#define BACKUP_BLOCK \"backup_config\"\n#define BKL_TAG \"backup_cfg\"\n#endif\n\ntypedef struct backup_config_t {\n    char root[RBH_PATH_MAX];\n    char mnt_type[RBH_NAME_MAX];\n    bool check_mounted;\n#ifdef HAVE_SHOOK\n    char shook_cfg[RBH_PATH_MAX];\n#endif\n    /** Distinct from action_timeout that is only defined at policy run time.\n     * This one is needed to determine the current entry status\n     * by checking if a transfer is currently active.\n     */\n    time_t copy_timeout;\n    /** This is not only an action parameter, as it is needed to check\n     * 
entry status (backend path differs if the entry is compressed).\n     */\n    bool compress;\n\n    /** recovery action */\n    policy_action_t recovery_action;\n\n} backup_config_t;\n\n/* backup config is global as the status manager is shared */\nstatic backup_config_t config;\n\nstatic void backup_cfg_set_default(void *module_config)\n{\n    backup_config_t *conf = (backup_config_t *) module_config;\n\n    strcpy(conf->root, \"/backend\");\n    strcpy(conf->mnt_type, \"nfs\");\n    conf->check_mounted = true;\n    conf->compress = false;\n    conf->copy_timeout = 6 * 3600;  /* 6h */\n#ifdef HAVE_SHOOK\n    strcpy(conf->shook_cfg, \"/etc/shook.cfg\");\n#endif\n\n    /* must be explicitly specified */\n    conf->recovery_action.type = ACTION_UNSET;\n    conf->recovery_action.action_u.func.name = \"\";\n    conf->recovery_action.action_u.func.call = NULL;\n}\n\nstatic void backup_cfg_write_default(FILE *output)\n{\n    print_begin_block(output, 0, BACKUP_BLOCK, NULL);\n    print_line(output, 1, \"root          : \\\"/backend\\\"\");\n    print_line(output, 1, \"mnt_type      : nfs\");\n    print_line(output, 1, \"check_mounted : yes\");\n    print_line(output, 1, \"copy_timeout  : 6h\");\n    print_line(output, 1, \"compress      : no\");\n#ifdef HAVE_SHOOK\n    print_line(output, 1, \"shook_cfg     : \\\"/etc/shook.cfg\\\"\");\n#endif\n        print_line(output, 1, \"recovery_action: <mandatory>\");\n\n    print_end_block(output, 0);\n}\n\n/* forward declaration */\nstatic status_manager_t backup_sm;\n\nstatic int backup_cfg_read(config_file_t config, void *module_config,\n                           char *msg_out)\n{\n    int rc;\n    backup_config_t *conf = (backup_config_t *) module_config;\n    config_item_t block;\n    char tmp[RBH_PATH_MAX];\n    char **extra = NULL;\n    unsigned int extra_cnt = 0;\n    attr_mask_t mask = null_mask;\n\n    const cfg_param_t backend_params[] = {\n        {\"root\", PT_STRING, PFLG_ABSOLUTE_PATH | PFLG_REMOVE_FINAL_SLASH\n     
    | PFLG_NO_WILDCARDS | PFLG_NOT_EMPTY, conf->root, sizeof(conf->root)}\n        ,\n        {\"mnt_type\", PT_STRING, 0, conf->mnt_type, sizeof(conf->mnt_type)}\n        ,\n        {\"check_mounted\", PT_BOOL, 0, &conf->check_mounted, 0}\n        ,\n        {\"compress\", PT_BOOL, 0, &conf->compress, 0}\n        ,\n        {\"copy_timeout\", PT_DURATION, 0, &conf->copy_timeout, 0}\n        ,\n#ifdef HAVE_SHOOK\n        /* shook only */\n        {\"shook_cfg\", PT_STRING, PFLG_ABSOLUTE_PATH | PFLG_NO_WILDCARDS,\n         conf->shook_cfg, sizeof(conf->shook_cfg)}\n        ,\n#endif\n        /** TODO parse recovery action */\n        END_OF_PARAMS\n    };\n\n    static const char *allowed_params[] = {\n        \"root\", \"mnt_type\", \"check_mounted\", \"copy_timeout\", \"compress\",\n        \"recovery_action\",\n#ifdef HAVE_SHOOK\n        \"shook_cfg\",\n#endif\n        NULL\n    };\n\n    /* get Backup block */\n    rc = get_cfg_block(config, BACKUP_BLOCK, &block, msg_out);\n    if (rc)\n        return rc == ENOENT ? 
0 : rc;   /* not mandatory */\n\n    /* read std parameters */\n    rc = read_scalar_params(block, BACKUP_BLOCK, backend_params, msg_out);\n    if (rc)\n        return rc;\n\n    /* read specific params */\n    rc = GetStringParam(block, BACKUP_BLOCK, \"recovery_action\",\n                        PFLG_MANDATORY, tmp, sizeof(tmp), &extra,\n                        &extra_cnt, msg_out);\n    if (rc != 0)\n        return rc;\n\n    rc = parse_policy_action(\"recovery_action\", tmp, extra, extra_cnt,\n                             &conf->recovery_action, &mask, msg_out);\n    if (rc)\n        return rc;\n\n    /* add the mask to softrm table mask */\n    backup_sm.softrm_table_mask =\n        attr_mask_or(&backup_sm.softrm_table_mask, &mask);\n\n    CheckUnknownParameters(block, BACKUP_BLOCK, allowed_params);\n\n    return 0;\n}\n\nstatic void backup_cfg_write_template(FILE *output)\n{\n    print_begin_block(output, 0, BACKUP_BLOCK, NULL);\n    print_line(output, 1, \"# backend path and type\");\n    print_line(output, 1, \"root          = \\\"/backend\\\";\");\n    print_line(output, 1, \"mnt_type      = nfs;\");\n    print_line(output, 1, \"# check if the backend is mounted on startup\");\n    print_line(output, 1, \"check_mounted = yes;\");\n    print_line(output, 1, \"copy_timeout  = 6h;\");\n#ifdef HAVE_SHOOK\n    print_line(output, 1, \"# shook server configuration\");\n    print_line(output, 1, \"shook_cfg     = \\\"/etc/shook.cfg\\\";\");\n#endif\n#ifdef HAVE_SHOOK\n    print_line(output, 1, \"#recovery_action = shook.recover;\");\n#else\n    print_line(output, 1, \"#recovery_action = common.copy;\");\n#endif\n    print_end_block(output, 0);\n}\n\nstatic void *backup_cfg_new(void)\n{\n    return calloc(1, sizeof(backup_config_t));\n}\n\nstatic void backup_cfg_free(void *cfg)\n{\n    if (cfg != NULL)\n        free(cfg);\n}\n\nstatic int backup_cfg_set(void *cfg, bool reload)\n{\n    backup_config_t *new = cfg;\n\n    if (!reload) {\n        config = *new;\n        
return 0;\n    }\n\n    /* reload case */\n    /* only copy timeout can be modified dynamically */\n    if (new->copy_timeout != config.copy_timeout) {\n        DisplayLog(LVL_EVENT, BKL_TAG,\n                   BACKUP_BLOCK \"::copy_timeout updated: %ld->%ld\",\n                   config.copy_timeout, new->copy_timeout);\n        config.copy_timeout = new->copy_timeout;\n    }\n\n    return 0;\n}\n\nstatic const mod_cfg_funcs_t backup_cfg_hdlr = {\n#ifdef HAVE_SHOOK\n    .module_name = \"shook\",\n#else\n    .module_name = \"backup\",\n#endif\n    .new = backup_cfg_new,\n    .free = backup_cfg_free,\n    .set_default = backup_cfg_set_default,\n    .read = backup_cfg_read,\n    .set_config = backup_cfg_set,\n    .write_default = backup_cfg_write_default,\n    .write_template = backup_cfg_write_template,\n};\n\n/* -------------- status management stuff ------------- */\n\n/* XXX /!\\ Must match file_status_t order */\nstatic const char *backup_status_list[] =\n    { \"new\", \"modified\", \"retrieving\", \"archiving\",\n    \"synchro\", \"released\", \"release_pending\"\n};\n\nstatic const char *backup_status2str(file_status_t st)\n{\n    if ((st >= STATUS_COUNT) || (st == STATUS_UNKNOWN))\n        return NULL;\n    else\n        return backup_status_list[st - 1];  /* st=1 => new */\n}\n\n/** enum of specific attributes */\nenum backup_info_e {\n    ATTR_BK_PATH = 0,\n    ATTR_LAST_ARCH\n};\n\n/** size of specific info to be stored in DB:\n * backend_path: full path in backend\n * last_archive: unix epoch\n */\nstatic sm_info_def_t backup_info[] = {\n    [ATTR_BK_PATH] =\n        {\"backend_path\", \"bkpath\", DB_TEXT, RBH_PATH_MAX - 1, {.val_str = NULL},\n         PT_STRING},\n    [ATTR_LAST_ARCH] =\n        {\"last_archive\", \"lstarc\", DB_UINT, 0, {.val_uint = 0}, PT_DURATION}\n};\n\n/** helper to compare a status */\nstatic bool status_equal(struct sm_instance *smi, const attr_set_t *attrs,\n                         file_status_t status)\n{\n    return 
!strcmp(STATUS_ATTR(attrs, smi->smi_index),\n                   backup_status2str(status));\n}\n\n/** to check backend mount point */\nstatic dev_t backend_dev = 0;\nstatic char backend_name[RBH_PATH_MAX] = \"\";\n\n/* is it a special shell character */\nstatic inline bool is_shell_special(char c)\n{\n    static const char specials[] = \"`#$*?!|;&<>[]{}'\\\"\\\\\";\n    const char *curr;\n\n    for (curr = specials; (*curr) != '\\0'; curr++)\n        if (c == (*curr))\n            return true;\n\n    /* not found */\n    return false;\n}\n\n#define is_allowed_char(_c) (isprint(_c) && isascii(_c) && !isspace(_c) \\\n                             && !is_shell_special(_c))\n\n/* clean non printable characters, spaces, special chars, ... */\nstatic void clean_bad_chars(char *path)\n{\n    char *curr;\n    for (curr = path; *curr != '\\0'; curr++) {\n        if (!is_allowed_char(*curr))\n            *curr = '_';\n    }\n}\n\n#ifdef HAVE_SHOOK\nstatic char lock_dirname[RBH_NAME_MAX] = \"\";\nstatic char restripe_dirname[RBH_NAME_MAX] = \"\";\n#endif\n\n/**\n * Initialize the extension module.\n * set_config function is supposed to have to been called before.\n * \\param[in] flags from command line.\n */\nstatic int backup_init(struct sm_instance *smi, run_flags_t flags)\n{\n    int rc;\n\n#ifdef HAVE_SHOOK   /* releasing files need shook */\n    rc = shook_svr_init(config.shook_cfg);\n    if (rc) {\n        DisplayLog(LVL_CRIT, TAG, \"ERROR %d initializing shook server library\",\n                   rc);\n        return rc;\n    }\n\n    rh_strncpy(lock_dirname, rh_basename(LOCK_DIR), sizeof(lock_dirname));\n    rh_strncpy(restripe_dirname, rh_basename(RESTRIPE_DIR),\n               sizeof(restripe_dirname));\n#endif\n\n    /* check that backend filesystem is mounted */\n    rc = check_fs_info(config.root, config.mnt_type, &backend_dev,\n                       backend_name, config.check_mounted, false);\n    if (rc)\n        return rc;\n\n    return 0;\n}\n\ntypedef enum 
{\n    FOR_LOOKUP,\n    FOR_NEW_COPY\n} what_for_e;\n\n/* path for entry we don't known the path in Lustre */\n#define UNK_PATH    \"__unknown_path\"\n/* name for entry we don't known the name in Lustre */\n#define UNK_NAME    \"__unknown_name\"\n/* extension for temporary copy file */\n#define COPY_EXT    \"xfer\"\n/* trash directory for orphan files */\n#define TRASH_DIR   \".orphans\"\n\n/**\n * Build the path of a given entry in the backend.\n */\nstatic void entry2backend_path(sm_instance_t *smi,\n                               const entry_id_t *p_id,\n                               const attr_set_t *p_attrs_in,\n                               what_for_e what_for,\n                               char *backend_path, int allow_compress)\n{\n    int pathlen;\n    char rel_path[RBH_PATH_MAX];\n\n    if (ATTR_MASK_INFO_TEST(p_attrs_in, smi, ATTR_BK_PATH)) {\n        DisplayLog(LVL_DEBUG, TAG, \"%s: previous backend_path: %s\",\n                   (what_for == FOR_LOOKUP) ? \"LOOKUP\" : \"NEW_COPY\",\n                   (char *)SMI_INFO(p_attrs_in, smi, ATTR_BK_PATH));\n    } else if (ATTR_MASK_TEST(p_attrs_in, type) &&\n               !strcasecmp(ATTR(p_attrs_in, type), STR_TYPE_DIR)) {\n        if (ATTR_MASK_TEST(p_attrs_in, fullpath) &&\n            ATTR(p_attrs_in, fullpath)[0] == '/' &&\n            relative_path(ATTR(p_attrs_in, fullpath), global_config.fs_path,\n                          rel_path) == 0) {\n            DisplayLog(LVL_DEBUG, TAG,\n                       \"%s is a directory: backend path is the same\",\n                       ATTR(p_attrs_in, fullpath));\n\n            if (!strcmp(config.root, \"/\"))  /* root is '/' */\n                sprintf(backend_path, \"/%s\", rel_path);\n            else\n                sprintf(backend_path, \"%s/%s\", config.root, rel_path);\n        } else {    /* we don't have fullpath available */\n\n            const char *fname;\n\n            /* There is something in the fullpath, but it is not under FS root\n    
         * or it is relative */\n            if (ATTR_MASK_TEST(p_attrs_in, fullpath)) {\n                if (ATTR(p_attrs_in, fullpath)[0] == '/')\n                    fname = ATTR(p_attrs_in, fullpath) + 1;\n                else\n                    fname = ATTR(p_attrs_in, fullpath);\n            } else if (ATTR_MASK_TEST(p_attrs_in, name))\n                fname = ATTR(p_attrs_in, name);\n            else\n                fname = UNK_NAME;\n\n            /* backup entry to a special dir */\n            if (!strcmp(config.root, \"/\"))  /* root is '/' */\n                sprintf(backend_path, \"/%s/%s\", UNK_PATH, fname);\n            else\n                sprintf(backend_path, \"%s/%s/%s\", config.root, UNK_PATH, fname);\n        }\n\n        /* clean bad characters */\n        clean_bad_chars(backend_path);\n        return;\n    }\n#ifdef HAVE_SHOOK\n    else if (what_for != FOR_NEW_COPY) {\n        int rc;\n        char fidpath[RBH_PATH_MAX];\n\n        BuildFidPath(p_id, fidpath);\n\n        /* retrieve backend path from shook xattrs */\n        rc = shook_get_hsm_info(fidpath, backend_path, NULL);\n        if ((rc == 0) && !EMPTY_STRING(backend_path))\n            return;\n    }\n#endif\n\n    if ((what_for == FOR_LOOKUP)\n        && ATTR_MASK_INFO_TEST(p_attrs_in, smi, ATTR_BK_PATH)) {\n        /* For lookup, if there is a previous path in the backend, use it. 
*/\n        strcpy(backend_path, (char *)SMI_INFO(p_attrs_in, smi, ATTR_BK_PATH));\n    } else {    /* in any other case, build a path from scratch */\n\n        /* if the fullpath is available, build human readable path */\n        if (ATTR_MASK_TEST(p_attrs_in, fullpath) &&\n            ATTR(p_attrs_in, fullpath)[0] == '/' &&\n            relative_path(ATTR(p_attrs_in, fullpath), global_config.fs_path,\n                          rel_path) == 0) {\n            /* backend path is '<bakend_root>/<rel_path>' */\n\n            if (!strcmp(config.root, \"/\"))  /* root is '/' */\n                sprintf(backend_path, \"/%s\", rel_path);\n            else\n                sprintf(backend_path, \"%s/%s\", config.root, rel_path);\n        } else {\n            /* no fullpath available (or not in FS root, or relative) */\n            const char *fname;\n\n            /* There is something in the fullpath, but it is not under FS root\n             * or it is relative */\n            if (ATTR_MASK_TEST(p_attrs_in, fullpath)) {\n                if (ATTR(p_attrs_in, fullpath)[0] == '/')\n                    fname = ATTR(p_attrs_in, fullpath) + 1;\n                else\n                    fname = ATTR(p_attrs_in, fullpath);\n            } else if (ATTR_MASK_TEST(p_attrs_in, name))\n                fname = ATTR(p_attrs_in, name);\n            else\n                fname = UNK_NAME;\n\n            /* backup entry to a special dir */\n            if (!strcmp(config.root, \"/\"))  /* root is '/' */\n                sprintf(backend_path, \"/%s/%s\", UNK_PATH, fname);\n            else\n                sprintf(backend_path, \"%s/%s/%s\", config.root, UNK_PATH, fname);\n        }\n\n        /* clean bad characters */\n        clean_bad_chars(backend_path);\n\n        /* add __<id> after the name */\n        pathlen = strlen(backend_path);\n#ifdef _HAVE_FID\n        sprintf(backend_path + pathlen, \"__\" DFID_NOBRACE, PFID(p_id));\n#else\n        sprintf(backend_path + pathlen, 
\"__%#LX:%#LX\",\n                (unsigned long long)p_id->device,\n                (unsigned long long)p_id->inode);\n#endif\n        /* check if compression is enabled and if the entry is a file */\n        if (allow_compress\n            && !strcasecmp(ATTR(p_attrs_in, type), STR_TYPE_FILE)) {\n            /* append z in this case */\n            strcat(backend_path, \"z\");\n        }\n    }\n    return;\n}\n\n/**\n * Determine if an entry is being archived\n * \\retval 0: not archiving\n * \\retval <0: error\n * \\retval >0: last modification time\n */\nstatic int entry_is_archiving(const char *backend_path)\n{\n    char xfer_path[RBH_PATH_MAX];\n    struct stat cp_md;\n    int rc;\n    sprintf(xfer_path, \"%s.%s\", backend_path, COPY_EXT);\n\n    if (lstat(xfer_path, &cp_md) != 0) {\n        rc = -errno;\n        if ((rc == -ENOENT) || (rc == -ESTALE))\n            return 0;\n        else\n            return rc;\n    }\n    /* xfer is running. return last action time */\n    return MAX3(cp_md.st_mtime, cp_md.st_ctime, cp_md.st_atime);\n}\n\n/**\n * Cleans a timed-out transfer\n */\nstatic int transfer_cleanup(const char *backend_path)\n{\n    char xfer_path[RBH_PATH_MAX];\n    int rc;\n    sprintf(xfer_path, \"%s.%s\", backend_path, COPY_EXT);\n\n    if (unlink(xfer_path) != 0) {\n        rc = -errno;\n        return rc;\n    }\n    return 0;\n}\n\n/**\n * Move an orphan file to orphan directory\n */\nstatic int move_orphan(const char *path)\n{\n    char dest[RBH_PATH_MAX];\n    const char *fname;\n    int rc;\n\n    /* does the trash directory exist? 
*/\n    if (snprintf(dest, sizeof(dest), \"%s/%s\", config.root, TRASH_DIR)\n            >= RBH_PATH_MAX)\n        return -EOVERFLOW;\n\n    if ((mkdir(dest, 0750) != 0) && (errno != EEXIST)) {\n        rc = -errno;\n        DisplayLog(LVL_MAJOR, TAG, \"Error creating directory %s: %s\",\n                   dest, strerror(-rc));\n        return rc;\n    }\n\n    fname = rh_basename(path);\n    if (fname == NULL || (strcmp(fname, \"/\") == 0) || EMPTY_STRING(fname)) {\n        DisplayLog(LVL_MAJOR, TAG, \"Invalid path '%s'\", path);\n        return -EINVAL;\n    }\n    /* move the orphan to the directory */\n    if (snprintf(dest, RBH_PATH_MAX, \"%s/%s/%s\", config.root, TRASH_DIR, fname)\n            >= RBH_PATH_MAX)\n            return -EOVERFLOW;\n\n    if (rename(path, dest) != 0) {\n        rc = -errno;\n        DisplayLog(LVL_MAJOR, TAG, \"Error moving '%s' to '%s'\", path, dest);\n        return rc;\n    }\n\n    DisplayLog(LVL_EVENT, TAG, \"'%s' moved to '%s'\", path, dest);\n    return 0;\n}\n\n/* check if there is a running copy and if it timed-out\n * return <0 on error\n * 0 if no copy is running\n * 1 if a copy is already running\n * */\nstatic int check_running_copy(const char *bkpath)\n{\n    int rc;\n    /* is a copy running for this entry? 
*/\n    rc = entry_is_archiving(bkpath);\n    if (rc < 0) {\n        DisplayLog(LVL_MAJOR, TAG,\n                   \"Error %d checking if copy is running for %s: %s\", rc,\n                   bkpath, strerror(-rc));\n        return rc;\n    } else if (rc > 0) {\n        if (config.copy_timeout && (time(NULL) - rc > config.copy_timeout)) {\n            DisplayLog(LVL_EVENT, TAG,\n                       \"Copy timed out for %s (inactive for %us)\", bkpath,\n                       (unsigned int)(time(NULL) - rc));\n            /* previous copy timed out: clean it */\n            transfer_cleanup(bkpath);\n        } else {\n            DisplayLog(LVL_DEBUG, TAG,\n                       \"'%s' is being archived (last mod: %us ago)\",\n                       bkpath, (unsigned int)(time(NULL) - rc));\n            return 1;\n        }\n    }\n    return 0;\n}\n\n/**\n * Get entry info from the backend (like lstat), but also check if the\n * entry is compressed.\n * Prioritarily check the entry with the selected compression on/off.\n */\nstatic int bk_lstat(const char *bkpath, struct stat *bkmd,\n                    bool check_compressed, bool *compressed)\n{\n    char tmp[RBH_PATH_MAX];\n    int len = strlen(bkpath);\n\n    *compressed = !!(bkpath[len - 1] == 'z');\n\n    if (!check_compressed)  /* not a file, call standard lstat() */\n        return lstat(bkpath, bkmd);\n\n    if (!lstat(bkpath, bkmd))\n        return 0;\n\n    if ((errno == ENOENT) || (errno == ESTALE)) {\n        if (*compressed) {\n            /* try without compression */\n            strcpy(tmp, bkpath);\n            tmp[len - 1] = '\\0';\n\n            if (lstat(tmp, bkmd) == 0) {\n                *compressed = 0;\n                return 0;\n            }\n        } else if (!(*compressed)) {\n            /* try with compression */\n            sprintf(tmp, \"%sz\", bkpath);\n            if (lstat(tmp, bkmd) == 0) {\n                *compressed = true;\n                return 0;\n            }\n    
    }\n    }\n    return -1;\n}\n\n/** helper to set the entry status for the given SMI */\nstatic inline int set_backup_status(sm_instance_t *smi, attr_set_t *pattrs,\n                                    file_status_t st)\n{\n    return set_status_attr(smi, pattrs, backup_status2str(st));\n}\n\n/** helper to get backend path from attribute structure */\n#define BKPATH(_pattr, _smi) ((char *)SMI_INFO((_pattr), (_smi), ATTR_BK_PATH))\n\n/** helper to set backend path */\nstatic inline int set_backend_path(sm_instance_t *smi, attr_set_t *pattrs,\n                                   const char *bkpath)\n{\n    char *info = strdup(bkpath);\n    int rc;\n\n    if (info == NULL)\n        return -ENOMEM;\n\n    rc = set_sm_info(smi, pattrs, ATTR_BK_PATH, info);\n    if (rc)\n        free(info);\n\n    return rc;\n}\n\n/** helper to set last_archive */\nstatic inline int set_last_archive(sm_instance_t *smi, attr_set_t *pattrs,\n                                   time_t last_arch)\n{\n    return set_uint_info(smi, pattrs, ATTR_LAST_ARCH, (unsigned int)last_arch);\n}\n\n/** return the path to access an entry in the filesystem */\nstatic int entry_fs_path(const entry_id_t *p_id, const attr_set_t *p_attrs,\n                         char *fspath)\n{\n#ifdef _HAVE_FID\n    /* for Lustre 2, use fid path so the operation is not disturbed by\n     * renames... */\n    BuildFidPath(p_id, fspath);\n#else\n    /* we need the posix path */\n    if (!ATTR_MASK_TEST(p_attrs, fullpath)) {\n        DisplayLog(LVL_CRIT, TAG, \"Error in %s(): path argument is \"\n                   \"mandatory for archive command\", __FUNCTION__);\n        return -EINVAL;\n    }\n    strcpy(fspath, ATTR(p_attrs, fullpath));\n#endif\n    return 0;\n}\n\n/** Called by get_status function, changelog callback, etc. 
 */
/* Return true if the entry must be ignored by this status manager.
 * Without shook support, nothing is ignored; with shook, this filters
 * shook-internal objects (lock files/dirs, restripe files, the shook dir). */
static bool backup_ignore(const entry_id_t *p_id, const attr_set_t *attrs)
{
#ifndef HAVE_SHOOK
    /* ignore nothing */
    return false;
#else
    /* if we don't know the full path, but the name looks like
     * an ignored entry, ignore it */
    if (!ATTR_MASK_TEST(attrs, fullpath) && ATTR_MASK_TEST(attrs, name)) {
        if (!strcmp(ATTR(attrs, name), SHOOK_DIR)
            || !strcmp(ATTR(attrs, name), lock_dirname)
            || !strcmp(ATTR(attrs, name), restripe_dirname)
            || !strncmp(SHOOK_LOCK_PREFIX, ATTR(attrs, name),
                        strlen(SHOOK_LOCK_PREFIX))
            || !strncmp(RESTRIPE_SRC_PREFIX, ATTR(attrs, name),
                        strlen(RESTRIPE_SRC_PREFIX))
            || !strncmp(RESTRIPE_TGT_PREFIX, ATTR(attrs, name),
                        strlen(RESTRIPE_TGT_PREFIX))) {

            /* no fullpath attr and name looks like something to ignore */
            DisplayLog(LVL_DEBUG, TAG,
                       "Special entry '%s' ignored by shook",
                       ATTR(attrs, name));

            return true;
        } else  /* no possible match */
            return false;
    }

    /* fullpath is known: match full shook patterns */
    if (ATTR_MASK_TEST(attrs, fullpath)) {
        if (!fnmatch("*/" LOCK_DIR "/" SHOOK_LOCK_PREFIX "*",
                     ATTR(attrs, fullpath), 0)) {
            /* lock file */
            DisplayLog(LVL_DEBUG, TAG, "%s is a shook lock",
                       ATTR(attrs, fullpath));
            /** raise special event for the file: LOCK/UNLOCK? */
            return true;
        } else if (!fnmatch("*/" LOCK_DIR, ATTR(attrs, fullpath), 0)) {
            /* lock dir */
            DisplayLog(LVL_DEBUG, TAG, "%s is a shook lock dir",
                       ATTR(attrs, fullpath));
            return true;
        } else if (!fnmatch("*/" RESTRIPE_DIR, ATTR(attrs, fullpath), 0)) {
            /* restripe dir */
            DisplayLog(LVL_DEBUG, TAG, "%s is a shook restripe dir",
                       ATTR(attrs, fullpath));
            return true;
        }
    }

    /* match '.shook' directory */
    if (attrs && ATTR_MASK_TEST(attrs, name)
        && ATTR_MASK_TEST(attrs, type)) {
        if (!strcmp(STR_TYPE_DIR, ATTR(attrs, type)) &&
            !strcmp(SHOOK_DIR, ATTR(attrs, name))) {
            /* skip the entry */
            DisplayLog(LVL_DEBUG, TAG, "\"%s\" is a shook dir",
                       ATTR(attrs, name));
            return true;
        }
    }

    /* if the removed entry is a restripe source,
     * we MUST NOT remove the backend entry
     * as it will be linked to the restripe target
     */
    if ((ATTR_MASK_TEST(attrs, fullpath)
         && !fnmatch("*/" RESTRIPE_DIR "/" RESTRIPE_SRC_PREFIX "*",
                     ATTR(attrs, fullpath), 0))
        || (ATTR_MASK_TEST(attrs, name)
            && !strncmp(RESTRIPE_SRC_PREFIX, ATTR(attrs, name),
                        strlen(RESTRIPE_SRC_PREFIX)))) {
        DisplayLog(LVL_DEBUG, TAG,
                   "'%s' is a shook restripe source",
                   ATTR_MASK_TEST(attrs, fullpath) ?
                        ATTR(attrs, fullpath) : ATTR(attrs, name));
        return true;
    }

    return false;
#endif
}


/**
 * Get the status for an entry.
 * \param[in] p_id pointer to entry id
 * \param[in] attrs_in pointer to entry attributes
 * \param[out] p_attrs_changed changed/retrieved attributes
 */
static int backup_status(struct sm_instance
*smi,
                         const entry_id_t *p_id, const attr_set_t *p_attrs_in,
                         attr_set_t *p_attrs_changed)
{
    int rc;
    struct stat bkmd;
    obj_type_t entry_type;
    char bkpath[RBH_PATH_MAX];
    bool compressed = false;

    /* check if mtime is provided (mandatory) */
    if (!ATTR_MASK_TEST(p_attrs_in, last_mod)
        || !ATTR_MASK_TEST(p_attrs_in, type)) {
        DisplayLog(LVL_MAJOR, TAG,
                   "Missing mandatory attribute for checking entry status");
        return -EINVAL;
    }

    /* path to lookup the entry in the backend */
    entry2backend_path(smi, p_id, p_attrs_in, FOR_LOOKUP, bkpath,
                       config.compress);

    /* does the entry have a supported type? */
    entry_type = db2type(ATTR(p_attrs_in, type));

    if ((entry_type != TYPE_FILE) && (entry_type != TYPE_LINK)) {
        DisplayLog(LVL_VERB, TAG, "Unsupported type %s for this backend",
                   ATTR(p_attrs_in, type));
        return -ENOTSUP;
    }

    /* ignore special entries */
    if (backup_ignore(p_id, p_attrs_in))
        return -ENOTSUP;

#ifdef HAVE_SHOOK
    /* check status from libshook.
     * return if status != ONLINE
     * else, continue checking.
     */
    char fidpath[RBH_PATH_MAX];
    file_status_t status;

    BuildFidPath(p_id, fidpath);

    rc = rbh_shook_status(fidpath, &status);
    if (rc)
        return rc;

    /* if status is 'release_pending' or 'restore_running',
     * check timeout.
     */
    if (status == STATUS_RELEASE_PENDING || status == STATUS_RESTORE_RUNNING) {
        rc = rbh_shook_recov_by_id(p_id, &status);
        if (rc < 0)
            return rc;
    }

    if (status != STATUS_SYNCHRO) {
        DisplayLog(LVL_FULL, TAG, "shook reported status<>online: %d", status);
        rc = set_backup_status(smi, p_attrs_changed, status);
        if (rc)
            return rc;

        /* set backend path if it is not known */
        if (!ATTR_MASK_INFO_TEST(p_attrs_in, smi, ATTR_BK_PATH)
            && !ATTR_MASK_INFO_TEST(p_attrs_changed, smi, ATTR_BK_PATH)) {
            rc = set_backend_path(smi, p_attrs_changed, bkpath);
            if (rc)
                return rc;
        }
        return 0;
    }
    /* else: must compare status with backend */
#endif

    if (entry_type == TYPE_FILE) {
        /* is a copy running for this entry? */
        rc = check_running_copy(bkpath);
        if (rc < 0)
            return rc;
        else if (rc > 0) {  /* current archive */
            return set_backup_status(smi, p_attrs_changed,
                                     STATUS_ARCHIVE_RUNNING);
        }
    }

    /* get entry info (with compression fallback for files) */
    if (bk_lstat(bkpath, &bkmd, entry_type == TYPE_FILE, &compressed) != 0) {
        rc = -errno;
        if ((rc != -ENOENT) && (rc != -ESTALE)) {
            DisplayLog(LVL_MAJOR, TAG, "Lookup error for path '%s': %s",
                       bkpath, strerror(-rc));
            return rc;
        } else {
            DisplayLog(LVL_DEBUG, TAG,
                       "'%s' does not exist in the backend (new entry): %s",
                       bkpath, strerror(-rc));
            /* no entry in the backend: new entry */
            return set_backup_status(smi, p_attrs_changed, STATUS_NEW);
        }
    }

    if (entry_type == TYPE_FILE) {
        if (!S_ISREG(bkmd.st_mode)) {
            /* entry of invalid type */
            DisplayLog(LVL_MAJOR, TAG,
                       "Different type in backend for entry %s. Moving it to orphan dir.",
                       bkpath);
            rc = move_orphan(bkpath);
            if (rc)
                return rc;
            return set_backup_status(smi, p_attrs_changed, STATUS_NEW);
        }
        /* compare mtime and size to check if the entry changed */
        /* XXX consider it modified this even if mtime is smaller */
        /* size comparison is skipped for compressed copies, as the backend
         * size differs from the filesystem size in that case */
        if ((ATTR(p_attrs_in, last_mod) != bkmd.st_mtime)
            || ((ATTR(p_attrs_in, size) != bkmd.st_size) && !compressed)) {
            /* display a warning if last_mod in FS < mtime in backend */
            if (ATTR(p_attrs_in, last_mod) < bkmd.st_mtime)
                DisplayLog(LVL_MAJOR, TAG,
                           "Warning: mtime in filesystem < mtime in backend (%s)",
                           bkpath);

            rc = set_backup_status(smi, p_attrs_changed, STATUS_MODIFIED);
            if (rc)
                return rc;

            /* update path in the backend */
            return set_backend_path(smi, p_attrs_changed, bkpath);
        } else {
            rc = set_backup_status(smi, p_attrs_changed, STATUS_SYNCHRO);
            if (rc)
                return rc;

            /* update path in the backend */
            return set_backend_path(smi, p_attrs_changed, bkpath);
        }
    } else if (entry_type == TYPE_LINK) {
        char lnk1[RBH_PATH_MAX];
        char lnk2[RBH_PATH_MAX];
        char fspath[RBH_PATH_MAX];

        if (!S_ISLNK(bkmd.st_mode)) {
            DisplayLog(LVL_MAJOR, TAG,
                       "Different type in backend for entry %s. Moving it to orphan dir.",
                       bkpath);
            rc = move_orphan(bkpath);
            if (rc)
                return rc;

            return set_backup_status(smi, p_attrs_changed, STATUS_NEW);
        }

        rc = entry_fs_path(p_id, p_attrs_in, fspath);
        if (rc)
            return rc;

        /* compare symlink contents */
        /* NOTE(review): if readlink() fills the whole buffer
         * (rc == RBH_PATH_MAX), 'lnk1[rc]' below writes one byte past the
         * end of the array — consider passing RBH_PATH_MAX - 1. */
        if ((rc = readlink(bkpath, lnk1, RBH_PATH_MAX)) < 0) {
            rc = -errno;
            if (rc == -ENOENT) {
                /* entry disappeared */
                return set_backup_status(smi, p_attrs_changed, STATUS_NEW);
            } else
                return rc;
        }
        lnk1[rc] = '\0';
        DisplayLog(LVL_FULL, TAG, "backend symlink => %s", lnk1);
        if ((rc = readlink(fspath, lnk2, RBH_PATH_MAX)) < 0) {
            rc = -errno;
            DisplayLog(LVL_EVENT, TAG, "Error performing readlink(%s): %s",
                       fspath, strerror(-rc));
            return rc;
        }
        lnk2[rc] = '\0';
        DisplayLog(LVL_FULL, TAG, "FS symlink => %s", lnk2);
        if (strcmp(lnk1, lnk2)) {
            /* symlink contents is different */
            rc = set_backup_status(smi, p_attrs_changed, STATUS_MODIFIED);
            if (rc)
                return rc;

            /* update path in the backend */
            return set_backend_path(smi, p_attrs_changed, bkpath);
        } else {    /* same contents */

            rc = set_backup_status(smi, p_attrs_changed, STATUS_SYNCHRO);
            if (rc)
                return rc;

            /* update path in the backend */
            return set_backend_path(smi, p_attrs_changed, bkpath);
        }
    } else {
        return -ENOTSUP;
    }

    /* TODO What about STATUS_REMOVED?
 */
}

/**
 * Function to determine if a deleted entry must be inserted to SOFTRM table
 */
static proc_action_e backup_softrm_filter(struct sm_instance *smi,
                                          const entry_id_t *id,
                                          const attr_set_t *attrs)
{
    /** @TODO support dirs here to clean empty directories? */

    if (ATTR_MASK_STATUS_TEST(attrs, smi->smi_index)
        && status_equal(smi, attrs, STATUS_NEW)) {
        DisplayLog(LVL_DEBUG, TAG,
                   "Removing 'new' entry (" DFID "): no remove in backend",
                   PFID(id));
        return PROC_ACT_RM_ALL;
    }
#ifdef HAVE_SHOOK
    /* if the removed entry is a restripe source,
     * we MUST NOT remove the backend entry
     * as it will be linked to the restripe target
     */
    else if ((ATTR_MASK_TEST(attrs, fullpath)
              && !fnmatch("*/" RESTRIPE_DIR "/" RESTRIPE_SRC_PREFIX "*",
                          ATTR(attrs, fullpath), 0))
             || (ATTR_MASK_TEST(attrs, name)
                 && !strncmp(RESTRIPE_SRC_PREFIX, ATTR(attrs, name),
                             strlen(RESTRIPE_SRC_PREFIX)))) {
        DisplayLog(LVL_DEBUG, TAG,
                   "Removing shook stripe source %s: no remove in backend!",
                   ATTR_MASK_TEST(attrs, fullpath) ? ATTR(attrs, fullpath) :
                       ATTR(attrs, name));
        return PROC_ACT_NONE;
    }
#endif
    /* If we have a doubt, always insert to softrm.
     *  In the worst case, it's just a useless backup_rm operation.
     */
    return PROC_ACT_SOFTRM_ALWAYS;
}

/* Run backup_ignore() on whichever attribute set is the most reliable:
 * favor fullpath over name, and favor_attrs over second_attrs. */
static bool backup_ignore_2attrs(const entry_id_t *id,
                                 const attr_set_t *favor_attrs,
                                 const attr_set_t *second_attrs)
{
    /* favor matching of fullpath, then fresh attributes */
    if (ATTR_MASK_TEST(favor_attrs, fullpath))
        return backup_ignore(id, favor_attrs);

    if (ATTR_MASK_TEST(second_attrs, fullpath))
        return backup_ignore(id, second_attrs);

    if (ATTR_MASK_TEST(favor_attrs, name))
        return backup_ignore(id, favor_attrs);

    if (ATTR_MASK_TEST(second_attrs, name))
        return backup_ignore(id, second_attrs);

    return false;
}

/* Changelog-record callback: decide, per record type, whether the entry
 * status must be refreshed from the filesystem (*getit), can be set directly
 * (e.g. NEW on CREATE), or whether the record triggers a softrm decision. */
static int backup_cl_cb(struct sm_instance *smi, const CL_REC_TYPE *logrec,
                        const entry_id_t *id, const attr_set_t *attrs,
                        attr_set_t *refreshed_attrs, bool *getit,
                        proc_action_e *rec_action)
{
    /* favor fresh attributes with fullpath */
    if (backup_ignore_2attrs(id, refreshed_attrs, attrs)) {
        *getit = false;

        if ((logrec->cr_type == CL_UNLINK)
             && (logrec->cr_flags & CLF_UNLINK_LAST)) {
            /* special file can be cleaned from DB when deleted */
            *rec_action = PROC_ACT_RM_ALL;
        }

        return 0;
    }
    /* If this is a CREATE record, we know its status is NEW
     * (except if it is already set to another value) */
    if (logrec->cr_type == CL_CREATE || logrec->cr_type == CL_SOFTLINK) {
        if (!ATTR_MASK_STATUS_TEST(attrs, smi->smi_index)) {
            /* new file, status is new */
            set_backup_status(smi, refreshed_attrs, STATUS_NEW);
            /* no need to retrieve it from filesystem */
            *getit = false;
        }
        /* else: file is already known. Preserve the known status. */
    } else if ((logrec->cr_type == CL_MKDIR) || (logrec->cr_type == CL_RMDIR)) {
        /* no status for directories */
        *getit = false;
    } else if (logrec->cr_type == CL_MTIME || logrec->cr_type == CL_TRUNC ||
               logrec->cr_type == CL_CLOSE) {
        /* If file is modified or truncated, need to check its status
         * (probably modified) EXCEPT if its status is already 'modified' */
        if (!ATTR_MASK_STATUS_TEST(attrs, smi->smi_index)
            || (!status_equal(smi, attrs, STATUS_NEW) &&
                !status_equal(smi, attrs, STATUS_MODIFIED))) {
            DisplayLog(LVL_DEBUG, TAG,
                       "Getstatus needed because this is a %s event "
                       "and status is not already 'modified' or 'new': status=%s",
                       changelog_type2str(logrec->cr_type),
                       ATTR_MASK_STATUS_TEST(attrs, smi->smi_index) ?
                       STATUS_ATTR(attrs, smi->smi_index) : "<not set>");
            *getit = true;
        }
    } else if ((logrec->cr_type == CL_UNLINK)
               && (logrec->cr_flags & CLF_UNLINK_LAST)) {
        *rec_action = backup_softrm_filter(smi, id, attrs);
    }
#ifdef HAVE_SHOOK
    else if (logrec->cr_type == CL_XATTR) {
        /* need to update status */
        *getit = true;
    } else if (logrec->cr_type == CL_CTIME || (logrec->cr_type == CL_SETATTR)) {
        /* in Lustre v2.0, changing trusted xattr generates CTIME/SATTR event */
        *getit = true;
    }

    /* if the old name is a restripe file, update the status */
    if (ATTR_MASK_TEST(attrs, name)
        && !strncmp(RESTRIPE_TGT_PREFIX, ATTR(attrs, name),
                    strlen(RESTRIPE_TGT_PREFIX))) {
        *getit = true;
        DisplayLog(LVL_DEBUG, TAG,
                   "Getstatus needed because entry was a restripe target:"
                   " '%s'", ATTR(attrs, name));
    }
#endif

    return 0;
}

/* direction of a transfer/lookup between filesystem and backend */
typedef enum { TO_FS, TO_BACKEND } target_e;

/**
 * get metadata of a directory in filesystem or in backend
 * by target path
 */
static int get_orig_dir_md(const char *target_dir, struct stat *st,
                           target_e target)
{
    char rel_path[RBH_PATH_MAX];
    char orig_path[RBH_PATH_MAX];
    int rc;
    const char *dest_root;
    const char *src_root;

    if (target == TO_BACKEND) {
        dest_root = config.root;
        src_root = global_config.fs_path;
    } else {
        dest_root = global_config.fs_path;
        src_root = config.root;
    }

    rc = relative_path(target_dir, dest_root, rel_path);
    if (rc)
        return rc;

    /* orig path is '<fs_root>/<rel_path>' */
    if (snprintf(orig_path, sizeof(orig_path), "%s/%s", src_root, rel_path)
            >= RBH_PATH_MAX)
          return -EOVERFLOW;

    DisplayLog(LVL_FULL, TAG, "Target directory: %s, source directory: %s",
               target_dir, orig_path);

    if (lstat(orig_path, st)) {
        rc = -errno;
        DisplayLog(LVL_DEBUG, TAG, "Cannot stat %s: %s",
                   orig_path, strerror(-rc));
        return rc;
    } else
        return 0;
}

/**
 *  Ensure POSIX directory exists
 */
static int mkdir_recurse_clone_attrs(const char *full_path, mode_t default_mode,
                                     target_e target)
{
    char path_copy[MAXPATHLEN];
    const char *curr;
    struct stat st;
    mode_t mode;
    int rc;
    bool setattrs = false;

    /* to backend or the other way?
 */
    if (target == TO_BACKEND) {
        if (strncmp(config.root, full_path, strlen(config.root)) != 0) {
            DisplayLog(LVL_MAJOR, TAG,
                       "Error: '%s' is not under backend root '%s'", full_path,
                       config.root);
            return -EINVAL;
        }
        /* skip backend root */
        curr = full_path + strlen(config.root);
    } else {
        /* is it relative? */
        if (!EMPTY_STRING(full_path) && (full_path[0] != '/')) {
            curr = full_path;
            goto relative;
        } else
            if (strncmp
                (global_config.fs_path, full_path,
                 strlen(global_config.fs_path)) != 0) {
            DisplayLog(LVL_MAJOR, TAG,
                       "Error: '%s' is not under filesystem root '%s'",
                       full_path, global_config.fs_path);
            return -EINVAL;
        }
        /* skip fs root */
        curr = full_path + strlen(global_config.fs_path);
    }

    if (*curr == '\0')  /* full_path is root dir */
        return 0;
    else if (*curr != '/') {    /* slash expected */
        DisplayLog(LVL_MAJOR, TAG, "Error: '%s' is not under backend root '%s'",
                   full_path,
                   (target ==
                    TO_BACKEND) ? config.root : global_config.fs_path);
        return -EINVAL;
    }

    /* skip first slash */
    curr++;
 relative:

    /* create each intermediate directory, cloning the attributes of its
     * counterpart in the source tree when it exists */
    while ((curr = strchr(curr, '/')) != NULL) {
        /* if fullpath = '/a/b',
         * curr = &(fullpath[2]);
         * so, copy 2 chars to get '/a'.
         * and set fullpath[2] = '\0'
         */
        int path_len = curr - full_path;

        /* extract directory name */
        /* NOTE(review): path_copy is MAXPATHLEN while full_path may be up to
         * RBH_PATH_MAX; confirm path_len cannot exceed sizeof(path_copy)-1 */
        strncpy(path_copy, full_path, path_len);
        path_copy[path_len] = '\0';

        /* stat dir */
        if (lstat(path_copy, &st) != 0) {
            rc = -errno;
            if (rc != -ENOENT) {
                DisplayLog(LVL_CRIT, TAG, "Cannot lstat() '%s': %s", path_copy,
                           strerror(-rc));
                return rc;
            }

            /* missing: clone mode/owner from the source tree if possible */
            if (get_orig_dir_md(path_copy, &st, target) == 0) {
                mode = st.st_mode & 07777;
                setattrs = true;
            } else {
                mode = default_mode;
                setattrs = false;
            }

            DisplayLog(LVL_FULL, TAG, "mkdir(%s)", path_copy);
            if ((mkdir(path_copy, mode) != 0) && (errno != EEXIST)) {
                rc = -errno;
                DisplayLog(LVL_CRIT, TAG, "mkdir(%s) failed: %s",
                           path_copy, strerror(-rc));
                return rc;
            }

            if (setattrs) {
                /* set owner and group */
                if (lchown(path_copy, st.st_uid, st.st_gid))
                    DisplayLog(LVL_MAJOR, TAG,
                               "Error setting owner/group for '%s': %s",
                               path_copy, strerror(errno));
                /* mode is set by mkdir */
            }
        } else if (!S_ISDIR(st.st_mode)) {
            DisplayLog(LVL_CRIT, TAG,
                       "Cannot create directory '%s': existing non-directory",
                       path_copy);
            return -ENOTDIR;
        }

        curr++;
    }

    if (get_orig_dir_md(full_path, &st, target) == 0) {
        mode = st.st_mode & 07777;
        setattrs = true;
    } else {
        mode = default_mode;
        setattrs = false;
    }

    /* finally create this dir */
    DisplayLog(LVL_FULL, TAG, "mkdir(%s)", full_path);
    if ((mkdir(full_path, mode) != 0) && (errno != EEXIST)) {
        rc = -errno;
        DisplayLog(LVL_CRIT, TAG, "mkdir(%s) failed: %s", full_path,
                   strerror(-rc));
        return rc;
    } else if (setattrs) {
        /* set owner and group */
        if (lchown(full_path, st.st_uid, st.st_gid))
            DisplayLog(LVL_MAJOR, TAG, "Error setting owner/group for '%s': %s",
                       full_path, strerror(errno));
        /* mode is set by mkdir (FIXME but can be cleared by chown) */
    }

    return 0;
}

/**
 * Create the parent of the given entry.
 * if target = FS, return the parent fid.
 */
static int create_parent(const char *child_path, target_e target,
                         entry_id_t *p_parent_id)
{
    char tmp[RBH_PATH_MAX];
    char *destdir;
    int rc;

    /* extract parent dir path */
    strcpy(tmp, child_path);
    destdir = dirname(tmp);
    if (destdir == NULL) {
        DisplayLog(LVL_CRIT, TAG, "Error extracting directory path of '%s'",
                   child_path);
        return -EINVAL;
    }

    /* create the directory */
    rc = mkdir_recurse_clone_attrs(destdir, 0750, target);
    if (rc)
        return rc;

    if (target == TO_FS && p_parent_id != NULL) {
        /* retrieve parent fid */
        rc = path2id(destdir, p_parent_id, NULL);
    }

    return rc;
}

/** create the parent directory (target = filesystem) */
static inline int create_parent_in_fs(const char *child_path,
                                      entry_id_t *p_parent_id)
{
    return create_parent(child_path, TO_FS,
p_parent_id);\n}\n\n/** create the parent directory (in backend storage) */\nstatic int create_parent_in_backend(const char *child_path)\n{\n    return create_parent(child_path, TO_BACKEND, NULL);\n}\n\n/** get entry information before performing an archive operation */\nstatic int copy_action_precheck(sm_instance_t *smi, const entry_id_t *p_id,\n                                attr_set_t *p_attrs, char *bkpath,\n                                obj_type_t *entry_type, bool *bk_moved)\n{\n    int rc;\n    struct stat void_stat;\n\n    /* if status is not determined, retrieve it */\n    if (!ATTR_MASK_STATUS_TEST(p_attrs, smi->smi_index)) {\n        DisplayLog(LVL_DEBUG, TAG, \"%s not provided to perform pre-copy checks\",\n                   smi->db_field);\n        rc = backup_status(smi, p_id, p_attrs, p_attrs);\n        if (rc)\n            return rc;\n    }\n\n    if (!ATTR_MASK_TEST(p_attrs, type)) {\n        DisplayLog(LVL_MAJOR, TAG,\n                   \"Missing mandatory attribute 'type' to perform pre-copy checks\");\n        return -EINVAL;\n    }\n\n    /* is it the good type? */\n    *entry_type = db2type(ATTR(p_attrs, type));\n    if ((*entry_type != TYPE_FILE) && (*entry_type != TYPE_LINK)) {\n        DisplayLog(LVL_MAJOR, TAG, \"Unsupported type for copy operation: %s\",\n                   ATTR(p_attrs, type));\n        return -ENOTSUP;\n    }\n\n    /* compute path for target file */\n    entry2backend_path(smi, p_id, p_attrs, FOR_NEW_COPY, bkpath,\n                       config.compress);\n\n    /* check the status */\n    if (status_equal(smi, p_attrs, STATUS_NEW)) {\n        /* check the entry does not already exist */\n        errno = 0;\n        if ((lstat(bkpath, &void_stat) == 0) || (errno != ENOENT)) {\n            rc = -errno;\n            DisplayLog(LVL_MAJOR, TAG,\n                       \"ERROR: new entry %s already exists in the backend?! 
errno=%d, %s\",\n                       bkpath, -rc, strerror(-rc));\n            return rc;\n        }\n    } else if (status_equal(smi, p_attrs, STATUS_MODIFIED)\n               || status_equal(smi, p_attrs, STATUS_ARCHIVE_RUNNING)) {\n        /* check if somebody else is about to copy */\n        rc = check_running_copy(bkpath);\n        if (rc < 0)\n            return rc;\n        else if (rc > 0)    /* current archive */\n            return -EALREADY;\n\n        /* check that previous path exists */\n        if (ATTR_MASK_INFO_TEST(p_attrs, smi, ATTR_BK_PATH)) {\n            char *bp = BKPATH(p_attrs, smi);\n\n            /* need to check if the entry was renamed */\n            *bk_moved = true;\n            if (lstat(bp, &void_stat) != 0) {\n                rc = -errno;\n                DisplayLog(LVL_MAJOR, TAG,\n                           \"Warning: previous copy %s not found in the backend (errno=%d, %s): \"\n                           \"entry will be archived again as %s.\", bp, -rc,\n                           strerror(-rc), bkpath);\n            }\n        }\n    } else {    /* invalid status */\n\n        /* invalid status for performing archive() */\n        DisplayLog(LVL_MAJOR, TAG, \"Unexpected status '%s' in %s()\",\n                   STATUS_ATTR(p_attrs, smi->smi_index), __FUNCTION__);\n        return -EINVAL;\n    }\n    return 0;\n}\n\n/** clone a symlink from the filesystem to the archive */\nstatic int backup_symlink(sm_instance_t *smi, attr_set_t *p_attrs,\n                          const char *src, const char *dst)\n{\n    int rc;\n    struct stat info;\n    char link[RBH_PATH_MAX] = \"\";\n\n    /* read link content from filesystem */\n    if (readlink(src, link, RBH_PATH_MAX) < 0) {\n        rc = -errno;\n        DisplayLog(LVL_MAJOR, TAG, \"Error reading symlink contents (%s): %s\",\n                   src, strerror(-rc));\n        return rc;\n    }\n    /* link contents is not supposed to change during its lifetime */\n    if 
(symlink(link, dst) != 0) {\n        rc = -errno;\n        DisplayLog(LVL_MAJOR, TAG,\n                   \"Error creating symlink %s->\\\"%s\\\" in backend: %s\", dst,\n                   link, strerror(-rc));\n        /* keep the same status */\n        return rc;\n    }\n\n    set_backup_status(smi, p_attrs, STATUS_SYNCHRO);\n    set_backend_path(smi, p_attrs, dst);\n    set_last_archive(smi, p_attrs, time(NULL));\n\n    /* set symlink owner/group (ignore error, as link contents is saved) */\n    if (lstat(src, &info) != 0) {\n        rc = -errno;\n        DisplayLog(LVL_EVENT, TAG, \"Error performing lstat(%s): %s\",\n                   src, strerror(-rc));\n        /* there is something wrong: set the status to unknown */\n        set_backup_status(smi, p_attrs, STATUS_UNKNOWN);\n    } else if (lchown(dst, info.st_uid, info.st_gid)) {\n        rc = -errno;\n        DisplayLog(LVL_EVENT, TAG,\n                   \"error setting owner/group in backend on %s: %s\", dst,\n                   strerror(-rc));\n    }\n    return 0;\n}\n\nstruct attr_save {\n    attr_mask_t attr_mask;\n    char *attr_path;\n};\n\n/**\n * Replace path attribute with target in case of copyback, and save previous\n * value in attr_save.\n */\nstatic void path_replace(struct attr_save *save, attr_set_t *p_attrs,\n                         const char *path)\n{\n    save->attr_mask = null_mask;\n    save->attr_mask.std = p_attrs->attr_mask.std & ATTR_MASK_fullpath;\n\n    save->attr_path = ATTR_MASK_TEST(p_attrs, fullpath) ?\n        strdup(ATTR(p_attrs, fullpath)) : NULL;\n\n    ATTR_MASK_SET(p_attrs, fullpath);\n    rh_strncpy(ATTR(p_attrs, fullpath), path, sizeof(ATTR(p_attrs, fullpath)));\n}\n\n/**\n * Restore path and backend path attributes, free allocated fields in\n * attr_save.\n */\nstatic void path_restore(struct attr_save *save, attr_set_t *p_attrs)\n{\n    /* restore initial values */\n    p_attrs->attr_mask.std &= ~ATTR_MASK_fullpath;\n    p_attrs->attr_mask = 
attr_mask_or(&p_attrs->attr_mask, &save->attr_mask);\n    if (save->attr_path != NULL) {\n        strcpy(ATTR(p_attrs, fullpath), save->attr_path);\n        free(save->attr_path);\n    }\n}\n\n/** wrap the copy action for a file */\nstatic int wrap_file_copy(sm_instance_t *smi,\n                          const entry_id_t *p_id, attr_set_t *p_attrs,\n                          const char *srcpath, const char *bkpath,\n                          bool bk_moved, const action_params_t *params,\n                          const policy_action_t *action, post_action_e *after,\n                          db_cb_func_t db_cb_fn, void *db_cb_arg)\n{\n    char *tmp = NULL;\n    int rc;\n    struct stat info;\n    struct attr_save sav = ATTR_SET_INIT;\n    action_params_t tmp_params = { 0 };\n\n    /* build tmp copy path */\n    asprintf(&tmp, \"%s.%s\", bkpath, COPY_EXT);\n    if (!tmp)\n        return -ENOMEM;\n\n#ifdef HAVE_SHOOK\n    rc = shook_archive_start(get_fsname(), p_id, bkpath);\n    if (rc) {\n        DisplayLog(LVL_CRIT, TAG,\n                   \"Failed to initialize transfer: shook_archive_start() returned error %d\",\n                   rc);\n        goto err_out;\n    }\n#endif\n\n    /* Actions expect to get a source path in 'fullpath' and targetpath in\n     * 'targetpath' parameter.\n     * So, build a fake attribute and new parameter set with these values.\n     */\n    rc = rbh_params_copy(&tmp_params, params);\n    if (rc)\n        goto err_out;\n\n    if (config.compress) {\n        if (rbh_param_set(&tmp_params, \"compress\", \"1\", false) != 0) {\n            DisplayLog(LVL_CRIT, TAG,\n                       \"ERROR: failed to set action param 'compress'\");\n            return -EFAULT;\n        }\n    }\n\n    rbh_param_set(&tmp_params, TARGET_PATH_PARAM, tmp, true);\n    path_replace(&sav, p_attrs, srcpath);\n\n    rc = action_helper(action, \"copy\", p_id, p_attrs, &tmp_params,\n                       smi, NULL, after, db_cb_fn, db_cb_arg);\n\n    /* 
restore real entry attributes */\n    path_restore(&sav, p_attrs);\n\n    if (rc) {\n#ifdef HAVE_SHOOK\n        shook_archive_abort(get_fsname(), p_id);\n#endif\n        /* cleanup tmp copy */\n        unlink(tmp);\n        /* the transfer failed. entry still needs to be archived */\n        set_backup_status(smi, p_attrs, STATUS_MODIFIED);\n        goto free_params;\n    }\n\n    /* finalize the transfer */\n\n    /* owner/group is supposed to be saved by the copy command */\n\n    /* set same mtime as the source file (initial value) */\n    if (ATTR_MASK_TEST(p_attrs, last_mod)) {\n        struct utimbuf tbuf;\n        tbuf.actime = time(NULL);\n        tbuf.modtime = ATTR(p_attrs, last_mod);\n\n        if (utime(tmp, &tbuf) != 0) {\n            rc = -errno;\n            DisplayLog(LVL_CRIT, TAG, \"Error setting mtime for file %s: %s\",\n                       tmp, strerror(-rc));\n            /* ignore the error */\n            rc = 0;\n        }\n    }\n\n    /* move the entry to final path */\n    if (rename(tmp, bkpath) != 0) {\n        rc = -errno;\n        DisplayLog(LVL_CRIT, TAG,\n                   \"Error renaming tmp copy file '%s' to final name '%s': %s\",\n                   tmp, bkpath, strerror(-rc));\n\n        /* the transfer failed. entry still needs to be archived */\n        set_backup_status(smi, p_attrs, STATUS_MODIFIED);\n        goto free_params;\n    }\n\n    /* has the file been renamed since last copy? 
*/\n    if (bk_moved) {\n        char *bp = ATTR_MASK_INFO_TEST(p_attrs, smi, ATTR_BK_PATH) ?\n            BKPATH(p_attrs, smi) : NULL;\n\n        /* bp is not supposed to be NULL when bk_moved is true */\n        assert(bp != NULL);\n\n        /* check if the backend path is different */\n        if (strcmp(bkpath, bp)) {\n            DisplayLog(LVL_DEBUG, TAG, \"Removing previous copy %s\", bp);\n            if (unlink(bp)) {\n                rc = -errno;\n                DisplayLog(LVL_DEBUG, TAG,\n                           \"Error removing previous copy %s: %s\", bp,\n                           strerror(-rc));\n                /* ignore */\n                rc = 0;\n            }\n        }\n    }\n#ifdef HAVE_SHOOK\n    rc = shook_archive_finalize(get_fsname(), p_id, bkpath);\n    if (rc) {\n        DisplayLog(LVL_CRIT, TAG,\n                   \"Failed to finalize transfer: shook_archive_finalize() returned error %d\",\n                   rc);\n        return rc;\n    }\n#endif\n\n    set_backup_status(smi, p_attrs, STATUS_SYNCHRO);\n    set_backend_path(smi, p_attrs, bkpath);\n    set_last_archive(smi, p_attrs, time(NULL));\n\n    /* get and check attributes after the transfer */\n    if (lstat(srcpath, &info) != 0) {\n        rc = -errno;\n        DisplayLog(LVL_EVENT, TAG, \"Error performing final lstat(%s): %s\",\n                   srcpath, strerror(-rc));\n        set_backup_status(smi, p_attrs, STATUS_UNKNOWN);\n        return rc;\n    }\n\n    /* check final size/mtime */\n    if ((info.st_mtime != ATTR(p_attrs, last_mod))\n        || (info.st_size != ATTR(p_attrs, size))) {\n        DisplayLog(LVL_EVENT, TAG,\n                   \"Entry %s has been modified during transfer: \"\n                   \"size before/after: %\" PRI_SZ \"/%\" PRI_SZ \", \"\n                   \"mtime before/after: %u/%\" PRI_TT, srcpath, ATTR(p_attrs,\n                                                                    size),\n                   info.st_size, ATTR(p_attrs, 
last_mod), info.st_mtime);\n        set_backup_status(smi, p_attrs, STATUS_MODIFIED);\n    }\n\n    /* update entry attributes */\n    stat2rbh_attrs(&info, p_attrs, true);\n    rc = 0;\n\n free_params:\n    rbh_params_free(&tmp_params);\n err_out:\n    free(tmp);\n    return rc;\n}\n\n/** check this is a supported action */\nstatic bool backup_check_action_name(const char *name)\n{\n    if (strcasecmp(name, \"archive\") &&\n#ifdef HAVE_SHOOK\n        strcasecmp(name, \"release\") &&\n#endif\n        /* special values for deleted entries (for backup_remove) */\n        strcasecmp(name, \"removed\") && strcasecmp(name, \"deleted\"))\n        return false;\n\n    return true;\n}\n\n/** executor for copy actions */\nstatic int copy_executor(sm_instance_t *smi, const policy_action_t *action,\n                         const entry_id_t *p_id, attr_set_t *p_attrs,\n                         const action_params_t *params, post_action_e *after,\n                         db_cb_func_t db_cb_fn, void *db_cb_arg)\n{\n    int rc;\n    char bkpath[RBH_PATH_MAX];\n    char fspath[RBH_PATH_MAX];\n    obj_type_t entry_type;\n    bool bk_moved = false;\n\n    /* check mandatory attributes, entry type and status */\n    rc = copy_action_precheck(smi, p_id, p_attrs, bkpath, &entry_type,\n                              &bk_moved);\n    if (rc)\n        return rc;\n\n    /* build path in filesystem to access the entry */\n    rc = entry_fs_path(p_id, p_attrs, fspath);\n    if (rc)\n        return rc;\n\n    rc = create_parent_in_backend(bkpath);\n    if (rc)\n        return rc;\n\n    /* set default for 'after', so it can be overriden in copy action */\n    *after = PA_UPDATE;\n\n    /** @TODO if compression is enabled, append 'compress' hint */\n\n    /* run the copy action */\n    if (entry_type == TYPE_FILE)\n        rc = wrap_file_copy(smi, p_id, p_attrs, fspath, bkpath, bk_moved,\n                            params, action, after, db_cb_fn, db_cb_arg);\n    else if (entry_type == 
TYPE_LINK)\n        rc = backup_symlink(smi, p_attrs, fspath, bkpath);\n    else\n        rc = -ENOTSUP;\n\n    return rc;\n}\n\nstatic int remove_executor(sm_instance_t *smi, const policy_action_t *action,\n                           const entry_id_t *p_id, attr_set_t *p_attrs,\n                           const action_params_t *params,\n                           post_action_e *after, db_cb_func_t db_cb_fn,\n                           void *db_cb_arg)\n{\n    const char *backend_path;\n    struct attr_save sav = ATTR_SET_INIT;\n    char bkpath[RBH_PATH_MAX];\n    int rc;\n\n    if (ATTR_MASK_INFO_TEST(p_attrs, smi, ATTR_BK_PATH))\n        backend_path = BKPATH(p_attrs, smi);\n    else {\n        /* if there is no backend path, try to guess */\n        int lvl_log;\n\n        if (ATTR_MASK_TEST(p_attrs, type)\n            && !strcasecmp(ATTR(p_attrs, type), STR_TYPE_FILE))\n            lvl_log = LVL_EVENT;\n        else\n            lvl_log = LVL_VERB;\n\n        entry2backend_path(smi, p_id, p_attrs, FOR_LOOKUP, bkpath,\n                           config.compress);\n        DisplayLog(lvl_log, TAG,\n                   \"No backend path is set for \" DFID\n                   \", guess it could be '%s'\", PFID(p_id), bkpath);\n        backend_path = bkpath;\n    }\n\n    /* replace the path argument by backend_path */\n    path_replace(&sav, p_attrs, backend_path);\n\n    rc = action_helper(action, \"remove\", p_id, p_attrs, params, smi, NULL,\n                       after, db_cb_fn, db_cb_arg);\n\n    /* restore real entry attributes */\n    path_restore(&sav, p_attrs);\n\n    return rc;\n}\n\n/** executor for release actions */\nstatic int release_executor(sm_instance_t *smi, const policy_action_t *action,\n                            const entry_id_t *p_id, attr_set_t *p_attrs,\n                            const action_params_t *params, post_action_e *after,\n                            db_cb_func_t db_cb_fn, void *db_cb_arg)\n{\n    int rc;\n\n    /* make sure 
the entry has a backend path */\n    if (!ATTR_MASK_INFO_TEST(p_attrs, smi, ATTR_BK_PATH)) {\n        DisplayLog(LVL_MAJOR, TAG, \"Can't release a file that has no path in\"\n                   \" backend\");\n        return -EINVAL;\n    }\n    if (!ATTR_MASK_TEST(p_attrs, type)) {\n        DisplayLog(LVL_MAJOR, TAG, \"Missing mandatory attribute 'type' in %s()\",\n                   __func__);\n        return -EINVAL;\n    }\n    if (strcmp(ATTR(p_attrs, type), STR_TYPE_FILE) != 0) {\n        DisplayLog(LVL_MAJOR, TAG, \"Unsupported type for release operation: %s\",\n                   ATTR(p_attrs, type));\n        return -ENOTSUP;\n    }\n\n    /* set default for 'after', so it can be overriden in release action */\n    *after = PA_UPDATE;\n\n    rc = action_helper(action, \"release\", p_id, p_attrs, params,\n                       smi, NULL, after, db_cb_fn, db_cb_arg);\n\n    if (rc)\n        return rc;\n\n    return set_backup_status(smi, p_attrs, STATUS_RELEASED);\n}\n\n/** Wrap command execution */\nstatic int backup_common_executor(sm_instance_t *smi, const char *implements,\n                                  const policy_action_t *action,\n                                  const entry_id_t *p_id, attr_set_t *p_attrs,\n                                  const action_params_t *params,\n                                  post_action_e *after, db_cb_func_t db_cb_fn,\n                                  void *db_cb_arg)\n{\n\n    /** @TODO support execution of hsm_remove actions */\n    if (!strcmp(implements, \"archive\")) {\n        return copy_executor(smi, action, p_id, p_attrs, params, after,\n                             db_cb_fn, db_cb_arg);\n    } else if (!strcmp(implements, \"removed\")\n               || !strcmp(implements, \"deleted\")) {\n        return remove_executor(smi, action, p_id, p_attrs, params, after,\n                               db_cb_fn, db_cb_arg);\n\n    } else if (!strcmp(implements, \"release\")) {\n        return 
release_executor(smi, action, p_id, p_attrs, params, after,\n                               db_cb_fn, db_cb_arg);\n    } else {\n        DisplayLog(LVL_CRIT, TAG,\n                   \"Operation not supported by status manager %s: '%s'\",\n                   smi->sm->name, implements);\n        return -ENOTSUP;\n    }\n}\n\n#define IS_ZIP_NAME(_n) (_n[strlen(_n) - 1] == 'z')\n\n/* Rebind a backend entry to a new file in Lustre (with new fid)\n * Notice: fs_path is not necessarily the current path of new_id\n * but it should be moved to this path at the end.\n */\nstatic int backup_rebind(sm_instance_t *smi, const char *fs_path,\n                         const char *old_bk_path, char *new_bk_path,\n                         const entry_id_t *new_id)\n{\n    int rc;\n    attr_set_t attrs_new;\n    struct stat st;\n    char tmp[RBH_PATH_MAX];\n    char fidpath[RBH_PATH_MAX];\n    char *destdir;\n    bool compressed = IS_ZIP_NAME(old_bk_path);\n    bool retry = false;\n\n    BuildFidPath(new_id, fidpath);\n\n    if (lstat(fidpath, &st)) {\n        rc = -errno;\n        DisplayLog(LVL_CRIT, TAG,\n                   \"ERROR: lstat() failed on target \" DFID \": %s\", PFID(new_id),\n                   strerror(-rc));\n        return rc;\n    }\n\n    if (!S_ISREG(st.st_mode)) {\n        DisplayLog(LVL_MAJOR, TAG, \"%s() is only supported for files\",\n                   __func__);\n        return -ENOTSUP;\n    }\n\n    /* build attr structure to pass to entry2backend_path() */\n    ATTR_MASK_INIT(&attrs_new);\n    stat2rbh_attrs(&st, &attrs_new, true);\n    strcpy(ATTR(&attrs_new, fullpath), fs_path);\n    ATTR_MASK_SET(&attrs_new, fullpath);\n\n    /* Build new path in backend. */\n    /* Ensure the target name is not compressed if the source was not. 
*/\n    entry2backend_path(smi, new_id, &attrs_new, FOR_NEW_COPY, new_bk_path,\n                       compressed);\n    /* set compression name if the previous entry was compressed */\n    if (compressed && !IS_ZIP_NAME(new_bk_path))\n        strcat(new_bk_path, \"z\");\n\n    /* -- move entry from old bk path to the new location -- */\n\n    /* recursively create the parent directory */\n    /* extract dir path */\n    strcpy(tmp, new_bk_path);\n    destdir = dirname(tmp);\n    if (destdir == NULL) {\n        DisplayLog(LVL_CRIT, TAG, \"Error extracting directory path of '%s'\",\n                   new_bk_path);\n        return -EINVAL;\n    }\n\n    rc = mkdir_recurse_clone_attrs(destdir, 0750, TO_BACKEND);\n    if (rc)\n        return rc;\n\n    do {\n        /* rename the entry in backend */\n        DisplayLog(LVL_DEBUG, TAG, \"Moving entry in the backend: '%s'->'%s'\",\n                   old_bk_path, new_bk_path);\n        if (rename(old_bk_path, new_bk_path)) {\n            rc = -errno;\n\n            /* only retry once if error is EXDEV */\n            if (!retry && rc == -EXDEV) {\n                const char *fname;\n\n                DisplayLog(LVL_MAJOR, TAG,\n                           \"Could not move entry in the backend \"\n                           \"because target path is in different device (error EXDEV): '%s'->'%s'\",\n                           old_bk_path, new_bk_path);\n\n                /* try to move file from one backend fileset to another:\n                 * in this case, just change filename within the same directory\n                 */\n                /* 1-extract current dirname in backend */\n                strcpy(tmp, old_bk_path);\n                destdir = dirname(tmp);\n                /* 2-extract new filename */\n                fname = rh_basename(fs_path);\n                /* 3-build the new backend path */\n#ifdef _HAVE_FID\n                sprintf(new_bk_path, \"%s/%s__\" DFID_NOBRACE, destdir, fname,\n                 
       PFID(new_id));\n#else\n                sprintf(new_bk_path, \"%s/%s__%#LX:%#LX\", destdir, fname,\n                        (unsigned long long)new_id->device,\n                        (unsigned long long)new_id->inode);\n#endif\n                retry = true;\n\n                DisplayLog(LVL_MAJOR, TAG, \"Trying to rename to '%s' instead\",\n                           new_bk_path);\n                continue;\n            } else {\n                DisplayLog(LVL_MAJOR, TAG,\n                           \"Could not move entry in the backend ('%s'->'%s'): %s\",\n                           old_bk_path, new_bk_path, strerror(-rc));\n                /* keep the old path */\n                strcpy(new_bk_path, old_bk_path);\n                return rc;\n            }\n        }\n        /* rename succeeded */\n        retry = false;\n    } while (retry);\n\n#ifdef HAVE_SHOOK\n    /* save new backendpath to filesystem */\n    /* XXX for now, don't manage several hsm_index */\n    rc = shook_set_hsm_info(fidpath, new_bk_path, 0);\n    if (rc)\n        DisplayLog(LVL_MAJOR, TAG,\n                   \"Could not set backend path for \" DFID \": error %d\",\n                   PFID(new_id), rc);\n#endif\n\n    return rc;\n}\n\n/** recover a directory */\nstatic recov_status_t recov_dir(const char *backend_path, const char *fspath,\n                                const attr_set_t *attrs, bool *set_mode)\n{\n    mode_t mode_create = 0750 /* default */ ;\n    int rc;\n\n    /* overwrite default if a mode is specified */\n    if (ATTR_MASK_TEST(attrs, mode))\n        mode_create = ATTR(attrs, mode);\n\n    rc = mkdir(fspath, mode_create) ? 
errno : 0;\n\n    if (rc != 0 && rc != EEXIST) {\n        DisplayLog(LVL_CRIT, TAG, \"ERROR: cannot create directory '%s': %s\",\n                   fspath, strerror(rc));\n        return RS_ERROR;\n    } else if (rc == EEXIST)\n        /* must set the mode */\n        *set_mode = true;\n\n    return RS_NON_FILE;\n}\n\n/** recover a symlink */\nstatic recov_status_t recov_symlink(const char *backend_path,\n                                    const char *fspath,\n                                    const attr_set_t *attrs, bool *set_mode)\n{\n    int rc;\n    char link[RBH_PATH_MAX] = \"\";\n\n    /* restore from DB */\n    if (ATTR_MASK_TEST(attrs, link))\n        rh_strncpy(link, ATTR(attrs, link), sizeof(link));\n    else {  /* restore from FS */\n\n        /* read link contents from backend */\n        rc = readlink(backend_path, link, sizeof(link));\n        if (rc < 0) {\n            rc = errno;\n            DisplayLog(LVL_MAJOR, TAG,\n                       \"Error reading symlink contents (%s): %s\", backend_path,\n                       strerror(rc));\n            if (rc == ENOENT)\n                return RS_NOBACKUP;\n            else\n                return RS_ERROR;\n        }\n        /* safety */\n        if (rc < RBH_PATH_MAX)\n            link[rc] = '\\0';\n        else\n            link[RBH_PATH_MAX - 1] = '\\0';\n    }\n\n    if (symlink(link, fspath) != 0) {\n        rc = errno;\n        DisplayLog(LVL_MAJOR, TAG,\n                   \"Error creating symlink %s->\\\"%s\\\" in filesystem: %s\",\n                   fspath, link, strerror(rc));\n        return RS_ERROR;\n    }\n\n    return RS_NON_FILE;\n}\n\n/** recover a regular file */\nstatic recov_status_t recov_file(sm_instance_t *smi, const entry_id_t *p_id,\n                                 const char *backend_path, const char *fspath,\n                                 attr_set_t *attrs, bool *set_mode,\n                                 bool *compressed, bool *stat_done,\n                     
            struct stat *bk_stat, bool *no_copy)\n{\n    struct utimbuf utb;\n    struct stat st_dest;\n    mode_t mode_create;\n    int rc;\n\n    /* test if this copy exists */\n    if (!*stat_done) {\n        if (bk_lstat(backend_path, bk_stat, 1, compressed) != 0) {\n            rc = errno;\n            if (rc != ENOENT) {\n                DisplayLog(LVL_MAJOR, TAG, \"Cannot stat '%s' in backend: %s\",\n                           backend_path, strerror(rc));\n                return RS_ERROR;\n            }\n        } else\n            *stat_done = true;\n    }\n\n    if (!*stat_done) {\n        /* if no stat done and file != 0 => no backup */\n        if (!ATTR_MASK_TEST(attrs, size) || ATTR(attrs, size) != 0) {\n            DisplayLog(LVL_MAJOR, TAG, \"%s has no backup copy (%s not found)\",\n                       fspath, backend_path);\n            return RS_NOBACKUP;\n        } else\n            *no_copy = true;\n    }\n\n    if (!*no_copy) {    /* only if there is a copy in the backend */\n        attr_set_t attr_bk;\n\n        if (!S_ISREG(bk_stat->st_mode)) {\n            DisplayLog(LVL_CRIT, TAG,\n                       \"ERROR: recovering file from non-file object %s\",\n                       backend_path);\n            return RS_ERROR;\n        }\n\n        ATTR_MASK_INIT(&attr_bk);\n        /* merge missing posix attrs to p_attrs_old */\n        stat2rbh_attrs(bk_stat, &attr_bk, true);\n        /* leave attrs unchanged if they are already set in p_attrs_old */\n        ListMgr_MergeAttrSets(attrs, &attr_bk, false);\n    }\n\n    /* test if the target does not already exist */\n    rc = lstat(fspath, &st_dest) ? 
errno : 0;\n    if (rc == 0) {\n        DisplayLog(LVL_MAJOR, TAG, \"Error: cannot recover '%s': already exists\",\n                   fspath);\n        return RS_ERROR;\n    } else if (rc != ENOENT) {\n        DisplayLog(LVL_MAJOR, TAG, \"Unexpected error performing lstat(%s): %s\",\n                   fspath, strerror(rc));\n        return RS_ERROR;\n    }\n\n    /* Check that this is not a cross-device import or recovery\n     * (entry could not be moved in that case) */\n    if (!*no_copy && config.check_mounted && (backend_dev != bk_stat->st_dev)) {\n        DisplayLog(LVL_MAJOR, TAG,\n                   \"Source file %s is not in the same device as target %s\",\n                   backend_path, config.root);\n        return RS_ERROR;\n    }\n\n    if (ATTR_MASK_TEST(attrs, mode))\n        mode_create = ATTR(attrs, mode);\n    else if (!*no_copy)\n        mode_create = bk_stat->st_mode;\n    else\n        mode_create = 0640; /* default */\n\n#ifdef _LUSTRE\n    /* restripe the file in Lustre */\n    if (ATTR_MASK_TEST(attrs, stripe_info)) {\n        CreateStriped(fspath, &ATTR(attrs, stripe_info), false);\n        *set_mode = true;\n    } else {\n#endif\n        int fd = creat(fspath, mode_create & 07777);\n\n        if (fd < 0) {\n            rc = errno;\n            DisplayLog(LVL_CRIT, TAG, \"ERROR: couldn't create '%s': %s\",\n                       fspath, strerror(rc));\n            return RS_ERROR;\n        }\n        close(fd);\n#ifdef _LUSTRE\n    }\n#endif\n\n    if (!*no_copy) {\n        struct attr_save sav = ATTR_SET_INIT;\n        action_params_t recov_params = { 0 };\n        post_action_e dummy_after;\n        size_t old_size = -1LL;\n\n        /* In any case, set 'copyback' param. 
*/\n        if (rbh_param_set(&recov_params, \"copyback\", \"1\", false)) {\n            DisplayLog(LVL_CRIT, TAG,\n                       \"ERROR: failed to set action param 'copyback'\");\n            return RS_ERROR;\n        }\n        /* If compression is enabled, append 'compress' param.  */\n        if (*compressed) {\n            if (rbh_param_set(&recov_params, \"compress\", \"1\", false) != 0) {\n                DisplayLog(LVL_CRIT, TAG,\n                           \"ERROR: failed to set action param 'compress'\");\n                return RS_ERROR;\n            }\n        } else {\n            /* restore the size as in the backend (except if compressed) */\n            old_size = ATTR(attrs, size);\n            ATTR(attrs, size) = bk_stat->st_size;\n        }\n\n        /* fspath may be a pointer to attrs, so make sure we set the right\n         * path in TARGET_PATH before 'path_replace' modifies it. */\n        if (rbh_param_set(&recov_params, TARGET_PATH_PARAM, fspath, false)) {\n            DisplayLog(LVL_CRIT, TAG, \"ERROR: failed to set action param '%s'\",\n                       TARGET_PATH_PARAM);\n            return RS_ERROR;\n        }\n\n\n        /* actions expect to get a source path in 'fullpath' and targetpath\n         * in 'targetpath' parameter.\n         * So, build a fake attribute and new parameter set with these values */\n        path_replace(&sav, attrs, backend_path);\n\n        /* perform the data copy (if needed) */\n        rc = action_helper(&config.recovery_action, \"recover\", p_id, attrs,\n                           &recov_params, smi, NULL, &dummy_after, NULL, NULL);\n\n        /* restore old size in structure */\n        if (old_size != -1LL)\n            ATTR(attrs, size) = old_size;\n\n        /* restore real entry attributes */\n        path_restore(&sav, attrs);\n\n        if (rc)\n            return RS_ERROR;\n\n        utb.actime = bk_stat->st_atime;\n        utb.modtime = bk_stat->st_mtime;\n\n        /* set the 
same mtime as in the backend */\n        DisplayLog(LVL_FULL, TAG,\n                   \"Restoring times from backend for '%s': atime=%lu, mtime=%lu\",\n                   fspath, utb.actime, utb.modtime);\n        if (utime(fspath, &utb))\n            DisplayLog(LVL_MAJOR, TAG,\n                       \"Warning: couldn't restore times for '%s': %s\", fspath,\n                       strerror(errno));\n    } else if (ATTR_MASK_TEST(attrs, last_mod)) {\n        utb.modtime = ATTR(attrs, last_mod);\n\n        if (ATTR_MASK_TEST(attrs, last_access))\n            utb.actime = ATTR(attrs, last_access);\n        else\n            utb.actime = utb.modtime;\n\n        /* set the same mtime as in the DB */\n        DisplayLog(LVL_FULL, TAG,\n                   \"Restoring times from DB for '%s': atime=%lu, mtime=%lu\",\n                   fspath, utb.actime, utb.modtime);\n        if (utime(fspath, &utb))\n            DisplayLog(LVL_MAJOR, TAG,\n                       \"Warning: couldn't restore times for '%s': %s\", fspath,\n                       strerror(errno));\n    }\n\n    return *no_copy ? 
RS_FILE_EMPTY : RS_FILE_OK;\n}\n\n#define VALID_FULLPATH(_attrs) (ATTR_MASK_TEST(_attrs, fullpath) && \\\n                                !EMPTY_STRING(ATTR(_attrs, fullpath)))\n\n/** recover a file from the backend (after rm, or a disaster...)\n * \\return recovery status\n */\nstatic recov_status_t backup_recover(struct sm_instance *smi,\n                                     const entry_id_t *p_old_id,\n                                     const attr_set_t *p_attrs_old_in,\n                                     entry_id_t *p_new_id,\n                                     attr_set_t *p_attrs_new,\n                                     bool already_recovered)\n{\n    char bkpath[RBH_PATH_MAX] = \"\";\n    char link[RBH_PATH_MAX] = \"\";\n    const char *backend_path = NULL;\n    const char *fspath;\n    int rc;\n    struct stat st_bk;\n    struct stat st_dest;\n    recov_status_t success_status;\n    entry_id_t parent_id;\n    mode_t mode_create = 0;\n    bool set_mode = false;\n    bool stat_done = false;\n    bool no_copy = false;\n    bool compressed = false;\n    char *name;\n    attr_set_t attrs_old = ATTR_SET_INIT;\n\n    /* build writable copy of input attrs. */\n    ListMgr_MergeAttrSets(&attrs_old, p_attrs_old_in, true);\n\n    /* if the entry is already specified, associate with the new fspath,\n     * if specified */\n    if (already_recovered && VALID_FULLPATH(p_attrs_new))\n        fspath = ATTR(p_attrs_new, fullpath);\n    else if (VALID_FULLPATH(&attrs_old))\n        fspath = ATTR(&attrs_old, fullpath);\n    else {\n        DisplayLog(LVL_MAJOR, TAG,\n                   \"Missing mandatory attribute 'fullpath' for recover entry \"\n                   DFID, PFID(p_old_id));\n        success_status = RS_ERROR;\n        goto out;\n    }\n\n#if 0   /* Code from RBHv2: keep it? 
*/\n    /* if FS path is not absolute, get the relative backend path and append to\n     * FS root */\n    if (fspath[0] != '/') {\n        char tmp[RBH_PATH_MAX];\n        if (ATTR_MASK_TEST(p_attrs_old, backendpath)) {\n            relative_path(ATTR(p_attrs_old, backendpath), config.root, tmp);\n            sprintf(buff, \"%s/%s/%s\", global_config.fs_path, dirname(tmp),\n                    strrchr(fspath, '/') + 1);\n            fspath = buff;\n        } else {    /* use the given relative path */\n\n            sprintf(buff, \"%s/%s\", global_config.fs_path,\n                    ATTR(p_attrs_old, fullpath));\n            fspath = buff;\n        }\n    }\n#endif\n\n    if (ATTR_MASK_INFO_TEST(&attrs_old, smi, ATTR_BK_PATH))\n        backend_path = BKPATH(&attrs_old, smi);\n    else\n        /* if there is no backend path, try to guess */\n    {\n        int lvl_log;\n\n        if (ATTR_MASK_TEST(&attrs_old, type)\n            && !strcasecmp(ATTR(&attrs_old, type), STR_TYPE_FILE))\n            lvl_log = LVL_EVENT;\n        else\n            lvl_log = LVL_VERB;\n\n        entry2backend_path(smi, p_old_id, &attrs_old, FOR_LOOKUP, bkpath,\n                           config.compress);\n        DisplayLog(lvl_log, TAG,\n                   \"No backend path is set for '%s', guess it could be '%s'\",\n                   fspath, bkpath);\n        backend_path = bkpath;\n    }\n\n    /* Another status manager recovered it. Just rebind in the backend. 
*/\n    if (already_recovered) {\n        char bknew[RBH_PATH_MAX];\n\n        success_status = backup_rebind(smi, fspath, backend_path, bknew,\n                                       p_new_id);\n        if (success_status == 0)\n            set_backend_path(smi, p_attrs_new, bknew);\n\n        /* @FIXME make backup_rebind return a recov_status */\n        goto out;\n    }\n\n    if (!ATTR_MASK_TEST(&attrs_old, type)) {\n        const char *type;\n\n        if (bk_lstat(backend_path, &st_bk, 1, &compressed) != 0) {\n            rc = errno;\n            DisplayLog(LVL_MAJOR, TAG, \"Cannot restore entry \" DFID\n                       \": '%s' not found in backend.\", PFID(p_old_id),\n                       backend_path);\n            if (rc == ENOENT)\n                success_status = RS_NOBACKUP;\n            else\n                success_status = RS_ERROR;\n            goto out;\n        }\n        stat_done = true;\n\n        /* set type in attrs_old */\n        type = mode2type(st_bk.st_mode);\n        if (type != NULL) {\n            ATTR_MASK_SET(&attrs_old, type);\n            strcpy(ATTR(&attrs_old, type), type);\n        } else {\n            DisplayLog(LVL_MAJOR, TAG, \"%s has unsupported type\", backend_path);\n            success_status = RS_NOBACKUP;\n            goto out;\n        }\n    }\n\n    /* create parent in filesystem in any case */\n    if (create_parent_in_fs(fspath, &parent_id)) {\n        success_status = RS_ERROR;\n        goto out;\n    }\n\n    if (!strcasecmp(ATTR(&attrs_old, type), STR_TYPE_DIR)) {\n        success_status = recov_dir(backend_path, fspath, &attrs_old, &set_mode);\n        if (success_status != RS_NON_FILE)\n            goto out;\n    } else if (!strcasecmp(ATTR(&attrs_old, type), STR_TYPE_LINK)) {\n        success_status =\n            recov_symlink(backend_path, fspath, &attrs_old, &set_mode);\n        if (success_status != RS_NON_FILE)\n            goto out;\n    } else if (!strcasecmp(ATTR(&attrs_old, type), 
STR_TYPE_FILE)) {\n        success_status =\n            recov_file(smi, p_old_id, backend_path, fspath, &attrs_old,\n                       &set_mode, &compressed, &stat_done, &st_bk, &no_copy);\n        if ((success_status != RS_FILE_EMPTY) && (success_status != RS_FILE_OK))\n            goto out;\n    } else {\n        /* type not supported */\n        DisplayLog(LVL_CRIT, TAG,\n                   \"Error: cannot restore entries with type '%s' (%s)\",\n                   ATTR(&attrs_old, type), fspath);\n        success_status = RS_NOBACKUP;\n        goto out;\n    }\n\n    /* set owner, group */\n    if (ATTR_MASK_TEST(&attrs_old, uid) || ATTR_MASK_TEST(&attrs_old, gid)) {\n        uid_t uid = -1;\n        gid_t gid = -1;\n        char buff[4096];\n\n        if (ATTR_MASK_TEST(&attrs_old, uid)) {\n            if (global_config.uid_gid_as_numbers) {\n                uid = ATTR(&attrs_old, uid).num;\n            } else {\n                struct passwd pw;\n                struct passwd *p_pw;\n\n                if ((getpwnam_r\n                     (ATTR(&attrs_old, uid).txt, &pw, buff, 4096, &p_pw) != 0)\n                    || (p_pw == NULL)) {\n                    DisplayLog(LVL_MAJOR, TAG,\n                               \"Warning: couldn't resolve uid for user '%s'\",\n                               ATTR(&attrs_old, uid).txt);\n                    uid = -1;\n                } else\n                    uid = p_pw->pw_uid;\n            }\n        }\n\n        if (ATTR_MASK_TEST(&attrs_old, gid)) {\n            if (global_config.uid_gid_as_numbers) {\n                gid = ATTR(&attrs_old, gid).num;\n            } else {\n                struct group gr;\n                struct group *p_gr;\n\n                if ((getgrnam_r\n                     (ATTR(&attrs_old, gid).txt, &gr, buff, 4096, &p_gr) != 0)\n                    || (p_gr == NULL)) {\n                    DisplayLog(LVL_MAJOR, TAG,\n                               \"Warning: couldn't resolve gid for 
group '%s'\",\n                               ATTR(&attrs_old, gid).txt);\n                    gid = -1;\n                } else\n                    gid = p_gr->gr_gid;\n            }\n        }\n\n        DisplayLog(LVL_FULL, TAG,\n                   \"Restoring owner/group for '%s': uid=%u, gid=%u\", fspath,\n                   uid, gid);\n\n        if (lchown(fspath, uid, gid)) {\n            rc = errno;\n            DisplayLog(LVL_MAJOR, TAG,\n                       \"Warning: cannot set owner/group for '%s': %s\", fspath,\n                       strerror(rc));\n        } else {\n            /* According to chown(2) manual: chown may clear sticky bits even\n             * if root does it,\n             * so, we must set the mode again if it contains special bits */\n            if (!set_mode && (mode_create & 07000))\n                set_mode = true;\n        }\n    }\n\n    if (set_mode) {\n        /* set the same mode as in the backend */\n        DisplayLog(LVL_FULL, TAG, \"Restoring mode for '%s': mode=%#o\",\n                   fspath, mode_create & 07777);\n        if (chmod(fspath, mode_create & 07777))\n            DisplayLog(LVL_MAJOR, TAG,\n                       \"Warning: couldn't restore mode for '%s': %s\", fspath,\n                       strerror(errno));\n    }\n\n    if (lstat(fspath, &st_dest)) {\n        rc = errno;\n        DisplayLog(LVL_CRIT, TAG,\n                   \"ERROR: lstat() failed on restored entry '%s': %s\", fspath,\n                   strerror(rc));\n        success_status = RS_ERROR;\n        goto out;\n    }\n\n    /* Compare restored size and mtime with the one saved in the DB\n     * for warning purpose (not for directories) */\n    if (!S_ISDIR(st_dest.st_mode)) {\n        DisplayLog(LVL_DEBUG, TAG, \"old size: %zu, bk size: %zu, fs size: %zu\",\n                   ATTR(&attrs_old, size), st_bk.st_size, st_dest.st_size);\n\n        if (ATTR_MASK_TEST(&attrs_old, size)\n            && (st_dest.st_size != ATTR(&attrs_old, 
size))) {\n            if (!compressed) {\n                DisplayLog(LVL_MAJOR, TAG, \"%s: the restored size (%zu) is \"\n                           \"different from the last known size in filesystem (%\"\n                           PRIu64 \"): \"\n                           \"it may have been modified in filesystem after the last backup.\",\n                           fspath, st_dest.st_size, ATTR(&attrs_old, size));\n                success_status = RS_FILE_DELTA;\n            } else\n                success_status = RS_FILE_OK;\n        }\n    }\n    /* only for files */\n    else if (S_ISREG(st_dest.st_mode)) {\n        if (ATTR_MASK_TEST(&attrs_old, last_mod)\n            && (st_dest.st_mtime != ATTR(&attrs_old, last_mod))) {\n            DisplayLog(LVL_MAJOR, TAG,\n                       \"%s: the restored mtime (%lu) is \"\n                       \"different from the last time in filesystem (%u): \"\n                       \"it may have been modified in filesystem after the last backup.\",\n                       fspath, st_dest.st_mtime, ATTR(&attrs_old, last_mod));\n            success_status = RS_FILE_DELTA;\n        }\n    }\n\n    /* set the new attributes */\n    ATTR_MASK_INIT(p_attrs_new);\n    stat2rbh_attrs(&st_dest, p_attrs_new, true);\n    strcpy(ATTR(p_attrs_new, fullpath), fspath);\n    ATTR_MASK_SET(p_attrs_new, fullpath);\n\n    rc = path2id(fspath, p_new_id, &st_dest);\n    if (rc) {\n        success_status = RS_ERROR;\n        goto out;\n    }\n\n    /* set parent id */\n    ATTR_MASK_SET(p_attrs_new, parent_id);\n    ATTR(p_attrs_new, parent_id) = parent_id;\n\n    /* set name */\n    name = strrchr(ATTR(p_attrs_new, fullpath), '/');\n    if ((name != NULL) && (*(name + 1) != '\\0')) {\n        rh_strncpy(ATTR(p_attrs_new, name), name + 1,\n                   sizeof(ATTR(p_attrs_new, name)));\n        ATTR_MASK_SET(p_attrs_new, name);\n    }\n#ifdef _LUSTRE\n    if (!ATTR_MASK_TEST(p_attrs_new, type)\n        || !strcmp(ATTR(p_attrs_new, 
type), STR_TYPE_FILE)) {\n        /* get the new stripe info */\n        if (File_GetStripeByPath(fspath,\n                                 &ATTR(p_attrs_new, stripe_info),\n                                 &ATTR(p_attrs_new, stripe_items)) == 0) {\n            ATTR_MASK_SET(p_attrs_new, stripe_info);\n            ATTR_MASK_SET(p_attrs_new, stripe_items);\n        }\n    }\n#endif\n\n    if (S_ISLNK(st_dest.st_mode)) {\n        strcpy(ATTR(p_attrs_new, link), link);\n        ATTR_MASK_SET(p_attrs_new, link);\n    }\n\n    if (!no_copy && (S_ISREG(st_dest.st_mode) || S_ISLNK(st_dest.st_mode))) {\n        char tmp[RBH_PATH_MAX];\n        char *destdir;\n\n#ifdef HAVE_SHOOK\n        /* only files are recovered as released, others are synchro */\n        if (S_ISREG(st_dest.st_mode))\n            rc = set_backup_status(smi, p_attrs_new, STATUS_RELEASED);\n        else\n#endif\n            rc = set_backup_status(smi, p_attrs_new, STATUS_SYNCHRO);\n\n        /* set the new entry path in backend, according to the new fid,\n         * and actual compression */\n        entry2backend_path(smi, p_new_id, p_attrs_new,\n                           FOR_NEW_COPY, tmp, compressed);\n        set_backend_path(smi, p_attrs_new, tmp);\n\n        /* recursively create the parent directory */\n        /* Extract dir path. We can modify tmp now,\n         * as it has been copied by set_backend_path(). 
*/\n        destdir = dirname(tmp);\n        if (destdir == NULL) {\n            DisplayLog(LVL_CRIT, TAG, \"Error extracting directory path of '%s'\",\n                       BKPATH(p_attrs_new, smi));\n            success_status = RS_ERROR;\n            goto out;\n        }\n\n        rc = mkdir_recurse_clone_attrs(destdir, 0750, TO_BACKEND);\n        if (rc) {\n            success_status = RS_ERROR;\n            goto out;\n        }\n\n        /* rename the entry in backend */\n        if (strcmp(BKPATH(p_attrs_new, smi), backend_path) != 0) {\n            DisplayLog(LVL_DEBUG, TAG,\n                       \"Moving the entry in backend: '%s'->'%s'\", backend_path,\n                       BKPATH(p_attrs_new, smi));\n            if (rename(backend_path, BKPATH(p_attrs_new, smi))) {\n                rc = errno;\n                DisplayLog(LVL_MAJOR, TAG,\n                           \"Could not move entry in backend ('%s'->'%s'): %s\",\n                           backend_path, BKPATH(p_attrs_new, smi),\n                           strerror(rc));\n                /* keep the old path */\n                set_backend_path(smi, p_attrs_new, backend_path);\n            }\n        }\n#ifdef HAVE_SHOOK\n        /* save new backendpath to filesystem */\n        /* XXX for now, don't manage several hsm_index */\n        rc = shook_set_hsm_info(fspath, BKPATH(p_attrs_new, smi), 0);\n        if (rc)\n            DisplayLog(LVL_MAJOR, TAG,\n                       \"Could not set backend path for %s: error %d\", fspath,\n                       rc);\n#endif\n    }\n\n out:\n    ListMgr_FreeAttrs(&attrs_old);\n    return success_status;\n}\n\n#ifdef HAVE_SHOOK\n/**\n * recovery function (instead of dummy copy)\n */\nstatic int rbh_shook_recov_file(const entry_id_t *p_entry_id, attr_set_t *p_attrs,\n                                const action_params_t *params, post_action_e *after,\n                                db_cb_func_t db_cb_fn, void *db_cb_arg)\n{\n    int rc;\n    size_t 
sz;\n    const char *path;\n\n    path = rbh_param_get(params, TARGET_PATH_PARAM);\n    if (path == NULL) {\n        /* target path */\n        DisplayLog(LVL_CRIT, __func__, \"Missing mandatory parameter '%s' to \"\n                   \"recover file.\", TARGET_PATH_PARAM);\n        return -EINVAL;\n    }\n\n    if (!ATTR_MASK_TEST(p_attrs, size)) {\n        DisplayLog(LVL_MAJOR, __func__, \"Warning: missing attribute 'size' to \"\n                   \"recover file. Restoring '%s' to 0 length.\",\n                   path);\n        sz = 0;\n    } else {\n        sz = ATTR(p_attrs, size);\n    }\n\n    DisplayLog(LVL_DEBUG, __func__, \"Restoring '%s' with size %\"PRI_SZ,\n               path, sz);\n\n    rc = truncate(path, sz) ? -errno : 0;\n    if (rc) {\n        DisplayLog(LVL_CRIT, TAG,\n                   \"ERROR could not set original size %\" PRI_SZ \" for '%s': %s\",\n                   sz, path, strerror(rc));\n        return rc;\n    }\n\n    /* set the file in \"released\" state */\n    rc = shook_set_status(path, SS_RELEASED);\n    if (rc) {\n        DisplayLog(LVL_CRIT, TAG, \"ERROR setting released state for '%s': %s\",\n                   path, strerror(-rc));\n        return rc;\n    }\n\n    return 0;\n}\n\n/** action function */\nstatic int rbh_shook_release(const entry_id_t *p_entry_id, attr_set_t *p_attrs,\n                            const action_params_t *params, post_action_e *after,\n                            db_cb_func_t db_cb_fn, void *db_cb_arg)\n{\n    /* is it the good type? 
*/\n    if (!ATTR_MASK_TEST(p_attrs, type)) {\n        DisplayLog(LVL_MAJOR, TAG, \"Missing mandatory attribute 'type' in %s()\",\n                   __func__);\n        return -EINVAL;\n    }\n\n    if (strcmp(ATTR(p_attrs, type), STR_TYPE_FILE) != 0) {\n        DisplayLog(LVL_MAJOR, TAG, \"Unsupported type for release operation: %s\",\n                   ATTR(p_attrs, type));\n        return -ENOTSUP;\n    }\n\n    return shook_release(get_fsname(), p_entry_id);\n}\n\n#ifdef HAVE_SHOOK_LHSMIFY\n/** action function */\nstatic int rbh_shook_release_lhsmify(const entry_id_t *p_entry_id,\n                            attr_set_t *p_attrs, const action_params_t *params,\n                            post_action_e *after, db_cb_func_t db_cb_fn,\n                            void *db_cb_arg)\n{\n    /* is it the good type? */\n    if (!ATTR_MASK_TEST(p_attrs, type)) {\n        DisplayLog(LVL_MAJOR, TAG, \"Missing mandatory attribute 'type' in %s()\",\n                   __func__);\n        return -EINVAL;\n    }\n\n    if (strcmp(ATTR(p_attrs, type), STR_TYPE_FILE) != 0) {\n        DisplayLog(LVL_MAJOR, TAG, \"Unsupported type for release operation: %s\",\n                   ATTR(p_attrs, type));\n        return -ENOTSUP;\n    }\n\n    return shook_release_lhsmify(get_fsname(), p_entry_id);\n}\n\n/** action function */\nstatic int rbh_shook_lhsmify(const entry_id_t *p_entry_id,\n                             attr_set_t *p_attrs, const action_params_t *params,\n                             post_action_e *after, db_cb_func_t db_cb_fn,\n                             void *db_cb_arg)\n{\n    /* is it the good type? 
*/\n    if (!ATTR_MASK_TEST(p_attrs, type)) {\n        DisplayLog(LVL_MAJOR, TAG, \"Missing mandatory attribute 'type' in %s()\",\n                   __func__);\n        return -EINVAL;\n    }\n\n    if (strcmp(ATTR(p_attrs, type), STR_TYPE_FILE) != 0) {\n        DisplayLog(LVL_MAJOR, TAG, \"Unsupported type for lhsmify operation: %s\",\n                   ATTR(p_attrs, type));\n        return -ENOTSUP;\n    }\n\n    return shook_lhsmify(get_fsname(), p_entry_id);\n}\n#endif\n#endif\n\n/** Status manager for backup or shook (2 builds with different flags) */\nstatic status_manager_t backup_sm = {\n    .name = MOD_NAME,\n    .flags = SM_SHARED | SM_DELETED | SM_MULTI_ACTION,\n    .status_enum = backup_status_list,  /* unknown is empty(unset) status */\n    .status_count = STATUS_COUNT - 1,\n    .nb_info = G_N_ELEMENTS(backup_info),\n    .info_types = backup_info,\n\n    /* Previous backup path is also needed.\n     * It is only in DB (so it is a cached information). */\n    .status_needs_attrs_cached = {.std = ATTR_MASK_type | ATTR_MASK_fullpath,\n                                  /* XXX used last_archive in RBH2.5: */\n                                  .sm_info = GENERIC_INFO_BIT(ATTR_BK_PATH)},\n\n    /* needs fresh mtime/size information from lustre\n     * to determine if the entry changed */\n    .status_needs_attrs_fresh = {.std = ATTR_MASK_last_mod | ATTR_MASK_size},\n\n    .get_status_func = backup_status,\n    .changelog_cb = backup_cl_cb,\n\n    .executor = backup_common_executor,\n\n    .check_action_name = backup_check_action_name,\n    /* no action callback as it has an executor */\n\n    /* fields for checking if entries must be inserted to SOFTRM */\n#ifdef HAVE_SHOOK\n    /* need name to check shook special files */\n    .softrm_filter_mask = {.std = ATTR_MASK_type | ATTR_MASK_name,\n                           .status = SMI_MASK(0)},\n#else\n    .softrm_filter_mask = {.std = ATTR_MASK_type, .status = SMI_MASK(0)},\n#endif\n    .softrm_filter_func = 
backup_softrm_filter,\n\n    /** needed attributes for undelete in addition to POSIX and fullpath:\n     * - backup_status: to know the original status of the 'undeleted' entry.\n     * - backend_path: to rebind undeleted entry in backend.\n     */\n    .softrm_table_mask = {.std = ATTR_MASK_type | ATTR_MASK_fullpath\n                                 | ATTR_MASK_size | ATTR_MASK_last_mod,\n                          .status = SMI_MASK(0),\n                          .sm_info = GENERIC_INFO_BIT(ATTR_BK_PATH)},\n    .undelete_func = backup_recover,\n\n    .cfg_funcs = &backup_cfg_hdlr,\n    .init_func = backup_init\n};\n\nconst char *mod_get_name(void)\n{\n    return backup_sm.name;\n}\n\nstatus_manager_t *mod_get_status_manager(void)\n{\n    return &backup_sm;\n}\n\naction_func_t mod_get_action(const char *action_name)\n{\n#ifdef HAVE_SHOOK\n    if (strcmp(action_name, \"shook.release\") == 0)\n        return rbh_shook_release;\n    if (strcmp(action_name, \"shook.recover\") == 0)\n        return rbh_shook_recov_file;\n#ifdef HAVE_SHOOK_LHSMIFY\n    if (strcmp(action_name, \"shook.release_lhsmify\") == 0)\n        return rbh_shook_release_lhsmify;\n    if (strcmp(action_name, \"shook.lhsmify\") == 0)\n        return rbh_shook_lhsmify;\n#endif\n#endif\n\n    /* unknown function */\n    return NULL;\n}\n"
  },
  {
    "path": "src/modules/backup.h",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2010-2014 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n#ifndef BACKUP_H\n#define BACKUP_H\n\n/** set of managed status */\ntypedef enum {\n    STATUS_UNKNOWN = 0, /* undetermined status */\n    STATUS_NEW, /* file does not exists in the backend */\n    STATUS_MODIFIED, /* file has been modified since it was stored in\n                      * the backend */\n    STATUS_RESTORE_RUNNING, /* file is being retrieved */\n    STATUS_ARCHIVE_RUNNING, /* file is being archived */\n    STATUS_SYNCHRO, /* file has been synchronized in HSM, file can be purged */\n    STATUS_RELEASED,    /* file is released (nothing to do). */\n    STATUS_RELEASE_PENDING, /* file is being released */\n\n    STATUS_COUNT    /* number of possible file status */\n} file_status_t;\n\n#ifdef HAVE_SHOOK\nint rbh_shook_status(const char *path, file_status_t *p_status);\nint rbh_shook_recov_by_id(const entry_id_t *p_id, file_status_t *p_status);\n#endif\n\n#endif\n"
  },
  {
    "path": "src/modules/basic.c",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2015-2017 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n#ifdef HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n\n#include \"status_manager.h\"\n#include \"rbh_logs.h\"\n#include \"rbh_misc.h\"\n#include \"mod_internal.h\"\n\n\n/* -------------- Basic status manager implementation ------------------- */\n\n#define BASIC_ST_COUNT 2\nstatic const char *basic_status_list[] = { \"ok\", \"failed\" };    /* + not set */\n\n/** set status according to action return status */\nstatic int basic_sm_action_cb(struct sm_instance *smi, const char *implements,\n                              int action_status, const entry_id_t *id,\n                              attr_set_t *p_attrs, post_action_e *what_after)\n{\n    if (action_status == 0)\n        set_status_attr(smi, p_attrs, basic_status_list[0]);\n    else\n        set_status_attr(smi, p_attrs, basic_status_list[1]);\n\n    return 0;\n}\n\nstatic status_manager_t basic_sm = {\n    .name         = \"basic\",\n    .status_enum  = basic_status_list,\n    .status_count = BASIC_ST_COUNT,\n    .action_cb    = basic_sm_action_cb,\n};\n\n/* ======= PUBLIC FUNCTIONS ======= */\nconst char *mod_get_name(void)\n{\n    return basic_sm.name;\n}\n\nstatus_manager_t *mod_get_status_manager(void)\n{\n    return &basic_sm;\n}\n"
  },
  {
    "path": "src/modules/checker.c",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2016 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n/**\n * \\file checker.c\n * \\brief file checking module.\n *\n * The purpose of this module is to run actions\n * on filesystem entries (checksum, virus scan, ...)\n * and store the result of this action in the DB,\n * so it can be later compared when executing the action\n * a next time.\n * It implement a 'last_check' criteria, to allow defining\n * a check interval in policy rules (e.g. last_check > 7d).\n * It implement a 'last_success' criteria, to report\n * last successful command execution.\n * It also maintain a status (ok/failed), which is the\n * status of the last command run on the entry.\n */\n#ifdef HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n\n#include \"status_manager.h\"\n#include \"rbh_logs.h\"\n#include \"rbh_misc.h\"\n#include \"mod_internal.h\"\n\n#define TAG \"checker\"\n\n/** set of managed status */\ntypedef enum {\n    STATUS_OK,  /* last executed command exited with status 0 */\n    STATUS_FAILED,  /* last executed command exited with status != 0 */\n\n    STATUS_COUNT,   /* number of possible statuses */\n} check_status_t;\n\nstatic const char *check_status_list[] = {\n    [STATUS_OK] = \"ok\",\n    [STATUS_FAILED] = \"failed\",\n};\n\nstatic const char *check_status2str(check_status_t st)\n{\n    switch (st) {\n    case STATUS_OK:\n    case STATUS_FAILED:\n        return check_status_list[st];\n    default:\n        return NULL;\n    }\n}\n\n/** enum of specific attributes */\nenum check_info_e {\n    ATTR_LAST_CHECK = 0,    /* time of last command run */\n    
ATTR_LAST_SUCCESS,  /* time of last command success */\n    ATTR_OUTPUT,    /* command output (if commands succeeds) */\n};\n\n/** definition of specific info  */\nstatic sm_info_def_t check_info[] = {\n    [ATTR_LAST_CHECK] =\n        {\"last_check\", \"lstchk\", DB_UINT, 0, {.val_uint = 0},\n         .crit_type = PT_DURATION},\n    [ATTR_LAST_SUCCESS] =\n        {\"last_success\", \"lstsuc\", DB_UINT, 0, {.val_uint = 0},\n         .crit_type = PT_DURATION},\n/** Define a limited output size to reduce de DB footprint. If the user needs to attach more\n * information for each file, he can still store it as an xattr, or in an annex database. */\n    [ATTR_OUTPUT] = {\"output\", \"out\", DB_TEXT, 255, {.val_str = NULL},\n                     .crit_type = PT_STRING},\n};\n\nstatic int check_executor(struct sm_instance *smi,\n                          const char *implements,\n                          const policy_action_t *action,\n                          /* arguments for the action : */\n                          const entry_id_t *p_id, attr_set_t *p_attrs,\n                          const action_params_t *params,\n                          post_action_e *what_after, db_cb_func_t db_cb_fn,\n                          void *db_cb_arg)\n{\n    int rc = 0;\n    time_t t;\n    bool use_str = false;\n    GString *out = NULL;\n\n    *what_after = PA_UPDATE;\n\n    /* Run the action.\n     * Functions (defined in modules):\n     * o As input, a function action should use 'output' attribute to compare\n     *   the result of the last execution.\n     * o As output, a function action can store its result to 'output'\n     *   attribute.\n     * Commands:\n     * o As input, a command can retrieve the last output by using '{output}'\n     *   placeholder.\n     * o As output, output will be set as the contents of stdout\n     *   (truncated to 255 char).\n     */\n    out = g_string_new(\"\");\n    rc = action_helper(action, \"check\", p_id, p_attrs, params, smi, out,\n       
                what_after, db_cb_fn, db_cb_arg);\n\n    /* update the value of last_check */\n    t = time(NULL);\n    set_uint_info(smi, p_attrs, ATTR_LAST_CHECK, (unsigned int)t);\n\n    /* depending on the action status, update the value of last_success */\n    if (rc == 0) {\n        set_status_attr(smi, p_attrs, check_status2str(STATUS_OK));\n        set_uint_info(smi, p_attrs, ATTR_LAST_SUCCESS, (unsigned int)t);\n\n        /* set output if the action was a successful command */\n        if (action->type == ACTION_COMMAND) {\n            int rc2;\n\n            DisplayLog(LVL_DEBUG, \"check_exec\", \"check command output='%s'\",\n                       out->str);\n            rc2 = set_sm_info(smi, p_attrs, ATTR_OUTPUT, out->str);\n            if (rc2 == 0)\n                /* str is now owner by p_attrs */\n                use_str = true;\n        }\n    } else {\n        set_status_attr(smi, p_attrs, check_status2str(STATUS_FAILED));\n        DisplayLog(LVL_EVENT, \"check_exec\",\n                   \"check command FAILED on: \" DFID_NOBRACE \" (%s)\",\n                   PFID(p_id), ATTR(p_attrs, fullpath));\n    }\n\n    g_string_free(out, use_str ? FALSE : TRUE);\n    return rc;\n}\n\n/** Status manager for file check management */\nstatus_manager_t checker_sm = {\n    .name = \"checker\",\n    .flags = 0,\n    .status_enum = check_status_list, /* initial state is empty status (unset) */\n    .status_count = STATUS_COUNT,\n    .nb_info = G_N_ELEMENTS(check_info),\n    .info_types = check_info,\n\n    /* note: no get_status support */\n\n    .executor = check_executor,\n};\n\n/* ======= PUBLIC FUNCTIONS ======= */\nconst char *mod_get_name(void)\n{\n    return checker_sm.name;\n}\n\nstatus_manager_t *mod_get_status_manager(void)\n{\n    return &checker_sm;\n}\n\naction_func_t mod_get_action(const char *action_name)\n{\n    /* no specific action implemented */\n    return NULL;\n}\n"
  },
  {
    "path": "src/modules/common_actions.c",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2014-2016 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n#ifdef HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n\n#include \"mod_internal.h\"\n#include \"list_mgr.h\"\n#include \"rbh_logs.h\"\n#include \"rbh_misc.h\"\n#include \"rbh_basename.h\"\n#include \"rbh_modules.h\"\n#include \"policy_rules.h\"\n#include \"status_manager.h\"\n#include \"Memory.h\"\n#include \"rbh_params.h\"\n#include <unistd.h>\n#include <utime.h>\n#include <fcntl.h>\n#include <sys/sendfile.h>\n#include <zlib.h>\n\n/** perform a standard unlink() action */\nstatic int common_unlink(const entry_id_t *p_entry_id, attr_set_t *p_attrs,\n                         const action_params_t *params, post_action_e *after,\n                         db_cb_func_t db_cb_fn, void *db_cb_arg)\n{\n    const char *path = NULL;\n    bool invalidate = false;\n\n    *after = PA_UPDATE;\n\n    if (ATTR_MASK_TEST(p_attrs, fullpath))\n        path = ATTR(p_attrs, fullpath);\n    else\n        return EINVAL;\n\n    // str2bool returns\n    // 1 true\n    // 0 false\n    // -1 if str is NULL or param is not valid\n    if (str2bool(rbh_param_get(params, \"invalidate_dbentry\")) > 0) {\n        invalidate = true;\n    }\n\n    if (unlink(path) != 0 && errno != ENOENT)\n        return errno;\n\n    if (invalidate) {\n#ifdef ATTR_INDEX_invalid\n        /* let GC or Changelog take care of this */\n        ATTR_MASK_SET(p_attrs, invalid);\n        ATTR(p_attrs, invalid) = true;\n        *after = PA_UPDATE;\n#else\n        *after = PA_NONE;\n#endif\n    }\n    else {\n      /* 1 less link */\n      *after = PA_RM_ONE;\n    }\n 
   return 0;\n}\n\n/** perform a standard rmdir() action */\nstatic int common_rmdir(const entry_id_t *p_entry_id, attr_set_t *p_attrs,\n                        const action_params_t *params, post_action_e *after,\n                        db_cb_func_t db_cb_fn, void *db_cb_arg)\n{\n    const char *path = NULL;\n\n    *after = PA_UPDATE;\n\n    if (ATTR_MASK_TEST(p_attrs, fullpath))\n        path = ATTR(p_attrs, fullpath);\n    else\n        return EINVAL;\n\n    if (rmdir(path) != 0 && errno != ENOENT)\n        return errno;\n\n    /* no hardlink for dirs */\n    *after = PA_RM_ALL;\n    return 0;\n}\n\n/** just log it! */\nstatic int common_log(const entry_id_t *p_entry_id, attr_set_t *p_attrs,\n                      const action_params_t *params, post_action_e *after,\n                      db_cb_func_t db_cb_fn, void *db_cb_arg)\n{\n    GString *params_str = g_string_new(\"\");\n\n    if (rbh_params_serialize(params, params_str, NULL,\n                             RBH_PARAM_CSV | RBH_PARAM_COMPACT))\n        /* ignore (just for logging) */\n        g_string_assign(params_str, \"ERROR\");\n\n    DisplayLog(LVL_MAJOR, \"LogAction\", \"fid=\" DFID \", path=%s, params={%s}\",\n               PFID(p_entry_id),\n               ATTR_MASK_TEST(p_attrs, fullpath) ? ATTR(p_attrs, fullpath) : \"\",\n               params_str->str);\n    g_string_free(params_str, TRUE);\n\n    *after = PA_UPDATE;\n    return 0;\n}\n\n/** standard copy of file contents and its attributes */\nstatic int common_copy(const entry_id_t *p_entry_id, attr_set_t *p_attrs,\n                       const action_params_t *params, post_action_e *after,\n                       db_cb_func_t db_cb_fn, void *db_cb_arg)\n{\n    int rc;\n    copy_flags_e flags = cp_params2flags(params);\n    const char *targetpath = rbh_param_get(params, TARGET_PATH_PARAM);\n\n    /* flags for restore vs. flags for archive */\n    int oflg = (flags & CP_COPYBACK) ? 
O_WRONLY : O_WRONLY | O_CREAT | O_TRUNC;\n\n    /* actions expect to get a source path in 'fullpath' and a targetpath\n     * in params */\n    if (!ATTR_MASK_TEST(p_attrs, fullpath) || (targetpath == NULL)) {\n        DisplayLog(LVL_MAJOR, CP_TAG,\n                   \"Missing mandatory attribute to perform file copy \"\n                   \"(fullpath or backendpath)\");\n        return -EINVAL;\n    }\n\n    rc = builtin_copy(ATTR(p_attrs, fullpath), targetpath,\n                      oflg, !(flags & CP_COPYBACK), flags);\n    *after = PA_UPDATE;\n    return rc;\n}\n\n/** copy file contents using sendfile() */\nstatic int common_sendfile(const entry_id_t *p_entry_id, attr_set_t *p_attrs,\n                           const action_params_t *params,\n                           post_action_e *after, db_cb_func_t db_cb_fn,\n                           void *db_cb_arg)\n{\n    int rc;\n    copy_flags_e flags = cp_params2flags(params);\n    const char *targetpath = rbh_param_get(params, TARGET_PATH_PARAM);\n\n    /* flags for restore vs. flags for archive */\n    int oflg = (flags & CP_COPYBACK) ? 
O_WRONLY : O_WRONLY | O_CREAT | O_TRUNC;\n\n    /* actions expect to get a source path in 'fullpath' and a targetpath in\n     * params */\n    if (!ATTR_MASK_TEST(p_attrs, fullpath) || (targetpath == NULL)) {\n        DisplayLog(LVL_MAJOR, CP_TAG,\n                   \"Missing mandatory attribute to perform file copy (fullpath or backendpath)\");\n        return -EINVAL;\n    }\n\n    rc = builtin_copy(ATTR(p_attrs, fullpath), targetpath, oflg,\n                      !(flags & CP_COPYBACK), flags | CP_USE_SENDFILE);\n    *after = PA_UPDATE;\n    return rc;\n}\n\n/** copy and compress file contents */\nstatic int common_gzip(const entry_id_t *p_entry_id, attr_set_t *p_attrs,\n                       const action_params_t *params, post_action_e *after,\n                       db_cb_func_t db_cb_fn, void *db_cb_arg)\n{\n    int rc;\n    copy_flags_e flags = cp_params2flags(params);\n    const char *targetpath = rbh_param_get(params, TARGET_PATH_PARAM);\n\n    /* flags for restore vs. flags for archive */\n    int oflg = (flags & CP_COPYBACK) ? 
O_WRONLY : O_WRONLY | O_CREAT | O_TRUNC;\n\n    /* actions expect to get a source path in 'fullpath' and a targetpath in\n     * params */\n    if (!ATTR_MASK_TEST(p_attrs, fullpath) || (targetpath == NULL)) {\n        DisplayLog(LVL_MAJOR, CP_TAG,\n                   \"Missing mandatory attribute to perform file copy \"\n                   \"(fullpath or targetpath)\");\n        return -EINVAL;\n    }\n\n    rc = builtin_copy(ATTR(p_attrs, fullpath), targetpath, oflg,\n                      !(flags & CP_COPYBACK), flags | CP_COMPRESS);\n    *after = PA_UPDATE;\n    return rc;\n}\n\n/**\n * Move an entry in the namespace and create the target parent directory\n * if necessary.\n */\nstatic int common_move(const entry_id_t *p_entry_id, attr_set_t *p_attrs,\n                       const action_params_t *params, post_action_e *after,\n                       db_cb_func_t db_cb_fn, void *db_cb_arg)\n{\n#define MOVE_TAG \"move\"\n    const char *targetpath = rbh_param_get(params, TARGET_PATH_PARAM);\n    entry_id_t dir_id = {0};\n    int rc;\n\n    /* actions expect to get a source path in 'fullpath' and a targetpath\n     * in params */\n    if (!ATTR_MASK_TEST(p_attrs, fullpath) || (targetpath == NULL)) {\n        DisplayLog(LVL_MAJOR, CP_TAG,\n                   \"Missing mandatory attribute to perform move operation \"\n                   \"(fullpath or targetpath)\");\n        return -EINVAL;\n    }\n\n    rc = create_parent_of(targetpath, &dir_id);\n    if (rc != 0 && rc != -EEXIST)\n        goto out;\n\n    DisplayLog(LVL_DEBUG, MOVE_TAG, \"rename('%s', '%s')\",\n               ATTR(p_attrs, fullpath), targetpath);\n\n    if (rename(ATTR(p_attrs, fullpath), targetpath) != 0) {\n        rc = -errno;\n        DisplayLog(LVL_MAJOR, MOVE_TAG, \"rename('%s', '%s') failed: %s\",\n                   ATTR(p_attrs, fullpath), targetpath, strerror(-rc));\n        goto out;\n    }\n    rc = 0;\n\n    /* set new parent id, name and path */\n    ATTR(p_attrs, parent_id) = 
dir_id;\n    ATTR_MASK_SET(p_attrs, parent_id);\n\n    rh_strncpy(ATTR(p_attrs, name), rh_basename(targetpath),\n               sizeof(ATTR(p_attrs, name)));\n    ATTR_MASK_SET(p_attrs, name);\n\n    rh_strncpy(ATTR(p_attrs, fullpath), targetpath,\n               sizeof(ATTR(p_attrs, fullpath)));\n    ATTR_MASK_SET(p_attrs, fullpath);\n\nout:\n    *after = PA_UPDATE;\n    return rc;\n}\n\nconst char *mod_get_name(void)\n{\n    return \"common\";\n}\n\naction_func_t mod_get_action(const char *action_name)\n{\n    if (strcmp(action_name, \"common.unlink\") == 0)\n        return common_unlink;\n    else if (strcmp(action_name, \"common.rmdir\") == 0)\n        return common_rmdir;\n    else if (strcmp(action_name, \"common.log\") == 0)\n        return common_log;\n    else if (strcmp(action_name, \"common.copy\") == 0)\n        return common_copy;\n    else if (strcmp(action_name, \"common.sendfile\") == 0)\n        return common_sendfile;\n    else if (strcmp(action_name, \"common.gzip\") == 0)\n        return common_gzip;\n    else if (strcmp(action_name, \"common.move\") == 0)\n        return common_move;\n    else\n        return NULL;\n}\n\n"
  },
  {
    "path": "src/modules/common_sched.c",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2016 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n#ifdef HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n\n#include \"mod_internal.h\"\n#include \"policy_run.h\"\n\n/** max-per-run scheduler configuration */\ntypedef struct sched_mpr_config {\n    ull_t   max_count;\n    ull_t   max_vol;\n} sched_mpr_config_t;\n\n/** internal state for max-per-run scheduler (LPR) */\nstruct sched_mpr_state {\n    sched_mpr_config_t cfg;\n    ull_t   count;\n    ull_t   vol;\n};\n\nstatic int sched_mpr_init(void *config, void **p_sched_data)\n{\n    struct sched_mpr_state *state = calloc(1, sizeof(*state));\n    sched_mpr_config_t *cfg = config;\n\n    if (!config)\n        return -EINVAL;\n\n    if (!state)\n        return -ENOMEM;\n\n    state->cfg = *cfg;\n    *p_sched_data = state;\n    return 0;\n}\n\nstatic int sched_mpr_reset(void *sched_data)\n{\n    struct sched_mpr_state *state = sched_data;\n\n    state->count = state->vol = 0;\n    return 0;\n}\n\n/* atomic increment */\n#define ATOMIC_INCR(_var, _val) (__sync_fetch_and_add(&(_var), (_val)))\n\nstatic int sched_mpr_schedule(void *sched_data, const entry_id_t *id,\n                              const attr_set_t *attrs, sched_cb_t cb,\n                              void *udata)\n{\n    struct sched_mpr_state *state = sched_data;\n\n    /* check if a previous entry reached the limit */\n\n    /* limit reached stop submitting actions */\n    if (state->cfg.max_count != 0 && state->count >= state->cfg.max_count) {\n        DisplayLog(LVL_VERB, \"max_per_run\",\n                   \"Max count reached (%Lu): stopping policy 
run\",\n                   state->cfg.max_count);\n        return SCHED_STOP_RUN;\n    }\n\n    if (state->cfg.max_vol != 0 && state->vol >= state->cfg.max_vol) {\n        DisplayLog(LVL_VERB, \"max_per_run\",\n                   \"Max volume reached (%Lu): stopping policy run\",\n                   state->cfg.max_vol);\n        return SCHED_STOP_RUN;\n    }\n\n    ATOMIC_INCR(state->count, 1);\n\n    if (attrs != NULL && ATTR_MASK_TEST(attrs, size))\n        ATOMIC_INCR(state->vol, ATTR(attrs, size));\n\n    /* if the limits are not reached, directly call the action calback */\n    DisplayLog(LVL_DEBUG, \"max_per_run\", \"Scheduling next step \"\n               \"(curr counters: count=%Lu, vol=%Lu)\", state->count, state->vol);\n    cb(udata, SCHED_OK);\n\n    return 0;\n}\n\n/* ------------- configuration management functions ---------- */\n\n/** configuration block name for max_per_run scheduler */\n#define SCHED_LPR_BLOCK \"max_per_run\"\n\nstatic void *sched_mpr_cfg_new(void)\n{\n    return calloc(1, sizeof(sched_mpr_config_t));\n}\n\nstatic void sched_mpr_cfg_free(void *cfg)\n{\n    free(cfg);\n}\n\nstatic void sched_mpr_cfg_set_default(void *module_config)\n{\n    sched_mpr_config_t *conf = module_config;\n\n    conf->max_count = conf->max_vol = 0;\n}\n\nstatic void sched_mpr_cfg_write_default(int indent, FILE *output)\n{\n    print_begin_block(output, indent, SCHED_LPR_BLOCK, NULL);\n    print_line(output, indent + 1, \"max_count: 0 (unlimited)\");\n    print_line(output, indent + 1, \"max_vol:   0 (unlimited)\");\n    print_end_block(output, indent);\n}\n\nstatic void sched_mpr_cfg_write_template(int indent, FILE *output)\n{\n    print_begin_block(output, indent, SCHED_LPR_BLOCK, NULL);\n    print_line(output, indent + 1, \"# max actions per run\");\n    print_line(output, indent + 1, \"max_count = 10000;\");\n    print_line(output, indent + 1, \"# max volume per run\");\n    print_line(output, indent + 1, \"max_vol   = 100GB;\");\n    
print_end_block(output, indent);\n}\n\n/** get a 'max_per_run' sublock from the policy parameters */\nstatic int sched_mpr_cfg_read_from_block(config_item_t parent, void *cfg,\n                                         char *msg_out)\n{\n    int rc;\n    sched_mpr_config_t *conf = cfg;\n    config_item_t       block;\n\n    const cfg_param_t mpr_params[] = {\n        {\"max_count\", PT_INT64, PFLG_POSITIVE, &conf->max_count, 0},\n        {\"max_vol\",   PT_SIZE,  PFLG_POSITIVE, &conf->max_vol,   0},\n        END_OF_PARAMS\n    };\n\n    static const char *allowed_params[] = {\n        \"max_count\", \"max_vol\", NULL\n    };\n\n    /* get 'max_per_run' subblock */\n    rc = get_cfg_subblock(parent, SCHED_LPR_BLOCK, &block, msg_out);\n    if (rc)\n        return rc == ENOENT ? 0 : rc;   /* not mandatory */\n\n    /* read std parameters */\n    rc = read_scalar_params(block, SCHED_LPR_BLOCK, mpr_params, msg_out);\n    if (rc)\n        return rc;\n\n    CheckUnknownParameters(block, SCHED_LPR_BLOCK, allowed_params);\n\n    return 0;\n}\n\nstatic int sched_mpr_cfg_update(void *sched_data, void *cfg)\n{\n    sched_mpr_config_t *new = cfg;\n    struct sched_mpr_state *state = sched_data;\n\n    state->cfg = *new;\n    return 0;\n}\n\n/** configuration handlers for \"max_per_run\" scheduler */\nstatic const ctx_cfg_funcs_t sched_mpr_cfg_funcs = {\n    .module_name     = \"max_per_run scheduler\",\n    .new             = sched_mpr_cfg_new,\n    .free            = sched_mpr_cfg_free,\n    .set_default     = sched_mpr_cfg_set_default,\n    .read_from_block = sched_mpr_cfg_read_from_block,\n    .update          = sched_mpr_cfg_update,\n    .write_default   = sched_mpr_cfg_write_default,\n    .write_template  = sched_mpr_cfg_write_template,\n};\n\n/** \"max_per_run\" scheduler definition */\nstatic action_scheduler_t sched_mpr = {\n    .sched_name         = \"max_per_run\",\n    .sched_cfg_funcs    = &sched_mpr_cfg_funcs,\n    .sched_init_func    = sched_mpr_init,\n    
.sched_reset_func   = sched_mpr_reset,\n    .sched_attr_mask    = { .std = ATTR_MASK_size, },\n    .sched_schedule     = sched_mpr_schedule,\n};\n\n/** scheduler defined in sched_ratelimit.c */\nextern action_scheduler_t sched_tbf;\n\n/** get a common scheduler by name */\naction_scheduler_t *mod_get_scheduler(const char *sched_name)\n{\n    if (strcmp(sched_name, \"common.max_per_run\") == 0)\n        return &sched_mpr;\n    else if (strcmp(sched_name, \"common.rate_limit\") == 0)\n        return &sched_tbf;\n\n    return NULL;\n}\n"
  },
  {
    "path": "src/modules/lhsm.c",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2009, 2010 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n/**\n * \\file lhsm.c\n * \\brief implements Lustre/HSM status manager and functions.\n */\n\n#ifdef HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n\n#include \"mod_internal.h\"\n#include \"rbh_logs.h\"\n#include \"rbh_misc.h\"\n#include \"db_schema.h\" /* for common robinhood types: entry_id_t,\n                          stripe_info_t... */\n#include \"status_manager.h\"\n\n#include <stdbool.h>\n#include <glib.h>\n#include <sys/types.h>\n#include <sys/xattr.h>\n\n/* config block name */\n#define LHSM_BLOCK \"lhsm_config\"\n/* tag for logs */\n#define LHSM_TAG \"lhsm\"\n\n#define DEFAULT_ARCHIVE_ID  0\n#define ARCHIVE_PARAM \"archive_id\"\n\n/* Length of a UUID as a string, without trailing NUL. */\n#define UUID_XATTR_STRLEN 36\n\ntypedef struct lhsm_config_t {\n    char **rebind_cmd;\n\n    char uuid_xattr[XATTR_NAME_MAX + 1];\n    bool strict_uuid;\n} lhsm_config_t;\n\n/* lhsm config is global as the status manager is shared */\nstatic lhsm_config_t config;\n\n/**\n * Global static list of excluded variables for action parameters\n * serialization. 
*/\nstatic struct rbh_params *exclude_params = NULL;\n\n/**\n * Get archive_id from action parameters.\n * @return archive_id on success, a negative value or error.\n */\nstatic int get_archive_id(const action_params_t *params)\n{\n    int arch_id;\n    const char *val = rbh_param_get(params, ARCHIVE_PARAM);\n\n    if (val == NULL)\n        return -ENOENT;\n\n    arch_id = str2int(val);\n    if (arch_id == -1) {\n        DisplayLog(LVL_MAJOR, LHSM_TAG,\n                   \"Invalid archive_id '%s': index expected\", val);\n        return -EINVAL;\n    }\n\n    return arch_id;\n}\n\n/** Initialize action related global information.\n * Prepare exclude set once to avoid reinitializing it for each action.\n */\nstatic int init_action_global_info(void)\n{\n    struct rbh_params *new_params;\n    int rc;\n\n    if (exclude_params != NULL)\n        return 0;\n\n    new_params = (struct rbh_params *)calloc(1, sizeof(*new_params));\n    if (new_params == NULL)\n        return -ENOMEM;\n\n    /* initialize exclude list, as it is constant */\n    rc = rbh_param_set(new_params, ARCHIVE_PARAM, \"\", true);\n    if (rc) {\n        free(new_params);\n        return rc;\n    }\n\n    exclude_params = new_params;\n    return 0;\n}\n\n/** lhsm module initialization function */\nstatic int lhsm_init(struct sm_instance *smi, run_flags_t flags)\n{\n    return init_action_global_info();\n}\n\n/** Trigger an HSM action */\nstatic int lhsm_action(enum hsm_user_action action, const entry_id_t *p_id,\n                       const attr_set_t *attrs, const action_params_t *params)\n{\n    struct hsm_user_request *req;\n    int rc;\n    char *mpath;\n    unsigned int archive_id = DEFAULT_ARCHIVE_ID;   /* default */\n    GString *args = NULL;\n    const char *data = NULL;\n    int data_len = 0;\n\n    /* if archive_id is explicitely specified in action parameters, use it */\n    rc = get_archive_id(params);\n    if (rc >= 0) {\n        archive_id = rc;\n    } else if (rc == -ENOENT) {\n        
/* for HSM_REMOVE, try to get it from previous attrs */\n        if (action == HUA_REMOVE) {\n            unsigned int *tmp;\n            unsigned int idx;\n            const sm_info_def_t *def;\n\n            rc = sm_attr_get(NULL, attrs, \"lhsm.archive_id\", (void **)&tmp,\n                             &def, &idx);\n            if (rc == 0) {\n                /* sanity check of returned type */\n                if (def->db_type != DB_UINT)\n                    DisplayLog(LVL_CRIT, LHSM_TAG,\n                               \"Unexpected type for 'lhsm.archive_id': %d\",\n                               def->db_type);\n                else\n                    archive_id = *tmp;\n            }\n        }\n        /* all other cases: keep default */\n    } else\n        return rc;\n\n    /* Serialize the parameters to pass them to the copytool.\n     * exclude archive_id, which is for internal use. */\n    args = g_string_new(\"\");\n    rc = rbh_params_serialize(params, args, exclude_params,\n                              RBH_PARAM_CSV | RBH_PARAM_COMPACT);\n    if (rc)\n        goto free_args;\n\n    if (!GSTRING_EMPTY(args)) {\n        data = args->str;\n        data_len = args->len + 1;\n    }\n\n    DisplayLog(LVL_DEBUG, LHSM_TAG,\n               \"action %s, fid=\" DFID \", archive_id=%u, parameters='%s'\",\n               hsm_user_action2name(action), PFID(p_id), archive_id, args->str);\n\n    req = llapi_hsm_user_request_alloc(1, data_len);\n    if (!req) {\n        rc = -errno;\n        DisplayLog(LVL_CRIT, LHSM_TAG, \"Cannot create HSM request: %s\",\n                   strerror(-rc));\n        goto free_args;\n    }\n\n    req->hur_request.hr_action = action;\n    req->hur_request.hr_archive_id = archive_id;\n    req->hur_request.hr_flags = 0;\n\n    req->hur_user_item[0].hui_fid = *p_id;\n    req->hur_user_item[0].hui_extent.offset = 0;\n    /* XXX for now, always transfer entire file */\n    req->hur_user_item[0].hui_extent.length = -1LL;\n\n    
req->hur_request.hr_itemcount = 1;\n    req->hur_request.hr_data_len = data_len;\n\n    if (data)\n        memcpy(hur_data(req), data, data_len);\n\n    /* make tmp copy as llapi_hsm_request arg is not const */\n    mpath = strdup(get_mount_point(NULL));\n    rc = llapi_hsm_request(mpath, req);\n    free(mpath);\n    free(req);\n\n    if (rc)\n        DisplayLog(LVL_CRIT, LHSM_TAG,\n                   \"ERROR performing HSM request(%s, root=%s, fid=\" DFID\n                   \"): %s\", hsm_user_action2name(action), get_mount_point(NULL),\n                   PFID(p_id), strerror(-rc));\n free_args:\n    g_string_free(args, TRUE);\n    return rc;\n}\n\n/** perform hsm_release action */\nstatic int lhsm_release(const entry_id_t *p_entry_id, attr_set_t *p_attrs,\n                        const action_params_t *params, post_action_e *after,\n                        db_cb_func_t db_cb_fn, void *db_cb_arg)\n{\n    /* 'after' is set in action callback */\n    return lhsm_action(HUA_RELEASE, p_entry_id, p_attrs, params);\n}\n\n/** perform hsm_archive action */\nstatic int lhsm_archive(const entry_id_t *p_entry_id, attr_set_t *p_attrs,\n                        const action_params_t *params, post_action_e *after,\n                        db_cb_func_t db_cb_fn, void *db_cb_arg)\n{\n    /* 'after' is set in action callback */\n    return lhsm_action(HUA_ARCHIVE, p_entry_id, p_attrs, params);\n}\n\n/** perform hsm_remove action */\nstatic int lhsm_remove(const entry_id_t *p_entry_id, attr_set_t *p_attrs,\n                       const action_params_t *params, post_action_e *after,\n                       db_cb_func_t db_cb_fn, void *db_cb_arg)\n{\n    /* 'after' is set in action callback */\n    int rc = lhsm_action(HUA_REMOVE, p_entry_id, p_attrs, params);\n    return rc;\n}\n\n/** set of managed status */\ntypedef enum {\n    STATUS_NEW, /* file has no HSM flags (just created) */\n    STATUS_MODIFIED,    /* file must be archived */\n    STATUS_RESTORE_RUNNING, /* file is being 
retrieved */\n    STATUS_ARCHIVE_RUNNING, /* file is being archived */\n    STATUS_SYNCHRO, /* file has been synchronized in HSM, file can be purged */\n    STATUS_RELEASED,    /* file is released (nothing to do). */\n\n    STATUS_COUNT,   /* number of possible file status */\n} hsm_status_t;\n\n/* XXX /!\\ Keep in sync with hsm_status_t */\nstatic const char *lhsm_status_list[] = {\n    [STATUS_NEW] = \"new\",\n    [STATUS_MODIFIED] = \"modified\",\n    [STATUS_RESTORE_RUNNING] = \"retrieving\",\n    [STATUS_ARCHIVE_RUNNING] = \"archiving\",\n    [STATUS_SYNCHRO] = \"synchro\",\n    [STATUS_RELEASED] = \"released\",\n};\n\nstatic const char *hsm_status2str(hsm_status_t st)\n{\n    if ((unsigned int)st >= STATUS_COUNT)\n        return NULL;\n    else\n        return lhsm_status_list[st];\n}\n\n/** enum of specific attributes */\nenum lhsm_info_e {\n    ATTR_ARCHIVE_ID = 0,\n    ATTR_NO_RELEASE,\n    ATTR_NO_ARCHIVE,\n    ATTR_LAST_ARCHIVE,\n    ATTR_LAST_RESTORE,\n    ATTR_UUID,\n};\n\n/** size of specific info to be stored in DB:\n * archive_id: hsm_user_state returns a 32bits interger for archive_id.\n * no_release: 0 or 1\n * no_archive: 0 or 1\n * last_archive: unix epoch\n * last_restore: unix epoch\n * uuid: 36 characters representing a 16 bytes UUID\n */\nstatic sm_info_def_t lhsm_info[] = {\n    [ATTR_ARCHIVE_ID] =\n        {ARCHIVE_PARAM, \"archid\", DB_UINT, 0, {.val_uint = 0}, PT_INT},\n    [ATTR_NO_RELEASE] =\n        {\"no_release\", \"norels\", DB_BOOL, 0, {.val_bool = false}, PT_BOOL},\n    [ATTR_NO_ARCHIVE] =\n        {\"no_archive\", \"noarch\", DB_BOOL, 0, {.val_bool = false}, PT_BOOL},\n    [ATTR_LAST_ARCHIVE] =\n        {\"last_archive\", \"lstarc\", DB_UINT, 0, {.val_uint = 0}, PT_DURATION},\n    [ATTR_LAST_RESTORE] =\n        {\"last_restore\", \"lstrst\", DB_UINT, 0, {.val_uint = 0}, PT_DURATION},\n    [ATTR_UUID] =\n        {\"uuid\", \"uuid\", DB_TEXT, UUID_XATTR_STRLEN, {.val_str = NULL},\n         PT_STRING},\n};\n\nstatic bool 
cfg_has_uuid(const lhsm_config_t *cfg)\n{\n    return cfg->uuid_xattr[0] != '\\0';\n}\n\n/* Get the UUID for the fid.\n * Return 0 on success, an errno on failure. uuid must be at least 37\n * bytes long. */\nstatic int get_uuid(const entry_id_t *id, char *uuid)\n{\n    char fid_path[RBH_PATH_MAX];\n    int rc;\n\n    rc = BuildFidPath(id, fid_path);\n    if (rc)\n        return rc;\n\n    rc = lgetxattr(fid_path, config.uuid_xattr, uuid, UUID_XATTR_STRLEN + 1);\n    if (rc == -1) {\n        rc = errno;\n        if (rc != ENODATA)\n            DisplayLog(LVL_MAJOR, LHSM_TAG,\n                       \"Cannot get UUID for fid \" DFID_NOBRACE \" : %s\",\n                       PFID(id), strerror(rc));\n        return rc;\n    }\n\n    if (rc > UUID_XATTR_STRLEN) {\n        DisplayLog(LVL_MAJOR, LHSM_TAG,\n                   \"UUID size %d is too large for fid \" DFID_NOBRACE,\n                   rc, PFID(id));\n        return EMSGSIZE;\n    } else if (config.strict_uuid && rc < UUID_XATTR_STRLEN) {\n        DisplayLog(LVL_MAJOR, LHSM_TAG,\n                   \"UUID size is too small (%d) for fid \"DFID_NOBRACE\". \"\n                   \"It must be exactly %d bytes long \"\n                   \"(or specify 'strict_uuid = no' in the configuration).\",\n                   rc, PFID(id), UUID_XATTR_STRLEN);\n        return EINVAL;\n    }\n\n    /* null terminate the uuid string */\n    uuid[rc] = 0;\n\n    return 0;\n}\n\n/* Get the UUID from the file and set the SM attribute. Do not return\n * an error if the file doesn't have a UUID, as it is better to still\n * have it up to date in the database than not at all. 
*/\nstatic void set_uuid_info(struct sm_instance *smi, const entry_id_t *id,\n                          attr_set_t *refreshed_attrs)\n{\n    char *uuid;\n\n    uuid = malloc(UUID_XATTR_STRLEN + 1);\n    if (uuid == NULL)\n        return;\n\n    if (get_uuid(id, uuid) != 0) {\n        free(uuid);\n        return;\n    }\n\n    if (set_sm_info(smi, refreshed_attrs, ATTR_UUID, uuid) != 0)\n        free(uuid);\n}\n\n/** get Lustre status and convert it to an internal scalar status */\nstatic int lhsm_get_status(const char *path, hsm_status_t *p_status,\n                           bool *no_release, bool *no_archive,\n                           unsigned int *archive_id)\n{\n    struct hsm_user_state file_status;\n    int rc;\n\n    /* initialize outputs */\n    *p_status = STATUS_NEW;\n    *no_release = false;\n    *no_archive = false;\n    *archive_id = DEFAULT_ARCHIVE_ID;\n\n    /* get status */\n    rc = llapi_hsm_state_get(path, &file_status);\n\n    if ((rc != 0) && (rc != -ENOENT) && (rc != -ESTALE))\n        DisplayLog(LVL_DEBUG, LHSM_TAG, \"llapi_hsm_state_get(%s)=%d\", path, rc);\n    if (rc != 0)\n        return rc;\n\n    /* archive_id */\n    *archive_id = file_status.hus_archive_id;\n\n    /* user flags */\n\n    if (file_status.hus_states & HS_NORELEASE)\n        *no_release = true;\n    if (file_status.hus_states & HS_NOARCHIVE)\n        *no_archive = true;\n\n    /* clear them */\n    file_status.hus_states &= ~(HS_NORELEASE | HS_NOARCHIVE);\n\n    /* pending actions */\n\n    if (file_status.hus_in_progress_action == HUA_ARCHIVE) {\n        *p_status = STATUS_ARCHIVE_RUNNING;\n        return 0;\n    } else if (file_status.hus_in_progress_action == HUA_RESTORE) {\n        *p_status = STATUS_RESTORE_RUNNING;\n        return 0;\n    } else if (file_status.hus_in_progress_action == HUA_RELEASE) {\n        DisplayLog(LVL_DEBUG, LHSM_TAG, \"Entry %s is being released\", path);\n    } else if (file_status.hus_in_progress_action == HUA_REMOVE) {\n        
DisplayLog(LVL_DEBUG, LHSM_TAG, \"Entry %s is being removed\", path);\n    }\n\n    /* status flags */\n    if ((file_status.hus_states & HSM_FLAGS_MASK) == 0) {\n        *p_status = STATUS_NEW;\n    } else if (file_status.hus_states & HS_DIRTY) {\n        *p_status = STATUS_MODIFIED;\n    } else if (file_status.hus_states & HS_ARCHIVED) {\n        /* \"and not dirty\" is ensured by the previous test */\n        if (file_status.hus_states & HS_RELEASED) {\n            /* file is archived in HSM, and released from Lustre */\n            *p_status = STATUS_RELEASED;\n        } else {\n            /* file is up-to-date in HSM, and not released in Lustre */\n            *p_status = STATUS_SYNCHRO;\n        }\n    } else if (file_status.hus_states & HS_EXISTS) {\n        /* new file, not yet archived successfully */\n        *p_status = STATUS_MODIFIED;\n    } else {\n        /* In this case: file has non null status and !HS_DIRTY and !HS_ARCHIVED\n         * and !HS_EXISTS.\n         * Maybe is it HS_RELEASED without being HS_ARCHIVED (empty file?)\n         * or maybe is it LOST???\n         */\n        DisplayLog(LVL_MAJOR, LHSM_TAG,\n                   \"Entry %s has inconsistent or unknown HSM flags %#X\",\n                   path, file_status.hus_states);\n        return EINVAL;\n    }\n\n    /** @TODO what if special LOST flag is set??? 
*/\n\n    return 0;\n}\n\n/** helper to set the LHSM status in attribute structure */\nstatic inline int set_lhsm_status(struct sm_instance *smi, attr_set_t *attrs,\n                                  hsm_status_t status)\n{\n    return set_status_attr(smi, attrs, hsm_status2str(status));\n}\n\n/** get the HSM status of an entry */\nstatic int lhsm_status(struct sm_instance *smi,\n                       const entry_id_t *id, const attr_set_t *attrs,\n                       attr_set_t *refreshed_attrs)\n{\n    int rc;\n    char fid_path[RBH_PATH_MAX];\n    hsm_status_t st;\n    bool no_release = false, no_archive = false;\n    unsigned int archive_id = DEFAULT_ARCHIVE_ID;\n\n    if (ATTR_MASK_TEST(attrs, type) &&\n        strcmp(ATTR(attrs, type), STR_TYPE_FILE) != 0) {\n        /* not a file: no status */\n        rc = 0;\n        goto clean_status;\n    }\n\n    rc = BuildFidPath(id, fid_path);\n    if (rc)\n        goto clean_status;\n\n    rc = lhsm_get_status(fid_path, &st, &no_release, &no_archive, &archive_id);\n    if (rc)\n        goto clean_status;\n\n    rc = set_lhsm_status(smi, refreshed_attrs, st);\n    if (rc)\n        goto clean_status;\n\n    /* save archive_id */\n    rc = set_uint_info(smi, refreshed_attrs, ATTR_ARCHIVE_ID, archive_id);\n    if (rc)\n        goto clean_status;\n\n    if (cfg_has_uuid(&config))\n        set_uuid_info(smi, id, refreshed_attrs);\n\n    /* update no_archive/no_release (non critical: ignore errors) */\n    set_bool_info(smi, refreshed_attrs, ATTR_NO_ARCHIVE, no_archive);\n    set_bool_info(smi, refreshed_attrs, ATTR_NO_RELEASE, no_release);\n\n    return 0;\n\n clean_status:\n    if (refreshed_attrs->attr_values.sm_status != NULL)\n        /* don't free it as it contains a const char* */\n        STATUS_ATTR(refreshed_attrs, smi->smi_index) = NULL;\n\n    /* Clean the status from the mask */\n    ATTR_MASK_STATUS_UNSET(refreshed_attrs, smi->smi_index);\n\n    return rc;\n}\n\n/** helper to compare a LHSM status 
*/\nstatic bool status_equal(struct sm_instance *smi, const attr_set_t *attrs,\n                         hsm_status_t status)\n{\n    return !strcmp(STATUS_ATTR(attrs, smi->smi_index), hsm_status2str(status));\n}\n\n/** check this is a supported action */\nstatic bool lhsm_check_action_name(const char *name)\n{\n    if (strcasecmp(name, \"archive\") && strcasecmp(name, \"release\") &&\n        /* special values for deleted entries (for lhsm_remove) */\n        strcasecmp(name, \"removed\") && strcasecmp(name, \"deleted\"))\n        return false;\n\n    return true;\n}\n\nstatic int lhsm_action_callback(struct sm_instance *smi,\n                                const char *implements, int action_status,\n                                const entry_id_t *id, attr_set_t *attrs,\n                                post_action_e *what_after)\n{\n    if (smi == NULL || implements == NULL)\n        return -EINVAL;\n\n    if (!strcasecmp(implements, \"archive\")) {\n        /* successful archive (asynchronous): now archive_running,\n         * else (failed): unchanged. */\n        if (action_status == 0)\n            set_lhsm_status(smi, attrs, STATUS_ARCHIVE_RUNNING);\n        else\n            /* (try to) update hsm_status on failure */\n            lhsm_status(smi, id, attrs, attrs);\n\n        *what_after = PA_UPDATE;\n        return 0;\n    } else if (!strcasecmp(implements, \"release\")) {\n        /* successful release (synchronous): now released, else: unchanged. */\n        if (action_status == 0)\n            set_lhsm_status(smi, attrs, STATUS_RELEASED);\n        else\n            /* (try to) update hsm_status on failure */\n            lhsm_status(smi, id, attrs, attrs);\n\n        *what_after = PA_UPDATE;\n        return 0;\n    } else if (!strcasecmp(implements, \"removed\")\n               || !strcasecmp(implements, \"deleted\")) {\n        /* successful removed (asynchronous): drop from DB */\n        *what_after = (action_status != 0 ? 
PA_NONE : PA_RM_ONE);\n        return 0;\n    } else\n        return -EINVAL;\n}\n\n/** changelog callback */\nstatic int lhsm_cl_cb(struct sm_instance *smi, const CL_REC_TYPE *logrec,\n                      const entry_id_t *id, const attr_set_t *attrs,\n                      attr_set_t *refreshed_attrs, bool *getit,\n                      proc_action_e *rec_action)\n{\n    /* If this is a CREATE record, we know its status is NEW\n     * (except if it is already set to another value) */\n    if (logrec->cr_type == CL_CREATE) {\n        if (!ATTR_MASK_STATUS_TEST(attrs, smi->smi_index)) {\n            /* new file, status is new */\n            set_lhsm_status(smi, refreshed_attrs, STATUS_NEW);\n            /* no need to retrieve it from filesystem */\n            *getit = false;\n        }\n        /* else: file is already known. Preserve the known status. */\n\n        /* new entry: never archived or restored\n         * (non-critical: ignore errors) */\n        set_uint_info(smi, refreshed_attrs, ATTR_LAST_ARCHIVE, 0);\n        set_uint_info(smi, refreshed_attrs, ATTR_LAST_RESTORE, 0);\n        /* no flag is set at creation */\n        set_bool_info(smi, refreshed_attrs, ATTR_NO_ARCHIVE, false);\n        set_bool_info(smi, refreshed_attrs, ATTR_NO_RELEASE, false);\n    } else if ((logrec->cr_type == CL_MKDIR) || (logrec->cr_type == CL_RMDIR)) {\n        /* no status for directories */\n        *getit = false;\n    } else if (logrec->cr_type == CL_HSM) {\n        switch (hsm_get_cl_event(logrec->cr_flags)) {\n        case HE_ARCHIVE:\n            /* is it a successful copy? 
*/\n            if (hsm_get_cl_error(logrec->cr_flags) == CLF_HSM_SUCCESS) {\n                /* save last archive time (non-critical: ignore errors) */\n                set_uint_info(smi, refreshed_attrs, ATTR_LAST_ARCHIVE,\n                              cltime2sec(logrec->cr_time));\n\n                /* Save UUID */\n                if (cfg_has_uuid(&config))\n                    set_uuid_info(smi, id, refreshed_attrs);\n\n                /* We need to fetch the hsm state on archive. The archive ID is\n                 * not present in the changelog record.\n                 */\n                *getit = true;\n            } else {    /* archive failed */\n                /* Entry is probably still dirty. If dirty flag is not set,\n                 * we need to ask the actual status */\n                if (hsm_get_cl_flags(logrec->cr_flags) & CLF_HSM_DIRTY) {\n                    set_lhsm_status(smi, refreshed_attrs, STATUS_MODIFIED);\n                    *getit = false;\n                } else  /* archive failed but entry is not dirty?\n                         * retrieve the status from filesystem */\n                    *getit = true;\n            }\n            break;\n\n        case HE_RESTORE:\n            if (hsm_get_cl_error(logrec->cr_flags) == CLF_HSM_SUCCESS) {\n                /* save last restore time (non-critical: ignore errors) */\n                set_uint_info(smi, refreshed_attrs, ATTR_LAST_RESTORE,\n                              cltime2sec(logrec->cr_time));\n\n                /* status is 'up-to-date' after a successful restore */\n                set_lhsm_status(smi, refreshed_attrs, STATUS_SYNCHRO);\n                *getit = false;\n            } else {    /* failed restore */\n\n                /* Entry status remains 'released' */\n                set_lhsm_status(smi, refreshed_attrs, STATUS_RELEASED);\n                *getit = false;\n            }\n            break;\n\n        case HE_RELEASE:\n            if 
(hsm_get_cl_error(logrec->cr_flags) != CLF_HSM_SUCCESS) {\n                /* release records are not expected to be erroneous */\n                DisplayLog(LVL_CRIT, LHSM_TAG,\n                           \"ERROR: Unexpected HSM release event with error %d\",\n                           hsm_get_cl_error(logrec->cr_flags));\n                /* make sure of actual entry status */\n                *getit = true;\n            } else {    /* successful release */\n\n                set_lhsm_status(smi, refreshed_attrs, STATUS_RELEASED);\n                *getit = false;\n            }\n            break;\n\n        case HE_STATE:\n            /* state changed: did it become dirty? */\n            if (hsm_get_cl_flags(logrec->cr_flags) & CLF_HSM_DIRTY) {\n                set_lhsm_status(smi, refreshed_attrs, STATUS_MODIFIED);\n                *getit = false;\n            } else  /* other status change: need to get it */\n                *getit = true;\n\n            break;\n\n        case HE_REMOVE:\n        case HE_CANCEL:\n            /* undetermined status after such an event */\n            *getit = true;\n            break;\n\n        default:\n            DisplayLog(LVL_CRIT, LHSM_TAG,\n                       \"ERROR: unknown HSM event: bitfield=%#x, event=%u\",\n                       logrec->cr_flags, hsm_get_cl_event(logrec->cr_flags));\n            /* skip */\n            return EINVAL;\n        }\n    } else if (logrec->cr_type == CL_MTIME || logrec->cr_type == CL_TRUNC ||\n               (logrec->cr_type == CL_CLOSE)) {\n        /* If file is modified or truncated, need to check its status\n         * (probably modified) EXCEPT if its status is already 'modified' */\n        if (!ATTR_MASK_STATUS_TEST(attrs, smi->smi_index)\n            || (!status_equal(smi, attrs, STATUS_NEW) &&\n                !status_equal(smi, attrs, STATUS_MODIFIED))) {\n            DisplayLog(LVL_DEBUG, LHSM_TAG,\n                       \"Getstatus needed because this is a %s event 
\"\n                       \"and status is not already 'modified' or 'new': status=%s\",\n                       changelog_type2str(logrec->cr_type),\n                       ATTR_MASK_STATUS_TEST(attrs, smi->smi_index) ?\n                       STATUS_ATTR(attrs, smi->smi_index) : \"<not set>\");\n            *getit = true;\n        }\n    } else if ((logrec->cr_type == CL_UNLINK)\n               && (logrec->cr_flags & CLF_UNLINK_LAST)) {\n        /* if CLF_UNLINK_HSM_EXISTS is set, we must clean something in the\n         * backend.\n         * always add the entry to the SOFTRM_TABLE.\n         */\n        if (logrec->cr_flags & CLF_UNLINK_HSM_EXISTS) {\n            /* Don't care about softrm filter here as Lustre explicitely\n             * indicates there is something to be cleaned in the backend. */\n            *rec_action = PROC_ACT_SOFTRM_ALWAYS;\n        } else  /* remove the entry from DB */\n            *rec_action = PROC_ACT_RM_ALL;\n    }\n\n    /* other records: keep default value for status need */\n    return 0;\n}\n\n/** function to determine if a deleted entry must be inserted to SOFTRM table\n */\nstatic proc_action_e lhsm_softrm_filter(struct sm_instance *smi,\n                                        const entry_id_t *id,\n                                        const attr_set_t *attrs)\n{\n    if (ATTR_MASK_TEST(attrs, type)\n        && strcmp(ATTR(attrs, type), STR_TYPE_FILE) != 0) {\n        DisplayLog(LVL_FULL, LHSM_TAG,\n                   \"Removing non-file entry (no rm in backend)\");\n        return PROC_ACT_RM_ALL;\n    } else if (ATTR_MASK_STATUS_TEST(attrs, smi->smi_index)\n               && status_equal(smi, attrs, STATUS_NEW)) {\n        DisplayLog(LVL_DEBUG, LHSM_TAG,\n                   \"Removing 'new' entry (\" DFID \"): no remove in backend\",\n                   PFID(id));\n        return PROC_ACT_RM_ALL;\n    }\n    /* If we have a doubt, always insert to softrm.\n     *  In the worst case, it's just a useless hsm_rm 
operation.\n     */\n    return PROC_ACT_SOFTRM_ALWAYS;\n}\n\n/** rebind an entry to a new fid in HSM backend */\nstatic int lhsm_rebind(const entry_id_t *old_id, const entry_id_t *new_id,\n                       const attr_set_t *new_attrs,\n                       struct sm_instance *smi, unsigned int archive_id)\n{\n    const char descr[] = \"rebind command\";\n    char **cmd = NULL;\n    char *log_cmd;\n    char tmp[256];  /* max length for fid */\n    action_params_t cmd_params = { 0 };\n    int rc;\n\n    if (!old_id || !new_id) {\n        DisplayLog(LVL_CRIT, LHSM_TAG, \"Missing mandatory old/new fid \"\n                   \"argument for rebind operation\");\n        return -EINVAL;\n    }\n\n    DisplayLog(LVL_EVENT, LHSM_TAG, \"Rebinding \" DFID \" to \" DFID \" in archive\",\n               PFID(old_id), PFID(new_id));\n\n    /* push archive_id, oldfid, newfid into command params */\n    snprintf(tmp, sizeof(tmp), \"%u\", archive_id);\n    rbh_param_set(&cmd_params, ARCHIVE_PARAM, tmp, true);\n    snprintf(tmp, sizeof(tmp), DFID_NOBRACE, PFID(old_id));\n    rbh_param_set(&cmd_params, \"oldfid\", tmp, true);\n    snprintf(tmp, sizeof(tmp), DFID_NOBRACE, PFID(new_id));\n    rbh_param_set(&cmd_params, \"newfid\", tmp, true);\n\n    rc = subst_shell_params(config.rebind_cmd, descr, new_id, new_attrs,\n                            &cmd_params, NULL, smi, true, &cmd);\n    rbh_params_free(&cmd_params);\n\n    if (rc) {\n        log_cmd = concat_cmd(config.rebind_cmd);\n        DisplayLog(LVL_MAJOR, LHSM_TAG, \"Invalid rebind command: %s\", log_cmd);\n        free(log_cmd);\n        return rc;\n    }\n\n    if (log_config.debug_level >= LVL_EVENT) {\n        log_cmd = concat_cmd(cmd);\n        DisplayLog(LVL_EVENT, LHSM_TAG, \"Executing rebind command: %s\",\n                   log_cmd);\n        free(log_cmd);\n    }\n\n    rc = execute_shell_command(cmd, cb_stderr_to_log, (void *)LVL_DEBUG);\n    g_strfreev(cmd);\n\n    return rc;\n}\n\n/**\n * Undelete function 
for Lustre/HSM.\n * Creates file in 'released' state, using the given attributes.\n * Then call directly an external command to rebind the old archived\n * entry with the new fid. As long as lustre can't transmit rebind\n * commands to copytools, robinhood directly calls a admin-defined\n * command to do this.\n */\nstatic recov_status_t lhsm_undelete(struct sm_instance *smi,\n                                    const entry_id_t *p_old_id,\n                                    const attr_set_t *p_attrs_old_in,\n                                    entry_id_t *p_new_id,\n                                    attr_set_t *p_attrs_new,\n                                    bool already_recovered)\n{\n    struct stat entry_stat = { 0 };\n    unsigned int *tmp;\n    unsigned int idx;\n    const sm_info_def_t *def;\n    unsigned int archive_id = DEFAULT_ARCHIVE_ID;\n    int rc;\n    const char *path;\n    char *uuid = NULL;\n\n    /* Lustre/HSM only archive files */\n    if (ATTR_MASK_TEST(p_attrs_old_in, type) &&\n        strcmp(ATTR(p_attrs_old_in, type), STR_TYPE_FILE) != 0) {\n        return RS_NOBACKUP;\n    }\n\n    /* convert attrs from DB to a struct stat */\n    rbh_attrs2stat(p_attrs_old_in, &entry_stat);\n\n    if (!ATTR_MASK_TEST(p_attrs_old_in, fullpath)) {\n        DisplayLog(LVL_MAJOR, LHSM_TAG, \"Missing mandatory parameter \"\n                   \"'fullpath' to import the file.\");\n        /** TODO create as <root>/.undelete/old_<fid> */\n        return RS_ERROR;\n    } else {\n        path = ATTR(p_attrs_old_in, fullpath);\n    }\n\n    rc = sm_attr_get(smi, p_attrs_old_in, \"lhsm.archive_id\", (void **)&tmp,\n                     &def, &idx);\n    if (rc == 0) {\n        /* sanity check of returned type */\n        if (def->db_type != DB_UINT)\n            DisplayLog(LVL_CRIT, LHSM_TAG,\n                       \"Unexpected type for 'lhsm.archive_id': %d\",\n                       def->db_type);\n        else\n            archive_id = *tmp;\n    }\n\n    rc 
= sm_attr_get(smi, p_attrs_old_in, \"lhsm.uuid\", (void **)&uuid,\n                     &def, &idx);\n    if (rc == 0) {\n        /* sanity check of returned type */\n        if (def->db_type != DB_TEXT) {\n            DisplayLog(LVL_CRIT, LHSM_TAG,\n                       \"Unexpected type for 'lhsm.uuid': %d\", def->db_type);\n            uuid = NULL;\n        }\n    }\n\n    if (!already_recovered) {\n        /* create parent directory if it does not already exist */\n        rc = create_parent_of(path, NULL);\n        if (rc != 0 && rc != -EEXIST) {\n            DisplayLog(LVL_CRIT, LHSM_TAG, \"Failed to create parent directory for \"\n                       \"file '%s': %s\", path, strerror(-rc));\n            return RS_ERROR;\n        }\n\n        /* create the file in 'released' state */\n        rc = llapi_hsm_import(path, archive_id, &entry_stat, 0, -1, 0, 0, NULL,\n                              p_new_id);\n        if (rc) {\n            DisplayLog(LVL_CRIT, LHSM_TAG, \"Failed to import file '%s': %s\", path,\n                       strerror(-rc));\n            return RS_ERROR;\n        }\n    }\n\n    if (cfg_has_uuid(&config) && !uuid)\n        DisplayLog(LVL_CRIT, LHSM_TAG, \"WARNING: restoring entry '%s' \"\n                   \"without UUID. Will try rebind instead.\", path);\n\n    /* Set the UUID back */\n    if (uuid) {\n        DisplayLog(LVL_DEBUG, LHSM_TAG, \"Setting xattr %s='%s' on '%s'\",\n                   config.uuid_xattr, uuid, path);\n        rc = lsetxattr(path, config.uuid_xattr, uuid, UUID_XATTR_STRLEN, 0);\n        rc = rc ? 
errno : 0;\n        if (rc) {\n            DisplayLog(LVL_CRIT, LHSM_TAG,\n                       \"Failed to set UUID for file '%s': %s\",\n                       path, strerror(rc));\n            return RS_ERROR;\n        }\n    }\n\n    /* get the new entry attributes */\n    if (lstat(path, &entry_stat)) {\n        DisplayLog(LVL_CRIT, LHSM_TAG, \"Failed to stat imported file '%s': %s\",\n                   path, strerror(errno));\n        return RS_ERROR;\n    }\n    stat2rbh_attrs(&entry_stat, p_attrs_new, true);\n\n    if (!cfg_has_uuid(&config) || !uuid) {\n        rc = lhsm_rebind(p_old_id, p_new_id, p_attrs_new, smi, archive_id);\n        if (rc) {\n            DisplayLog(LVL_CRIT, LHSM_TAG,\n                       \"Failed to rebind entry in backend: %s\",\n                       rc < 0 ? strerror(-rc) : \"command failed\");\n            return RS_ERROR;\n        }\n    }\n\n    return RS_FILE_OK;\n}\n\n#define DEFAULT_REBIND_CMD \"lhsmtool_posix --archive={archive_id} \" \\\n                                \"--rebind {oldfid} {newfid} {fsroot}\"\n\nstatic void lhsm_cfg_set_default(void *module_config)\n{\n    lhsm_config_t *conf = module_config;\n    GError *err_desc;\n    int ac;\n\n    if (!g_shell_parse_argv(DEFAULT_REBIND_CMD, &ac, &conf->rebind_cmd,\n                            &err_desc)) {\n        DisplayLog(LVL_CRIT, __func__,\n                   \"Failed to parse default rebind_cmd '%s': %s\",\n                   DEFAULT_REBIND_CMD, err_desc->message);\n        g_error_free(err_desc);\n        conf->rebind_cmd = NULL;\n        abort();\n    }\n    if (ac == 0) {\n        g_strfreev(conf->rebind_cmd);\n        conf->rebind_cmd = NULL;\n    }\n\n    conf->uuid_xattr[0] = 0;\n    conf->strict_uuid = true;\n}\n\n#define UUID_CONFIG_BLOCK \"uuid\"\n\nstatic void lhsm_cfg_write_default(FILE *output)\n{\n    print_begin_block(output, 0, LHSM_BLOCK, NULL);\n    print_line(output, 1, \"rebind_cmd: \" DEFAULT_REBIND_CMD);\n    print_begin_block(output, 
1, UUID_CONFIG_BLOCK, NULL);\n    print_line(output, 2, \"xattr = \\\"\\\" (disabled)\");\n    print_line(output, 2, \"strict_uuid = yes\");\n    print_end_block(output, 1);\n\n    print_end_block(output, 0);\n}\n\nstatic int lhsm_cfg_read(config_file_t config, void *module_config,\n                         char *msg_out)\n{\n    int rc;\n    lhsm_config_t *conf = module_config;\n    config_item_t block;\n    config_item_t uuid_block;\n    bool unique = true;\n\n    const cfg_param_t hsm_params[] = {\n        /* rebind_cmd can contain wildcards: {fsroot} {oldfid} {newfid}... */\n        {\"rebind_cmd\", PT_CMD, 0, &conf->rebind_cmd, 0},\n        END_OF_PARAMS\n    };\n\n    const cfg_param_t uuid_params[] = {\n        {\"xattr\", PT_STRING, 0, conf->uuid_xattr, sizeof(conf->uuid_xattr)},\n        {\"strict_uuid\", PT_BOOL, 0, &conf->strict_uuid, 0},\n        END_OF_PARAMS\n    };\n\n    static const char *uuid_allowed[] = {\n        \"xattr\", \"strict_uuid\", NULL\n    };\n\n    static const char *allowed_params[] = {\n        \"rebind_cmd\", \"uuid\", NULL\n    };\n\n    /* get lhsm_config block */\n    rc = get_cfg_block(config, LHSM_BLOCK, &block, msg_out);\n    if (rc)\n        return rc == ENOENT ? 
0 : rc;   /* not mandatory */\n\n    /* read std parameters */\n    rc = read_scalar_params(block, LHSM_BLOCK, hsm_params, msg_out);\n    if (rc)\n        return rc;\n\n    CheckUnknownParameters(block, LHSM_BLOCK, allowed_params);\n\n    /* Read uuid block if present */\n    uuid_block =\n        rh_config_FindItemByName(config, LHSM_BLOCK \"::\" UUID_CONFIG_BLOCK,\n                                 &unique);\n    if (uuid_block) {\n        rc = get_cfg_block(config, LHSM_BLOCK \"::\" UUID_CONFIG_BLOCK,\n                           &uuid_block, msg_out);\n        if (rc)\n            return rc;\n\n        rc = read_scalar_params(uuid_block, UUID_CONFIG_BLOCK, uuid_params,\n                                msg_out);\n        if (rc)\n            return rc;\n\n        CheckUnknownParameters(uuid_block, UUID_CONFIG_BLOCK, uuid_allowed);\n    }\n\n    return 0;\n}\n\nstatic void lhsm_cfg_write_template(FILE *output)\n{\n    print_begin_block(output, 0, LHSM_BLOCK, NULL);\n    print_line(output, 1, \"# command to rebind an entry in the backend\");\n    print_line(output, 1, \"rebind_cmd = \\\"lhsmtool_posix \"\n               \"--archive={archive_id} --hsm_root=/tmp/backend \"\n               \"--rebind {oldfid} {newfid} {fsroot}\\\";\");\n    print_begin_block(output, 1, UUID_CONFIG_BLOCK, NULL);\n    print_line(output, 2, \"xattr = \\\"trusted.lhsm.uuid\\\";\");\n    print_line(output, 2, \"# enforce UUID-length of 36 bytes\");\n    print_line(output, 2, \"strict_uuid = yes;\");\n    print_end_block(output, 1);\n    print_end_block(output, 0);\n}\n\nstatic void *lhsm_cfg_new(void)\n{\n    return calloc(1, sizeof(lhsm_config_t));\n}\n\nstatic void lhsm_cfg_free(void *cfg)\n{\n    if (cfg != NULL)\n        free(cfg);\n}\n\nstatic int lhsm_cfg_set(void *cfg, bool reload)\n{\n    lhsm_config_t *new = cfg;\n\n    if (!reload) {\n        config = *new;\n        return 0;\n    }\n\n    if (compare_cmd(new->rebind_cmd, config.rebind_cmd)) {\n        DisplayLog(LVL_MAJOR, 
LHSM_TAG,\n                   LHSM_BLOCK \"::rebind_cmd changed in config file \"\n                   \"but cannot be changed dynamically\");\n    }\n\n    return 0;\n}\n\nstatic const mod_cfg_funcs_t lhsm_cfg_hdlr = {\n    .module_name = \"lhsm\",\n    .new = lhsm_cfg_new,\n    .free = lhsm_cfg_free,\n    .set_default = lhsm_cfg_set_default,\n    .read = lhsm_cfg_read,\n    .set_config = lhsm_cfg_set,\n    .write_default = lhsm_cfg_write_default,\n    .write_template = lhsm_cfg_write_template,\n};\n\n/** Status manager for Lustre/HSM */\nstatic status_manager_t lhsm_sm = {\n    .name = \"lhsm\",\n    .flags = SM_SHARED | SM_DELETED | SM_MULTI_ACTION,\n    .status_enum = lhsm_status_list,\n    .status_count = STATUS_COUNT,\n    .nb_info = G_N_ELEMENTS(lhsm_info),\n    .info_types = lhsm_info,\n\n    /* This policy needs the previous status to process changelog callbacks.\n     * As we don't know the actual index of the status manager instance (smi)\n     * we set it to SMI_MASK(0). It is translated later by accessors to\n     * its actual index.  
*/\n    .status_needs_attrs_cached = {.std = ATTR_MASK_type, .status = SMI_MASK(0)},\n    .status_needs_attrs_fresh = {0},\n\n    .get_status_func = lhsm_status,\n    .changelog_cb = lhsm_cl_cb,\n\n    .check_action_name = lhsm_check_action_name,\n    .action_cb = lhsm_action_callback,\n\n    /* fields for managing deleted entries */\n    .softrm_filter_mask = {.std = ATTR_MASK_type, .status = SMI_MASK(0)},\n    .softrm_filter_func = lhsm_softrm_filter,\n\n    /** needed attributes for undelete in addition to POSIX and fullpath:\n     * - lhsm_status: to know the original status of the 'undeleted' entry.\n     * - archive_id: to know what archive the hsm_remove order must be sent to.\n     * - uuid\n     */\n    .softrm_table_mask = {.status = SMI_MASK(0),\n                          .sm_info = GENERIC_INFO_BIT(ATTR_ARCHIVE_ID) |\n                          GENERIC_INFO_BIT(ATTR_UUID)\n                          },\n    .undelete_func = lhsm_undelete,\n\n    /* XXX about full disaster recovery: must recreate all metadata\n     * (incl. symlinks => need link field)\n     * not only the entries managed by the policy.\n     * This was used to be done using the contents of ENTRIES table.\n     */\n\n    .cfg_funcs = &lhsm_cfg_hdlr,\n    .init_func = lhsm_init\n};\n\nconst char *mod_get_name(void)\n{\n    return lhsm_sm.name;\n}\n\nstatus_manager_t *mod_get_status_manager(void)\n{\n    return &lhsm_sm;\n}\n\naction_func_t mod_get_action(const char *action_name)\n{\n    if (strcmp(action_name, \"lhsm.archive\") == 0)\n        return lhsm_archive;\n    else if (strcmp(action_name, \"lhsm.release\") == 0)\n        return lhsm_release;\n    else if (strcmp(action_name, \"lhsm.hsm_remove\") == 0\n             || strcmp(action_name, \"lhsm.remove\") == 0)\n        return lhsm_remove;\n    else\n        return NULL;\n}\n"
  },
  {
    "path": "src/modules/mod_internal.c",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2015 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n#ifdef HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n\n#include \"list_mgr.h\"\n#include \"rbh_logs.h\"\n#include \"rbh_misc.h\"\n#include \"rbh_modules.h\"\n#include \"mod_internal.h\"\n#include \"policy_rules.h\"\n#include \"status_manager.h\"\n#include \"Memory.h\"\n#include <unistd.h>\n#include <utime.h>\n#include <fcntl.h>\n#include <sys/sendfile.h>\n#include <zlib.h>\n\nstruct copy_params_t {\n    const char *name;\n    copy_flags_e flag;\n} copy_params[] = {\n    {\"compress\", CP_COMPRESS}, /* compress target */\n    {\"nosync\",   CP_NO_SYNC},  /* don't sync when the copy ends */\n    {\"copyback\", CP_COPYBACK}, /* revert copy way: tgt->src */\n    {\"mkdir\",    CP_MKDIR},    /* create parent directories */\n    {NULL, 0}\n};\n\n/** helper to set file attributes from a struct stat */\nstatic int file_clone_attrs(const char *tgt, const struct stat *st)\n{\n    struct utimbuf tbuf;\n\n    if (lchown(tgt, st->st_uid, st->st_gid))\n        return -errno;\n    if (chmod(tgt, st->st_mode & 07777))\n        return -errno;\n\n    tbuf.actime = st->st_atime;\n    tbuf.modtime = st->st_mtime;\n\n    if (utime(tgt, &tbuf))\n        return -errno;\n\n    return 0;\n}\n\ncopy_flags_e cp_params2flags(const action_params_t *params)\n{\n    copy_flags_e flg = 0;\n    struct copy_params_t *curr_param;\n\n    if (params == NULL)\n        return 0;\n\n    for (curr_param = copy_params; curr_param->name != NULL; curr_param++) {\n        const char *val = rbh_param_get(params, curr_param->name);\n        if ((val != NULL) 
&& (str2bool(val) == 1))\n            flg |= curr_param->flag;\n    }\n\n    return flg;\n}\n\nstruct copy_info {\n    const char *src;\n    const char *dst;\n    int src_fd;\n    int dst_fd;\n    struct stat src_st;\n};\n\nstatic int flush_data(int srcfd, int dstfd, copy_flags_e flags)\n{\n    posix_fadvise(srcfd, 0, 0, POSIX_FADV_DONTNEED);\n    if (!(flags & CP_NO_SYNC)) {\n        if (fdatasync(dstfd) < 0)\n            return -errno;\n    }\n    posix_fadvise(dstfd, 0, 0, POSIX_FADV_DONTNEED);\n    return 0;\n}\n\nstatic inline bool compress_src(copy_flags_e flags)\n{\n    return (flags & CP_COMPRESS) && (flags & CP_COPYBACK);\n}\n\nstatic inline bool uncompress_src(copy_flags_e flags)\n{\n    return (flags & CP_COMPRESS) && !(flags & CP_COPYBACK);\n}\n\nstatic int builtin_copy_standard(const struct copy_info *cp_nfo,\n                                 copy_flags_e flags)\n{\n    int srcfd, dstfd;\n    struct stat dst_st;\n    int rc = 0;\n    size_t io_size;\n    ssize_t r = 0, w = 0;\n    char *io_buff = NULL;\n    gzFile gz = NULL;\n    int gzerr, err_close = 0;\n\n    if (compress_src(flags)) {\n        srcfd = dup(cp_nfo->src_fd);\n        dstfd = cp_nfo->dst_fd;\n\n        gz = gzdopen(srcfd, \"rb\");\n        if (gz == NULL) {\n            DisplayLog(LVL_MAJOR, CP_TAG,\n                       \"Failed to initialize decompression stream\");\n            close(srcfd);\n            return -EIO;\n        }\n    } else if (uncompress_src(flags)) {\n        srcfd = cp_nfo->src_fd;\n        dstfd = dup(cp_nfo->dst_fd);\n\n        gz = gzdopen(dstfd, \"wb\");\n        if (gz == NULL) {\n            DisplayLog(LVL_MAJOR, CP_TAG,\n                       \"Failed to initialize decompression stream\");\n            close(dstfd);\n            return -EIO;\n        }\n    } else {\n        /* Uncompressed regular copy */\n        srcfd = cp_nfo->src_fd;\n        dstfd = cp_nfo->dst_fd;\n    }\n\n    /* needed to get the biggest IO size of source and destination. 
*/\n    if (fstat(dstfd, &dst_st)) {\n        rc = -errno;\n        DisplayLog(LVL_MAJOR, CP_TAG, \"Failed to stat %s: %s\",\n                   cp_nfo->dst, strerror(-rc));\n        goto out_close;\n    }\n\n    io_size = MAX2(cp_nfo->src_st.st_blksize, dst_st.st_blksize);\n    DisplayLog(LVL_DEBUG, CP_TAG, \"using IO size = %\" PRI_SZ, io_size);\n\n    io_buff = MemAlloc(io_size);\n    if (!io_buff) {\n        rc = -ENOMEM;\n        goto out_close;\n    }\n\n    /* Do the copy */\n    do {\n        if (compress_src(flags))\n            r = gzread(gz, io_buff, io_size);\n        else\n            r = read(srcfd, io_buff, io_size);\n\n        if (r <= 0)\n            break;\n\n        if (uncompress_src(flags))\n            w = gzwrite(gz, io_buff, r);\n        else\n            w = write(dstfd, io_buff, r);\n\n        if (w < 0) {\n            rc = -errno;\n            DisplayLog(LVL_MAJOR, CP_TAG, \"Copy error (%s -> %s): %s\",\n                       cp_nfo->src, cp_nfo->dst, strerror(-rc));\n            goto out_free;\n        } else if (w < r) {\n            DisplayLog(LVL_MAJOR, CP_TAG, \"Short write on %s, aborting copy\",\n                       cp_nfo->dst);\n            rc = -EAGAIN;\n            goto out_free;\n        }\n    } while (r > 0);\n\n    if (r < 0) {    /* error */\n        rc = -errno;\n        goto out_free;\n    }\n    /* else (r == 0): EOF */\n\n    /* need to flush the compression buffer before system sync */\n    if (compress_src(flags)) {\n        if (gzflush(gz, Z_FINISH) != Z_OK) {\n            DisplayLog(LVL_MAJOR, CP_TAG, \"compression error for %s: %s\",\n                       cp_nfo->dst, gzerror(gz, &gzerr));\n            rc = -EIO;\n            goto out_free;\n        }\n    }\n\n    /* Free the kernel buffer cache as we don't expect to read the files again.\n     * This can be done immediately for the read file.\n     * For the written file, we need to flush it to disk to ensure\n     * that it is correctly archived and to 
allow freeing the buffer cache. */\n    rc = flush_data(srcfd, dstfd, flags);\n    if (rc)\n        goto out_free;\n\n out_free:\n    MemFree(io_buff);\n\n out_close:\n    if (flags & CP_COMPRESS)\n        err_close = (gzclose(gz) != Z_OK);\n\n    if (err_close && rc == 0) {\n        rc = errno ? -errno : -EIO;\n        DisplayLog(LVL_MAJOR, CP_TAG, \"close failed on %s: %s\",\n                   cp_nfo->src, \"error closing compression stream\");\n    }\n\n    return rc;\n}\n\nstatic int builtin_copy_sendfile(const struct copy_info *cp_nfo,\n                                 copy_flags_e flags)\n{\n    int rc;\n    int srcfd = cp_nfo->src_fd;\n    int dstfd = cp_nfo->dst_fd;\n    size_t fsize = cp_nfo->src_st.st_size;\n\n#if HAVE_FALLOCATE\n    rc = fallocate(dstfd, 0, 0, fsize);\n    if (rc) {\n        rc = -errno;\n        DisplayLog(LVL_MAJOR, CP_TAG, \"Failed to fallocate %s: %s\",\n                   cp_nfo->dst, strerror(-rc));\n        goto out;\n    }\n#endif\n\n    rc = sendfile(dstfd, srcfd, NULL, fsize);\n    if (rc) {\n        rc = -errno;\n        DisplayLog(LVL_MAJOR, CP_TAG, \"Failed to sendfile(%s->%s): %s\",\n                   cp_nfo->src, cp_nfo->dst, strerror(-rc));\n        goto out;\n    }\n\n    rc = flush_data(srcfd, dstfd, flags);\n    if (rc)\n        goto out;\n\n out:\n    return rc;\n}\n\nint builtin_copy(const char *src, const char *dst, int dst_oflags,\n                 bool save_attrs, copy_flags_e flags)\n{\n    struct copy_info cp_nfo;\n    int rc, err_close = 0;\n\n    cp_nfo.src = src;\n    cp_nfo.dst = dst;\n\n    DisplayLog(LVL_DEBUG, \"Mod\",\n               \"builtin_copy('%s', '%s', oflg=%#x, save_attrs=%d, flags=%#x)\",\n               src, dst, dst_oflags, save_attrs, flags);\n\n    cp_nfo.src_fd = open(src, O_RDONLY | O_NOATIME);\n    if (cp_nfo.src_fd < 0) {\n        rc = -errno;\n        DisplayLog(LVL_MAJOR, CP_TAG, \"Can't open %s for read: %s\", src,\n                   strerror(-rc));\n        return rc;\n    }\n\n  
  if (fstat(cp_nfo.src_fd, &cp_nfo.src_st)) {\n        rc = -errno;\n        DisplayLog(LVL_MAJOR, CP_TAG, \"Failed to stat %s: %s\", src,\n                   strerror(-rc));\n        goto close_src;\n    }\n\n    if (flags & CP_MKDIR) {\n        rc = create_parent_of(dst, NULL);\n        if (rc != 0 && rc != -EEXIST)\n           goto close_src;\n    }\n\n    cp_nfo.dst_fd = open(dst, dst_oflags, cp_nfo.src_st.st_mode & 07777);\n    if (cp_nfo.dst_fd < 0) {\n        rc = -errno;\n        DisplayLog(LVL_MAJOR, CP_TAG, \"Can't open %s for write: %s\",\n                   dst, strerror(-rc));\n        goto close_src;\n    }\n\n    if (flags & CP_COMPRESS)\n        rc = builtin_copy_standard(&cp_nfo, flags);\n    else if (flags & CP_USE_SENDFILE)\n        rc = builtin_copy_sendfile(&cp_nfo, flags);\n    else\n        rc = builtin_copy_standard(&cp_nfo, flags);\n\n    err_close = close(cp_nfo.dst_fd);\n    if (err_close && (rc == 0)) {\n        rc = errno ? -errno : -EIO;\n        DisplayLog(LVL_MAJOR, CP_TAG, \"close failed on %s: %s\",\n                   dst, strerror(-rc));\n    }\n\n close_src:\n    close(cp_nfo.src_fd);\n\n    if (rc == 0 && save_attrs)\n        rc = file_clone_attrs(dst, &cp_nfo.src_st);\n\n    return rc;\n}\n\n/** collect the first line of output */\nstatic int cb_collect_stdout(void *arg, char *line, size_t size, int stream)\n{\n    GString *out = (GString *) arg;\n    int len;\n\n    if (line == NULL || out == NULL)\n        return -EINVAL;\n\n    len = strnlen(line, size);\n    /* terminate the string */\n    if (len >= size)\n        line[len - 1] = '\\0';\n\n    /* remove '\\n' */\n    if ((len > 0) && (line[len - 1] == '\\n'))\n        line[len - 1] = '\\0';\n\n    switch (stream) {\n    case STDOUT_FILENO:\n        if (out->len == 0)\n            g_string_append(out, line);\n        break;\n    case STDERR_FILENO:\n        DisplayLog(LVL_EVENT, \"cmd_stderr\", \"%s\", line);\n        break;\n    }\n\n    return 0;\n}\n\n/**\n * Run a 
shell command to perform an action.\n * @param [in,out] out  Initialized GString to collect command stdout\n *                      (NULL for no output).\n */\nstatic int run_command(const char *name, char **cmd_in,\n                       const entry_id_t *p_id,\n                       const attr_set_t *p_attrs,\n                       const action_params_t *params,\n                       struct sm_instance *smi, GString *out)\n{\n    gchar **cmd;\n    char *log_cmd;\n    int rc = 0;\n\n    /** @TODO set additional params */\n    rc = subst_shell_params(cmd_in, \"command\", p_id, p_attrs, params, NULL,\n                            smi, true, &cmd);\n    if (rc)\n        return rc;\n\n    /* call custom purge command instead of unlink() */\n    if (log_config.debug_level >= LVL_DEBUG) {\n        log_cmd = concat_cmd(cmd);\n        DisplayLog(LVL_DEBUG, __func__, DFID \": %s action: cmd(%s)\",\n                   PFID(p_id), name, log_cmd);\n        free(log_cmd);\n    }\n\n    if (out == NULL)\n        /* do not collect output, just redirect command stderr to the log */\n        rc = execute_shell_command(cmd, cb_stderr_to_log, (void *)LVL_DEBUG);\n    else\n        /* collect stdout in out Gstring */\n        rc = execute_shell_command(cmd, cb_collect_stdout, (void *)out);\n\n    g_strfreev(cmd);\n\n    return rc;\n}\n\nint action_helper(const policy_action_t *action, const char *name,\n                  const entry_id_t *p_id, attr_set_t *p_attrs,\n                  const action_params_t *params, struct sm_instance *smi,\n                  GString *out, post_action_e *after,\n                  db_cb_func_t db_cb_fn, void *db_cb_arg)\n{\n    int rc;\n\n    switch (action->type) {\n    case ACTION_COMMAND:\n        rc = run_command(name, action->action_u.command, p_id, p_attrs,\n                         params, smi, out);\n        break;\n\n    case ACTION_FUNCTION:\n        DisplayLog(LVL_DEBUG, __func__, DFID \": %s action: %s\", PFID(p_id),\n                   
name, action->action_u.func.name);\n        rc = action->action_u.func.call(p_id, p_attrs, params, after,\n                                        db_cb_fn, db_cb_arg);\n        break;\n\n    case ACTION_NONE:\n        DisplayLog(LVL_DEBUG, __func__, \"%s(\" DFID \"): noop\", name, PFID(p_id));\n        rc = 0;\n        break;\n\n    case ACTION_UNSET:\n        DisplayLog(LVL_EVENT, __func__, \"%s(\" DFID \"): no action specified\",\n                   name, PFID(p_id));\n        rc = 0;\n        break;\n\n    default:\n        RBH_BUG(\"action->type is invalid\");\n    }\n    return rc;\n}\n\n/**\n * This symbol is embedded in all modules. Thus, it returns the version\n * of the modules depending on the source tree where they have been\n * built.\n */\nint mod_get_version(void)\n{\n    return RBH_MODULE_VERSION;\n}\n"
  },
  {
    "path": "src/modules/mod_internal.h",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2010-2014 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n#ifndef UTILITY_H\n#define UTILITY_H\n\n#include <stdlib.h>\n#include <stdbool.h>\n#include \"rbh_modules.h\"\n\n/* log tag for built-in copy */\n#define CP_TAG \"cp\"\n\n#define TARGET_PATH_PARAM \"targetpath\"\n\ntypedef enum {\n    CP_COMPRESS     = (1 << 0),\n    CP_USE_SENDFILE = (1 << 1),\n    CP_NO_SYNC      = (1 << 2),\n    CP_COPYBACK     = (1 << 3), /* retrieve a copy */\n    CP_MKDIR        = (1 << 4),\n} copy_flags_e;\n\n/** These functions are shared by several modules (namely common & backup). */\nint builtin_copy(const char *src, const char *dst, int dst_oflags,\n                 bool save_attrs, copy_flags_e flags);\n\n/** set copy flags from a parameter set */\ncopy_flags_e cp_params2flags(const action_params_t *params);\n\n/** helper to set the entry status for the given SMI */\nstatic inline int set_status_attr(const sm_instance_t *smi,\n                                  attr_set_t *pattrs, const char *str_st)\n{\n    int rc;\n\n    if (str_st == NULL) {\n        rc = -EINVAL;\n        goto clean_status;\n    }\n\n    /* check allocation of sm_status array */\n    sm_status_ensure_alloc(&pattrs->attr_values.sm_status);\n    if (pattrs->attr_values.sm_status == NULL) {\n        rc = -ENOMEM;\n        goto clean_status;\n    }\n\n    STATUS_ATTR(pattrs, smi->smi_index) = str_st;\n    ATTR_MASK_STATUS_SET(pattrs, smi->smi_index);\n\n    return 0;\n\n clean_status:\n    if (pattrs->attr_values.sm_status != NULL)\n        /* don't free it as it contains a const char* */\n        
STATUS_ATTR(pattrs, smi->smi_index) = NULL;\n\n    /* Clean the status from the mask */\n    ATTR_MASK_STATUS_UNSET(pattrs, smi->smi_index);\n\n    return rc;\n}\n\n/** helper to set bool attr */\nstatic inline int set_bool_info(const sm_instance_t *smi, attr_set_t *pattrs,\n                                unsigned int attr_index, bool val)\n{\n    bool *info;\n    int rc;\n\n    info = malloc(sizeof(bool));\n    if (info == NULL)\n        return -ENOMEM;\n\n    *info = val;\n\n    rc = set_sm_info(smi, pattrs, attr_index, info);\n    if (rc)\n        free(info);\n\n    return rc;\n}\n\n/** helper to set uint attr */\nstatic inline int set_uint_info(const sm_instance_t *smi, attr_set_t *pattrs,\n                                unsigned int attr_index, unsigned int val)\n{\n    unsigned int *info;\n    int rc;\n\n    info = malloc(sizeof(unsigned int));\n    if (info == NULL)\n        return -ENOMEM;\n\n    *info = val;\n\n    rc = set_sm_info(smi, pattrs, attr_index, info);\n    if (rc)\n        free(info);\n\n    return rc;\n}\n\n/** Helper to run a configurable action. */\nint action_helper(const policy_action_t *action, const char *name,\n                  const entry_id_t *p_id, attr_set_t *p_attrs,\n                  const action_params_t *params, struct sm_instance *smi,\n                  GString *out, post_action_e *after,\n                  db_cb_func_t db_cb_fn, void *db_cb_arg);\n\n/* ---- Public module interface ---- */\n\nconst char *mod_get_name(void);\n\nint mod_get_version(void);\n\nstatus_manager_t *mod_get_status_manager(void);\n\naction_func_t mod_get_action(const char *action_name);\n\naction_scheduler_t *mod_get_scheduler(const char *sched_name);\n#endif\n"
  },
  {
    "path": "src/modules/modeguard.c",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2017 Board of Trustees, Leland Stanford Jr. University\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n/**\n * \\file modeguard.c\n * \\brief modeguard to enforce some file/directory access mode\n *\n * The purpose of this module is to enforce useful permission bits,\n * like directory setgid bit on project folders.\n *\n * It maintains a status (ok/invalid).\n */\n#ifdef HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n\n#include \"status_manager.h\"\n#include \"rbh_logs.h\"\n#include \"rbh_misc.h\"\n#include \"mod_internal.h\"\n\n#define TAG \"modeguard\"\n\n/* config block name */\n#define MODEGUARD_BLOCK \"modeguard_config\"\n#define SET_MASK_PARAM \"set_mask\"\n#define CLEAR_MASK_PARAM \"clear_mask\"\n\n/* config block default values */\n#define DEFAULT_SET_MASK 02000\n#define DEFAULT_CLEAR_MASK 0\n\ntypedef struct modeguard_config_t {\n    mode_t set_mask;\n    mode_t clear_mask;\n} modeguard_config_t;\n\n/**\n * modeguard config is global for now\n * @TODO config per SM instance (policy)\n */\nstatic modeguard_config_t config;\n\n/** set of managed status */\ntypedef enum {\n    STATUS_OK = 0,      /* checked and access mode is OK */\n    STATUS_INVALID = 1, /* checked and access mode is invalid */\n\n    STATUS_COUNT = 2,   /* number of possible statuses */\n} modeguard_status_t;\n\nstatic const char *modeguard_status_list[] = {\n    [STATUS_OK] = \"ok\",\n    [STATUS_INVALID] = \"invalid\",\n};\n\nstatic const char *modeguard_status2str(modeguard_status_t st)\n{\n    switch (st) {\n    case STATUS_OK:\n    case STATUS_INVALID:\n        return 
modeguard_status_list[st];\n    default:\n        return NULL;\n    }\n}\n\n/** helper to set the entry status for the given SMI */\nstatic int set_modeguard_status(sm_instance_t *smi, attr_set_t *pattrs,\n                                modeguard_status_t st)\n{\n    return set_status_attr(smi, pattrs, modeguard_status2str(st));\n}\n\n/**\n * Get the status for an entry.\n * \\param[in] p_id pointer to entry id\n * \\param[in] attrs_in pointer to entry attributes\n * \\param[out] p_attrs_changed changed/retrieved attributes\n */\nstatic int modeguard_status(struct sm_instance *smi,\n                         const entry_id_t *p_id, const attr_set_t *p_attrs_in,\n                         attr_set_t *p_attrs_changed)\n{\n    mode_t mode;\n    int status;\n\n    /* check if mode is provided (mandatory) */\n    if (!ATTR_MASK_TEST(p_attrs_in, mode)) {\n        DisplayLog(LVL_MAJOR, TAG,\n                   DFID \": missing 'mode' attr for checking entry status\",\n                   PFID(p_id));\n        return -EINVAL;\n    }\n\n    mode = ATTR(p_attrs_in, mode);\n\n    DisplayLog(LVL_DEBUG, TAG, \"status path=%s mode=%o\",\n               ATTR(p_attrs_in, fullpath), mode);\n\n    if (((mode & config.set_mask) == config.set_mask) &&\n        ((mode & config.clear_mask) == 0))\n        status = STATUS_OK;\n    else\n        status = STATUS_INVALID;\n\n    return set_modeguard_status(smi, p_attrs_changed, status);\n}\n\n/** the only action supported by modeguard to restore permissions */\nstatic int modeguard_enforce_mode(const entry_id_t *p_entry_id,\n                                  attr_set_t *p_attrs,\n                                  const action_params_t *params,\n                                  post_action_e *after,\n                                  db_cb_func_t db_cb_fn, void *db_cb_arg)\n{\n    mode_t mode;\n    mode_t fixed_mode;\n    const char *path;\n\n    *after = PA_NONE;\n\n    /* check if mode is provided (mandatory) */\n    if 
(!ATTR_MASK_TEST(p_attrs, mode)) {\n        DisplayLog(LVL_MAJOR, TAG,\n                   DFID \": missing 'mode' attr for checking entry status\",\n                   PFID(p_entry_id));\n        return -EINVAL;\n    }\n    /* check if fullpath is provided (mandatory) */\n    if (!ATTR_MASK_TEST(p_attrs, fullpath)) {\n        DisplayLog(LVL_MAJOR, TAG,\n                   DFID \": missing 'fullpath' attr for checking entry status\",\n                   PFID(p_entry_id));\n        return -EINVAL;\n    }\n\n    mode = ATTR(p_attrs, mode); /* in robinhood we trust */\n    path = ATTR(p_attrs, fullpath);\n\n    fixed_mode = (mode | config.set_mask) & ~config.clear_mask;\n    if (mode != fixed_mode) {\n        DisplayLog(LVL_EVENT, TAG,\n                   \"Fixing invalid mode %04o to %04o for '%s'\", mode,\n                   fixed_mode, path);\n        if (chmod(path, fixed_mode) != 0 && errno != ENOENT) {\n            DisplayLog(LVL_MAJOR, TAG,\n                       \"chmod %04o failed on '%s' with error %s\", fixed_mode,\n                       path, strerror(errno));\n            return -errno;\n        }\n    }\n    return 0;\n}\n\n#ifdef HAVE_CHANGELOGS\n/** changelog callback */\nstatic int modeguard_cl_cb(struct sm_instance *smi, const CL_REC_TYPE *logrec,\n                           const entry_id_t *id, const attr_set_t *attrs,\n                           attr_set_t *refreshed_attrs, bool *getit,\n                           proc_action_e *rec_action)\n{\n    /* we are only interested in object creation or setattr (chmod) events */\n    if (logrec->cr_type == CL_MKDIR || logrec->cr_type == CL_CREATE ||\n        logrec->cr_type == CL_SETATTR) {\n        *getit = true; /* update status */\n    }\n    return 0;\n}\n#endif /* HAVE_CHANGELOGS */\n\n/** modeguard_config */\n\nstatic int parse_param_octal(const char *str, const char *param_name,\n                             mode_t *val_out)\n{\n    char *tmp;\n\n    /* interpret octal notations (base 8) */\n    
errno = 0;\n    *val_out = strtol(str, &tmp, 8);\n    if (tmp == str || *tmp != '\\0' || errno == ERANGE) {\n        DisplayLog(LVL_MAJOR, MODEGUARD_BLOCK, \"Error: invalid value for '%s'\",\n                   param_name);\n        return -EINVAL;\n    }\n    return 0;\n}\n\nstatic void modeguard_cfg_write_template(FILE *output)\n{\n    print_begin_block(output, 0, MODEGUARD_BLOCK, NULL);\n    print_line(output, 1, \"# enforced mode bits on directories\");\n    print_line(output, 1, \"set_mask = \\\"%04o\\\"\", DEFAULT_SET_MASK);\n    print_line(output, 1, \"clear_mask = \\\"%04o\\\"\", DEFAULT_CLEAR_MASK);\n    print_end_block(output, 0);\n}\n\nstatic void *modeguard_cfg_new(void)\n{\n    return calloc(1, sizeof(modeguard_config_t));\n}\n\nstatic void modeguard_cfg_free(void *cfg)\n{\n    free(cfg);\n}\n\nstatic void modeguard_cfg_set_default(void *module_config)\n{\n    modeguard_config_t *conf = module_config;\n\n    conf->set_mask = DEFAULT_SET_MASK;\n    conf->clear_mask = DEFAULT_CLEAR_MASK;\n}\n\nstatic int modeguard_cfg_set(void *cfg, bool reload)\n{\n    modeguard_config_t *new = cfg;\n\n    config = *new;\n    return 0;\n}\n\nstatic void modeguard_cfg_write_default(FILE *output)\n{\n    print_begin_block(output, 0, MODEGUARD_BLOCK, NULL);\n    print_line(output, 1, \"set_mask: \\\"%04o\\\"\", DEFAULT_SET_MASK);\n    print_line(output, 1, \"clear_mask: \\\"%04o\\\"\", DEFAULT_CLEAR_MASK);\n    print_end_block(output, 0);\n}\n\nstatic int modeguard_cfg_read(config_file_t config, void *module_config,\n                              char *msg_out)\n{\n    modeguard_config_t *conf = module_config;\n    char set_mask_str[8] = \"\";\n    char clear_mask_str[8] = \"\";\n    config_item_t block;\n    int rc;\n\n    const cfg_param_t modeguard_params[] = {\n        {SET_MASK_PARAM, PT_STRING, 0, set_mask_str, sizeof(set_mask_str)},\n        {CLEAR_MASK_PARAM, PT_STRING, 0, clear_mask_str,\n         sizeof(clear_mask_str)},\n        END_OF_PARAMS\n    };\n\n    
static const char *allowed_params[] = {\n        SET_MASK_PARAM, CLEAR_MASK_PARAM, NULL\n    };\n\n    /* get modeguard_config block */\n    rc = get_cfg_block(config, MODEGUARD_BLOCK, &block, msg_out);\n    if (rc)\n        return rc == ENOENT ? 0 : rc;   /* not mandatory */\n\n    /* read parameters */\n    rc = read_scalar_params(block, MODEGUARD_BLOCK, modeguard_params, msg_out);\n    if (rc)\n        return rc;\n\n    CheckUnknownParameters(block, MODEGUARD_BLOCK, allowed_params);\n\n    if (*set_mask_str && !parse_param_octal(set_mask_str, SET_MASK_PARAM,\n                                            &conf->set_mask))\n        DisplayLog(LVL_DEBUG, MODEGUARD_BLOCK, \"%s set to %04o\",\n                   SET_MASK_PARAM, conf->set_mask);\n    else\n        DisplayLog(LVL_DEBUG, MODEGUARD_BLOCK, \"%s not set\", SET_MASK_PARAM);\n\n    if (*clear_mask_str && !parse_param_octal(clear_mask_str, CLEAR_MASK_PARAM,\n                                              &conf->clear_mask))\n        DisplayLog(LVL_DEBUG, MODEGUARD_BLOCK, \"%s set to %04o\",\n                   CLEAR_MASK_PARAM, conf->clear_mask);\n    else\n        DisplayLog(LVL_DEBUG, MODEGUARD_BLOCK, \"%s not set\", CLEAR_MASK_PARAM);\n\n    return 0;\n}\n\n\nstatic const mod_cfg_funcs_t modeguard_cfg_hdlr = {\n    .module_name = \"modeguard\",\n    .new = modeguard_cfg_new,\n    .free = modeguard_cfg_free,\n    .set_default = modeguard_cfg_set_default,\n    .read = modeguard_cfg_read,\n    .set_config = modeguard_cfg_set,\n    .write_default = modeguard_cfg_write_default,\n    .write_template = modeguard_cfg_write_template,\n};\n\n/** Status manager for modeguard */\nstatic status_manager_t modeguard_sm = {\n    .name = \"modeguard\",\n    .flags = 0,\n    .status_enum = modeguard_status_list,\n    .status_count = STATUS_COUNT,\n    .status_needs_attrs_fresh = { .std = ATTR_MASK_mode },\n    .get_status_func = modeguard_status,\n#ifdef HAVE_CHANGELOGS\n    .changelog_cb = modeguard_cl_cb,\n#endif\n    
.cfg_funcs = &modeguard_cfg_hdlr,\n};\n\n/* ======= PUBLIC FUNCTIONS ======= */\nconst char *mod_get_name(void)\n{\n    return modeguard_sm.name;\n}\n\nstatus_manager_t *mod_get_status_manager(void)\n{\n    return &modeguard_sm;\n}\n\naction_func_t mod_get_action(const char *action_name)\n{\n    if (strcmp(action_name, \"modeguard.enforce_mode\") == 0)\n        return modeguard_enforce_mode;\n    else\n        return NULL;\n}\n"
  },
  {
    "path": "src/modules/sched_ratelimit.c",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2017 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n#ifdef HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n\n#include \"mod_internal.h\"\n#include \"policy_run.h\"\n#include \"rbh_misc.h\"\n\n#define SCHED_NAME \"rate_limit\"\n\n/**\n * Default # and size of tokens (operations) granted per time interval.\n */\n#define TBF_DEFAULT_CAPACITY 100\n#define TBF_DEFAULT_SIZE     0 /* unlimited */\n#define TBF_DEFAULT_MAX_WAIT_COUNTS     3600 /* unlimited */\n\n/**\n * Default period (in milliseconds) between two consecutive refills.\n */\n#define TBF_DEFAULT_REFILL_PERIOD_MS 1000\n\n\n/**\n * Token Bucket Filter (TBF) based rate-limit scheduler configuration\n */\ntypedef struct sched_tbf_config {\n    int     count_capacity;\n    int64_t size_capacity;\n    int     refill_period;\n    int     max_wait_counts;\n} sched_tbf_config_t;\n\n/** internal state for TBF rate limiter */\nstruct sched_tbf_state {\n    sched_tbf_config_t  cfg;\n    pthread_rwlock_t    rwlock;\n    struct timespec     refill;\n    int                 count_tokens;\n    int                 wait_counts;\n    int64_t             size_tokens;\n};\n\n/**\n * This is the function used everywhere in this module to retrieve current time.\n * We are looking for something monotonic and as fast as possible.\n */\nstatic int getclock(struct timespec *clk)\n{\n    int rc;\n\n    rc = clock_gettime(CLOCK_MONOTONIC_COARSE, clk);\n    if (rc)\n        return -errno;\n\n    return 0;\n}\n\nstatic int sched_tbf_init(void *config, void **p_sched_data)\n{\n    struct sched_tbf_state *state;\n    sched_tbf_config_t *cfg 
= config;\n    int rc;\n\n    if (!config)\n        return -EINVAL;\n\n    state = malloc(sizeof(*state));\n    if (!state)\n        return -ENOMEM;\n\n    pthread_rwlock_init(&state->rwlock, NULL);\n\n    rc = getclock(&state->refill);\n    if (rc)\n        goto out_free;\n\n    state->cfg = *cfg;\n    state->count_tokens = TBF_DEFAULT_CAPACITY;\n    state->size_tokens = TBF_DEFAULT_SIZE;\n    state->wait_counts = TBF_DEFAULT_MAX_WAIT_COUNTS;\n\n    *p_sched_data = state;\n\nout_free:\n    if (rc)\n        free(state);\n\n    return rc;\n}\n\nstatic int sched_tbf_reset(void *sched_data)\n{\n    struct sched_tbf_state *state = sched_data;\n    struct timespec now;\n\n    pthread_rwlock_wrlock(&state->rwlock);\n    state->count_tokens = state->cfg.count_capacity;\n    state->size_tokens = state->cfg.size_capacity;\n    state->wait_counts = state->cfg.max_wait_counts;\n    getclock(&now);\n    state->refill = now;\n    pthread_rwlock_unlock(&state->rwlock);\n    return 0;\n}\n\n/**\n * Return the number of milliseconds elapsed between stop and start.\n * Can be negative if stop < start (but you won't do that).\n */\nstatic long timediff(const struct timespec *start, const struct timespec *stop)\n{\n    long res;\n\n    res = (stop->tv_sec - start->tv_sec) * 1000;\n    res += (stop->tv_nsec - start->tv_nsec) / 1000000;\n    return res;\n}\n\n#define ATOMIC_DEC(_x, _n)   (__sync_fetch_and_sub(&(_x), _n))\n\nstatic int sched_tbf_schedule(void *sched_data, const entry_id_t *id,\n                              const attr_set_t *attrs, sched_cb_t cb,\n                              void *udata)\n{\n    struct sched_tbf_state *state = sched_data;\n    struct timespec now;\n    long diff;\n    bool force_release = false;\n\n    getclock(&now);\n\n    pthread_rwlock_rdlock(&state->rwlock);\n    diff = timediff(&state->refill, &now);\n    pthread_rwlock_unlock(&state->rwlock);\n\n    if (diff < state->cfg.refill_period)\n        goto proceed;\n\n    /* lock and recheck in case 
someone else did it while we were waiting */\n    pthread_rwlock_wrlock(&state->rwlock);\n    getclock(&now);\n    diff = timediff(&state->refill, &now);\n    if (diff >= state->cfg.refill_period) {\n        state->count_tokens = state->cfg.count_capacity;\n        state->size_tokens += state->cfg.size_capacity;\n\n        if (state->size_tokens > state->cfg.size_capacity)\n            state->size_tokens = state->cfg.size_capacity;\n\n        if (state->cfg.max_wait_counts > 0 && state->wait_counts <= 0) {\n            force_release = true;\n            DisplayLog(LVL_DEBUG, SCHED_NAME,\n                      \"Delayed too many actions, releasing scheduler\");\n        }\n\n        state->refill = now;\n    }\n    pthread_rwlock_unlock(&state->rwlock);\n\nproceed:\n    /* check if configured limits are reached */\n    if (!force_release && (state->cfg.count_capacity > 0)\n        && (state->count_tokens <= 0)) {\n        DisplayLog(LVL_DEBUG, SCHED_NAME,\n                  \"Throttling after %d actions per %dms happened\",\n                  state->cfg.count_capacity, state->cfg.refill_period);\n        ATOMIC_DEC(state->wait_counts, 1) ;\n        return SCHED_DELAY;\n    }\n\n    if (!force_release && (state->cfg.size_capacity > 0)\n        && (state->size_tokens <= 0)) {\n        char buf[128];\n\n        FormatFileSize(buf, sizeof(buf), state->cfg.size_capacity);\n        DisplayLog(LVL_DEBUG, SCHED_NAME,\n                  \"Throttling after %s per %dms happened\", buf,\n                  state->cfg.refill_period);\n        ATOMIC_DEC(state->wait_counts, 1) ;\n        return SCHED_DELAY;\n    }\n\n    pthread_rwlock_wrlock(&state->rwlock);\n    state->wait_counts = state->cfg.max_wait_counts;\n    pthread_rwlock_unlock(&state->rwlock);\n    /* Enough credits, invoke the action callback */\n    if (state->cfg.count_capacity > 0)\n        ATOMIC_DEC(state->count_tokens, 1);\n    if (state->cfg.size_capacity > 0 && ATTR_MASK_TEST(attrs, size))\n        
ATOMIC_DEC(state->size_tokens, ATTR(attrs, size));\n    cb(udata, SCHED_OK);\n    return SCHED_OK;\n}\n\n/* ------------- configuration management functions ---------- */\n\nstatic void *sched_tbf_cfg_new(void)\n{\n    return calloc(1, sizeof(sched_tbf_config_t));\n}\n\nstatic void sched_tbf_cfg_free(void *cfg)\n{\n    free(cfg);\n}\n\nstatic void sched_tbf_cfg_set_default(void *module_config)\n{\n    sched_tbf_config_t *conf = module_config;\n\n    conf->count_capacity = TBF_DEFAULT_CAPACITY;\n    conf->size_capacity = TBF_DEFAULT_SIZE;\n    conf->refill_period = TBF_DEFAULT_REFILL_PERIOD_MS;\n}\n\nstatic void sched_tbf_cfg_write_default(int indent, FILE *output)\n{\n    print_begin_block(output, indent, SCHED_NAME, NULL);\n    print_line(output, indent + 1, \"max_count:  %d\", TBF_DEFAULT_CAPACITY);\n    print_line(output, indent + 1, \"max_size:   0 (unlimited)\");\n    print_line(output, indent + 1, \"period_ms:  %d\",\n               TBF_DEFAULT_REFILL_PERIOD_MS);\n    print_line(output, indent + 1, \"max_waits:   0 (unlimited)\");\n    print_end_block(output, indent);\n}\n\nstatic void sched_tbf_cfg_write_template(int indent, FILE *output)\n{\n    print_begin_block(output, indent, SCHED_NAME, NULL);\n    print_line(output, indent + 1, \"# max actions per refill period\");\n    print_line(output, indent + 1, \"max_count = 100;\");\n    print_line(output, indent + 1, \"# max size per refill period\");\n    print_line(output, indent + 1, \"max_size  = 10GB;\");\n    print_line(output, indent + 1, \"# refill period in milliseconds\");\n    print_line(output, indent + 1, \"period_ms = 5000;\");\n    print_line(output, indent + 1, \"# Max waits count, to avoid timeouts\");\n    print_line(output, indent + 1, \"max_waits = 3600;\");\n    print_end_block(output, indent);\n}\n\n/** get a 'rate_limit' sublock from the policy parameters */\nstatic int sched_tbf_cfg_read_from_block(config_item_t parent, void *cfg,\n                                         char 
*msg_out)\n{\n    sched_tbf_config_t *conf = cfg;\n    static const char *const allowed_params[] = { \"max_count\", \"max_size\",\n                                                  \"period_ms\", \"max_waits\",\n                                                  NULL };\n    const cfg_param_t tbf_params[] = {\n        {\"max_count\", PT_INT, PFLG_POSITIVE, &conf->count_capacity, 0},\n        {\"max_size\", PT_SIZE, PFLG_POSITIVE, &conf->size_capacity, 0},\n        {\"period_ms\",   PT_INT,  PFLG_POSITIVE | PFLG_NOT_NULL,\n                     &conf->refill_period, 0},\n        {\"max_waits\", PT_INT, PFLG_POSITIVE, &conf->max_wait_counts, 0},\n        END_OF_PARAMS\n    };\n    config_item_t block;\n    int rc;\n\n    /* get 'rate_limit' subblock */\n    rc = get_cfg_subblock(parent, SCHED_NAME, &block, msg_out);\n    if (rc)\n        return rc == ENOENT ? 0 : rc;   /* not mandatory */\n\n    /* read std parameters */\n    rc = read_scalar_params(block, SCHED_NAME, tbf_params, msg_out);\n    if (rc)\n        return rc;\n\n    CheckUnknownParameters(block, SCHED_NAME, allowed_params);\n    return 0;\n}\n\nstatic int sched_tbf_cfg_update(void *sched_data, void *cfg)\n{\n    struct sched_tbf_state *state = sched_data;\n    sched_tbf_config_t *new = cfg;\n\n    state->cfg = *new;\n    return 0;\n}\n\n/** configuration handlers for \"rate_limit\" scheduler */\nstatic const ctx_cfg_funcs_t sched_tbf_cfg_funcs = {\n    .module_name     = SCHED_NAME\" scheduler\",\n    .new             = sched_tbf_cfg_new,\n    .free            = sched_tbf_cfg_free,\n    .set_default     = sched_tbf_cfg_set_default,\n    .read_from_block = sched_tbf_cfg_read_from_block,\n    .update          = sched_tbf_cfg_update,\n    .write_default   = sched_tbf_cfg_write_default,\n    .write_template  = sched_tbf_cfg_write_template,\n};\n\n/** \"rate_limit\" scheduler definition */\naction_scheduler_t sched_tbf = {\n    .sched_name         = SCHED_NAME,\n    .sched_cfg_funcs    = &sched_tbf_cfg_funcs,\n 
   .sched_init_func    = sched_tbf_init,\n    .sched_reset_func   = sched_tbf_reset,\n    .sched_schedule     = sched_tbf_schedule,\n    /* size is needed for size bucket */\n    .sched_attr_mask    = { .std = ATTR_MASK_size, },\n};\n"
  },
  {
    "path": "src/modules/shook.c",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2009 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n#ifdef HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n\n#define SHOOK_TAG   \"Shook\"\n\n#include \"rbh_misc.h\"\n#include \"rbh_logs.h\"\n#include \"backup.h\"\n#include <shook_svr.h>\n\nstatic file_status_t shook2rbh_status(shook_state st)\n{\n    switch (st) {\n    case SS_ONLINE:\n        return STATUS_SYNCHRO;  /* may also be dirty or archiving... */\n    case SS_RELEASED:\n        return STATUS_RELEASED;\n    case SS_RELEASE_PEND:\n        return STATUS_RELEASE_PENDING;\n    case SS_RESTORE_PEND:\n        return STATUS_RESTORE_RUNNING;\n    case SS_LOST:\n        return STATUS_UNKNOWN;\n        /* handle restripe opreation as a restore */\n    case SS_RESTRIPE_PEND:\n        return STATUS_RESTORE_RUNNING;\n    default:\n        return (file_status_t)-1;\n    }\n}\n\n/**\n * Get status of entry regarding 'shook' system\n * and convert it to robinhood status.\n * @return 0 on success, <0 on error.\n */\nint rbh_shook_status(const char *path, file_status_t *p_status)\n{\n    shook_state st;\n    int rc;\n\n    if (shook_get_status(path, &st, FALSE) != 0) {\n        rc = -errno;\n        DisplayLog(LVL_CRIT, SHOOK_TAG, \"ERROR getting state of %s: %s\",\n                   path, strerror(-rc));\n        return rc;\n    }\n\n    if (st != SS_ONLINE)\n        DisplayLog(LVL_FULL, SHOOK_TAG,\n                   \"shook indicates '%s' status is '%s'\",\n                   path, shook_attr_val[st]);\n\n    *p_status = shook2rbh_status(st);\n    if (*p_status == (file_status_t)-1) {\n        DisplayLog(LVL_CRIT, 
SHOOK_TAG,\n                   \"ERROR getting state of %s: unknown status %d\", path,\n                   (int)st);\n        return -EINVAL;\n    }\n    return 0;\n}\n\nint rbh_shook_recov_by_id(const entry_id_t *p_id, file_status_t *p_status)\n{\n    int rc;\n    shook_state st;\n\n    rc = shook_recov_pending(get_fsname(), p_id, &st, 0);\n    if (rc < 0)\n        return rc;\n\n    *p_status = shook2rbh_status(st);\n    if (*p_status == (file_status_t)-1) {\n        DisplayLog(LVL_CRIT, SHOOK_TAG,\n                   \"ERROR getting recovering \" DFID \": unknown status %d\",\n                   PFID(p_id), (int)st);\n        return -EINVAL;\n    }\n    return 0;\n}\n"
  },
  {
    "path": "src/modules/test_sched.c",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2017 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n/**\n * This module contains dummy schedulers for testing.\n */\n#ifdef HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n\n#include \"mod_internal.h\"\n#include \"policy_run.h\"\n\n#include <unistd.h>\n\n/** scheduler configuration */\ntypedef struct sched_sleep_config {\n    unsigned int    sleep_time;\n} sched_sleep_config_t;\n\nstruct sched_sleep_state {\n    sched_sleep_config_t cfg;\n};\n\nstatic int sched_sleep_init(void *config, void **p_sched_data)\n{\n    struct sched_sleep_state *state;\n    sched_sleep_config_t *cfg = config;\n\n    if (!config)\n        return -EINVAL;\n\n    state = calloc(1, sizeof(*state));\n    if (!state)\n        return -ENOMEM;\n\n    state->cfg = *cfg;\n    *p_sched_data = state;\n    return 0;\n}\n\nstatic int sched_sleep_reset(void *sched_data)\n{\n    return 0;\n}\n\nstatic int sched_sleep_schedule(void *sched_data, const entry_id_t *id,\n                              const attr_set_t *attrs, sched_cb_t cb,\n                              void *udata)\n{\n    struct sched_sleep_state *state = sched_data;\n\n    if (state->cfg.sleep_time > 0)\n        sleep(state->cfg.sleep_time);\n\n    cb(udata, SCHED_OK);\n    return 0;\n}\n\n/* ------------- configuration management functions ---------- */\n\n/** configuration block name for max_per_run scheduler */\n#define SCHED_SLEEP_BLOCK \"sleeper\"\n\nstatic void *sched_sleep_cfg_new(void)\n{\n    return calloc(1, sizeof(sched_sleep_config_t));\n}\n\nstatic void sched_sleep_cfg_free(void *cfg)\n{\n    free(cfg);\n}\n\nstatic void 
sched_sleep_cfg_set_default(void *module_config)\n{\n    sched_sleep_config_t *conf = module_config;\n\n    conf->sleep_time = 1;\n}\n\nstatic void sched_sleep_cfg_write_default(int indent, FILE *output)\n{\n    print_begin_block(output, indent, SCHED_SLEEP_BLOCK, NULL);\n    print_line(output, indent + 1, \"sleep_time: 1s\");\n    print_end_block(output, indent);\n}\n\nstatic void sched_sleep_cfg_write_template(int indent, FILE *output)\n{\n    print_begin_block(output, indent, SCHED_SLEEP_BLOCK, NULL);\n    print_line(output, indent + 1, \"# sleep time (sec)\");\n    print_line(output, indent + 1, \"sleep_time = 1;\");\n    print_end_block(output, indent);\n}\n\n/** get a 'sleeper' sublock from the policy parameters */\nstatic int sched_sleep_cfg_read_from_block(config_item_t parent, void *cfg,\n                                         char *msg_out)\n{\n    int rc;\n    sched_sleep_config_t *conf = cfg;\n    config_item_t       block;\n\n    const cfg_param_t sleep_params[] = {\n        {\"sleep_time\", PT_DURATION, PFLG_POSITIVE, &conf->sleep_time, 0},\n        END_OF_PARAMS\n    };\n\n    static const char * const allowed_params[] = {\n        \"sleep_time\", NULL\n    };\n\n    /* get 'sleeper' subblock */\n    rc = get_cfg_subblock(parent, SCHED_SLEEP_BLOCK, &block, msg_out);\n    if (rc)\n        return rc == ENOENT ? 
0 : rc;   /* not mandatory */\n\n    /* read std parameters */\n    rc = read_scalar_params(block, SCHED_SLEEP_BLOCK, sleep_params, msg_out);\n    if (rc)\n        return rc;\n\n    CheckUnknownParameters(block, SCHED_SLEEP_BLOCK, allowed_params);\n\n    return 0;\n}\n\nstatic int sched_sleep_cfg_update(void *sched_data, void *cfg)\n{\n    sched_sleep_config_t *new = cfg;\n    struct sched_sleep_state *state = sched_data;\n\n    state->cfg = *new;\n    return 0;\n}\n\n/** configuration handlers for \"max_per_run\" scheduler */\nstatic const ctx_cfg_funcs_t sched_sleep_cfg_funcs = {\n    .module_name     = \"sleeper scheduler\",\n    .new             = sched_sleep_cfg_new,\n    .free            = sched_sleep_cfg_free,\n    .set_default     = sched_sleep_cfg_set_default,\n    .read_from_block = sched_sleep_cfg_read_from_block,\n    .update          = sched_sleep_cfg_update,\n    .write_default   = sched_sleep_cfg_write_default,\n    .write_template  = sched_sleep_cfg_write_template,\n};\n\n/** \"max_per_run\" scheduler definition */\nstatic action_scheduler_t sched_sleep = {\n    .sched_name         = \"sleeper\",\n    .sched_cfg_funcs    = &sched_sleep_cfg_funcs,\n    .sched_init_func    = sched_sleep_init,\n    .sched_reset_func   = sched_sleep_reset,\n    .sched_attr_mask    = {0},\n    .sched_schedule     = sched_sleep_schedule,\n};\n\n/** get a common scheduler by name */\naction_scheduler_t *mod_get_scheduler(const char *sched_name)\n{\n    if (strcmp(sched_name, \"test.sleeper\") == 0)\n        return &sched_sleep;\n\n    return NULL;\n}\n"
  },
  {
    "path": "src/policies/Makefile.am",
    "content": "AM_CFLAGS= $(CC_OPT) $(DB_CFLAGS) $(PURPOSE_CFLAGS)\n\nnoinst_LTLIBRARIES=libpolicies.la\n\nlibpolicies_la_SOURCES=policy_matching.c policy_loader.c policy_triggers.c \\\n                       policy_run_cfg.c status_manager.c run_policies.h \\\n\t\t       policy_run.c policy_sched.c policy_sched.h\n"
  },
  {
    "path": "src/policies/policy_loader.c",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2009, 2010 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n*/\n\n/**\n * policy management\n */\n#ifdef HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n\n#include \"policy_rules.h\"\n#include \"rbh_logs.h\"\n#include \"rbh_misc.h\"\n#include \"rbh_modules.h\"\n#include \"rbh_cfg_helpers.h\"\n#include \"status_manager.h\"\n#include <errno.h>\n#include <fnmatch.h>\n\n#define FILESETS_SECTION      \"Filesets\"\n#define FILESET_BLOCK         \"FileClass\"\n\n#define POLICY_DECLARATION    \"define_policy\"\n\n#define OLD_PURGEPOLICY_BLOCK     \"purge_policies\"\n#define OLD_MIGRPOLICY_BLOCK      \"migration_policies\"\n#define OLD_UNLINKPOLICY_BLOCK    \"hsm_remove_policy\"\n\n#define OLD_RULE_BLOCK        \"policy\"\n#define RULE_BLOCK            \"rule\"\n\n#define OLD_POLICIES_BLOCK    \"policies\"\n#define POLICIES_BLOCK        \"rules\"\n\n#define OLD_ACT_PARAMS        \"hints\"\n#define ACT_PARAMS            \"action_params\"\n#define OLD_ACT_PARAMS_SFX    \"_\"OLD_ACT_PARAMS\n#define ACT_PARAMS_SFX        \"_\"ACT_PARAMS\n\n#define IGNORE_BLOCK          \"ignore\"\n#define IGNORE_FC             \"ignore_fileclass\"\n#define CONDITION_BLOCK       \"condition\"\n#define DEFINITION_BLOCK      \"definition\"\n#define SCOPE_BLOCK           \"scope\"\n\n//#define RMDIR_BLOCK      \"rmdir_policy\"\n#define RM_RECURSE_BLOCK \"recursive_rmdir\"\n\n#define RELOAD_TAG  \"PolicyReload\"\n#define CHK_TAG     \"PolicyCheck\"\n#define LOADER_TAG  \"PolicyLoader\"\n\nstatic const policies_t policy_initializer = { 0 };\npolicies_t policies = { 0 };\n\n#define critical_err_check(_ptr_, 
_blkname_) do { if (!_ptr_) { \\\n            sprintf(msg_out, \"Internal error reading %s block in config file\", \\\n                    _blkname_); \\\n            return EFAULT; \\\n        }\\\n    } while (0)\n\n#define critical_err_check_goto(_ptr_, _blkname_, _rc, _label) \\\n    do { \\\n        if (!_ptr_) {\\\n            sprintf(msg_out, \"Internal error reading %s block in config file\", \\\n                    _blkname_); \\\n            (_rc) = EFAULT; \\\n            goto _label; \\\n    }} while (0)\n\n/** Update whitelist rules */\n/* @TODO support whitelist rules update on SIGHUP */\nstatic void __attribute__ ((__unused__))\n    update_whitelist(whitelist_item_t *old_items, unsigned int old_count,\n                 whitelist_item_t *new_items, unsigned int new_count,\n                 const char *block_name)\n{\n    unsigned int i;\n\n    if (old_count != new_count) {\n        DisplayLog(LVL_MAJOR, RELOAD_TAG,\n                   \"Whitelist rules count changed in block '%s' but cannot be modified dynamically: whitelist update cancelled\",\n                   block_name);\n        return;\n    }\n\n    /* compare whitelist boolean expression structure */\n    for (i = 0; i < new_count; i++) {\n        if (!attr_mask_equal(&old_items[i].attr_mask, &new_items[i].attr_mask)\n            || compare_boolexpr(&old_items[i].bool_expr,\n                                &new_items[i].bool_expr)) {\n            DisplayLog(LVL_MAJOR, RELOAD_TAG,\n                       \"Whitelist expression #%u changed in block '%s'. \"\n                       \"Only numerical values can be modified dynamically. 
\"\n                       \"Whitelist update cancelled\", i, block_name);\n            return;\n        }\n    }\n\n    /* if they are all the same, update/check their values */\n\n    for (i = 0; i < new_count; i++) {\n        if (update_boolexpr(&old_items[i].bool_expr, &new_items[i].bool_expr)) {\n            char criteriastr[2048];\n            BoolExpr2str(&old_items[i].bool_expr, criteriastr, 2048);\n            DisplayLog(LVL_EVENT, RELOAD_TAG,\n                       \"Whitelist expression #%u in block '%s' has been \"\n                       \"updated and is now: %s\", i, block_name, criteriastr);\n        }\n    }\n\n    /* XXX attr_mask is unchanged, since we keep the same expression\n     * structures */\n}\n\nstatic void free_whitelist(whitelist_item_t *p_items, unsigned int count)\n{\n    unsigned int i;\n\n    /* free boolean expressions */\n    for (i = 0; i < count; i++) {\n        FreeBoolExpr(&p_items[i].bool_expr, false);\n    }\n\n    if ((count > 0) && (p_items != NULL))\n        free(p_items);\n}\n\n/**\n * Analyze a policy action parameter and set policy_action_t accordingly.\n * @param[in]     name         parameter name.\n * @param[in]     value        parameter value.\n * @param[in]     extra        array of extra arguments.\n * @param[in]     extra_cnt    item count in extra array.\n * @param[out]    action       policy_action_t to be filled.\n * @param[in,out] mask         Pointer to the attribute mask of placeholders\n *                             in action command line.\n * @return 0 on success, an error code on error.\n */\nint parse_policy_action(const char *name, const char *value,\n                        char **extra, unsigned int extra_cnt,\n                        policy_action_t *action,\n                        attr_mask_t *mask, char *msg_out)\n{\n    if (!strcasecmp(value, \"none\")) {\n        if (extra_cnt != 0) {\n            sprintf(msg_out, \"No extra argument is expected for '%s = %s'\",\n                    name, 
value);\n            return EINVAL;\n        }\n\n        action->type = ACTION_NONE;\n    } else if (!strcasecmp(value, \"cmd\")) {\n        attr_mask_t m;\n        bool error = false;\n        GError *err_desc = NULL;\n        int i;\n\n        /* external command */\n        /* 1 single argument expected */\n        if (extra_cnt != 1) {\n            sprintf(msg_out,\n                    \"A single argument is expected for cmd. E.g.: %s = cmd(\\\"myscript.sh\\\");\",\n                    name);\n            return EINVAL;\n        }\n        action->type = ACTION_COMMAND;\n        if (!g_shell_parse_argv(extra[0], NULL,\n                                &action->action_u.command, &err_desc)) {\n            sprintf(msg_out, \"Could not parse command %s: %s\\n\",\n                    extra[0], err_desc->message);\n            g_error_free(err_desc);\n            return EINVAL;\n        }\n\n        /* Get attribute mask for this command, in case it contains attribute\n         * placeholder */\n        for (i = 0; action->action_u.command[i]; i++) {\n            m = params_mask(action->action_u.command[i], name, &error);\n            if (error) {\n                sprintf(msg_out, \"Unexpected parameters in %s cmd\", name);\n                return EINVAL;\n            }\n            *mask = attr_mask_or(mask, &m);\n        }\n    } else {    /* <module>.<action_name> expected */\n\n        if (extra_cnt != 0) {\n            sprintf(msg_out, \"No extra argument is expected for '%s = %s'\",\n                    name, value);\n            return EINVAL;\n        }\n        action->type = ACTION_FUNCTION;\n        action->action_u.func.call = module_get_action(value);\n        if (action->action_u.func.call == NULL) {\n            sprintf(msg_out, \"%s: unknown function '%s'\", name, value);\n            return EINVAL;\n        }\n        action->action_u.func.name = strdup(value);\n        if (action->action_u.func.name == NULL)\n            return ENOMEM;\n    }\n\n   
 return 0;\n}\n\n/** duplicate a string and convert it to lower case */\nstatic char *strdup_lower(const char *str)\n{\n    char *out;\n\n    out = strdup(str);\n    if (!out)\n        return NULL;\n\n    /* convert to lower case */\n    lowerstr(out);\n\n    return out;\n}\n\n/**\n * Check if a policy name exists in a given policy set.\n * @param[in]  p_pols the list of policies to search in\n * @param[in]  name   the policy name to search for\n * @param[out] index  index of the matching policy in the given list\n */\nstatic bool _policy_exists(const policies_t *p_pols, const char *name,\n                           int *index)\n{\n    int i;\n\n    for (i = 0; i < p_pols->policy_count; i++) {\n        if (!strcasecmp(name, p_pols->policy_list[i].name)) {\n            if (index != NULL)\n                *index = i;\n            return true;\n        }\n    }\n    return false;\n}\n\n/** Search for a policy name in the global (current) list */\nbool policy_exists(const char *name, int *index)\n{\n    return _policy_exists(&policies, name, index);\n}\n\nstatic int parse_policy_decl(config_item_t config_blk, const char *block_name,\n                             policy_descr_t *policy, bool *manage_deleted,\n                             const policies_t *pols, char *msg_out)\n{\n    int rc, prev;\n    const char *name;\n    char tmpstr[1024];\n    attr_mask_t mask;\n    char **extra = NULL;\n    unsigned int extra_cnt = 0;\n    bool unique;\n    config_item_t sub_item;\n\n    static const char *expect[] = {\n        \"status_manager\", \"scope\", \"default_action\",\n        \"default_lru_sort_attr\", \"status_current\",\n        NULL\n    };\n\n    name = rh_config_GetBlockId(config_blk);\n    if (!name) {\n        strcpy(msg_out, \"Missing name for '\" POLICY_DECLARATION \"' block \"\n               \"(ex: \" POLICY_DECLARATION \" my_policy { ...\");\n        return EINVAL;\n    }\n\n    /* check the policy is not already defined */\n    if (_policy_exists(pols, 
name, &prev)) {\n        sprintf(msg_out, \"Duplicate definition of policy '%s'.\", name);\n        return EINVAL;\n    }\n\n    if (strlen(name) > POLICY_NAME_LEN - 1) {\n        sprintf(msg_out, \"Policy name is too long (max: %u).\",\n                POLICY_NAME_LEN - 1);\n        return EINVAL;\n    }\n    rh_strncpy(policy->name, name, sizeof(policy->name));\n\n    /* read and parse default_action */\n    rc = GetStringParam(config_blk, block_name, \"default_action\",\n                        PFLG_MANDATORY, tmpstr, sizeof(tmpstr), &extra,\n                        &extra_cnt, msg_out);\n    if (rc)\n        return rc;\n\n    rc = parse_policy_action(\"default_action\", tmpstr, extra, extra_cnt,\n                             &policy->default_action,\n                             &policy->rules.run_attr_mask, msg_out);\n    if (rc)\n        return rc;\n\n    extra = NULL;\n    extra_cnt = 0;\n    rc = GetStringParam(config_blk, block_name, \"status_manager\",\n                        PFLG_MANDATORY | PFLG_NO_WILDCARDS, tmpstr,\n                        sizeof(tmpstr), &extra, &extra_cnt, msg_out);\n    if (rc == ENOENT)\n        strcat(msg_out,\n               \"\\nIf you don't need a status manager, you should explicitely specify: status_manager=none\");\n    if (rc != 0)\n        return rc;\n\n    if (!strcasecmp(tmpstr, \"none\")) {\n        policy->status_mgr = NULL;\n        if (extra_cnt > 0) {\n            sprintf(msg_out,\n                    \"No argument expected after 'status_manager = none': found '%s'\",\n                    extra[0]);\n            return EINVAL;\n        }\n    } else {\n        policy->status_mgr = create_sm_instance(policy->name, tmpstr);\n        if (policy->status_mgr == NULL) {\n            sprintf(msg_out, \"Could not load status manager '%s'\", tmpstr);\n            return EINVAL;\n        }\n\n        if (extra_cnt > 1) {    /* max 1 argument expected */\n            sprintf(msg_out,\n                    \"Too many arguments 
(%d) found for status_manager parameter '%s', in block '%s %s'.\",\n                    extra_cnt, tmpstr, block_name, name);\n            return EINVAL;\n        } else if (extra_cnt == 1) {\n            /* special values 'removed' or 'deleted' means the policy applies\n             * to deleted files */\n            if (!strcasecmp(extra[0], \"removed\")\n                || !strcasecmp(extra[0], \"deleted\")) {\n                /* the status manager must handle them */\n                if (!smi_manage_deleted(policy->status_mgr)) {\n                    sprintf(msg_out,\n                            \"'%s' is specified for status manager '%s' whereas\"\n                            \" it cannot handle deleted entries.\", extra[0],\n                            tmpstr);\n                    return EINVAL;\n                }\n                policy->manage_deleted = true;\n                *manage_deleted = true;\n                policy->implements = strdup_lower(extra[0]);\n            }\n            /* does the status manager support this action? 
*/\n            else if (smi_support_action(policy->status_mgr, extra[0])) {\n                /* save the implemented action in policy */\n                policy->implements = strdup_lower(extra[0]);\n            } else {\n                sprintf(msg_out,\n                        \"status manager '%s' does not support action '%s' in block '%s %s'.\",\n                        tmpstr, extra[0], block_name, name);\n                return EINVAL;\n            }\n        }\n        /* extra_cnt == 0 */\n        else if (smi_multi_action(policy->status_mgr)) {\n            sprintf(msg_out,\n                    \"Missing mandatory argument for status_manager '%s' in block '%s %s': implemented action.\",\n                    tmpstr, block_name, name);\n            return ENOENT;\n        }\n    }\n\n    /* smi must be set to call str2lru_attr */\n    extra = NULL;\n    extra_cnt = 0;\n    rc = GetStringParam(config_blk, block_name, \"default_lru_sort_attr\",\n                        PFLG_NO_WILDCARDS | PFLG_MANDATORY, tmpstr,\n                        sizeof(tmpstr), &extra, &extra_cnt, msg_out);\n    if (rc)\n        return rc;\n\n    /* check extra parameter (allowed values are \"asc\" or \"desc\"). */\n    if (extra_cnt > 1) {\n        sprintf(msg_out, \"Too many parameters found for default_lru_sort_attr = \"\n                         \" '%s' in block '%s': '(asc)' or '(desc)' expected\",\n                         tmpstr, block_name);\n        return EINVAL;\n    } else if (extra_cnt == 0) {\n        /* Default to \"asc\" if no parameter is specified. */\n        policy->default_lru_sort_order = SORT_ASC;\n    } else {\n        rc = str2sort_order(extra[0]);\n        if (rc < 0) {\n            sprintf(msg_out, \"Invalid sort order '%s' in block '%s':\"\n                    \" 'asc' or 'desc' expected\", extra[0], block_name);\n            return EINVAL;\n        }\n        policy->default_lru_sort_order = rc;\n    }\n\n    /* is it a supported attribute? 
*/\n    rc = str2lru_attr(tmpstr, policy->status_mgr);\n    if (rc == LRU_ATTR_INVAL) {\n        strcpy(msg_out, \"Attribute not supported for 'default_lru_sort_attr': \"\n               \"Expected: \"ALLOWED_LRU_ATTRS_STR \"...\");\n        return EINVAL;\n    } else\n        policy->default_lru_sort_attr = rc;\n\n    /* get scope parameter */\n    unique = true;\n    sub_item = rh_config_GetItemByName(config_blk, \"scope\", &unique);\n\n    if (sub_item == NULL) {\n        sprintf(msg_out, \"Missing mandatory parameter 'scope' in block '%s %s'\",\n                block_name, name);\n        return ENOENT;\n    }\n    if (!unique) {\n        sprintf(msg_out,\n                \"Duplicate scope declaration in block '%s %s', line %d.\",\n                block_name, name, rh_config_GetItemLine(sub_item));\n        return EEXIST;\n    }\n\n    if (rh_config_ItemType(sub_item) == CONFIG_ITEM_VAR) {\n        char *vname;\n        char *value;\n        int extra_args = 0;\n\n        rc = rh_config_GetKeyValue(sub_item, &vname, &value, &extra_args);\n        if (rc)\n            return EINVAL;\n\n        if (strcasecmp(value, \"all\") != 0) {\n            sprintf(msg_out,\n                    \"Sub-block (or 'scope = all') is expected for '%s' item in block '%s %s', line %d\",\n                    \"scope\", block_name, name, rh_config_GetItemLine(sub_item));\n            return EINVAL;\n        }\n\n        if (extra_args) {\n            sprintf(msg_out,\n                    \"Unexpected argument after 'scope = all' in block '%s %s', line %d\",\n                    block_name, name, rh_config_GetItemLine(sub_item));\n            return EINVAL;\n        }\n\n        ConstantBoolExpr(true, &policy->scope);\n        policy->scope_mask = null_mask;\n    } else if (rh_config_ItemType(sub_item) == CONFIG_ITEM_BLOCK) {\n        /* analyze boolean expression */\n        /* pass the status manager instance to interpret status condition\n         * depending on the context */\n     
   mask = null_mask;\n        rc = GetBoolExpr(sub_item, SCOPE_BLOCK, &policy->scope, &mask,\n                         msg_out, policy->status_mgr);\n        if (rc)\n            return rc;\n\n        policy->scope_mask = mask;\n    } else {\n        sprintf(msg_out, \"Sub-block (or 'scope = all') is expected for '%s' \"\n                \"item in block '%s %s', line %d\", \"scope\", block_name, name,\n                rh_config_GetItemLine(sub_item));\n        return EINVAL;\n    }\n\n    /* get status_current parameter (necessary to check status of outstanding\n     * actions) */\n    rc = GetStringParam(config_blk, block_name, \"status_current\",\n                        PFLG_NO_WILDCARDS, tmpstr, sizeof(tmpstr),\n                        &extra, &extra_cnt, msg_out);\n    if (rc == 0) {\n        if (policy->status_mgr == NULL) {\n            sprintf(msg_out,\n                    \"Can't specify a 'status_current' parameter without a status manager, \"\n                    \"in block '%s %s', line %d\", block_name, name,\n                    rh_config_GetItemLine(sub_item));\n            return EINVAL;\n        }\n\n        policy->status_current = get_status_str(policy->status_mgr->sm, tmpstr);\n        if (policy->status_current == NULL) {\n            sprintf(msg_out, \"Invalid value for 'status_current' parameter in \"\n                    \"block '%s %s', line %d: '%s' (valid status expected)\",\n                    block_name, name, rh_config_GetItemLine(sub_item), tmpstr);\n            return EINVAL;\n        }\n    } else if (rc != ENOENT)\n        return rc;\n\n    CheckUnknownParameters(config_blk, block_name, expect);\n    return 0;\n}\n\nstatic int read_policy_definitions(config_file_t config, policies_t *pol,\n                                   char *msg_out)\n{\n    unsigned int blc_index;\n    int rc;\n\n    for (blc_index = 0; blc_index < rh_config_GetNbBlocks(config);\n         blc_index++) {\n        char *block_name;\n        config_item_t 
curr_item = rh_config_GetBlockByIndex(config, blc_index);\n        critical_err_check(curr_item, \"root\");\n\n        if (rh_config_ItemType(curr_item) != CONFIG_ITEM_BLOCK)\n            continue;\n\n        block_name = rh_config_GetBlockName(curr_item);\n        critical_err_check(block_name, \"root\");\n\n        if (!strcasecmp(block_name, POLICY_DECLARATION)) {\n            bool manage_deleted = false;\n\n            if (pol->policy_count == 0)\n                pol->policy_list =\n                    (policy_descr_t *) malloc(sizeof(policy_descr_t));\n            else\n                pol->policy_list = (policy_descr_t *) realloc(pol->policy_list,\n                                                              (pol->\n                                                               policy_count +\n                                                               1) *\n                                                              sizeof\n                                                              (policy_descr_t));\n\n            /* zero it */\n            memset(&pol->policy_list[pol->policy_count], 0,\n                   sizeof(policy_descr_t));\n\n            /* analyze policy declaration */\n            rc = parse_policy_decl(curr_item, block_name,\n                                   &pol->policy_list[pol->policy_count],\n                                   &manage_deleted, pol, msg_out);\n            if (rc)\n                return rc;\n\n            if (manage_deleted)\n                pol->manage_deleted = 1;\n\n            pol->policy_count++;\n        }\n    }\n    return 0;\n}\n\nstatic int write_default_filesets(FILE *output)\n{\n    print_begin_block(output, 0, FILESETS_SECTION, NULL);\n    print_line(output, 1, \"# none\");\n    print_end_block(output, 0);\n\n    return 0;\n}\n\n#if 0\nstatic int write_default_policy(FILE *output, policy_type_t policy_type)\n{\n    if (policy_type == PURGE_POLICY)\n        print_begin_block(output, 0, 
PURGEPOLICY_BLOCK, NULL);\n    else if (policy_type == MIGR_POLICY)\n        print_begin_block(output, 0, MIGRPOLICY_BLOCK, NULL);\n\n    print_line(output, 1, \"# none\");\n    print_end_block(output, 0);\n    return 0;\n}\n#endif\n\nstatic int write_template_filesets(FILE *output)\n{\n    fprintf(output, \"#### Fileclasses definitions ####\\n\\n\");\n    print_begin_block(output, 0, FILESET_BLOCK, \"Experiment_A\");\n\n    print_begin_block(output, 1, DEFINITION_BLOCK, NULL);\n    print_line(output, 3, \"tree == \\\"/mnt/lustre/dir_A\\\"\");\n    print_end_block(output, 1);\n\n#ifdef HAVE_MIGR_POLICY\n    print_line(output, 1,\n               \"# arbitrary parameters to pass to the migration command\");\n    print_line(output, 1, \"migration_action_params {\");\n    print_line(output, 1, \"    cos      = 3;\");\n    print_line(output, 1, \"    priority = 2;\");\n    print_line(output, 1, \"}\");\n#endif\n#ifdef _LUSTRE_HSM\n    print_line(output, 1, \"# target archive\");\n    print_line(output, 1, \"lhsm_archive_action_params { archive_id = 1; }\");\n#endif\n    print_end_block(output, 0);\n\n    fprintf(output, \"\\n\");\n\n    print_begin_block(output, 0, FILESET_BLOCK, \"visualization\");\n\n    print_begin_block(output, 1, DEFINITION_BLOCK, NULL);\n    print_line(output, 3, \"tree == \\\"/mnt/lustre/dir_*\\\"\");\n    print_line(output, 3, \"and\");\n    print_line(output, 3, \"xattr.user.tag_visu == 1\");\n    print_end_block(output, 1);\n\n#ifdef HAVE_MIGR_POLICY\n    print_line(output, 1,\n               \"# arbitrary parameters to pass to the migration command\");\n    print_line(output, 1, \"migration_action_params {\");\n    print_line(output, 1, \"    cos      = 4;\");\n    print_line(output, 1, \"    priority = 5;\");\n    print_line(output, 1, \"}\");\n#endif\n#ifdef _LUSTRE_HSM\n    fprintf(output, \"\\n\");\n    print_line(output, 1, \"# target archive\");\n    print_line(output, 1, \"lhsm_archive_action_params { archive_id = 2 ; }\");\n#endif\n   
 print_end_block(output, 0);\n    fprintf(output, \"\\n\");\n\n#ifdef _LUSTRE\n    print_begin_block(output, 0, FILESET_BLOCK, \"pool_ssd\");\n\n    print_begin_block(output, 1, DEFINITION_BLOCK, NULL);\n    print_line(output, 3, \"ost_pool == \\\"ssd*\\\"\");\n    print_end_block(output, 1);\n    print_end_block(output, 0);\n    fprintf(output, \"\\n\");\n    print_begin_block(output, 0, FILESET_BLOCK, \"ost_set\");\n    print_begin_block(output, 1, DEFINITION_BLOCK, NULL);\n    print_line(output, 3, \"# condition on ost_index is true\");\n    print_line(output, 3, \"# if one of the storage objects of the file\");\n    print_line(output, 3, \"# matches each condition:\");\n    print_line(output, 3, \"# ost_index == 15 or ost_index == 20\");\n    print_line(output, 3, \"# => a part of the file must be on OST 15 or 20\");\n    print_line(output, 3, \"# ost_index == 15 and ost_index == 20\");\n    print_line(output, 3,\n               \"# => the file must have objects at least on OST 15 and 20\");\n    print_line(output, 3, \"# ost_index != 12 and ost_index != 13\");\n    print_line(output, 3,\n               \"# => the file must not have objects on OSTs 12 and 13\");\n    print_line(output, 3, \"ost_index == 1 or ost_index == 2 or\");\n    print_line(output, 3, \"ost_index == 1 or ost_index == 2 or\");\n    print_line(output, 3, \"ost_index == 3 or ost_index == 4\");\n    print_end_block(output, 1);\n    print_end_block(output, 0);\n    fprintf(output, \"\\n\");\n#endif\n\n    print_line(output, 0, \"# defining fileclass as a union or intersection:\");\n    print_begin_block(output, 0, FILESET_BLOCK, \"visu_expA\");\n    print_begin_block(output, 1, DEFINITION_BLOCK, NULL);\n    print_line(output, 2, \"visualization inter Experiment_A\");\n    print_end_block(output, 1);\n    print_end_block(output, 0);\n    fprintf(output, \"\\n\");\n\n    return 0;\n}\n\n#if 0\n#ifdef HAVE_MIGR_POLICY\n\nstatic int write_migration_policy_template(FILE *output)\n{\n\n    
fprintf(output, \"#### Migration policies (archiving) ####\\n\\n\");\n\n    print_begin_block(output, 0, MIGRPOLICY_BLOCK, NULL);\n\n    print_begin_block(output, 1, POLICY_BLOCK, \"standard_copy\");\n\n    print_line(output, 2, \"target_fileclass = experiment_A;\");\n    print_line(output, 2, \"target_fileclass = pool_ssd;\");\n    fprintf(output, \"\\n\");\n\n    print_line(output, 2,\n               \"# Copy a file 6hours after its creation if it as never been archived.\");\n    print_line(output, 2, \"# For next changes, archive it daily.\");\n    print_line(output, 2,\n               \"# In all cases, do nothing when it has been modified too recently (-30min).\");\n    print_begin_block(output, 2, CONDITION_BLOCK, NULL);\n    print_line(output, 3, \"((last_archive == 0 and creation > 6h) \");\n    print_line(output, 3, \"  or last_archive > 1d)\");\n    print_line(output, 3, \"and last_mod > 30min\");\n    print_end_block(output, 2);\n\n#ifdef _LUSTRE_HSM\n    fprintf(output, \"\\n\");\n    print_line(output, 2,\n               \"# target archive (/!\\\\ policy archive_id overrides fileset archive_id)\");\n    print_line(output, 2, \"lhsm_archive_hints = \" archive_id = 3 \" ;\");\n#endif\n\n    print_end_block(output, 1);\n    fprintf(output, \"\\n\");\n\n    print_begin_block(output, 1, POLICY_BLOCK, \"visu_copy\");\n\n    print_line(output, 2, \"target_fileclass = visualization ;\");\n    fprintf(output, \"\\n\");\n\n    print_line(output, 2,\n               \"# copy those files quickly after they have been modified\");\n    print_line(output, 2, \"# or if they have been archived more that 6h ago\");\n    print_line(output, 2, \"# (if they are still beeing modified)\");\n\n    print_begin_block(output, 2, CONDITION_BLOCK, NULL);\n    print_line(output, 3, \"last_mod > 1h\");\n    print_line(output, 3, \"or\");\n    print_line(output, 3, \"last_archive > 6h\");\n    print_end_block(output, 2);\n\n    print_end_block(output, 1);\n    fprintf(output, 
\"\\n\");\n\n    print_line(output, 1, \"# Default migration policy.\");\n    print_line(output, 1,\n               \"# This applies to files that don't match previous fileclasses, i.e:\");\n    print_line(output, 1, \"#   - don't match the 'ignore' block\");\n    print_line(output, 1,\n               \"#   - don't match a fileclass of 'ignore_fileclass' directives\");\n    print_line(output, 1,\n               \"#   - don't match any 'target_fileclass' of migration policies above\");\n    print_begin_block(output, 1, POLICY_BLOCK, \"default\");\n\n    print_begin_block(output, 2, CONDITION_BLOCK, NULL);\n    print_line(output, 3, \"last_mod > 12h\");\n    print_end_block(output, 2);\n\n#ifdef _LUSTRE_HSM\n    fprintf(output, \"\\n\");\n    print_line(output, 2, \"# target archive\");\n    print_line(output, 2, \"lhsm_archive_hints = \" archive_id = 2 \" ;\");\n#endif\n\n    print_end_block(output, 1);\n\n    print_end_block(output, 0);\n    fprintf(output, \"\\n\");\n\n    return 0;\n}\n#endif\n\n#ifdef HAVE_PURGE_POLICY\n#ifndef _LUSTRE_HSM\n/* Template for NON-lustre-HSM purposes */\nstatic int write_purge_policy_template(FILE *output)\n{\n    print_begin_block(output, 0, PURGEPOLICY_BLOCK, NULL);\n\n    print_line(output, 1,\n               \"# Do not purge files accessed recently, or whose size is 0,\");\n    print_line(output, 1,\n               \"# or located in \\\"/mnt/lustre/system_files\\\" directory.\");\n    print_begin_block(output, 1, IGNORE_BLOCK, NULL);\n    print_line(output, 2, \"last_access < 1h\");\n    print_line(output, 2, \"or size == 0\");\n    print_line(output, 2, \"or tree == \\\"/mnt/lustre/system_files\\\"\");\n    print_end_block(output, 1);\n\n    fprintf(output, \"\\n\");\n\n#ifdef _LUSTRE\n    print_line(output, 1, \"# do not purge files in FileClass \\\"pool_ssd\\\"\");\n    print_line(output, 1, IGNORE_FC \" = pool_ssd ;\");\n    fprintf(output, \"\\n\");\n#endif\n\n    print_line(output, 1, \"# Purge files of class 
'Experiment_A' after 24h\");\n    print_begin_block(output, 1, POLICY_BLOCK, \"expA_1day\");\n\n    print_line(output, 2, \"target_fileclass = Experiment_A ;\");\n\n    print_begin_block(output, 2, CONDITION_BLOCK, NULL);\n    print_line(output, 3, \"last_mod > 24h\");\n    print_line(output, 3, \"and\");\n    print_line(output, 3, \"last_access > 6h\");\n    print_end_block(output, 2);\n\n    print_end_block(output, 1);\n    fprintf(output, \"\\n\");\n\n    print_line(output, 1,\n               \"# Release files of class 'visualization' after 1 month,\");\n    print_line(output, 1, \"# if they are not accessed for more that 3 days\");\n    print_begin_block(output, 1, POLICY_BLOCK, \"visu_1month\");\n\n    print_line(output, 2, \"target_fileclass = visualization ;\");\n\n    print_begin_block(output, 2, CONDITION_BLOCK, NULL);\n    print_line(output, 3, \"last_mod > 30d\");\n    print_line(output, 3, \"and\");\n    print_line(output, 3, \"last_access > 3d\");\n    print_end_block(output, 2);\n\n    print_end_block(output, 1);\n    fprintf(output, \"\\n\");\n\n    print_line(output, 1, \"# Default purge policy.\");\n    print_line(output, 1,\n               \"# This applies to files that don't match previous fileclasses, i.e:\");\n    print_line(output, 1, \"#   - don't match the 'ignore' block\");\n    print_line(output, 1,\n               \"#   - don't match a fileclass of 'ignore_fileclass' directives\");\n    print_line(output, 1,\n               \"#   - don't match any 'target_fileclass' of purge policies above\");\n    print_begin_block(output, 1, POLICY_BLOCK, \"default\");\n\n    print_begin_block(output, 2, CONDITION_BLOCK, NULL);\n    print_line(output, 3, \"last_access > 12h\");\n    print_end_block(output, 2);\n\n    print_end_block(output, 1);\n\n    print_end_block(output, 0);\n    fprintf(output, \"\\n\");\n\n    return 0;\n}\n\n#else\n\nstatic int write_purge_policy_template(FILE *output)\n{\n    fprintf(output, \"#### Purge policies (space release) 
####\\n\\n\");\n\n    print_begin_block(output, 0, PURGEPOLICY_BLOCK, NULL);\n\n    print_line(output, 1,\n               \"# do not purge files owned by \\\"foo\\\" or \\\"charlie\\\"\");\n    print_begin_block(output, 1, IGNORE_BLOCK, NULL);\n    print_line(output, 2, \"owner == \\\"foo\\\"\");\n    print_line(output, 2, \"or\");\n    print_line(output, 2, \"owner == \\\"charlie\\\"\");\n    print_end_block(output, 1);\n    fprintf(output, \"\\n\");\n\n    print_line(output, 1, \"# do not purge files in FileClass \\\"pool_ssd\\\"\");\n    print_line(output, 1, IGNORE_FC \" = pool_ssd ;\");\n    fprintf(output, \"\\n\");\n\n    print_begin_block(output, 1, POLICY_BLOCK, \"Experiment_A_purge\");\n\n    print_line(output, 2, \"target_fileclass = experiment_A ;\");\n    fprintf(output, \"\\n\");\n\n    print_line(output, 2,\n               \"# purge files not accessed within the last 6 hours,\");\n    print_line(output, 2, \"# and not copied-in within the last 12 hours\");\n    print_line(output, 2, \"# and copied-out more that 2 hours ago\");\n\n    print_begin_block(output, 2, CONDITION_BLOCK, NULL);\n    print_line(output, 3, \"last_access > 6h\");\n    print_line(output, 3, \"and\");\n    print_line(output, 3, \"last_restore > 12h\");\n    print_line(output, 3, \"and\");\n    print_line(output, 3, \"last_archive > 2h\");\n    print_end_block(output, 2);\n\n    print_end_block(output, 1);\n    fprintf(output, \"\\n\");\n\n    print_begin_block(output, 1, POLICY_BLOCK, \"visu_purge\");\n\n    print_line(output, 2, \"target_fileclass = visualization ;\");\n    fprintf(output, \"\\n\");\n\n    print_line(output, 2, \"# purge files not accessed within the last day,\");\n    print_line(output, 2, \"# or modified during the week\");\n\n    print_begin_block(output, 2, CONDITION_BLOCK, NULL);\n    print_line(output, 3, \"last_access > 1d\");\n    print_line(output, 3, \"and\");\n    print_line(output, 3, \"last_mod > 7d\");\n    print_end_block(output, 2);\n\n    
print_end_block(output, 1);\n    fprintf(output, \"\\n\");\n\n    print_line(output, 1, \"# Default purge policy.\");\n    print_line(output, 1,\n               \"# This applies to files that don't match previous fileclasses, i.e:\");\n    print_line(output, 1, \"#   - don't match the 'ignore' block\");\n    print_line(output, 1,\n               \"#   - don't match a fileclass of 'ignore_fileclass' directives\");\n    print_line(output, 1,\n               \"#   - don't match any 'target_fileclass' of purge policies above\");\n    print_begin_block(output, 1, POLICY_BLOCK, \"default\");\n\n    print_begin_block(output, 2, CONDITION_BLOCK, NULL);\n    print_line(output, 3, \"last_access > 12h\");\n    print_end_block(output, 2);\n\n    print_end_block(output, 1);\n\n    print_end_block(output, 0);\n\n    fprintf(output, \"\\n\");\n\n    return 0;\n}\n#endif /* HSM switch */\n#endif /* purge policy */\n#endif /* 0 */\n\naction_params_t *get_fileset_policy_params(const fileset_item_t *fileset,\n                                           const char *policy_name)\n{\n    action_params_t *params;\n    char *key;\n\n    if (fileset->policy_action_params == NULL)\n        return NULL;\n\n    /* convert policy name to lower case */\n    key = strdup_lower(policy_name);\n    params = g_hash_table_lookup(fileset->policy_action_params, key);\n    free(key);\n\n#ifdef _DEBUG_POLICIES\n    fprintf(stderr, \"Founds parameters for policy '%s' in fileset '%s'\\n\",\n            policy_name, fileset->fileset_id);\n#endif\n\n    return params;\n}\n\n/**\n * Get an allocated action_params the given fileset and policy.\n * @retval NULL if memory allocation fails.\n */\nstatic action_params_t *alloc_policy_params(fileset_item_t *fset,\n                                            const char *policy_name,\n                                            char *msg_out)\n{\n    action_params_t *params = NULL;\n\n    if (fset->policy_action_params == NULL) {\n        /* allocate an empty hash 
table */\n        fset->policy_action_params =\n            g_hash_table_new_full(g_str_hash, g_str_equal, free,\n                                  (GDestroyNotify) rbh_params_free);\n\n        if (fset->policy_action_params == NULL)\n            return NULL;\n    } else\n        params = get_fileset_policy_params(fset, policy_name);\n\n    if (params == NULL) {\n        /* allocate and add parameters for this policy, if they don't exist */\n        params = calloc(1, sizeof(action_params_t));\n        if (!params)\n            return NULL;\n\n#ifdef _DEBUG_POLICIES\n        fprintf(stderr, \"Creating parameters for policy '%s' in fileset '%s'\\n\",\n                policy_name, fset->fileset_id);\n#endif\n        g_hash_table_insert(fset->policy_action_params,\n                            strdup_lower(policy_name), params);\n    }\n\n    return params;\n}\n\n/**\n * Fill a action_params_t structure from a config block.\n * @param[in]     param_block the action_params configuration block.\n * @param[in,out] params      pointer to action_params_t to be filled.\n * @param[in,out] mask        pointer to the attribute mask of placeholders\n *                            in action param values.\n */\nint read_action_params(config_item_t param_block, action_params_t *params,\n                       attr_mask_t *mask, char *msg_out)\n{\n    int i, rc;\n    bool error = false;\n\n    /* iterate on key/values of an action_params block */\n    for (i = 0; i < rh_config_GetNbItems(param_block); i++) {\n        config_item_t sub_item = rh_config_GetItemByIndex(param_block, i);\n        char *subitem_name;\n        char *value;\n        char *descr;\n        attr_mask_t m;\n        int extra = 0;\n\n        rc = rh_config_GetKeyValue(sub_item, &subitem_name, &value, &extra);\n        if (rc)\n            return rc;\n        if (extra) {\n            sprintf(msg_out,\n                    \"Unexpected extra argument for parameter '%s' in %s, line %u.\",\n                    
subitem_name, rh_config_GetBlockName(param_block),\n                    rh_config_GetItemLine(sub_item));\n            return EINVAL;\n        }\n#ifdef _DEBUG_POLICIES\n        fprintf(stderr, \"adding parameter[%d]: '%s'\\n\", i, subitem_name);\n#endif\n\n        /* add param to the list (don't allow duplicates) */\n        rc = rbh_param_set(params, subitem_name, value, false);\n        if (rc) {\n            if (rc == -EEXIST)\n                sprintf(msg_out, \"Duplicate key '%s' in block %s, line %d.\",\n                        subitem_name, rh_config_GetBlockName(param_block),\n                        rh_config_GetItemLine(sub_item));\n            else\n                sprintf(msg_out, \"Failed to set key %s: %s\", subitem_name,\n                        strerror(-rc));\n            return -rc;\n        }\n\n        /* build description (for logging purpose) */\n        if (asprintf(&descr, \"%s::%s parameter, line %d\",\n                     rh_config_GetBlockName(param_block), subitem_name,\n                     rh_config_GetItemLine(sub_item)) < 0)\n            return ENOMEM;\n\n        /* Get attribute mask for this parameter, in case it contains attribute\n         * placeholder */\n        m = params_mask(value, descr, &error);\n        free(descr);\n        if (error) {\n            sprintf(msg_out, \"Unexpected parameters in %s, line %u.\",\n                    rh_config_GetBlockName(param_block),\n                    rh_config_GetItemLine(sub_item));\n            return EINVAL;\n        }\n        *mask = attr_mask_or(mask, &m);\n    }\n\n    return 0;\n}\n\n/** read a <policy>_action_params block in a fileset */\nstatic int read_fset_action_params(config_item_t param_block,\n                                   const char *blk_name, fileset_item_t *fset,\n                                   policies_t *p_pols, char *msg_out)\n{\n    int rc = 0;\n    char *pol_name;\n    int pol_idx;\n    size_t sfx_len = strlen(ACT_PARAMS_SFX);\n    size_t blk_len = 
strlen(blk_name);\n    action_params_t *params;\n\n    if (blk_len < sfx_len) {\n        sprintf(msg_out, \"unexpected block name '%s' in this context, line %d: \"\n                \"<policy_name>%s expected\", blk_name,\n                rh_config_GetItemLine(param_block), ACT_PARAMS_SFX);\n        return EINVAL;\n    }\n\n    /* parse the name to get the related param structure */\n    pol_name = strdup(blk_name);\n    if (pol_name == NULL) {\n        strcpy(msg_out, \"could not allocate memory\");\n        return ENOMEM;\n    }\n\n    /* truncate ACT_PARAMS_SFX:\n     * 'xxxxx_yy': len=8, sfx_len=3\n     * zero str[5]=str[8-3]\n     */\n    pol_name[blk_len - sfx_len] = '\\0';\n\n    pol_idx = -1;\n    if (!_policy_exists(p_pols, pol_name, &pol_idx) || pol_idx == -1) {\n        sprintf(msg_out, \"No declaration found for policy '%s' \"\n                \"while processing block '%s' line %d.\", pol_name, blk_name,\n                rh_config_GetItemLine(param_block));\n        rc = ENOENT;\n        goto out_free;\n    }\n\n    params = alloc_policy_params(fset, pol_name, msg_out);\n    if (params == NULL) {\n        rc = ENOMEM;\n        goto out_free;\n    }\n#ifdef _DEBUG_POLICIES\n    fprintf(stderr, \"processing parameters '%s' for fileset '%s'\\n\", pol_name,\n            fset->fileset_id);\n#endif\n\n    rc = read_action_params(param_block, params,\n                            &p_pols->policy_list[pol_idx].rules.run_attr_mask,\n                            msg_out);\n\n out_free:\n    free(pol_name);\n    return rc;\n}\n\n/**\n * Read a [<policy>_]action_params block in a policy or a rule.\n * @param param_block   The configuration block to read from.\n * @param blk_name      Name of the configuration block.\n * @param policy        Name of the current policy.\n * @param params        The action_param struct to be filled.\n * @param[in,out] mask  Pointer to the attribute mask of placeholders\n *                      in action param values.\n * @param msg_out   
    Set to detailed error message in case of error.\n */\nstatic int read_policy_action_params(config_item_t param_block,\n                                     const char *blk_name,\n                                     const char *policy_name,\n                                     action_params_t *params,\n                                     attr_mask_t *mask, char *msg_out)\n{\n    int rc = 0;\n\n    /* Check block name: allowed values are 'action_params'\n     * and '<policy>_action_params. */\n    if (strcasecmp(blk_name, ACT_PARAMS) != 0) {\n        char *expected;\n\n        if (asprintf(&expected, \"%s\" ACT_PARAMS_SFX, policy_name) < 0)\n            return -ENOMEM;\n\n        /* expected: <policy>_action_params */\n        rc = strcasecmp(blk_name, expected);\n        free(expected);\n\n        if (rc != 0) {\n            sprintf(msg_out, \"Unexpected block name '%s' (line %u): \" ACT_PARAMS\n                    \" or %s\" ACT_PARAMS_SFX \" expected.\", blk_name,\n                    rh_config_GetItemLine(param_block), policy_name);\n            return EINVAL;\n        }\n    }\n#ifdef _DEBUG_POLICIES\n    fprintf(stderr, \"processing parameters for policy '%s'\\n\", policy_name);\n#endif\n\n    return read_action_params(param_block, params, mask, msg_out);\n}\n\n/** test if the variable name is a policy hint (deprecated) */\nstatic inline bool match_policy_action_hints(const char *s)\n{\n    return !fnmatch(\"*\" OLD_ACT_PARAMS_SFX, s, FNM_CASEFOLD)\n        || !strcasecmp(s, OLD_ACT_PARAMS);\n}\n\n/** test if the variable name is a policy action params */\nstatic inline bool match_policy_action_params(const char *s)\n{\n    return !fnmatch(\"*\" ACT_PARAMS_SFX, s, FNM_CASEFOLD)\n        || !strcasecmp(s, ACT_PARAMS);\n}\n\nstatic void free_fileclass(fileset_item_t *fset)\n{\n    /* free fileset definition */\n    FreeBoolExpr(&fset->definition, false);\n\n    /* free action params */\n    if (fset->policy_action_params != NULL) {\n        
g_hash_table_destroy(fset->policy_action_params);\n        fset->policy_action_params = NULL;\n    }\n}\n\nstatic void free_filesets(policies_t *p_policies)\n{\n    int i;\n\n    for (i = 0; i < p_policies->fileset_count; i++)\n        free_fileclass(&p_policies->fileset_list[i]);\n\n    free(p_policies->fileset_list);\n    p_policies->fileset_list = NULL;\n    p_policies->fileset_count = 0;\n}\n\n/** get fileset from name (iterate up to count) */\nstatic fileset_item_t *_get_fileset_by_name_max(const policies_t *p_policies,\n                                                const char *name, int count)\n{\n    int i;\n\n    for (i = 0; i < count; i++) {\n        if (!strcasecmp(p_policies->fileset_list[i].fileset_id, name))\n            return &p_policies->fileset_list[i];\n    }\n    return NULL;    /* not found */\n}\n\nfileset_item_t *get_fileset_by_name(const policies_t *p_policies,\n                                    const char *name)\n{\n    return _get_fileset_by_name_max(p_policies, name,\n                                    p_policies->fileset_count);\n}\n\n/** read a fileclass::definition block */\nstatic int read_fileclass_definition(config_item_t cfg_item,\n                                     fileset_item_t *fset,\n                                     policies_t *p_policies, char *msg_out)\n{\n    int rc;\n\n    /* 2 possible definition types expected: boolean expression\n     * or fileset union and/or intersection */\n    switch (rh_config_ContentType(cfg_item)) {\n    case CONFIG_ITEM_BOOL_EXPR:\n        /* analyze boolean expression */\n        rc = GetBoolExpr(cfg_item, DEFINITION_BLOCK,\n                         &fset->definition, &fset->attr_mask, msg_out, NULL);\n        if (rc)\n            return rc;\n        break;\n\n    case CONFIG_ITEM_SET:\n        /* Build a policy boolean expression from a\n         * union/intersection or fileclasses */\n        rc = GetSetExpr(cfg_item, DEFINITION_BLOCK,\n                        &fset->definition, 
&fset->attr_mask,\n                        p_policies, msg_out);\n        if (rc)\n            return rc;\n        break;\n\n    default:\n        sprintf(msg_out, \"Boolean expression or set-based definition \"\n                \"expected in block '%s', line %d\",\n                rh_config_GetBlockName(cfg_item),\n                rh_config_GetItemLine(cfg_item));\n        return EINVAL;\n    }\n\n    p_policies->global_fileset_mask =\n        attr_mask_or(&p_policies->global_fileset_mask, &fset->attr_mask);\n\n    /* @FIXME check standard attributes + sm_info of type PT_DURATION */\n    if (fset->attr_mask.std & (ATTR_MASK_last_access | ATTR_MASK_last_mod)) {\n        DisplayLog(LVL_MAJOR, CHK_TAG, \"WARNING: in FileClass '%s', line %d: \"\n                   \"time-based conditions should be specified in policy \"\n                   \"condition instead of file class definition\",\n                   fset->fileset_id, rh_config_GetItemLine(cfg_item));\n    }\n\n    return 0;\n}\n\n/** read a fileclass block */\nstatic int read_fileclass_block(config_item_t class_cfg,\n                                policies_t *p_policies, int curr_idx,\n                                char *msg_out)\n{\n    bool definition_done = false;\n    const char *class_name;\n    fileset_item_t *fset;\n    int i, rc;\n\n    /* get fileclass name */\n    class_name = rh_config_GetBlockId(class_cfg);\n\n#ifdef _DEBUG_POLICIES\n    printf(\"parsing fileclass '%s'\\n\", class_name);\n#endif\n\n    if ((class_name == NULL) || (strlen(class_name) == 0)) {\n        sprintf(msg_out, \"Fileclass name expected for block \"\n                FILESET_BLOCK \", line %d. \"\n                \"e.g. 
\" FILESET_BLOCK \" myclass { ...\",\n                rh_config_GetItemLine(class_cfg));\n        return EINVAL;\n    }\n\n    /* check that class name is not already used (up to idx-1) */\n    if (_get_fileset_by_name_max(p_policies, class_name, curr_idx) != NULL) {\n        sprintf(msg_out, \"Duplicate fileclass declaration: '%s', line %d.\",\n                class_name, rh_config_GetItemLine(class_cfg));\n        return EINVAL;\n    }\n\n    fset = &p_policies->fileset_list[curr_idx];\n\n    /* initialize the slot */\n    memset(fset, 0, sizeof(*fset));\n\n    rh_strncpy(fset->fileset_id, class_name, FILESET_ID_LEN);\n\n    /* set default */\n    fset->matchable = 1;\n\n    for (i = 0; i < rh_config_GetNbItems(class_cfg); i++) {\n        config_item_t sub_item = rh_config_GetItemByIndex(class_cfg, i);\n        char *subitem_name;\n\n        critical_err_check(sub_item, FILESET_BLOCK);\n\n        switch (rh_config_ItemType(sub_item)) {\n        case CONFIG_ITEM_BLOCK:\n            {\n                subitem_name = rh_config_GetBlockName(sub_item);\n                critical_err_check(subitem_name, FILESET_BLOCK);\n\n                if (strcasecmp(subitem_name, DEFINITION_BLOCK) == 0) {\n                    /* check double definition */\n                    if (definition_done) {\n                        sprintf(msg_out, \"Double fileclass definition in \"\n                                FILESET_BLOCK \" block, line %d.\",\n                                rh_config_GetItemLine(sub_item));\n                        return EINVAL;\n                    }\n\n                    /* read fileclass definition */\n                    rc = read_fileclass_definition(sub_item, fset,\n                                                   p_policies, msg_out);\n                    if (rc == 0)\n                        definition_done = true;\n                } else if (match_policy_action_params(subitem_name)) {\n                    /* read policy action params */\n                
    rc = read_fset_action_params(sub_item, subitem_name, fset,\n                                                 p_policies, msg_out);\n                } else {\n                    sprintf(msg_out, \"'%s' sub-block unexpected in \"\n                            FILESET_BLOCK \" block, line %d.\",\n                            subitem_name, rh_config_GetItemLine(sub_item));\n                    rc = EINVAL;\n                }\n                if (rc)\n                    return rc;\n\n                break;\n            }\n        case CONFIG_ITEM_VAR:\n            {\n                char *value = NULL;\n                int extra_args = 0;\n\n                rc = rh_config_GetKeyValue(sub_item, &subitem_name,\n                                           &value, &extra_args);\n                if (rc)\n                    return rc;\n\n                if (!strcasecmp(subitem_name, \"report\")) {\n                    if (extra_args) {\n                        sprintf(msg_out,\n                                \"Unexpected arguments for 'report' parameter, line %d.\",\n                                rh_config_GetItemLine(sub_item));\n                        return EINVAL;\n                    }\n                    int tmp = str2bool(value);\n                    if (tmp == -1) {\n                        sprintf(msg_out,\n                                \"Boolean expected for 'report' parameter, line %d.\",\n                                rh_config_GetItemLine(sub_item));\n                        return EINVAL;\n                    }\n                    fset->matchable = tmp;\n                }\n                /* manage archive_id deprecation (now in action_params) */\n                else if (!strcasecmp(subitem_name, \"archive_id\")\n                         /* for backward compat: */\n                         || !strcasecmp(subitem_name, \"archive_num\")) {\n                    sprintf(msg_out, \"archive_id parameter (line %u) must be \"\n                            
\"specified in a <policy>_action_params block.\",\n                            rh_config_GetItemLine(sub_item));\n                    return EINVAL;\n                }\n                /* is the variable of the form <policy_name>_hints ? */\n                else if (match_policy_action_hints(subitem_name)) {\n                    sprintf(msg_out,\n                            \"line %u: '<policy>_hints' parameters are no longer supported. \"\n                            \"Define a '<policy>_action_params' block instead.\",\n                            rh_config_GetItemLine(sub_item));\n                    return EINVAL;\n                } else {\n                    DisplayLog(LVL_CRIT, \"Config Check\",\n                               \"WARNING: unknown parameter '%s' in block '%s' line %d\",\n                               subitem_name, FILESET_BLOCK,\n                               rh_config_GetItemLine(sub_item));\n                }\n                break;\n            }\n        default:\n            /* unexpected content */\n            sprintf(msg_out,\n                    \"Unexpected item in \" FILESET_BLOCK \" block, line %d.\",\n                    rh_config_GetItemLine(sub_item));\n            return EINVAL;\n        }   /* switch on item type */\n\n    }   /* loop on \"fileclass\" block contents */\n\n    if (!definition_done) {\n        sprintf(msg_out,\n                \"No definition in file class '%s', line %d\", class_name,\n                rh_config_GetItemLine(class_cfg));\n        return ENOENT;\n    }\n    return 0;\n}\n\n/** Resize the fileset array by adding 'count' slots */\nstatic int add_fileset_slots(policies_t *p_policies, int count)\n{\n    if (unlikely(count <= 0))\n        return 0;\n\n    p_policies->fileset_list =\n        (fileset_item_t *) realloc(p_policies->fileset_list,\n                                   (p_policies->fileset_count +\n                                    count) * sizeof(fileset_item_t));\n    if 
(p_policies->fileset_list == NULL)\n        return ENOMEM;\n    p_policies->fileset_count += count;\n\n    return 0;\n}\n\n/** Read filesets block */\nstatic int read_filesets(config_file_t config, policies_t *p_policies,\n                         char *msg_out)\n{\n    unsigned int i;\n    int rc;\n\n    /* initialize global attributes mask and fileset list */\n    p_policies->global_fileset_mask = null_mask;\n    p_policies->fileset_list = NULL;\n    p_policies->fileset_count = 0;\n\n    /* rbh v3: allow specifying fileclass blocks in config file root */\n    for (i = 0; i < rh_config_GetNbBlocks(config); i++) {\n        char *block_name;\n        int curr_idx, j;\n        config_item_t root_block = rh_config_GetBlockByIndex(config, i);\n        critical_err_check(root_block, \"root\");\n\n        if (rh_config_ItemType(root_block) != CONFIG_ITEM_BLOCK)\n            continue;\n\n        block_name = rh_config_GetBlockName(root_block);\n        critical_err_check(block_name, \"root\");\n\n        if (!strcasecmp(block_name, FILESET_BLOCK)) {\n            /* index of the new fileset is the current count */\n            curr_idx = p_policies->fileset_count;\n\n            /* add 1 fileclass slot */\n            rc = add_fileset_slots(p_policies, 1);\n            if (rc)\n                goto clean_filesets;\n\n            /* read fileclass block contents */\n            rc = read_fileclass_block(root_block, p_policies, curr_idx,\n                                      msg_out);\n            if (rc)\n                goto clean_except_last;\n\n        } else if (!strcasecmp(block_name, FILESETS_SECTION)) {\n            int nb_sub_items; /**< nbr of fileclasses in the block */\n\n            /* first index of new filesets is the current count */\n            curr_idx = p_policies->fileset_count;\n\n            /* add as many slots as sub-blocks in Filesets blocks. 
*/\n            nb_sub_items = rh_config_GetNbItems(root_block);\n            rc = add_fileset_slots(p_policies, nb_sub_items);\n            if (rc)\n                goto clean_filesets;\n\n            for (j = 0; j < nb_sub_items; j++) {\n                char *sub_block_name;\n                config_item_t sub_item =\n                    rh_config_GetItemByIndex(root_block, j);\n                critical_err_check_goto(sub_item, FILESETS_SECTION, rc,\n                                        clean_filesets);\n\n                if (rh_config_ItemType(sub_item) != CONFIG_ITEM_BLOCK) {\n                    strcpy(msg_out,\n                           \"Only \" FILESET_BLOCK \" sub-blocks are expected in \"\n                           FILESETS_SECTION \" section\");\n                    rc = EINVAL;\n                    goto clean_except_last;\n                }\n                sub_block_name = rh_config_GetBlockName(sub_item);\n                critical_err_check_goto(sub_block_name, FILESETS_SECTION, rc,\n                                        clean_filesets);\n\n                if (!strcasecmp(sub_block_name, FILESET_BLOCK)) {\n                    /* read fileclass block contents */\n                    rc = read_fileclass_block(sub_item, p_policies,\n                                              curr_idx + j, msg_out);\n                    if (rc)\n                        goto clean_except_last;\n                } else {\n                    sprintf(msg_out,\n                            \"'%s' sub-block unexpected in %s section, line %d.\",\n                            sub_block_name, FILESETS_SECTION,\n                            rh_config_GetItemLine(sub_item));\n                    rc = EINVAL;\n                    goto clean_filesets;\n                }\n            }\n        }   /* end of \"filesets\" section */\n    }\n\n    return 0;\n\nclean_except_last:\n    /* don't try to clean the last one as it is failed/incomplete\n     * and likely already freed */\n 
   p_policies->fileset_count--;\nclean_filesets:\n    free_filesets(p_policies);\n    return rc;\n}\n\n/** parse a rule config block and fill rule_item_t structure\n * @param[in]  all_policies  Needed to check fileset definition\n * @param[in]  policy        Needed to build specific parameter name like\n                            '<policy>_hints', check status manager properties...\n * @param[in]  policy_rules  Needed to check other rule names in the policy\n * @param[out] rule          The rule structure to fill-in\n */\nstatic int parse_rule_block(config_item_t config_item,\n                            const char *block_name,\n                            const policies_t *all_policies,\n                            const policy_descr_t *policy,\n                            const policy_rules_t *policy_rules,\n                            rule_item_t *rule, char *msg_out)\n{\n    char *rule_name;\n    bool is_default = false;\n    bool has_target = false;\n    int i, j, k, rc;\n    attr_mask_t mask;\n    bool definition_done = false;\n\n    /* initialize output */\n    memset(rule, 0, sizeof(rule_item_t));\n\n    /* get policy id */\n    rule_name = rh_config_GetBlockId(config_item);\n    critical_err_check(rule_name, block_name);\n\n    /* check that this rule name is not already used in this policy */\n    for (i = 0; i < policy_rules->rule_count; i++) {\n        if (!strcasecmp(rule_name, policy_rules->rules[i].rule_id)) {\n            sprintf(msg_out,\n                    \"Rule name '%s' (line %d) is already used by another rule in the policy.\",\n                    rule_name, rh_config_GetItemLine(config_item));\n            return EINVAL;\n        }\n    }\n\n    /* is it a default block? 
*/\n    is_default = !strcasecmp(rule_name, \"default\");\n\n    /* save policy id */\n    rh_strncpy(rule->rule_id, rule_name, sizeof(rule->rule_id));\n\n    /* read block contents */\n    for (i = 0; i < rh_config_GetNbItems(config_item); i++) {\n        config_item_t sub_item = rh_config_GetItemByIndex(config_item, i);\n        critical_err_check(sub_item, block_name);\n        char *subitem_name;\n\n        if (rh_config_ItemType(sub_item) == CONFIG_ITEM_BLOCK) {\n            subitem_name = rh_config_GetBlockName(sub_item);\n            critical_err_check(subitem_name, block_name);\n\n            /* allowed blocks: action_params and condition */\n            if (match_policy_action_params(subitem_name)) {\n                /* read policy action params */\n                rc = read_policy_action_params(sub_item, subitem_name,\n                                               policy->name,\n                                               &rule->action_params,\n                                               &rule->attr_mask, msg_out);\n                if (rc)\n                    return rc;\n                continue;\n            } else if (strcasecmp(subitem_name, CONDITION_BLOCK) != 0) {\n                sprintf(msg_out,\n                        \"'%s' sub-block unexpected in %s block, line %d.\",\n                        subitem_name, block_name,\n                        rh_config_GetItemLine(sub_item));\n                return EINVAL;\n            }\n\n            /* check double condition */\n            if (definition_done) {\n                sprintf(msg_out,\n                        \"Double condition in policy rule '%s', line %d.\",\n                        rule_name, rh_config_GetItemLine(sub_item));\n                return EINVAL;\n            }\n\n            /* analyze boolean expression */\n            /* allow using 'status' related info in conditions */\n            mask = null_mask;\n            rc = GetBoolExpr(sub_item, CONDITION_BLOCK, 
&rule->condition,\n                             &mask, msg_out, policy->status_mgr);\n            if (rc)\n                return rc;\n\n            rule->attr_mask = attr_mask_or(&rule->attr_mask, &mask);\n            definition_done = true;\n        } else {    /* not a block */\n\n            char *value = NULL;\n            int extra_args = 0;\n            fileset_item_t *fs;\n\n            rc = rh_config_GetKeyValue(sub_item, &subitem_name, &value,\n                                       &extra_args);\n            if (rc) {\n                sprintf(msg_out,\n                        \"Failed to parse configuration item line %d (block or key=value expected)\",\n                        rh_config_GetItemLine(sub_item));\n                return rc;\n            }\n\n            /* expected : target filesets or action parameters */\n            if (!strcasecmp(subitem_name, \"target_fileclass\")) {\n                if (is_default) {\n                    sprintf(msg_out,\n                            \"No target_fileclass expected for default policy, line %d.\",\n                            rh_config_GetItemLine(sub_item));\n                    return EINVAL;\n                }\n\n                if (extra_args) {\n                    sprintf(msg_out,\n                            \"Unexpected arguments for %s parameter, line %d.\",\n                            subitem_name, rh_config_GetItemLine(sub_item));\n                    return EINVAL;\n                }\n\n                /* get the associated fileset */\n                fs = get_fileset_by_name(all_policies, value);\n\n                if (fs == NULL) {\n                    sprintf(msg_out,\n                            \"Policy rule references unknown fileclass '%s', line %d.\",\n                            value, rh_config_GetItemLine(sub_item));\n                    return EINVAL;\n                }\n                fs->used_in_policy = 1;\n\n                /* note: matchable is only for the fileclass in 
DB.\n                 * allow using non-matchable in policies */\n\n                /* check that the fileset is not already referenced in the\n                 * policy */\n                for (j = 0; j < policy_rules->rule_count; j++) {\n                    for (k = 0; k < policy_rules->rules[j].target_count; k++)\n                        if (fs == policy_rules->rules[j].target_list[k]) {\n                            sprintf(msg_out,\n                                    \"Fileclass '%s' is already a target of policy rule '%s'\",\n                                    value, policy_rules->rules[j].rule_id);\n                            return EINVAL;\n                        }\n                }\n\n                /* also check ignore_fileclass directives */\n                for (j = 0; j < policy_rules->ignore_count; j++) {\n                    if (fs == policy_rules->ignore_list[j]) {\n                        sprintf(msg_out,\n                                \"Fileclass '%s' is simultaneously ignored and referenced as a target for policy '%s'\",\n                                value, rule_name);\n                        return EINVAL;\n                    }\n                }\n\n                has_target = true;\n                /* append the fileset list */\n                rule->target_count++;\n                rule->target_list =\n                    (fileset_item_t **) realloc(rule->target_list,\n                                                rule->target_count *\n                                                sizeof(fileset_item_t *));\n                rule->target_list[rule->target_count - 1] = fs;\n\n                /* add fileset mask to policy mask */\n                rule->attr_mask =\n                    attr_mask_or(&rule->attr_mask, &fs->attr_mask);\n            } else if (!strcasecmp(subitem_name, \"action\")) {\n                char **extra_arg_tab = NULL;\n\n                /* get extra args if there are */\n                if (extra_args)\n    
                extra_args =\n                        rh_config_GetExtraArgs(sub_item, &extra_arg_tab);\n\n                /* action defined at the policy level: overrides policy\n                 * defaults */\n                rc = parse_policy_action(\"action\", value, extra_arg_tab,\n                                         extra_args, &rule->action,\n                                         &rule->attr_mask, msg_out);\n                if (rc)\n                    return rc;\n            }\n            /* manage action_hints deprecation (now in action_params) */\n            else if (match_policy_action_hints(subitem_name)) {\n                sprintf(msg_out,\n                        \"line %u: '%s' parameters are no longer supported. \"\n                        \"Define an 'action_params' block instead.\",\n                        rh_config_GetItemLine(sub_item), subitem_name);\n                return EINVAL;\n            }\n            /* manage archive_id deprecation (now in action_params) */\n            else if (!strcasecmp(subitem_name, \"archive_id\")\n                     /* for backward compat: */\n                     || !strcasecmp(subitem_name, \"archive_num\")) {\n                sprintf(msg_out, \"archive_id parameter (line %u) must be \"\n                        \"specified in a <policy>_action_params block.\",\n                        rh_config_GetItemLine(sub_item));\n                return EINVAL;\n            } else if (!strcasecmp(subitem_name, CONDITION_BLOCK)) {\n                if (strcasecmp(value, \"true\") != 0) {\n                    sprintf(msg_out,\n                            \"Sub-block (or 'condition = true') is expected for '%s' item in block '%s %s', line %d\",\n                            subitem_name, block_name, rule_name,\n                            rh_config_GetItemLine(sub_item));\n                    return EINVAL;\n                }\n\n                if (extra_args) {\n                    sprintf(msg_out,\n             
               \"Unexpected argument after 'condition = true' in block '%s %s', line %d\",\n                            block_name, rule_name,\n                            rh_config_GetItemLine(sub_item));\n                    return EINVAL;\n                }\n\n                /* check double condition */\n                if (definition_done) {\n                    sprintf(msg_out,\n                            \"Double condition in policy rule '%s', line %d.\",\n                            rule_name, rh_config_GetItemLine(sub_item));\n                    return EINVAL;\n                }\n\n                ConstantBoolExpr(true, &rule->condition);\n                definition_done = true;\n            } else {\n                DisplayLog(LVL_CRIT, \"Config Check\",\n                           \"WARNING: unknown parameter '%s' in block '%s' line %d\",\n                           subitem_name, block_name,\n                           rh_config_GetItemLine(sub_item));\n            }\n        }   /* end of vars */\n    }   /* loop on \"rule\" block content */\n\n    if (!definition_done) {\n        sprintf(msg_out, \"No condition specified in policy rule '%s', line %d\",\n                rule_name, rh_config_GetItemLine(config_item));\n        return EINVAL;\n    }\n    if (!has_target && !is_default) {\n        sprintf(msg_out,\n                \"No target fileclass specified in policy rule '%s', line %d \"\n                \"(or define a 'default' rule to match all entries).\", rule_name,\n                rh_config_GetItemLine(config_item));\n        return EINVAL;\n    }\n\n    return 0;\n}\n\nstatic void free_policy_action(policy_action_t *action)\n{\n    switch (action->type) {\n    case ACTION_UNSET:\n    case ACTION_NONE:\n        break;\n    case ACTION_FUNCTION:\n        free(action->action_u.func.name);\n        break;\n    case ACTION_COMMAND:\n        g_strfreev(action->action_u.command);\n        break;\n    }\n}\n\nstatic void free_rules_list(rule_item_t 
*items, int count)\n{\n    int i;\n\n    for (i = 0; i < count; i++) {\n        free(items[i].target_list);\n        FreeBoolExpr(&items[i].condition, false);\n        free_policy_action(&items[i].action);\n        rbh_params_free(&items[i].action_params);\n    }\n\n    free(items);\n}\n\nstatic void free_policy_rules(policy_rules_t *rules)\n{\n    if (rules->rules)\n        free_rules_list(rules->rules, rules->rule_count);\n    if (rules->ignore_list)\n        free(rules->ignore_list);\n    if (rules->whitelist_count > 0)\n        free_whitelist(rules->whitelist_rules, rules->whitelist_count);\n    else if (rules->whitelist_rules)    /* preallocated? */\n        free(rules->whitelist_rules);\n\n    rules->rules = NULL;\n    rules->ignore_list = NULL;\n    rules->whitelist_rules = NULL;\n    rules->whitelist_count = 0;\n}\n\nstatic void free_policy_descr(policy_descr_t *descr)\n{\n    /** FIXME free sm_instance */\n    free_policy_rules(&descr->rules);\n    FreeBoolExpr(&descr->scope, false);\n    free(descr->implements);\n    free_policy_action(&descr->default_action);\n}\n\n/* macro for preallocating array depending on configuration blocks in Read_Policy_ */\n#define PREALLOC_ARRAY_CONFIG(_block_name_, _type_, _array_var, _goto_label)   \\\ndo {                                                                           \\\n     count = rh_config_CountItemNames(section, _block_name_);                  \\\n     if (count > 0) {                                                          \\\n         rules->_array_var = (_type_ *)calloc(count, sizeof(_type_));          \\\n         if (rules->_array_var == NULL) {                                      \\\n             rc = ENOMEM;                                                      \\\n             goto _goto_label;                                                 \\\n         }                                                                     \\\n     } else if (count == 0) {                                            
      \\\n         rules->_array_var = NULL;                                             \\\n     } else {                                                                  \\\n         rc = -1;                                                              \\\n         goto _goto_label;                                                     \\\n     }                                                                         \\\n } while (0)\n\nstatic int read_policy(config_file_t config, const policies_t *p_policies,\n                       char *msg_out, policy_descr_t *policy_descr)\n{\n    unsigned int i, j, k;\n    int rc, count;\n\n    policy_rules_t *rules;\n    config_item_t section;\n    /* 16: strlen(\"_policy\") + aligned padding */\n    char section_name[POLICY_NAME_LEN + 16] = \"\";\n\n/* macros for cleaner code */\n    rules = &policy_descr->rules;\n#define curr_ign        rules->whitelist_count\n#define curr_ign_fc     rules->ignore_count\n#define curr_rule       rules->rule_count\n\n    /* check if the new name exists first */\n    snprintf(section_name, sizeof(section_name) - 1, \"%s_%s\",\n             policy_descr->name, POLICIES_BLOCK);\n    section_name[sizeof(section_name) - 1] = '\\0';\n\n    /* get policy section */\n    rc = get_cfg_block(config, section_name, &section, msg_out);\n    if (rc == ENOENT) {\n        /* try with old block name */\n        snprintf(section_name, sizeof(section_name) - 1, \"%s_%s\",\n                 policy_descr->name, OLD_POLICIES_BLOCK);\n        section_name[sizeof(section_name) - 1] = '\\0';\n\n        /* get policy section */\n        rc = get_cfg_block(config, section_name, &section, msg_out);\n        if (rc == ENOENT)\n            /* not mandatory: no error */\n            return 0;\n        else if (rc != 0)\n            return rc;\n\n        /* Deprecation warning */\n        DisplayLog(LVL_MAJOR, LOADER_TAG, \"WARNING: '*_\" OLD_POLICIES_BLOCK\n                   \"' block names are deprecated. 
Rename '%s' block to \"\n                   \"'%s_\" POLICIES_BLOCK \"'.\", section_name,\n                   policy_descr->name);\n    } else if (rc != 0)\n        return rc;\n\n    msg_out[0] = '\\0';\n\n    /* prealloc config arrays */\n    PREALLOC_ARRAY_CONFIG(IGNORE_BLOCK, whitelist_item_t, whitelist_rules, err);\n    PREALLOC_ARRAY_CONFIG(IGNORE_FC, fileset_item_t *, ignore_list, err);\n    /* can't use PREALLOC_ARRAY_CONFIG for rules, as we also accept old rule\n     * name (policy)  */\n\n    count = rh_config_CountItemNames(section, RULE_BLOCK) +\n        rh_config_CountItemNames(section, OLD_RULE_BLOCK);\n    if (count > 0) {\n        rules->rules = (rule_item_t *) calloc(count, sizeof(rule_item_t));\n        if (rules->rules == NULL) {\n            strcpy(msg_out, \"memory allocation failed\");\n            rc = ENOMEM;\n            goto err;\n        }\n    } else if (count == 0) {\n        rules->rules = NULL;\n    } else {\n        sprintf(msg_out, \"Unexpected number of blocks: %d + %d = %d\\n\",\n                rh_config_CountItemNames(section, RULE_BLOCK),\n                rh_config_CountItemNames(section, OLD_RULE_BLOCK), count);\n        rc = -1;\n        goto err;\n    }\n\n    count = rh_config_GetNbItems(section);\n\n    /* read sub-blocks */\n    for (i = 0; i < count; i++) {\n        char *item_name;\n        config_item_t curr_item = rh_config_GetItemByIndex(section, i);\n        critical_err_check_goto(curr_item, section_name, rc, err);\n\n        if (rh_config_ItemType(curr_item) == CONFIG_ITEM_BLOCK) {\n            item_name = rh_config_GetBlockName(curr_item);\n            critical_err_check_goto(item_name, section_name, rc, err);\n\n            if (!strcasecmp(item_name, IGNORE_BLOCK)) {\n                /* analyze boolean expression */\n                /* allow using status related info in ignore statement? 
*/\n                rc = GetBoolExpr(curr_item, item_name,\n                                 &rules->whitelist_rules[curr_ign].bool_expr,\n                                 &rules->whitelist_rules[curr_ign].attr_mask,\n                                 msg_out, policy_descr->status_mgr);\n                if (rc)\n                    goto err;\n\n                /* add expression attr mask to policy mask */\n                rules->run_attr_mask = attr_mask_or(&rules->run_attr_mask,\n                                                    &rules->\n                                                    whitelist_rules[curr_ign].\n                                                    attr_mask);\n                curr_ign++;\n            }\n            /* allow 'rule' or 'policy' */\n            else if (!strcasecmp(item_name, RULE_BLOCK)\n                     || !strcasecmp(item_name, OLD_RULE_BLOCK)) {\n                /* parse 'rule' block */\n                rc = parse_rule_block(curr_item, item_name, p_policies,\n                                      policy_descr, rules,\n                                      &rules->rules[curr_rule], msg_out);\n                if (rc)\n                    goto err;\n\n                rules->run_attr_mask = attr_mask_or(&rules->run_attr_mask,\n                                                    &rules->rules[curr_rule].\n                                                    attr_mask);\n                curr_rule++;\n            } else {\n                sprintf(msg_out,\n                        \"'%s' sub-block unexpected in %s block, line %d.\",\n                        item_name, section_name,\n                        rh_config_GetItemLine(curr_item));\n                rc = EINVAL;\n                goto err;\n            }\n        } else {    /* not a block */\n\n            char *value;\n            int extra_args = 0;\n\n            rc = rh_config_GetKeyValue(curr_item, &item_name, &value,\n                                       
&extra_args);\n            if (rc)\n                goto err;\n\n            /* only \"ignore_fileclass\" expected */\n            if (strcasecmp(item_name, IGNORE_FC) != 0) {\n                sprintf(msg_out,\n                        \"'%s' parameter unexpected in %s block, line %d.\",\n                        item_name, section_name,\n                        rh_config_GetItemLine(curr_item));\n                rc = EINVAL;\n                goto err;\n            }\n\n            if (extra_args) {\n                sprintf(msg_out,\n                        \"Unexpected arguments for %s parameter, line %d.\",\n                        item_name, rh_config_GetItemLine(curr_item));\n                rc = EINVAL;\n                goto err;\n            }\n\n            /* find fileset in policy */\n            rules->ignore_list[curr_ign_fc] =\n                get_fileset_by_name(p_policies, value);\n\n            if (rules->ignore_list[curr_ign_fc] == NULL) {\n                sprintf(msg_out,\n                        \"Policy definition references unknown fileclass '%s', line %d.\",\n                        value, rh_config_GetItemLine(curr_item));\n                rc = EINVAL;\n                goto err;\n            }\n\n            rules->ignore_list[curr_ign_fc]->used_in_policy = 1;\n\n            /* check that the fileset is not already referenced in a policy */\n            for (j = 0; j < curr_rule; j++) {\n                for (k = 0; k < rules->rules[j].target_count; k++)\n                    if (rules->ignore_list[curr_ign_fc] ==\n                        rules->rules[j].target_list[k]) {\n                        sprintf(msg_out,\n                                \"Fileclass '%s' is simultaneously ignored and referenced as a target for policy '%s'\",\n                                value, rules->rules[j].rule_id);\n                        rc = EINVAL;\n                        goto err;\n                    }\n            }\n\n            /* add fileset attr mask 
to policy mask */\n            rules->run_attr_mask = attr_mask_or(&rules->run_attr_mask,\n                                                &rules->\n                                                ignore_list[curr_ign_fc]->\n                                                attr_mask);\n            curr_ign_fc++;\n\n        }   /* end of vars */\n\n    }   /* end of section content */\n\n    return 0;\n\n err:\n    free_policy_rules(rules);\n\n    return rc;\n}\n\n/** @TODO manage SM config + SM init */\n\nstatic int reload_policies(policies_t *p_policies)\n{\n    if (p_policies->policy_count != policies.policy_count) {\n        /* policy count changed */\n        DisplayLog(LVL_MAJOR, RELOAD_TAG, \"Policy count changed %u->%u: \"\n                   \"program restart required (skipping policy rules update).\",\n                   policies.policy_count, p_policies->policy_count);\n    }\n\n    /* TODO reload all policies */\n\n#if 0\n    purge_policy_t *policy = (purge_policy_t *) module_config;\n\n    /* Reloading purge policies dynamically is quite complex:\n     * 1) compare triggers: if they are the same (same count, same type)\n     *    update their simple parameters: thresholds and check interval\n     * 2) compare whitelist expressions count and structures.\n     *    If the structure is the same (same parameters, comparators, etc.)\n     *    only update the numerical values for conditions.\n     */\n\n    /** @TODO prevent from concurrent access when policy is being checked */\n\n    update_whitelist(policies.purge_policy.whitelist_rules,\n                     policies.purge_policy.whitelist_count,\n                     policy->whitelist_rules, policy->whitelist_count,\n                     PURGEPOLICY_BLOCK);\n\n    /* XXX global_attr_mask is unchanged, since we keep the same expressions */\n\n    /* free reloaded config structure (no used anymore) */\n    free_whitelist(policy->whitelist_rules, policy->whitelist_count);\n\n#endif\n    return 
0;\n}\n\nstatic int set_policies(void *cfg, bool reload)\n{\n    policies_t *p_policies = (policies_t *) cfg;\n\n    if (reload)\n        return reload_policies(p_policies);\n    else {\n        policies = *p_policies;\n\n        /* update status manager masks, once they are all loaded */\n        smi_update_masks();\n    }\n    return 0;\n}\n\nstatic void set_default_policies(void *module_config)\n{\n    policies_t *pol = (policies_t *) module_config;\n\n    *pol = policy_initializer;\n}\n\nstatic int read_policies(config_file_t config, void *cfg, char *msg_out)\n{\n    policies_t *pol = (policies_t *) cfg;\n    int rc, i;\n\n    memset(pol, 0, sizeof(*pol));\n\n    /* read policy declarations, allocate policy descriptors */\n    rc = read_policy_definitions(config, pol, msg_out);\n    if (rc)\n        return rc;\n\n    /* load fileset definitions, and check fileset hints against defined\n     * policies */\n    rc = read_filesets(config, pol, msg_out);\n    if (rc)\n        return rc;\n\n#ifdef _DEBUG_POLICIES\n    for (i = 0; i < pol->fileset_count; i++)\n        printf(\"> Fileclass '%s'\\n\", pol->fileset_list[i].fileset_id);\n#endif\n\n    /* iterate on declared policies */\n    for (i = 0; i < pol->policy_count; i++) {\n        rc = read_policy(config, pol, msg_out, &pol->policy_list[i]);\n        if (rc)\n            return rc;\n    }\n\n    return 0;\n}\n\nstatic void write_policy_template(FILE *output)\n{\n#ifdef _LUSTRE_HSM\n    print_line(output, 0, \"# Load policy definitions for Lustre/HSM\");\n    print_line(output, 0, \"%%include \\\"includes/lhsm.inc\\\"\");\n    fprintf(output, \"\\n\");\n#else\n    print_line(output, 0,\n               \"# Load policy definitions for tmp filesystem cleanup\");\n    print_line(output, 0, \"%%include \\\"includes/tmpfs.inc\\\"\");\n    fprintf(output, \"\\n\");\n#endif\n\n    write_template_filesets(output);\n\n// FIXME write policy templates\n/*\n    print_line(output, 1, \"# default sort order for the policy 
(this is\");\n    print_line(output, 1, \"# overridden by policy parameters::lru_sort_attr)\");\n    print_line(output, 1, \"#default_lru_sort_attr = last_access ;\");\n\n    print_line(output, 1, \"# Default action for this policy.\");\n    print_line(output, 1, \"# The syntax to call built-in functions is <module_name>.<action_name>\");\n    print_line(output, 1, \"# e.g. common.copy, common.unlink, lhsm.archive, lhsm.release...\");\n    print_line(output, 1, \"default_action = common.unlink ;\");\n    print_line(output, 1, \"# To call a custom script instead, use the following syntax:\");\n    print_line(output, 1, \"# default_action = cmd(\\\"/usr/bin/move_to_trash.sh {path}\\\") ;\");\n    print_line(output, 1, \"# Special parameters can passed to the command:\");\n    print_line(output, 1, \"#    {path}: posix path to the entry\");\n#ifdef _LUSTRE\n#   ifdef _HAVE_FID\n    print_line(output, 1, \"#    {fid}: fid of the entry\");\n#   endif\n    print_line(output, 1, \"#    {fsname}: Lustre fsname\");\n#endif\n    print_line(output, 1, \"#    {hints}: pass action_hints to the command\");\n    fprintf(output, \"\\n\");\n*/\n}\n\nstatic void write_policy_default(FILE *output)\n{\n    write_default_filesets(output);\n// FIXME write policy defaults\n}\n\nstatic void *policies_cfg_new(void)\n{\n    return calloc(1, sizeof(policies_t));\n}\n\nstatic void policies_cfg_free(void *arg)\n{\n    policies_t *cfg = (policies_t *) arg;\n    int i;\n\n    if (cfg == NULL)\n        return;\n\n    for (i = 0; i < cfg->policy_count; i++)\n        free_policy_descr(&cfg->policy_list[i]);\n\n    free(cfg->policy_list);\n    cfg->policy_list = NULL;\n    cfg->policy_count = 0;\n\n    free_filesets(cfg);\n    free(cfg);\n}\n\nmod_cfg_funcs_t policies_cfg_hdlr = {\n    .module_name = \"policies\",\n    .new = policies_cfg_new,\n    .free = policies_cfg_free,\n    .set_default = set_default_policies,\n    .read = read_policies,\n    .set_config = set_policies,\n    .write_default = 
write_policy_default,\n    .write_template = write_policy_template\n};\n"
  },
  {
    "path": "src/policies/policy_matching.c",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2009, 2010 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n/**\n * functions for applying policies to entries\n */\n#ifdef HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n\n#include \"policy_rules.h\"\n#include \"rbh_logs.h\"\n#include \"rbh_misc.h\"\n#include \"rbh_cfg.h\"\n#include \"uidgidcache.h\"\n#include \"xplatform_print.h\"\n#include \"rbh_boolexpr.h\"\n#include \"status_manager.h\"\n\n#include <string.h>\n#include <libgen.h>\n#include <fnmatch.h>\n#include <sys/types.h>\n#include <sys/stat.h>\n#include <unistd.h>\n#include <time.h>\n#include <sys/xattr.h>\n\n#define POLICY_TAG \"Policy\"\n\n/* macros for displaying entries */\n#ifdef _HAVE_FID\n#define F_ENT_ID DFID\n#define P_ENT_ID(_id, _attr)    PFID(_id)\n#else\n#define F_ENT_ID    \"'%s'\"\n#define P_ENT_ID(_id, _attr)    ATTR(_attr, fullpath)\n#endif\n\n//#define _DEBUG_POLICIES 1\n\nstatic char *ExtractParentDir(const char *file_path, char *out_buff)\n{\n\n    char buff[RBH_PATH_MAX];\n    char *dir;\n\n    rh_strncpy(buff, file_path, RBH_PATH_MAX);\n\n    dir = dirname(buff);\n\n    strcpy(out_buff, dir);\n\n    return out_buff;\n}\n\n/* flags for calling TestRegexp and TestPathRegexp */\nenum regexp_flags {\n    REGEXP_IS_CHILD = (1 << 0),\n    REGEXP_ANY_LEVEL = (1 << 1),\n    REGEXP_INSENSITIVE = (1 << 2),\n};\n\n/** return true if 'to_be_tested' matches the given regexp */\nstatic bool TestRegexp(const char *regexp, const char *to_be_tested,\n                       enum regexp_flags flags)\n{\n    int match_flag = 0;\n\n    if (flags & REGEXP_INSENSITIVE)\n        match_flag = FNM_CASEFOLD;\n\n 
   return !fnmatch(regexp, to_be_tested, match_flag);\n}\n\nstatic bool TestPathRegexp(const char *regexp, const char *to_be_tested,\n                           enum regexp_flags flags)\n{\n    char full_path[RBH_PATH_MAX];\n    const char *full_regexp = regexp;\n    bool any_level = (flags & REGEXP_ANY_LEVEL);\n    int match_flag = 0, rc;\n\n    if (flags & REGEXP_IS_CHILD)\n        match_flag |= FNM_LEADING_DIR;\n\n    if (flags & REGEXP_INSENSITIVE)\n        match_flag |= FNM_CASEFOLD;\n\n    /* is the regexp relative ?\n     * (don't add the root path if expression starts with '**').\n     */\n    if (!IS_ABSOLUTE_PATH(regexp) && !(any_level && (regexp[0] == '*'))) {\n        /* add root path to the path to be tested */\n        rc = snprintf(full_path, RBH_PATH_MAX, \"%s/%s\",\n                      global_config.fs_path, regexp);\n        if (rc >= RBH_PATH_MAX) {\n            DisplayLog(LVL_VERB, POLICY_TAG,\n                       \"Path name too long: %s/%s. Try matching anyway.\",\n                       global_config.fs_path, regexp);\n        }\n        full_regexp = full_path;\n    }\n\n    if (!any_level)\n        match_flag |= FNM_PATHNAME;\n\n    if (!fnmatch(full_regexp, to_be_tested, match_flag)) {\n#ifdef _DEBUG_POLICIES\n        printf(\"MATCH regexp='%s', path='%s', flags=%#x\\n\", full_regexp,\n               to_be_tested, match_flag);\n#endif\n        return true;\n    }\n#ifdef _DEBUG_POLICIES\n    printf(\"NO MATCH regexp='%s', path='%s', tree=%#x\\n\", full_regexp,\n           to_be_tested, match_flag);\n#endif\n\n    return false;\n}   /* TestRegexp */\n\nstatic inline int size_compare(unsigned long long size1,\n                               compare_direction_t comp,\n                               unsigned long long size2)\n{\n    switch (comp) {\n    case COMP_GRTHAN:\n        return (size1 > size2);\n    case COMP_GRTHAN_EQ:\n        return (size1 >= size2);\n    case COMP_LSTHAN:\n        return (size1 < size2);\n    case 
COMP_LSTHAN_EQ:\n        return (size1 <= size2);\n    case COMP_EQUAL:\n        return (size1 == size2);\n    case COMP_DIFF:\n        return (size1 != size2);\n    default:\n        DisplayLog(LVL_CRIT, POLICY_TAG, \"Invalid comparator for size (%d)\",\n                   comp);\n        return 0;\n    }\n}\n\nstatic inline int int_compare(int int1, compare_direction_t comp, int int2)\n{\n    switch (comp) {\n    case COMP_GRTHAN:\n        return (int1 > int2);\n    case COMP_GRTHAN_EQ:\n        return (int1 >= int2);\n    case COMP_LSTHAN:\n        return (int1 < int2);\n    case COMP_LSTHAN_EQ:\n        return (int1 <= int2);\n    case COMP_EQUAL:\n        return (int1 == int2);\n    case COMP_DIFF:\n        return (int1 != int2);\n    default:\n        DisplayLog(LVL_CRIT, POLICY_TAG, \"Invalid comparator for int (%d)\",\n                   comp);\n        return 0;\n    }\n}\n\nstatic inline policy_match_t bool2policy_match(int rc)\n{\n    return rc ? POLICY_MATCH : POLICY_NO_MATCH;\n}\n\nstatic inline int negate_match(int rc)\n{\n    if (rc == POLICY_MATCH)\n        return POLICY_NO_MATCH;\n    else if (rc == POLICY_NO_MATCH)\n        return POLICY_MATCH;\n    else\n        return rc;\n}\n\n#ifdef _DEBUG_POLICIES\n#define CHECK_ATTR(_pset_, _attr_, _no_trace) do {            \\\n                    if (!ATTR_MASK_TEST(_pset_, _attr_)) {     \\\n                        DisplayLog(LVL_MAJOR, POLICY_TAG,  \\\n                            \"Missing attribute '%s' for evaluating \"   \\\n                            \"boolean expression on \"       \\\n                             DFID, (#_attr_), PFID(p_entry_id)); \\\n                        return POLICY_MISSING_ATTR;            \\\n                    }                                          \\\n             } while (0)\n#else\n#define CHECK_ATTR(_pset_, _attr_, _no_trace) do {            \\\n                    if (!ATTR_MASK_TEST(_pset_, _attr_)) {     \\\n                        if (!(_no_trace))           
           \\\n                            DisplayLog(LVL_MAJOR, POLICY_TAG,  \\\n                                \"Missing attribute '%s' for evaluating \"   \\\n                                \"boolean expression on \"       \\\n                                 DFID, (#_attr_), PFID(p_entry_id)); \\\n                        return POLICY_MISSING_ATTR;            \\\n                    }                                          \\\n             } while (0)\n#endif\n\n\nstatic filter_comparator_t Policy2FilterComparator(compare_direction_t comp,\n                                                   enum compare_flags flags)\n{\n    switch (comp) {\n    case COMP_GRTHAN:\n        return MORETHAN_STRICT;\n    case COMP_GRTHAN_EQ:\n        return MORETHAN;\n    case COMP_LSTHAN:\n        return LESSTHAN_STRICT;\n    case COMP_LSTHAN_EQ:\n        return LESSTHAN;\n    case COMP_EQUAL:\n        return EQUAL;\n    case COMP_DIFF:\n        return NOTEQUAL;\n    case COMP_LIKE:\n        return (flags & CMP_FLG_INSENSITIVE) ? ILIKE : LIKE;\n    case COMP_UNLIKE:\n        return (flags & CMP_FLG_INSENSITIVE) ? 
IUNLIKE : UNLIKE;\n\n    default:\n        /* Error */\n        DisplayLog(LVL_CRIT, POLICY_TAG, \"ERROR: unknown comparator %d\", comp);\n        return (filter_comparator_t)-1;\n    }\n}\n\n/* return the opposite direction */\nstatic compare_direction_t oppose_compare(compare_direction_t comp)\n{\n    switch (comp) {\n    case COMP_GRTHAN:\n        return COMP_LSTHAN;\n    case COMP_GRTHAN_EQ:\n        return COMP_LSTHAN_EQ;\n    case COMP_LSTHAN:\n        return COMP_GRTHAN;\n    case COMP_LSTHAN_EQ:\n        return COMP_GRTHAN_EQ;\n\n        /* reflexive operation */\n    case COMP_EQUAL:\n    case COMP_DIFF:\n    case COMP_LIKE:\n    case COMP_UNLIKE:\n        return comp;\n\n    default:\n        /* Error */\n        DisplayLog(LVL_CRIT, POLICY_TAG, \"ERROR: unknown comparator %d\", comp);\n        return (compare_direction_t)-1;\n    }\n}\n\nstatic inline time_t time_modify(time_t orig, const time_modifier_t *p_pol_mod)\n{\n    time_t newtime;\n    if (!p_pol_mod) /* no modifier */\n        return orig;\n\n    /* if orig is already under time_min, keep it */\n    if (orig <= p_pol_mod->time_min)\n        return orig;\n\n    newtime = orig * p_pol_mod->time_factor;\n    if (newtime < p_pol_mod->time_min)\n        newtime = p_pol_mod->time_min;\n\n    DisplayLog(LVL_FULL, POLICY_TAG,\n               \"Policy modifier enabled: time condition changed: %u -> %u\",\n               (unsigned int)orig, (unsigned int)newtime);\n    return newtime;\n}\n\n/**\n * compare a value according to the attr type described in sm_info_def_t.\n * @return a POLICY_* value\n */\nstatic int compare_generic(const sm_info_def_t *def,\n                           const compare_triplet_t *p_triplet,\n                           void *val, const time_modifier_t *p_pol_mod)\n{\n    int rc;\n\n    switch (def->db_type) {\n    case DB_TEXT:\n        if (def->crit_type != PT_STRING) {\n            DisplayLog(LVL_MAJOR, POLICY_TAG,\n                       \"Criteria type of '%s' is 
incompatible with DB type TEXT\",\n                       def->user_name);\n            return POLICY_ERR;\n        }\n        assert(p_triplet->val.str != NULL);\n\n        /* NULL value only matches empty string */\n        if (val == NULL)\n            rc = EMPTY_STRING(p_triplet->val.str);\n        else\n            /* compare crit_value->str and (char *)val */\n            rc = TestRegexp(p_triplet->val.str, (char *)val, 0);\n\n        if (p_triplet->op == COMP_EQUAL || p_triplet->op == COMP_LIKE)\n            return bool2policy_match(rc);\n        else\n            return bool2policy_match(!rc);\n\n    case DB_INT:\n    case DB_UINT:\n        if (def->crit_type == PT_DURATION) {\n            /* XXX \"dur_attr == 0\" has a special meaning:\n               it matches if time has not been set */\n            if (val == NULL || *((int *)val) == 0) {\n                if ((p_triplet->op == COMP_EQUAL)\n                    && (p_triplet->val.duration == 0))\n                    return POLICY_MATCH;\n                else    /* dur_attr > X do not match */\n                    return POLICY_NO_MATCH;\n\n            }\n            /* at this point val is set and != 0 */\n            if ((p_triplet->val.duration == 0)\n                && (p_triplet->op == COMP_EQUAL\n                    || p_triplet->op == COMP_DIFF)) {\n                /* criterion 'duration == 0' means attr never set (which is false)\n                 * criterion 'duration != 0' means attr is set (which is true)\n                 */\n                if (p_triplet->op == COMP_EQUAL)\n                    return POLICY_NO_MATCH;\n                else\n                    return POLICY_MATCH;\n            }\n\n            /* compare with time enlapsed since date.\n             * take time modifiers into account */\n            rc = int_compare(time(NULL) - *((int *)val), p_triplet->op,\n                             time_modify(p_triplet->val.duration, p_pol_mod));\n        } else if (def->crit_type == 
PT_INT) {\n            if (val == NULL)\n                return POLICY_MISSING_ATTR;\n\n            rc = int_compare(*((int *)val), p_triplet->op,\n                             p_triplet->val.integer);\n        } else {\n            DisplayLog(LVL_MAJOR, POLICY_TAG,\n                       \"Criteria type of '%s' is incompatible with DB type INT/UINT\",\n                       def->user_name);\n            return POLICY_ERR;\n        }\n        return bool2policy_match(rc);\n\n    case DB_BIGINT:\n    case DB_BIGUINT:\n\n        if (val == NULL)\n            return POLICY_MISSING_ATTR;\n\n        if (def->crit_type != PT_INT64) {\n            DisplayLog(LVL_MAJOR, POLICY_TAG,\n                       \"Criteria type of '%s' is incompatible with DB type BIGINT/BIGUINT\",\n                       def->user_name);\n            return POLICY_ERR;\n        }\n\n        rc = size_compare(*((ull_t *) val), p_triplet->op, p_triplet->val.size);\n        return bool2policy_match(rc);\n\n    case DB_ENUM_FTYPE:\n        {\n            const char *typedb;\n\n            if (val == NULL)\n                return POLICY_MISSING_ATTR;\n\n            if (def->crit_type != PT_TYPE) {\n                DisplayLog(LVL_MAJOR, POLICY_TAG,\n                           \"Criteria type of '%s' is incompatible with DB type ENUM_FTYPE\",\n                           def->user_name);\n                return POLICY_ERR;\n            }\n            typedb = type2db(p_triplet->val.type);\n            if (typedb == NULL)\n                return POLICY_ERR;\n            else\n                rc = !strcmp((char *)val, typedb);\n\n            if (p_triplet->op == COMP_EQUAL)\n                return bool2policy_match(rc);\n            else\n                return bool2policy_match(!rc);\n        }\n\n    case DB_BOOL:\n        if (val == NULL)\n            return POLICY_MISSING_ATTR;\n\n        if (def->crit_type != PT_BOOL) {\n            DisplayLog(LVL_MAJOR, POLICY_TAG,\n                       
\"Criteria type of '%s' is incompatible with DB type BOOL\",\n                       def->user_name);\n            return POLICY_ERR;\n        }\n\n        /* Boolean are stored in 'integer' field by criteria2condition(). */\n        /* Compare a bool with an integer (0 or <>0) */\n        rc = ((*((bool *) val)) == !!p_triplet->val.integer);\n\n        if (p_triplet->op == COMP_EQUAL)\n            return bool2policy_match(rc);\n        else\n            return bool2policy_match(!rc);\n\n    case DB_SHORT:\n    case DB_USHORT:\n        DisplayLog(LVL_MAJOR, POLICY_TAG, \"Criteria type non supported: SHORT\");\n        return POLICY_ERR;\n\n    case DB_ID:\n        DisplayLog(LVL_MAJOR, POLICY_TAG, \"Criteria type non supported: ID\");\n        return POLICY_ERR;\n\n    case DB_STRIPE_INFO:\n    case DB_STRIPE_ITEMS:\n        DisplayLog(LVL_MAJOR, POLICY_TAG,\n                   \"Criteria type non supported: STRIPE_INFO/STRIPE_ITEMS\");\n        return POLICY_ERR;\n\n    default:\n        DisplayLog(LVL_MAJOR, POLICY_TAG, \"Unexpected criteria type in %s()\",\n                   __func__);\n        return POLICY_ERR;\n    }\n\n    /** XXX how are stored: PT_FLOAT? 
*/\n}\n\n/** Convert a comparator value to a DB filter value,\n * according to the given sm_info definition.\n * @param oppose_cmp\n */\nstatic int set_filter_value_generic(const sm_info_def_t *def,\n                                    compare_direction_t op,\n                                    const compare_value_t *comp_val,\n                                    db_type_u *f_val, bool *oppose_cmp)\n{\n\n    switch (def->db_type) {\n    case DB_TEXT:\n        f_val->val_str = comp_val->str;\n        break;\n    case DB_INT:   /* integer or date */\n        if (def->crit_type == PT_DURATION) {\n            /* XXX \"dur_attr == 0\" has a special meaning:\n               it matches if time has not been set */\n            if ((op == COMP_EQUAL) && (comp_val->duration == 0)) {\n                f_val->val_int = 0;\n            } else {\n                f_val->val_int = time(NULL) - comp_val->duration;\n\n                /* enlapsed > X <=>  date < now - X */\n                *oppose_cmp = true;\n            }\n        } else {\n            f_val->val_int = comp_val->integer;\n        }\n        break;\n    case DB_UINT:\n        if (def->crit_type == PT_DURATION) {\n            /* XXX \"dur_attr == 0\" has a special meaning:\n               it matches if time has not been set */\n            if ((op == COMP_EQUAL) && (comp_val->duration == 0)) {\n                f_val->val_uint = 0;\n            } else {\n                f_val->val_uint = time(NULL) - comp_val->duration;\n                /* enlapsed > X <=>  date < now - X */\n                *oppose_cmp = true;\n            }\n        } else {\n            f_val->val_uint = comp_val->integer;\n        }\n        break;\n    case DB_BIGINT:\n        f_val->val_bigint = comp_val->size;\n        break;\n    case DB_BIGUINT:\n        f_val->val_biguint = comp_val->size;\n        break;\n    case DB_ENUM_FTYPE:\n        f_val->val_str = type2db(comp_val->type);\n        break;\n    case DB_BOOL:\n        f_val->val_bool = 
comp_val->integer;\n        break;\n    case DB_SHORT:\n        f_val->val_short = comp_val->integer;\n        break;\n    case DB_USHORT:\n        f_val->val_ushort = comp_val->integer;\n        break;\n    case DB_ID:\n        DisplayLog(LVL_MAJOR, POLICY_TAG, \"Criteria type non supported: ID\");\n        return -1;\n\n    case DB_STRIPE_INFO:\n    case DB_STRIPE_ITEMS:\n        DisplayLog(LVL_MAJOR, POLICY_TAG,\n                   \"Criteria type non supported: STRIPE_INFO/STRIPE_ITEMS\");\n        return -1;\n\n    default:\n        DisplayLog(LVL_MAJOR, POLICY_TAG, \"Unexpected criteria type in %s()\",\n                   __func__);\n        return -1;\n    }\n    return 0;\n}\n\n/**\n * Convert criteria to ListMgr data\n * \\param[in]  p_comp          The condition to be converted\n * \\param[out] p_attr_index    Related attribute index\n * \\param[out] p_compar        Listmgr comparator\n * \\param[out] db_type_u       Value\n * \\param[out] p_must_release  Set to TRUE if the db_type_u.val_str string\n *                             must be released.\n * \\retval -1 if this is not a criteria stored in DB.\n */\n/** @TODO factorize criteria2filter */\nint criteria2filter(const compare_triplet_t *p_comp,\n                    unsigned int *p_attr_index, filter_comparator_t *p_compar,\n                    filter_value_t *p_value, bool *p_must_release,\n                    const sm_instance_t *smi, const time_modifier_t *time_mod)\n{\n    int len;\n    char *new_str;\n    bool add_root = false;\n\n    *p_must_release = false;\n\n    /*@FIXME this function, could make more generic processing using the\n     * definitions of criteria_descr_t in rbh_boolexpr.c */\n    switch (p_comp->crit) {\n    case CRITERIA_TREE:\n        /* is the path relative ? 
*/\n        if (!IS_ABSOLUTE_PATH(p_comp->val.str))\n            add_root = true;\n\n        /* fullpath like 'tree/% ' */\n\n        *p_attr_index = ATTR_INDEX_fullpath;\n\n        if ((p_comp->op == COMP_LIKE) || (p_comp->op == COMP_EQUAL))\n            *p_compar = (p_comp->flags & CMP_FLG_INSENSITIVE) ? ILIKE : LIKE;\n        else if ((p_comp->op == COMP_UNLIKE) || (p_comp->op == COMP_DIFF))\n            *p_compar =\n                (p_comp->flags & CMP_FLG_INSENSITIVE) ? IUNLIKE : UNLIKE;\n\n        len = strlen(p_comp->val.str);\n\n        if (add_root)\n            len += strlen(global_config.fs_path) + 1;   /* root path + '/' */\n\n        new_str = MemAlloc(len + 3);    /* 3 => '/' '%' '\\0' */\n        *p_must_release = true;\n\n        if (add_root)\n            sprintf(new_str, \"%s/%s\", global_config.fs_path, p_comp->val.str);\n        else\n            strcpy(new_str, p_comp->val.str);\n\n        /* XXX this won't match the root entry */\n\n        /* is a / needed ? */\n        if (!FINAL_SLASH(new_str)) {\n            new_str[len] = '/';\n            len++;\n        }\n        /* add db 'wildcard' */\n        new_str[len] = '*';\n        len++;\n        new_str[len] = '\\0';\n\n        p_value->value.val_str = new_str;\n        break;\n\n    case CRITERIA_PATH:    /* fullpath 'path' */\n        *p_attr_index = ATTR_INDEX_fullpath;\n        *p_compar = Policy2FilterComparator(p_comp->op, p_comp->flags);\n\n        if (!IS_ABSOLUTE_PATH(p_comp->val.str)) {\n            /* add root path */\n            len = strlen(p_comp->val.str) + strlen(global_config.fs_path) + 1;\n            new_str = MemAlloc(len + 1);    /* +1 for \\0 */\n            *p_must_release = true;\n            sprintf(new_str, \"%s/%s\", global_config.fs_path, p_comp->val.str);\n            p_value->value.val_str = new_str;\n        } else\n            p_value->value.val_str = p_comp->val.str;\n\n        break;\n\n    case CRITERIA_NAME:    /* name like 'filename' */\n    case 
CRITERIA_INAME:   /* name ilike 'filename' */\n        *p_attr_index = ATTR_INDEX_name;\n        *p_compar = Policy2FilterComparator(p_comp->op, p_comp->flags);\n        p_value->value.val_str = p_comp->val.str;\n        break;\n\n    case CRITERIA_TYPE:    /* type = 'type' */\n        *p_attr_index = ATTR_INDEX_type;\n        *p_compar = Policy2FilterComparator(p_comp->op, p_comp->flags);\n        p_value->value.val_str = type2db(p_comp->val.type);\n        break;\n\n    case CRITERIA_OWNER:   /* owner like 'owner' */\n        *p_attr_index = ATTR_INDEX_uid;\n        *p_compar = Policy2FilterComparator(p_comp->op, p_comp->flags);\n        if (global_config.uid_gid_as_numbers)\n            p_value->value.val_int = p_comp->val.integer;\n        else\n            p_value->value.val_str = p_comp->val.str;\n        break;\n\n    case CRITERIA_GROUP:\n        *p_attr_index = ATTR_INDEX_gid;\n        *p_compar = Policy2FilterComparator(p_comp->op, p_comp->flags);\n        if (global_config.uid_gid_as_numbers)\n            p_value->value.val_int = p_comp->val.integer;\n        else\n            p_value->value.val_str = p_comp->val.str;\n        break;\n\n#ifdef _LUSTRE\n    case CRITERIA_PROJID:\n        *p_attr_index = ATTR_INDEX_projid;\n        *p_compar = Policy2FilterComparator(p_comp->op, p_comp->flags);\n        p_value->value.val_int = p_comp->val.integer;\n        break;\n#endif\n\n    case CRITERIA_SIZE:\n        *p_attr_index = ATTR_INDEX_size;\n        *p_compar = Policy2FilterComparator(p_comp->op, p_comp->flags);\n        p_value->value.val_biguint = p_comp->val.size;\n        break;\n\n    case CRITERIA_DEPTH:\n        *p_attr_index = ATTR_INDEX_depth;\n        *p_compar = Policy2FilterComparator(p_comp->op, p_comp->flags);\n        p_value->value.val_uint = p_comp->val.integer;\n        break;\n\n    case CRITERIA_DIRCOUNT:\n        *p_attr_index = ATTR_INDEX_dircount;\n        *p_compar = Policy2FilterComparator(p_comp->op, p_comp->flags);\n        
p_value->value.val_uint = p_comp->val.integer;\n        break;\n\n    case CRITERIA_NLINK:\n        *p_attr_index = ATTR_INDEX_nlink;\n        *p_compar = Policy2FilterComparator(p_comp->op, p_comp->flags);\n        p_value->value.val_uint = p_comp->val.integer;\n        break;\n\n    case CRITERIA_LAST_ACCESS:\n        *p_attr_index = ATTR_INDEX_last_access;\n\n        /*   last_access > 2h <=> access_time < time(NULL) - 2h */\n\n        *p_compar =\n            Policy2FilterComparator(oppose_compare(p_comp->op), p_comp->flags);\n        p_value->value.val_uint =\n            time(NULL) - time_modify(p_comp->val.duration, time_mod);\n        break;\n\n    case CRITERIA_LAST_MOD:\n        *p_attr_index = ATTR_INDEX_last_mod;\n        *p_compar =\n            Policy2FilterComparator(oppose_compare(p_comp->op), p_comp->flags);\n        p_value->value.val_uint =\n            time(NULL) - time_modify(p_comp->val.duration, time_mod);\n        break;\n\n    case CRITERIA_CREATION:\n        *p_attr_index = ATTR_INDEX_creation_time;\n        *p_compar =\n            Policy2FilterComparator(oppose_compare(p_comp->op), p_comp->flags);\n        p_value->value.val_uint =\n            time(NULL) - time_modify(p_comp->val.duration, time_mod);\n        break;\n\n    case CRITERIA_LAST_MDCHANGE:\n        *p_attr_index = ATTR_INDEX_last_mdchange;\n        *p_compar =\n            Policy2FilterComparator(oppose_compare(p_comp->op), p_comp->flags);\n        p_value->value.val_uint =\n            time(NULL) - time_modify(p_comp->val.duration, time_mod);\n        break;\n\n    case CRITERIA_RMTIME:\n        if (smi == NULL || !(smi->sm->flags & SM_DELETED)) {\n            DisplayLog(LVL_CRIT, POLICY_TAG,\n                       \"rm_time condition out of a 'remove' policy\");\n            return -1;\n        }\n\n        /* XXX should only be used in a policy about 'removed' entries */\n        *p_attr_index = ATTR_INDEX_rm_time;\n        *p_compar =\n            
Policy2FilterComparator(oppose_compare(p_comp->op), p_comp->flags);\n        p_value->value.val_uint =\n            time(NULL) - time_modify(p_comp->val.duration, time_mod);\n        break;\n\n    case CRITERIA_FILECLASS:\n        *p_attr_index = ATTR_INDEX_fileclass;\n        *p_compar = Policy2FilterComparator(p_comp->op, p_comp->flags);\n        p_value->value.val_str = p_comp->val.str;\n        break;\n\n#ifdef _LUSTRE\n    case CRITERIA_POOL:\n        *p_attr_index = ATTR_INDEX_stripe_info;\n        *p_compar = Policy2FilterComparator(p_comp->op, p_comp->flags);\n        p_value->value.val_str = p_comp->val.str;\n        break;\n\n    case CRITERIA_OST:\n        *p_attr_index = ATTR_INDEX_stripe_items;\n        *p_compar = Policy2FilterComparator(p_comp->op, p_comp->flags);\n        p_value->value.val_uint = p_comp->val.integer;\n        break;\n#endif\n\n    case CRITERIA_STATUS:\n        if (smi == NULL)\n            RBH_BUG(\"status filter with no status manager in the context\");\n\n        *p_attr_index = smi_status_index(smi);\n        *p_compar = Policy2FilterComparator(p_comp->op, p_comp->flags);\n        p_value->value.val_str = p_comp->val.str;\n        break;\n\n    case CRITERIA_SM_INFO:\n        {\n            const sm_info_def_t *def = NULL;\n            int rc;\n            unsigned int idx;\n            bool oppose = false;\n\n            rc = sm_attr_get(smi, NULL, p_comp->attr_name, NULL, &def, &idx);\n            if (rc < 0) {\n                DisplayLog(LVL_CRIT, POLICY_TAG,\n                           \"couldn't find criteria '%s' in context\",\n                           p_comp->attr_name);\n                return -1;\n            }\n            DisplayLog(LVL_FULL, POLICY_TAG, \"Attribute index of '%s' = %#X|%u\",\n                       p_comp->attr_name, idx & ATTR_INDEX_FLG_MASK,\n                       idx & ~ATTR_INDEX_FLG_MASK);\n            *p_attr_index = idx;\n            rc = set_filter_value_generic(def, p_comp->op, 
&p_comp->val,\n                                          &p_value->value, &oppose);\n            if (rc)\n                return -1;\n\n            if (oppose)\n                *p_compar =\n                    Policy2FilterComparator(oppose_compare(p_comp->op),\n                                            p_comp->flags);\n            else\n                *p_compar = Policy2FilterComparator(p_comp->op, p_comp->flags);\n\n        }\n        break;\n\n    case CRITERIA_XATTR:\n    default:\n        *p_attr_index = ATTR_INDEX_FLG_UNSPEC;\n        return -1;\n    }\n\n    return 0;\n}\n\nstatic enum regexp_flags cmpflg2regexpflg(enum compare_flags flags)\n{\n    enum regexp_flags out = 0;\n\n    if (flags & CMP_FLG_ANY_LEVEL)\n        out |= REGEXP_ANY_LEVEL;\n\n    if (flags & CMP_FLG_INSENSITIVE)\n        out |= REGEXP_INSENSITIVE;\n\n    return out;\n}\n\n/**\n * Search for a fileclass regexp in a list of fileclasses.\n * @param regexp      Regular expression to match fileclass names.\n * @param class_list  List of fileclass names, represented as a string\n *                    delimited by LIST_SEP_CHAR.\n */\nstatic policy_match_t match_fileclass_list(const char *regexp,\n                                           const char *class_list)\n{\n    char *list;\n    char *curr;\n    char *first;\n    char *last = NULL;\n    static const char delim[] = { LIST_SEP_CHAR, '\\0' };\n\n    list = strdup(class_list);\n    if (!list)\n        return POLICY_ERR;\n\n    first = list;\n    while ((curr = strtok_r(first, delim, &last)) != NULL) {\n        /* first arg must be NULL for next call (according to man 3 strtok) */\n        first = NULL;\n\n        if (TestRegexp(regexp, curr, 0)) {\n            free(list);\n            return POLICY_MATCH;\n        }\n    }\n\n    free(list);\n    return POLICY_NO_MATCH;\n}\n\n/** @TODO factorize eval_condition */\nstatic policy_match_t eval_condition(const entry_id_t *p_entry_id,\n                                     const attr_set_t 
*p_entry_attr,\n                                     const compare_triplet_t *p_triplet,\n                                     const time_modifier_t *p_pol_mod,\n                                     const sm_instance_t *smi, int no_warning)\n{\n    char tmpbuff[RBH_PATH_MAX];\n    char *rep;\n    const char *typedb;\n    int rc;\n\n    switch (p_triplet->crit) {\n    case CRITERIA_TREE:\n        /* fullpath is required */\n        CHECK_ATTR(p_entry_attr, fullpath, no_warning);\n\n        rep = ExtractParentDir(ATTR(p_entry_attr, fullpath), tmpbuff);\n        rc = TestPathRegexp(p_triplet->val.str, rep, REGEXP_IS_CHILD |\n                            cmpflg2regexpflg(p_triplet->flags));\n        if (!rc) {  /* try matching root */\n            rc = TestPathRegexp(p_triplet->val.str,\n                                ATTR(p_entry_attr, fullpath),\n                                cmpflg2regexpflg(p_triplet->flags));\n        }\n#ifdef _DEBUG_POLICIES\n        if (rc)\n            printf(\"%s (dir %s) matches tree %s\\n\",\n                   ATTR(p_entry_attr, fullpath), rep, p_triplet->val.str);\n#endif\n\n        if (p_triplet->op == COMP_EQUAL || p_triplet->op == COMP_LIKE)\n            return bool2policy_match(rc);\n        else\n            return bool2policy_match(!rc);\n\n    case CRITERIA_PATH:\n        /* fullpath is required */\n        CHECK_ATTR(p_entry_attr, fullpath, no_warning);\n\n        rc = TestPathRegexp(p_triplet->val.str, ATTR(p_entry_attr, fullpath),\n                            cmpflg2regexpflg(p_triplet->flags));\n\n        if (p_triplet->op == COMP_EQUAL || p_triplet->op == COMP_LIKE)\n            return bool2policy_match(rc);\n        else\n            return bool2policy_match(!rc);\n\n    case CRITERIA_NAME:\n    case CRITERIA_INAME:\n\n        /* filename is required */\n        CHECK_ATTR(p_entry_attr, name, no_warning);\n\n        rc = TestRegexp(p_triplet->val.str, ATTR(p_entry_attr, name),\n                        
cmpflg2regexpflg(p_triplet->flags));\n\n#ifdef _DEBUG_POLICIES\n        if (rc)\n            printf(\"%s matches filename %s\\n\", ATTR(p_entry_attr, name),\n                   p_triplet->val.str);\n#endif\n\n        if (p_triplet->op == COMP_EQUAL || p_triplet->op == COMP_LIKE)\n            return bool2policy_match(rc);\n        else\n            return bool2policy_match(!rc);\n\n    case CRITERIA_TYPE:\n        /* type is required */\n        CHECK_ATTR(p_entry_attr, type, no_warning);\n\n        typedb = type2db(p_triplet->val.type);\n        if (typedb == NULL)\n            return POLICY_ERR;\n        else\n            rc = !strcmp(ATTR(p_entry_attr, type), typedb);\n\n#ifdef _DEBUG_POLICIES\n        printf(DFID\" %s type %s\\n\", PFID(p_entry_id),\n               rc ? \"matches\" : \"doesn't match\", typedb);\n#endif\n        if (p_triplet->op == COMP_EQUAL)\n            return bool2policy_match(rc);\n        else\n            return bool2policy_match(!rc);\n\n    case CRITERIA_OWNER:\n        /* owner is required */\n        CHECK_ATTR(p_entry_attr, uid, no_warning);\n\n        if (global_config.uid_gid_as_numbers) {\n            rc = int_compare(ATTR(p_entry_attr, uid).num, p_triplet->op,\n                             p_triplet->val.integer);\n            return bool2policy_match(rc);\n        } else {\n            rc = TestRegexp(p_triplet->val.str, ATTR(p_entry_attr, uid).txt,\n                            cmpflg2regexpflg(p_triplet->flags));\n            if (p_triplet->op == COMP_EQUAL || p_triplet->op == COMP_LIKE)\n                return bool2policy_match(rc);\n            else\n                return bool2policy_match(!rc);\n        }\n\n    case CRITERIA_GROUP:\n        /* group is required */\n        CHECK_ATTR(p_entry_attr, gid, no_warning);\n\n        if (global_config.uid_gid_as_numbers) {\n            rc = int_compare(ATTR(p_entry_attr, gid).num, p_triplet->op,\n                             p_triplet->val.integer);\n            return 
bool2policy_match(rc);\n        } else {\n            rc = TestRegexp(p_triplet->val.str, ATTR(p_entry_attr, gid).txt,\n                            cmpflg2regexpflg(p_triplet->flags));\n            if (p_triplet->op == COMP_EQUAL || p_triplet->op == COMP_LIKE)\n                return bool2policy_match(rc);\n            else\n                return bool2policy_match(!rc);\n        }\n\n#ifdef _LUSTRE\n    case CRITERIA_PROJID:\n        /* projid is required */\n        CHECK_ATTR(p_entry_attr, projid, no_warning);\n\n        rc = int_compare(ATTR(p_entry_attr, projid), p_triplet->op,\n                         p_triplet->val.integer);\n        return bool2policy_match(rc);\n#endif\n\n    case CRITERIA_SIZE:\n        /* size is required */\n        CHECK_ATTR(p_entry_attr, size, no_warning);\n\n        rc = size_compare(ATTR(p_entry_attr, size), p_triplet->op,\n                          p_triplet->val.size);\n        return bool2policy_match(rc);\n\n    case CRITERIA_DEPTH:\n        /* depth is required */\n        CHECK_ATTR(p_entry_attr, depth, no_warning);\n\n        rc = int_compare(ATTR(p_entry_attr, depth), p_triplet->op,\n                         p_triplet->val.integer);\n        return bool2policy_match(rc);\n\n    case CRITERIA_DIRCOUNT:\n        /* if the entry is not a dir, never match the dircount condition */\n        if (ATTR_MASK_TEST(p_entry_attr, type) &&\n            strcmp(ATTR(p_entry_attr, type), STR_TYPE_DIR) != 0)\n            return POLICY_NO_MATCH;\n\n        CHECK_ATTR(p_entry_attr, dircount, no_warning);\n\n        rc = int_compare(ATTR(p_entry_attr, dircount), p_triplet->op,\n                         p_triplet->val.integer);\n        return bool2policy_match(rc);\n\n    case CRITERIA_NLINK:\n        /* nlink is required */\n        CHECK_ATTR(p_entry_attr, nlink, no_warning);\n\n        rc = int_compare(ATTR(p_entry_attr, nlink), p_triplet->op,\n                         p_triplet->val.integer);\n        return bool2policy_match(rc);\n\n    
case CRITERIA_LAST_ACCESS:\n        /* last_access is required */\n        CHECK_ATTR(p_entry_attr, last_access, no_warning);\n\n        rc = int_compare(time(NULL) - ATTR(p_entry_attr, last_access),\n                         p_triplet->op, time_modify(p_triplet->val.duration,\n                                                    p_pol_mod));\n        return bool2policy_match(rc);\n\n    case CRITERIA_LAST_MOD:\n        /* last_mod required */\n        CHECK_ATTR(p_entry_attr, last_mod, no_warning);\n\n        rc = int_compare(time(NULL) - ATTR(p_entry_attr, last_mod),\n                         p_triplet->op, time_modify(p_triplet->val.duration,\n                                                    p_pol_mod));\n        return bool2policy_match(rc);\n\n    case CRITERIA_CREATION:\n        /* creation_time is required */\n        CHECK_ATTR(p_entry_attr, creation_time, no_warning);\n\n        rc = int_compare(time(NULL) - ATTR(p_entry_attr, creation_time),\n                         p_triplet->op, time_modify(p_triplet->val.duration,\n                                                    p_pol_mod));\n        return bool2policy_match(rc);\n\n        break;\n\n    case CRITERIA_LAST_MDCHANGE:\n        /* last_mdchange (ctime) is required */\n        CHECK_ATTR(p_entry_attr, last_mdchange, no_warning);\n\n        rc = int_compare(time(NULL) - ATTR(p_entry_attr, last_mdchange),\n                         p_triplet->op, time_modify(p_triplet->val.duration,\n                                                    p_pol_mod));\n        return bool2policy_match(rc);\n\n        break;\n\n    case CRITERIA_RMTIME:\n        if (smi == NULL || !(smi->sm->flags & SM_DELETED)) {\n            DisplayLog(LVL_CRIT, POLICY_TAG,\n                       \"rm_time condition out of a 'remove' policy\");\n            return POLICY_ERR;\n        }\n\n        /* rm_time is required */\n        CHECK_ATTR(p_entry_attr, rm_time, no_warning);\n\n        rc = int_compare(time(NULL) - ATTR(p_entry_attr, 
rm_time),\n                         p_triplet->op, time_modify(p_triplet->val.duration,\n                                                    p_pol_mod));\n        return bool2policy_match(rc);\n\n        break;\n\n    case CRITERIA_FILECLASS:\n        /* compare with empty fileclass list? */\n        if (!ATTR_MASK_TEST(p_entry_attr, fileclass))\n            rc = match_fileclass_list(p_triplet->val.str, \"\");\n        else\n            /* match fileclass pattern against fileclass attr */\n            rc = match_fileclass_list(p_triplet->val.str,\n                                      ATTR(p_entry_attr, fileclass));\n\n        if (rc == POLICY_ERR)\n            return rc;\n        if (p_triplet->op == COMP_EQUAL || p_triplet->op == COMP_LIKE)\n            return rc;\n        else\n            return negate_match(rc);\n\n#ifdef _LUSTRE\n    case CRITERIA_POOL:\n        /* /!\\ objects != file or dir don't have stripe info (never match) */\n        if (ATTR_MASK_TEST(p_entry_attr, type) &&\n            strcmp(ATTR(p_entry_attr, type), STR_TYPE_DIR) &&\n            strcmp(ATTR(p_entry_attr, type), STR_TYPE_FILE))\n            return POLICY_NO_MATCH;\n\n        /* pool name is required */\n        CHECK_ATTR(p_entry_attr, stripe_info, no_warning);\n\n        rc = TestRegexp(p_triplet->val.str,\n                        ATTR(p_entry_attr, stripe_info).pool_name,\n                        cmpflg2regexpflg(p_triplet->flags));\n\n#ifdef _DEBUG_POLICIES\n        if (rc)\n            printf(\"'%s' pool matches '%s'\\n\",\n                   ATTR(p_entry_attr, stripe_info).pool_name,\n                   p_triplet->val.str);\n#endif\n\n        if (p_triplet->op == COMP_EQUAL || p_triplet->op == COMP_LIKE)\n            return bool2policy_match(rc);\n        else\n            return bool2policy_match(!rc);\n\n    case CRITERIA_OST:\n        {\n            int i;\n\n            /* /!\\ objects != file don't have stripe items (never match) */\n            if 
(ATTR_MASK_TEST(p_entry_attr, type) &&\n                strcmp(ATTR(p_entry_attr, type), STR_TYPE_FILE))\n                return POLICY_NO_MATCH;\n\n            /* stripe items are needed */\n            CHECK_ATTR(p_entry_attr, stripe_items, no_warning);\n\n            for (i = 0; i < ATTR(p_entry_attr, stripe_items).count; i++) {\n                if (ATTR(p_entry_attr, stripe_items).stripe[i].ost_idx ==\n                    p_triplet->val.integer) {\n                    /* if comparator is ==, at least 1 OST must match,\n                     * if the cmp is !=, none must match */\n                    if (p_triplet->op == COMP_DIFF)\n                        return POLICY_NO_MATCH;\n                    else if (p_triplet->op == COMP_EQUAL)\n                        return POLICY_MATCH;\n                }\n            }\n            /* no matching OST:\n             * - if the operator is !=, the entry matches\n             * - else, the entry doesn't match */\n            if (p_triplet->op == COMP_DIFF)\n                return POLICY_MATCH;\n            else if (p_triplet->op == COMP_EQUAL)\n                return POLICY_NO_MATCH;\n            break;\n        }\n#endif\n\n    case CRITERIA_STATUS:\n        {\n            if (smi == NULL)\n                RBH_BUG\n                    (\"status criteria with no status manager in the context\");\n\n            if (!ATTR_MASK_STATUS_TEST(p_entry_attr, smi->smi_index)) {\n                /* compare with empty string */\n                rc = EMPTY_STRING(p_triplet->val.str);\n#ifdef _DEBUG_POLICIES\n                printf(DFID\" status is not set\\n\", PFID(p_entry_id));\n#endif\n            } else\n                rc = !strcmp(p_triplet->val.str,\n                             STATUS_ATTR(p_entry_attr, smi->smi_index));\n#ifdef _DEBUG_POLICIES\n        printf(DFID\" %s status %s\\n\", PFID(p_entry_id),\n               rc ? 
\"matches\" : \"doesn't match\", p_triplet->val.str);\n#endif\n            /* tolerant mode: if the value was missing and doesn't match:\n             * return POLICY_MISSING_ATTR */\n            if (rc == 0 && no_warning &&\n                !ATTR_MASK_STATUS_TEST(p_entry_attr, smi->smi_index))\n                return POLICY_MISSING_ATTR;\n\n            if (p_triplet->op == COMP_EQUAL)\n                return bool2policy_match(rc);\n            else\n                return bool2policy_match(!rc);\n\n        }\n        break;\n\n    case CRITERIA_SM_INFO:\n        {\n            void *val;\n            const sm_info_def_t *def;\n            unsigned int idx;\n\n            rc = sm_attr_get(smi, p_entry_attr, p_triplet->attr_name, &val,\n                             &def, &idx);\n            if (rc < 0)\n                val = NULL;\n\n            /* \"tolerant\" mode: the value is missing but the caller\n             * will consider it could match however */\n            if (val == NULL && no_warning)\n                return POLICY_MISSING_ATTR;\n\n            rc = compare_generic(def, p_triplet, val, p_pol_mod);\n\n            /* if the retrieved value was NULL and the return is\n             * 'missing attr' */\n            if (val == NULL && rc == POLICY_MISSING_ATTR) {\n                DisplayLog(no_warning ? 
LVL_DEBUG : LVL_EVENT, POLICY_TAG,\n                           \"Missing attribute '%s' for evaluating boolean expression on \"\n                           DFID, p_triplet->attr_name, PFID(p_entry_id));\n                return POLICY_MISSING_ATTR;\n            }\n            DisplayLog(LVL_FULL, POLICY_TAG, \"Matching '%s': rc=%d\",\n                       p_triplet->attr_name, rc);\n            return rc;\n        }\n\n    case CRITERIA_XATTR:\n        {\n            const char *entry_path;\n            char value[1024];\n#if (!defined(_LUSTRE) || !defined(_HAVE_FID))\n            /* fullpath needed to get xattr, except if fids are supported */\n            CHECK_ATTR(p_entry_attr, fullpath, no_warning);\n            entry_path = ATTR(p_entry_attr, fullpath);\n#else\n            if (p_entry_id) {\n                /* use fid path */\n                rc = BuildFidPath(p_entry_id, tmpbuff);\n                if (rc)\n                    return POLICY_ERR;\n                entry_path = tmpbuff;\n            } else if (ATTR_MASK_TEST(p_entry_attr, fullpath)) {\n                /* use posix path */\n                entry_path = ATTR(p_entry_attr, fullpath);\n            } else {\n                if (!no_warning)\n                    DisplayLog(LVL_DEBUG, POLICY_TAG,\n                               \"Missing fid or fullpath to evaluate boolean expression on xattr\");\n                return POLICY_MISSING_ATTR;\n            }\n#endif\n\n            /* retrieve xattr value */\n            rc = lgetxattr(entry_path, p_triplet->attr_name, value, 1024);\n            if (rc < 0) {\n                if (errno == ENODATA || errno == ENOENT)\n                    /* empty string == no attribute */\n                    strcpy(value, \"\");\n                else if (errno == ENOTSUP) {\n                    DisplayLog(LVL_CRIT, POLICY_TAG,\n                               \"Error: condition on extended attribute \"\n                               \"whereas this feature is not supported 
by the filesystem, or xattr name '%s' is invalid)\",\n                               p_triplet->attr_name);\n                    return POLICY_ERR;\n                } else {\n                    DisplayLog(LVL_CRIT, POLICY_TAG,\n                               \"Error getting xattr '%s' on '%s' : %s\",\n                               p_triplet->attr_name, entry_path,\n                               strerror(errno));\n                    return POLICY_ERR;\n                }\n            } else {\n                /* security: set byte n+1 to '\\0', to avoid overflows if attr\n                 * is not a string */\n                if (rc < 1024)\n                    value[rc] = '\\0';\n            }\n\n            DisplayLog(LVL_FULL, POLICY_TAG, \"<xattr>.%s = \\\"%s\\\" (%s)\",\n                       p_triplet->attr_name, value, entry_path);\n\n            /* compare attribute value */\n\n            rc = TestRegexp(p_triplet->val.str, value,\n                            cmpflg2regexpflg(p_triplet->flags));\n\n            if (p_triplet->op == COMP_EQUAL || p_triplet->op == COMP_LIKE)\n                return bool2policy_match(rc);\n            else\n                return bool2policy_match(!rc);\n\n            break;\n        }\n\n    default:\n        DisplayLog(LVL_CRIT, POLICY_TAG,\n                   \"This criteria (%#x) is not supported in this mode\",\n                   p_triplet->crit);\n        return POLICY_ERR;\n    }\n\n    return POLICY_ERR;\n\n}\n\n/* function for testing a boolean expression on a given entry */\nstatic policy_match_t _entry_matches(const entry_id_t *p_entry_id,\n                                     const attr_set_t *p_entry_attr,\n                                     const bool_node_t *p_node,\n                                     const time_modifier_t *p_pol_mod,\n                                     const sm_instance_t *smi,\n                                     int no_warning)\n{\n    policy_match_t rc;\n\n    if (!p_entry_id || 
!p_entry_attr || !p_node)\n        return POLICY_ERR;\n\n    switch (p_node->node_type) {\n    case NODE_UNARY_EXPR:\n\n        /* BOOL_NOT is the only supported unary operator */\n        if (p_node->content_u.bool_expr.bool_op != BOOL_NOT)\n            return POLICY_ERR;\n\n        rc = _entry_matches(p_entry_id, p_entry_attr,\n                            p_node->content_u.bool_expr.expr1, p_pol_mod,\n                            smi, no_warning);\n\n        return negate_match(rc);\n\n    case NODE_BINARY_EXPR:\n        /* always test the first expression */\n        rc = _entry_matches(p_entry_id, p_entry_attr,\n                            p_node->content_u.bool_expr.expr1, p_pol_mod,\n                            smi, no_warning);\n\n        /* in some cases, we can stop here */\n        if ((p_node->content_u.bool_expr.bool_op == BOOL_OR)\n            && (rc == POLICY_MATCH))\n            return POLICY_MATCH;\n        else if ((p_node->content_u.bool_expr.bool_op == BOOL_AND)\n                 && (rc == POLICY_NO_MATCH))\n            return POLICY_NO_MATCH;\n        else if (rc != POLICY_MATCH && rc != POLICY_NO_MATCH)\n            return rc;\n\n        /* compute the second expression */\n        return _entry_matches(p_entry_id, p_entry_attr,\n                              p_node->content_u.bool_expr.expr2,\n                              p_pol_mod, smi, no_warning);\n\n        break;\n\n    case NODE_CONDITION:\n        /* It's now time to test the value ! 
*/\n        return eval_condition(p_entry_id, p_entry_attr,\n                              p_node->content_u.condition, p_pol_mod, smi,\n                              no_warning);\n        break;\n\n    case NODE_CONSTANT:\n        return bool2policy_match(p_node->content_u.constant);\n    }\n\n    return POLICY_ERR;\n\n}\n\npolicy_match_t entry_matches(const entry_id_t *p_entry_id,\n                             const attr_set_t *p_entry_attr,\n                             bool_node_t *p_node,\n                             const time_modifier_t *p_pol_mod,\n                             const sm_instance_t *smi)\n{\n    return _entry_matches(p_entry_id, p_entry_attr, p_node, p_pol_mod, smi,\n                          false);\n}\n\nstatic policy_match_t _is_whitelisted(const policy_descr_t *policy,\n                                      const entry_id_t *p_entry_id,\n                                      const attr_set_t *p_entry_attr,\n                                      fileset_item_t **fileset,\n                                      bool no_warning)\n{\n    unsigned int i, count;\n    policy_match_t rc = POLICY_NO_MATCH;\n    whitelist_item_t *list;\n    fileset_item_t **fs_list;\n\n    if (fileset != NULL)\n        *fileset = NULL;\n\n    /* /!\\ ignorelist is 'ignore_fileclass'\n     *     whitelist is 'ignore'\n     */\n    list = policy->rules.whitelist_rules;\n    count = policy->rules.whitelist_count;\n\n    for (i = 0; i < count; i++) {\n        switch (_entry_matches\n                (p_entry_id, p_entry_attr, &list[i].bool_expr, NULL,\n                 policy->status_mgr, no_warning)) {\n        case POLICY_MATCH:\n            /* TODO remember the entry is ignored for this policy? 
*/\n            return POLICY_MATCH;\n        case POLICY_MISSING_ATTR:\n            if (!no_warning) {\n                char buff[1024];\n                BoolExpr2str(&list[i].bool_expr, buff, 1024);\n                DisplayLog(LVL_MAJOR, POLICY_TAG, DFID \": attribute is missing \"\n                           \"for checking whitelist rule '%s'\", PFID(p_entry_id),\n                           buff);\n            }\n            if (rc != POLICY_ERR)\n                rc = POLICY_MISSING_ATTR;\n            break;\n        case POLICY_ERR:\n            {\n                char buff[1024];\n                BoolExpr2str(&list[i].bool_expr, buff, 1024);\n                DisplayLog(LVL_CRIT, POLICY_TAG,\n                           DFID \": an error occurred while \"\n                           \"checking this whitelist rule: %s\", PFID(p_entry_id),\n                           buff);\n                rc = POLICY_ERR;\n                break;\n            }\n        case POLICY_NO_MATCH:\n            /* continue testing other whitelist rules */\n            break;\n        }\n    }\n\n    count = policy->rules.ignore_count;\n    fs_list = policy->rules.ignore_list;\n\n    for (i = 0; i < count; i++) {\n#ifdef _DEBUG_POLICIES\n        printf(\"Checking if entry matches whitelisted fileset %s...\\n\",\n               fs_list[i]->fileset_id);\n#endif\n        switch (_entry_matches\n                (p_entry_id, p_entry_attr, &fs_list[i]->definition, NULL,\n                 policy->status_mgr, no_warning)) {\n        case POLICY_MATCH:\n            {\n#ifdef _DEBUG_POLICIES\n                printf(\"   -> match\\n\");\n#endif\n                if (fileset != NULL)\n                    *fileset = fs_list[i];\n\n                /* TODO remember if the policy matches a ignore rule for this\n                 * policy? 
*/\n                return POLICY_MATCH;\n            }\n        case POLICY_MISSING_ATTR:\n#ifdef _DEBUG_POLICIES\n            printf(\"   -> missing attr\\n\");\n#endif\n            if (!no_warning)\n                DisplayLog(LVL_MAJOR, POLICY_TAG, DFID \": attribute is missing \"\n                           \"for checking ignore_fileclass rule\",\n                           PFID(p_entry_id));\n            if (rc != POLICY_ERR)\n                rc = POLICY_MISSING_ATTR;\n            break;\n        case POLICY_ERR:\n#ifdef _DEBUG_POLICIES\n            printf(\"   -> error\\n\");\n#endif\n            DisplayLog(LVL_CRIT, POLICY_TAG, DFID \": an error occurred \"\n                       \"when checking ignore_fileclass rule\", PFID(p_entry_id));\n            rc = POLICY_ERR;\n            break;\n        case POLICY_NO_MATCH:\n#ifdef _DEBUG_POLICIES\n            printf(\"   -> no match\\n\");\n#endif\n            /* continue testing other whitelist rules */\n            break;\n        }\n    }\n\n    return rc;\n}\n\npolicy_match_t is_whitelisted(const policy_descr_t *policy,\n                              const entry_id_t *p_entry_id,\n                              const attr_set_t *p_entry_attr,\n                              fileset_item_t **fileset)\n{\n    return _is_whitelisted(policy, p_entry_id, p_entry_attr, fileset, false);\n}\n\n/** determine if a class is whitelisted for the given policy */\nbool class_is_whitelisted(const policy_descr_t *policy, const char *class_id)\n{\n    unsigned int i, count;\n    fileset_item_t **fs_list;\n\n    count = policy->rules.ignore_count;\n    fs_list = policy->rules.ignore_list;\n\n    for (i = 0; i < count; i++) {\n        if (!strcasecmp(fs_list[i]->fileset_id, class_id))\n            return true;\n    }\n    /* not found */\n    return false;\n}\n\n/* Match classes according to p_attrs_cached+p_attrs_new,\n * set the result in p_attrs_new->fileclass.\n */\nint match_classes(const entry_id_t *id, attr_set_t 
*p_attrs_new,\n                  const attr_set_t *p_attrs_cached)\n{\n    unsigned int i;\n    int ok = 0;\n    int left = sizeof(ATTR(p_attrs_new, fileclass));\n\n    /* initialize output fileclass */\n    char *pcur = ATTR(p_attrs_new, fileclass);\n    *pcur = '\\0';\n\n    attr_set_t attr_cp = ATTR_SET_INIT;\n\n    /* merge contents of the 2 input attr sets */\n    ListMgr_MergeAttrSets(&attr_cp, p_attrs_new, true);\n    if (p_attrs_cached != NULL)\n        ListMgr_MergeAttrSets(&attr_cp, p_attrs_cached, false);\n\n    for (i = 0; i < policies.fileset_count; i++) {\n        fileset_item_t *fset = &policies.fileset_list[i];\n\n        if (!fset->matchable) {\n            ok++;\n            continue;\n        }\n\n        switch (_entry_matches\n                (id, &attr_cp, &fset->definition, NULL, NULL, true)) {\n        case POLICY_MATCH:\n            ok++;\n            if (EMPTY_STRING(ATTR(p_attrs_new, fileclass))) {\n                strncpy(pcur, fset->fileset_id, left);\n                left -= strlen(pcur);\n                pcur += strlen(pcur);\n            } else if (left > 1) {\n                *pcur = LIST_SEP_CHAR;\n                pcur++;\n                strncpy(pcur, fset->fileset_id, left - 1);\n                left -= strlen(pcur) + 1;\n                pcur += strlen(pcur);\n            }\n            break;\n        case POLICY_MISSING_ATTR:\n            DisplayLog(LVL_EVENT, POLICY_TAG,\n                       DFID \": attribute is missing for checking fileset '%s'\",\n                       PFID(id), fset->fileset_id);\n            break;\n        case POLICY_ERR:\n            DisplayLog(LVL_CRIT, POLICY_TAG,\n                       DFID \": an error occurred when checking fileset '%s'\",\n                       PFID(id), fset->fileset_id);\n            break;\n        case POLICY_NO_MATCH:\n            ok++;\n            /* continue testing other file classes */\n            break;\n        }\n    }\n\n    /* no fileclass could be matched 
without an error */\n    if (policies.fileset_count != 0 && ok == 0) {\n        ATTR_MASK_UNSET(p_attrs_new, fileclass);\n    } else {\n        ATTR(p_attrs_new, class_update) = time(NULL);\n        ATTR_MASK_SET(p_attrs_new, fileclass);\n        ATTR_MASK_SET(p_attrs_new, class_update);\n    }\n\n    ListMgr_FreeAttrs(&attr_cp);\n    return 0;\n}\n\n/** get the first matching policy case for the given file */\nrule_item_t *policy_case(const policy_descr_t *policy,\n                         const entry_id_t *p_entry_id,\n                         const attr_set_t *p_entry_attr,\n                         fileset_item_t **pp_fileset)\n{\n    int count, i, j;\n    unsigned int default_index = ATTR_INDEX_FLG_UNSPEC;\n    rule_item_t *pol_list;\n\n    pol_list = policy->rules.rules;\n    count = policy->rules.rule_count;\n\n    /* for each policy (except default), check target filesets.\n     *   - if a fileset matches, return the associated policy.\n     *   - else, return defaut policy, if it is specified.\n     *   - else, write a warning.\n     */\n    for (i = 0; i < count; i++) {\n\n#ifdef _DEBUG_POLICIES\n        printf(\"Checking policy %s...\\n\", pol_list[i].rule_id);\n#endif\n\n        if (!strcasecmp(pol_list[i].rule_id, \"default\")) {\n            /* remember index of default policy */\n            default_index = i;\n            continue;\n        }\n\n        /* check filesets */\n\n        for (j = 0; j < pol_list[i].target_count; j++) {\n\n#ifdef _DEBUG_POLICIES\n            printf(\"    Checking file class %s\\n\",\n                   pol_list[i].target_list[j]->fileset_id);\n#endif\n\n            switch (entry_matches(p_entry_id, p_entry_attr,\n                                  &pol_list[i].target_list[j]->definition,\n                                  NULL, policy->status_mgr)) {\n            case POLICY_MATCH:\n                DisplayLog(LVL_FULL, POLICY_TAG,\n                           \"Entry \" F_ENT_ID\n                           \" matches 
target file class '%s' of policy '%s'\",\n                           P_ENT_ID(p_entry_id, p_entry_attr),\n                           pol_list[i].target_list[j]->fileset_id,\n                           pol_list[i].rule_id);\n                if (pp_fileset)\n                    *pp_fileset = pol_list[i].target_list[j];\n                return &pol_list[i];\n\n            case POLICY_NO_MATCH:\n                break;\n\n            case POLICY_MISSING_ATTR:\n                DisplayLog(LVL_MAJOR, POLICY_TAG,\n                           \"Attributes are missing to check if entry \" F_ENT_ID\n                           \" matches file class '%s' (in policy '%s')\",\n                           P_ENT_ID(p_entry_id, p_entry_attr),\n                           pol_list[i].target_list[j]->fileset_id,\n                           pol_list[i].rule_id);\n                break;\n\n            default:\n                DisplayLog(LVL_CRIT, POLICY_TAG,\n                           \"Error while checking if entry \" F_ENT_ID\n                           \" matches file class '%s' (in policy '%s')\",\n                           P_ENT_ID(p_entry_id, p_entry_attr),\n                           pol_list[i].target_list[j]->fileset_id,\n                           pol_list[i].rule_id);\n            }\n        }\n    }\n\n    /* => entry matches no fileset in any policy */\n    if (pp_fileset)\n        *pp_fileset = NULL;\n\n    /* if there a default ? 
*/\n    if (default_index != ATTR_INDEX_FLG_UNSPEC)\n        return &pol_list[default_index];\n\n    /* entry matches no policy => ignored */\n    DisplayLog(LVL_DEBUG, POLICY_TAG,\n               \"Entry \" F_ENT_ID\n               \" matches no policy case: not applying %s policy to it.\",\n               P_ENT_ID(p_entry_id, p_entry_attr), policy->name);\n\n    return NULL;\n}\n\n/** get the policy case for the given fileclass.\n *  \\param pp_fileset is set to the matching fileset\n *         or NULL for the default policy case\n */\nrule_item_t *class_policy_case(const policy_descr_t *policy,\n                               const char *class_id,\n                               fileset_item_t **pp_fileset)\n{\n    int count, i, j;\n    rule_item_t *pol_list;\n\n    count = policy->rules.rule_count;\n    pol_list = policy->rules.rules;\n\n    /* check name of target filesets for each policy.\n     * if name is 'default', return the default policy case.\n     * If policy case is not found, return NULL.\n     */\n    for (i = 0; i < count; i++) {\n        if (!strcasecmp(pol_list[i].rule_id, \"default\")) {\n            // XXX matches no fileclass\n            continue;\n#if 0\n            /* do we look for default case? 
*/\n            if (!strcmp(class_id, CLASS_DEFAULT)) {\n                if (pp_fileset)\n                    *pp_fileset = NULL;\n                return &pol_list[i];\n            } else\n                continue;\n#endif\n        }\n\n        /* check filesets */\n\n        for (j = 0; j < pol_list[i].target_count; j++) {\n            if (!strcasecmp(class_id, pol_list[i].target_list[j]->fileset_id)) {\n                DisplayLog(LVL_FULL, POLICY_TAG,\n                           \"FileClass '%s' is a target of policy '%s'\",\n                           class_id, pol_list[i].rule_id);\n                if (pp_fileset)\n                    *pp_fileset = pol_list[i].target_list[j];\n                return &pol_list[i];\n            }\n        }\n    }\n\n    DisplayLog(LVL_MAJOR, POLICY_TAG,\n               \"Saved fileclass '%s' is no longer used in %s policy. Refresh needed.\",\n               class_id, policy->name);\n    return NULL;\n}\n\n/**\n *  Check if an entry has a chance to be matched in any policy condition.\n */\npolicy_match_t policy_match_all(const policy_descr_t *policy,\n                                const entry_id_t *p_entry_id,\n                                const attr_set_t *p_entry_attr,\n                                const time_modifier_t *time_mod,\n                                fileset_item_t **pp_fileset)\n{\n    bool could_not_match = false;\n    int count, i, j;\n    int default_index = -1;\n    rule_item_t *pol_list;\n\n    /* if it MATCHES any whitelist condition, return NO_MATCH\n     * else, it could potentially match a policy, so we must test them.\n     */\n    switch (_is_whitelisted(policy, p_entry_id, p_entry_attr, pp_fileset,\n            true)) {\n    case POLICY_MATCH:\n        return POLICY_NO_MATCH;\n    case POLICY_MISSING_ATTR:\n        could_not_match = true;\n        break;\n    case POLICY_NO_MATCH:\n        break;\n    default:\n        return POLICY_ERR;\n    }\n\n    pol_list = policy->rules.rules;\n    count 
= policy->rules.rule_count;\n\n    /* for each policy:\n     * - if we get NO_MATCH for all filesets of the policy,\n     *   no need to check the condition.\n     * - if we get MATCH for any fileset of the policy,\n     *   only test this condition.\n     * - if we get MISSING_ATTR  for any fileset of the policy,\n     *   test the condition.\n     *\n     * - if we get MATCH for the condition, return MATCH or MISSING_ATTR\n     *   if we got some previously\n     * - if we get NO_MATCH for the condition, this policy cannot be matched.\n     * - if we get MISSING_ATTR for the condition, return MISSING_ATTR.\n     */\n    for (i = 0; i < count; i++) {\n        bool match = false;\n        bool all_no_match = (pol_list[i].target_count > 0 ? true : false);\n        bool missing_attr = false;\n\n#ifdef _DEBUG_POLICIES\n        printf(\"Checking policy %s...\\n\", pol_list[i].rule_id);\n#endif\n\n        if (!strcasecmp(pol_list[i].rule_id, \"default\")) {\n            /* remember index of default policy */\n            default_index = i;\n            continue;\n        }\n\n        /* check filesets */\n\n        for (j = 0; j < pol_list[i].target_count; j++) {\n#ifdef _DEBUG_POLICIES\n            printf(\"    Checking file class %s\\n\",\n                   pol_list[i].target_list[j]->fileset_id);\n#endif\n\n            switch (_entry_matches(p_entry_id, p_entry_attr,\n                                   &pol_list[i].target_list[j]->definition,\n                                   time_mod, policy->status_mgr, true)) {\n            case POLICY_MATCH:\n                DisplayLog(LVL_FULL, POLICY_TAG,\n                           \"Entry matches target file class '%s' of policy '%s'\",\n                           pol_list[i].target_list[j]->fileset_id,\n                           pol_list[i].rule_id);\n                all_no_match = false;\n                match = true;\n                break;\n\n            case POLICY_NO_MATCH:\n                break;\n\n            
case POLICY_MISSING_ATTR:\n                all_no_match = false;\n                missing_attr = true;\n                DisplayLog(LVL_FULL, POLICY_TAG,\n                           \"Attributes are missing to check if entry\"\n                           \" matches file class '%s' (in policy '%s')\",\n                           pol_list[i].target_list[j]->fileset_id,\n                           pol_list[i].rule_id);\n                break;\n\n            default:\n                DisplayLog(LVL_CRIT, POLICY_TAG, \"Error while checking if entry\"\n                           \" matches file class '%s' (in policy '%s')\",\n                           pol_list[i].target_list[j]->fileset_id,\n                           pol_list[i].rule_id);\n                return POLICY_ERR;\n            }\n\n            /* if entry matches or an attribute is missing, we can test the\n             * condition */\n            if (match || missing_attr)\n                break;\n        }\n\n#ifdef _DEBUG_POLICIES\n        printf(\"Summary for target filesets of policy %s: \"\n               \"match=%d, missing_attr=%d, all_no_match=%d\\n\",\n               pol_list[i].rule_id, match, missing_attr, all_no_match);\n#endif\n        /**\n         * - if we get MATCH for any fileset of the policy,\n         *   only test this condition.\n         * - if we get MISSING_ATTR  for any fileset of the policy,\n         *   test the condition.\n         */\n\n        /* if we got NO_MATCH for all filesets of the policy, no need to check\n         * the condition. 
*/\n        if (all_no_match)\n            continue;\n\n        /* test the condition of the policy */\n\n        /* - if we get MATCH for the condition, return MATCH or MISSING_ATTR\n         *   if we got some previously\n         * - if we get NO_MATCH for the condition, this policy cannot be matched.\n         * - if we get MISSING_ATTR for the condition, return MISSING_ATTR.\n         */\n        switch (_entry_matches(p_entry_id, p_entry_attr,\n                               &pol_list[i].condition, time_mod,\n                               policy->status_mgr, true)) {\n        case POLICY_NO_MATCH:\n            /* the entry cannot match this item */\n            break;\n        case POLICY_MATCH:\n            /* return MATCH if we add not missing attrs previously,\n             * MISSING_ATTR else.\n             */\n            DisplayLog(LVL_DEBUG, POLICY_TAG,\n                       \"Entry matches the condition for policy '%s'.\",\n                       pol_list[i].rule_id);\n            if (missing_attr || could_not_match)\n                return POLICY_MISSING_ATTR;\n            else\n                return POLICY_MATCH;\n            break;\n        case POLICY_MISSING_ATTR:\n            return POLICY_MISSING_ATTR;\n        default:\n            DisplayLog(LVL_MAJOR, POLICY_TAG,\n                       \"Error checking if entry matches the condition for policy '%s'.\",\n                       pol_list[i].rule_id);\n            return POLICY_ERR;\n        }\n\n        /*  if we get MATCH for any fileset of the policy,\n         *  only test this condition. 
*/\n        if (match)\n            return POLICY_NO_MATCH;\n\n    }\n\n    /* at this point, we have no chance to match policies,\n     * now check default case.\n     */\n\n    if (default_index != -1) {\n        /* XXX assumes default rule is not LUA */\n\n        /* - if we get MATCH for the condition, return MATCH or MISSING_ATTR\n         *   if we got some previously\n         * - if we get NO_MATCH for the condition, no policy is matched.\n         * - if we get MISSING_ATTR for the condition, return MISSING_ATTR.\n         */\n        switch (_entry_matches(p_entry_id, p_entry_attr,\n                               &pol_list[default_index].condition,\n                               time_mod, policy->status_mgr, true)) {\n        case POLICY_NO_MATCH:\n            return POLICY_NO_MATCH;\n            break;\n        case POLICY_MATCH:\n            /* return MATCH if we add not missing attrs previously,\n             * MISSING_ATTR else.\n             */\n            DisplayLog(LVL_DEBUG, POLICY_TAG,\n                       \"Entry matches the condition for default policy\");\n            if (could_not_match)\n                return POLICY_MISSING_ATTR;\n            else\n                return POLICY_MATCH;\n            break;\n        case POLICY_MISSING_ATTR:\n            return POLICY_MISSING_ATTR;\n        default:\n            DisplayLog(LVL_MAJOR, POLICY_TAG,\n                       \"Error checking if entry matches the condition for default\");\n            return POLICY_ERR;\n        }\n\n    }\n\n    /* not matched */\n    return POLICY_NO_MATCH;\n}\n\npolicy_match_t match_scope(const policy_descr_t *pol, const entry_id_t *id,\n                           const attr_set_t *attrs, bool warn)\n{\n    return _entry_matches(id, attrs, &pol->scope, NULL, pol->status_mgr, !warn);\n}\n\n#define LOG_MATCH(_m, _id, _a, _p) do { \\\n        if (log_config.debug_level >= LVL_FULL) { \\\n            if ((_m) == POLICY_MATCH) \\\n                 
DisplayLog(LVL_FULL, POLICY_TAG, \"entry \"DFID\" matches scope \"\\\n                           \"for policy %s\", PFID((_id)), (_p)->name);          \\\n            else if ((_m) == POLICY_NO_MATCH) \\\n                 DisplayLog(LVL_FULL, POLICY_TAG, \"entry \"DFID\" doesn't match \"\\\n                            \"scope for policy %s\", PFID((_id)), (_p)->name);   \\\n            else if ((_m) == POLICY_MISSING_ATTR) {\\\n                 attr_mask_t tmp = attr_mask_and_not(&(_p)->scope_mask,        \\\n                                   &(_a)->attr_mask); \\\n                DisplayLog(LVL_FULL, POLICY_TAG, \"missing attrs to determine \" \\\n                           \"if entry \"DFID\" matches scope for policy %s: \"     \\\n                           \"scope_mask=\"DMASK\", attr_mask=\"DMASK\", \"           \\\n                           \"missing=\"DMASK, PFID((_id)), (_p)->name,           \\\n                           PMASK(&(_p)->scope_mask), PMASK(&(_a)->attr_mask),  \\\n                           PMASK(&tmp)); \\\n            } else \\\n                 DisplayLog(LVL_FULL, POLICY_TAG, \"entry \"DFID\": error \"       \\\n                            \"matching scope for policy %s\", \\\n                            PFID((_id)), (_p)->name); \\\n        } \\\n    } while (0)\n\nvoid add_matching_scopes_mask(const entry_id_t *id, const attr_set_t *attrs,\n                              bool tolerant, uint32_t *status_mask)\n{\n    unsigned int i;\n    policy_match_t match;\n\n    for (i = 0; i < policies.policy_count; i++) {\n        uint32_t curr_mask;\n\n        /* no status */\n        if (policies.policy_list[i].status_mgr == NULL)\n            continue;\n\n        curr_mask = SMI_MASK(policies.policy_list[i].status_mgr->smi_index);\n\n        /* Avoid rematching if the status is already set in the mask,\n         * as it is already matched by another policy. 
*/\n        if ((*status_mask) & curr_mask)\n            continue;\n\n        if (tolerant) {\n            match = match_scope(&policies.policy_list[i], id, attrs, false);\n            LOG_MATCH(match, id, attrs, &policies.policy_list[i]);\n\n            /* set the current attr bit if it is not sure it doesn't match */\n            if (match != POLICY_NO_MATCH)\n                *status_mask |= curr_mask;\n        } else {\n            match = match_scope(&policies.policy_list[i], id, attrs, true);\n            LOG_MATCH(match, id, attrs, &policies.policy_list[i]);\n\n            /* set the current attr bit if it is sure it matches */\n            if (match == POLICY_MATCH)\n                *status_mask |= curr_mask;\n        }\n    }\n}\n"
  },
  {
    "path": "src/policies/policy_run.c",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2009-2016 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n#ifdef HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n\n#include \"policy_run.h\"\n#include \"run_policies.h\"\n#include \"list_mgr.h\"\n#include \"rbh_logs.h\"\n#include \"rbh_misc.h\"\n#include \"Memory.h\"\n#include \"xplatform_print.h\"\n#include \"update_params.h\"\n#include \"status_manager.h\"\n#include \"policy_sched.h\"\n\n#include <sys/types.h>\n#include <sys/stat.h>\n#include <unistd.h>\n#include <errno.h>\n#include <pthread.h>\n\n#define CHECK_QUEUE_INTERVAL    1\n\n#define ignore_policies(_p) ((_p)->flags & RUNFLG_IGNORE_POL)\n#define dry_run(_p)         ((_p)->flags & RUNFLG_DRY_RUN)\n#define aborted(_p)         ((_p)->aborted)\n#define stopping(_p)        ((_p)->stopping)\n#define no_limit(_p)        ((_p)->flags & RUNFLG_NO_LIMIT)\n#define force_run(_p)       ((_p)->flags & RUNFLG_FORCE_RUN)\n#define tag(_p)             ((_p)->descr->name)\n\n#define TAG \"PolicyRun\"\n\ntypedef struct queue_item__ {\n    entry_id_t entry_id;\n    attr_set_t entry_attr;\n    unsigned long targeted;\n} queue_item_t;\n\n/**\n *  alloc a new worker item so it can be pushed to the worker queue.\n */\nstatic queue_item_t *entry2queue_item(entry_id_t *p_entry_id,\n                                      attr_set_t *p_attr_set,\n                                      unsigned long targeted)\n{\n    queue_item_t *new_entry;\n\n    new_entry = (queue_item_t *) MemAlloc(sizeof(queue_item_t));\n    if (!new_entry)\n        return NULL;\n\n    new_entry->entry_id = *p_entry_id;\n    new_entry->entry_attr = *p_attr_set;\n  
  new_entry->targeted = targeted;\n\n    return new_entry;\n}\n\n/**\n * Free a queue Item (and the resources of its entry_attr).\n */\nstatic void free_queue_item(queue_item_t *item)\n{\n    ListMgr_FreeAttrs(&item->entry_attr);\n    MemFree(item);\n}\n\ntypedef struct subst_args {\n    action_params_t *params;\n    const entry_id_t *id;\n    const attr_set_t *attrs;\n    const char **subst_array;\n    const sm_instance_t *smi;\n} subst_args_t;\n\n/** substitute placeholders in a param value */\nstatic int subst_one_param(const char *key, const char *val, void *udata)\n{\n    subst_args_t *args = (subst_args_t *) udata;\n    gchar *new_val;\n    char *descr = NULL;\n    int rc;\n\n    if (asprintf(&descr, \"parameter %s='%s'\", key, val) < 0) {\n        return -ENOMEM;\n    }\n    new_val = subst_params(val, descr, args->id, args->attrs, args->params,\n                           args->subst_array, args->smi, false, false);\n    free(descr);\n\n    if (!new_val)\n        return -EINVAL;\n\n    rc = rbh_param_set(args->params, key, new_val, true);\n\n    g_free(new_val);\n\n    return rc;\n}\n\nstatic void set_addl_params(const char *addl_params[], unsigned int size,\n                            const rule_item_t *rule,\n                            const fileset_item_t *fileset)\n{\n    int last_param_idx = 0;\n\n    if (rule != NULL) {\n        if (unlikely(size < last_param_idx + 2))\n            RBH_BUG(\"set_addl_params: array parameter too small\");\n\n        addl_params[last_param_idx] = \"rule\";\n        addl_params[last_param_idx + 1] = rule->rule_id;\n        last_param_idx += 2;\n    }\n\n    /* add params from fileclass (possibly override previous params) */\n    if (fileset != NULL) {\n        if (unlikely(size < last_param_idx + 2))\n            RBH_BUG(\"set_addl_params: array parameter too small\");\n\n        addl_params[last_param_idx] = \"fileclass\";\n        addl_params[last_param_idx + 1] = fileset->fileset_id;\n        last_param_idx += 2;\n  
  }\n\n    if (unlikely(size < last_param_idx + 1))\n        RBH_BUG(\"set_addl_params: array parameter too small\");\n\n    /* terminate the list of addl params */\n    addl_params[last_param_idx] = NULL;\n}\n\n/** maintain policy run information about a given entry */\ntypedef struct entry_context {\n    policy_info_t  *policy;         /**< The running policy information */\n    queue_item_t   *item;           /**< entry information (id and attrs\n                                         from DB) */\n    bool            free_item;      /**< true if item pointer must be freed */\n    rule_item_t    *rule;           /**< matched rule */\n    fileset_item_t *fileset;        /**< matched fileset */\n    attr_set_t      fresh_attrs;    /**< updated attrs */\n    attr_set_t      prev_attrs;     /**< attrs before action */\n    action_params_t params;         /**< action parameters */\n    post_action_e   after_action;   /**< what to do after action */\n    int             time_save;      /**< reference time for LRU */\n    int             curr_sched;     /**< current scheduler */\n} entry_context_t;\n\n/**\n * Build action parameters according to: (in growing priority)\n *  - policy action_params\n *  - policy rule action_params\n *  - action_params of the matched fileclass\n * @param(out)  params  the params struct to be set\n * @param(in)   policy  the policy to build params for\n * @param(in)   rule    the matched policy rule\n * @param(in)   fileset the matched fileset for the policy rule\n * @return  0 on success, a negative value on error.\n */\nstatic int build_action_params(entry_context_t *ectx)\n{\n    int rc = 0;\n    policy_info_t *pol = ectx->policy;\n    char const *addl_params[5]; /* 5 max: \"fileclass\" + its name,\n                                  \"rule\" + its name, NULL */\n    subst_args_t subst_param_args = {\n        .params = &ectx->params,\n        .id     = &ectx->item->entry_id,\n        .attrs  = &ectx->fresh_attrs,\n        .smi    = 
pol->descr->status_mgr\n    };\n\n    /* Merging parameters from:\n     * 1) policy\n     * 2) trigger\n     * 2) policy rule\n     * 3) fileclass\n     */\n    /* params from policy */\n    if (likely(pol->config != NULL)) {\n        rc = rbh_params_copy(&ectx->params, &pol->config->action_params);\n        if (rc)\n            goto err;\n    }\n\n    /* add params from trigger (possibly override previous params) */\n    if (pol->trigger_action_params != NULL) {\n        rc = rbh_params_copy(&ectx->params, pol->trigger_action_params);\n        if (rc)\n            goto err;\n    }\n\n    /* add params from rule (possibly override previous params) */\n    if (ectx->rule != NULL) {\n        rc = rbh_params_copy(&ectx->params, &ectx->rule->action_params);\n        if (rc)\n            goto err;\n    }\n\n    /* add params from fileclass (possibly override previous params) */\n    if (ectx->fileset != NULL) {\n        const action_params_t *fileset_params;\n\n        /* check if there are parameters for the given policy */\n        fileset_params = get_fileset_policy_params(ectx->fileset,\n                                                   pol->descr->name);\n        if (fileset_params != NULL) {\n            rc = rbh_params_copy(&ectx->params, fileset_params);\n            if (rc)\n                goto err;\n        }\n    }\n\n    set_addl_params(addl_params, sizeof(addl_params) / sizeof(char *),\n                    ectx->rule, ectx->fileset);\n    subst_param_args.subst_array = addl_params;\n\n    /* replace placeholders in action params */\n    rc = rbh_params_foreach(&ectx->params, subst_one_param, &subst_param_args);\n    if (rc)\n        goto err;\n\n    return 0;\n\n err:\n    rbh_params_free(&ectx->params);\n    return rc;\n}\n\n/** Execute a policy action. 
*/\nstatic int policy_action(entry_context_t *ectx, match_source_t check_method)\n{\n    int rc = 0;\n    policy_info_t         *pol = ectx->policy;\n    const entry_id_t      *id  = &ectx->item->entry_id;\n    sm_instance_t         *smi = pol->descr->status_mgr;\n    const policy_action_t *actionp = NULL;\n\n    /* if attrs has not been refreshed, skip the db update by default */\n    if (check_method == MS_NONE || check_method == MS_CACHE_ONLY)\n        ectx->after_action = PA_NONE;\n    else\n        ectx->after_action = PA_UPDATE;\n\n    /* Get the action from policy rule, if defined.\n     * Else, get the default action for the policy. */\n    if (ectx->rule != NULL && ectx->rule->action.type != ACTION_UNSET)\n        actionp = &ectx->rule->action;\n    else\n        /* defaults to default_action from */\n        actionp = &pol->config->action;\n\n    /* log as DEBUG level if 'report_actions' is disabled */\n    DisplayLog(pol->config->report_actions ? LVL_EVENT : LVL_DEBUG,\n               tag(pol),\n               \"%sExecuting policy action on: \" DFID_NOBRACE \" (%s)\",\n               dry_run(pol) ? \"(dry-run) \" : \"\", PFID(id),\n               ATTR(&ectx->fresh_attrs, fullpath));\n\n    if (log_config.debug_level >= LVL_DEBUG) {\n        GString *str = g_string_new(\"\");\n\n        rc = rbh_params_serialize(&ectx->params, str, NULL, RBH_PARAM_CSV);\n        if (rc == 0)\n            DisplayLog(LVL_DEBUG, tag(pol), DFID \": action_params: %s\",\n                       PFID(id), str->str);\n        g_string_free(str, TRUE);\n    }\n\n    if (dry_run(pol))\n        return 0;\n\n    /* If the status manager has an 'executor', make it run the action.\n     * Else, run directly the action function. 
*/\n    if (smi != NULL && smi->sm->executor != NULL) {\n        /* @TODO provide a DB callback */\n        rc = smi->sm->executor(smi, pol->descr->implements, actionp,\n                               id, &ectx->fresh_attrs, &ectx->params,\n                               &ectx->after_action, NULL, NULL);\n    } else {\n        switch (actionp->type) {\n        case ACTION_FUNCTION:\n            /* @TODO provide a DB callback */\n            DisplayLog(LVL_DEBUG, tag(pol), DFID \": action: %s\",\n                       PFID(id), actionp->action_u.func.name);\n            rc = actionp->action_u.func.call(id, &ectx->fresh_attrs,\n                                             &ectx->params, &ectx->after_action,\n                                             NULL, NULL);\n            break;\n        case ACTION_COMMAND:   /* execute custom action */\n            {\n                char *descr = NULL;\n                char **cmd;\n                char const *addl_params[5];\n\n                set_addl_params(addl_params,\n                                sizeof(addl_params) / sizeof(char *),\n                                ectx->rule, ectx->fileset);\n\n                if (asprintf(&descr, \"action command '%s'\",\n                             actionp->action_u.command[0]) < 0) {\n                    DisplayLog(LVL_CRIT, tag(pol),\n                               \"Could not allocate string for action command '%s'\",\n                               actionp->action_u.command[0]);\n                    rc = -ENOMEM;\n                    break;\n                }\n\n                /* replaces placeholders in command */\n                rc = subst_shell_params(actionp->action_u.command, descr,\n                                        id, &ectx->fresh_attrs, &ectx->params,\n                                        addl_params, smi, true, &cmd);\n                free(descr);\n                if (rc == 0) {\n                    /* call custom command */\n                    if 
(log_config.debug_level >= LVL_DEBUG) {\n                        char *log_cmd = concat_cmd(cmd);\n                        DisplayLog(LVL_DEBUG, tag(pol),\n                                   DFID \": action: cmd(%s)\", PFID(id), log_cmd);\n                        free(log_cmd);\n                    }\n\n                    rc = execute_shell_command(cmd, cb_stderr_to_log,\n                                               (void *)LVL_DEBUG);\n                    g_strfreev(cmd);\n                    /* @TODO handle other hardlinks to the same entry */\n                }\n\n                break;\n            }\n        case ACTION_UNSET:\n        case ACTION_NONE:\n            rc = 0;\n            break;\n        }\n\n        /* call action callback if there is no status manager executor to wrap\n         * actions */\n        if (smi != NULL && smi->sm->action_cb != NULL) {\n            int tmp_rc = smi->sm->action_cb(smi, pol->descr->implements,\n                                            rc, id, &ectx->fresh_attrs,\n                                            &ectx->after_action);\n            if (tmp_rc)\n                DisplayLog(LVL_MAJOR, tag(pol),\n                           \"Action callback failed for action '%s': rc=%d\",\n                           pol->descr->implements ?  
pol->descr->implements\n                                : \"<null>\", tmp_rc);\n        }\n    }\n\n    return rc;\n}\n\n/**\n * Return the value of the current attribute for sorting the policy LRU.\n */\nstatic inline int get_sort_attr(policy_info_t *p, const attr_set_t *p_attrs)\n{\n    if (p->config->lru_sort_attr == LRU_ATTR_NONE)\n        return -1;\n\n    if (!attr_mask_test_index(&p_attrs->attr_mask, p->config->lru_sort_attr))\n        return -1;\n\n    if (is_sm_info(p->config->lru_sort_attr)) {\n        unsigned int idx = attr2sminfo_index(p->config->lru_sort_attr);\n\n        return *((unsigned int *)p_attrs->attr_values.sm_info[idx]);\n    }\n\n    switch (p->config->lru_sort_attr) {\n    case ATTR_INDEX_creation_time:\n        return ATTR(p_attrs, creation_time);\n    case ATTR_INDEX_last_mod:\n        return ATTR(p_attrs, last_mod);\n    case ATTR_INDEX_last_access:\n        return ATTR(p_attrs, last_access);\n    case ATTR_INDEX_rm_time:\n        return ATTR(p_attrs, rm_time);\n    case ATTR_INDEX_size:\n        return ATTR(p_attrs, size);\n    default:\n        return -1;\n    }\n}\n\n/** set dummy time attributes, to check 'end of list' criteria */\nstatic inline void set_max_time_attrs(policy_info_t *p, attr_set_t *p_attrs,\n                                      time_t value)\n{\n    switch (p->config->lru_sort_attr) {\n\n    case ATTR_INDEX_rm_time:\n        ATTR_MASK_SET(p_attrs, rm_time);\n        ATTR(p_attrs, rm_time) = value;\n\n        /* all times <= rm_time\n         * so, rm_time > x ago => last_access > x ago etc... 
*/\n\n        /* /!\\ fall through */\n\n    case ATTR_INDEX_last_access:\n        ATTR_MASK_SET(p_attrs, last_access);\n        ATTR(p_attrs, last_access) = value;\n        /* in robinhood, lastmod <= last_access as\n         * last_access=MAX(atime,mtime) */\n\n        /* /!\\ fall through */\n\n    case ATTR_INDEX_last_mod:\n        ATTR_MASK_SET(p_attrs, last_mod);\n        ATTR(p_attrs, last_mod) = value;\n\n        /* cr_time always <= last_mod */\n\n        /* /!\\ fall through */\n\n    case ATTR_INDEX_creation_time:\n        ATTR_MASK_SET(p_attrs, creation_time);\n        ATTR(p_attrs, creation_time) = value;\n        break;\n\n    case ATTR_INDEX_size:\n        ATTR_MASK_SET(p_attrs, size);\n        ATTR(p_attrs, size) = value;\n        break;\n\n    default:\n        if (is_sm_info(p->config->lru_sort_attr)) {\n            int *dup = malloc(sizeof(int));\n\n            if (!dup)\n                return;\n            *dup = (int)value;\n\n            /* Don't know the implications of this attribute\n             * on other time attributes.\n             * So, just set its value and return. */\n            if (set_sm_info(p->descr->status_mgr, p_attrs,\n                            attr2sminfo_index(p->config->lru_sort_attr)\n                            - p->descr->status_mgr->sm_info_offset, dup))\n                free(dup);\n            return;\n        }\n\n        /* unsupported */\n        RBH_BUG(\"Unsupported LRU sort attribute\");\n    }\n\n#if 0   /* FIXME RBHv3: guess other times, depending on status scope? 
*/\n    /* If entry is dirty (migration):\n     *      creation_time <= last_archive <= last mod (entry is dirty)\n     *                    <= last_access\n     * If entry is synchro (purge):\n     *      creation_time <= last_mod <= last_access\n     *                    <= last_archive (entry is synchro)\n     *      creation_time <= last_restore\n     *                    <= last_access (entry still not purged)\n     */\n\n    /* what about other times??? */\n    if entry is dirty: last_archive < last_mod < last_access\n#endif\n}\n\n/** return attribute name from its index. */\nstatic const char *pol_attrindex2name(int index)\n{\n    if (is_std_attr(index))\n        return field_infos[index].field_name;\n    else if (is_status(index))\n        return get_sm_instance(attr2status_index(index))->db_field;\n    else if (is_sm_info(index))\n        return sm_attr_info[attr2sminfo_index(index)].user_attr_name;\n\n    return \"?\";\n}\n\n/** return the name of the lru_sort_attr of the policy */\nstatic inline const char *sort_attr_name(const policy_info_t *pol)\n{\n    int attr = pol->config->lru_sort_attr;\n\n    if (attr == LRU_ATTR_NONE)\n        return \"none\";\n\n    return pol_attrindex2name(attr);\n}\n\n/**\n * Given the timestamp of the last processed entry, this tries to guess\n * if next entries still have a chance to match the policy.\n * Example: if entries are sorted by last access (from the older to the newer)\n * and if the last listed entry is too recent to match any policy,\n * then next entries won't match too.\n */\nstatic bool heuristic_end_of_list(policy_info_t *policy, time_t last_time)\n{\n    entry_id_t void_id;\n    attr_set_t void_attr = ATTR_SET_INIT;\n    bool rb = false;\n\n    /* list all files if policies are ignored */\n    if (ignore_policies(policy))\n        return false;\n\n    /* don't rely on fake times (0, 1 or in the future...) 
*/\n    if (last_time <= 1 || last_time > time(NULL))\n        return false;\n\n    /* Optimization:\n     * we build a void entry with time attr = current sort attr\n     * If it doesn't match any policy, next entries won't match too\n     * because entries are sorted by this attribute, so it is not necessary\n     * to continue. */\n    memset(&void_id, 0, sizeof(entry_id_t));\n    memset(&void_attr, 0, sizeof(attr_set_t));\n    ATTR_MASK_INIT(&void_attr);\n\n    /* We set a max value for time conditions.\n     * In this case, if an entry matches age > x with\n     * its times attributes = max_value (the latest),\n     * it would also match for older times.\n     * So, set all times <= sort order of policy (depends on status scope?)\n     */\n    set_max_time_attrs(policy, &void_attr, last_time);\n\n    if (policy_match_all(policy->descr, &void_id, &void_attr,\n                         policy->time_modifier, NULL) == POLICY_NO_MATCH) {\n        DisplayLog(LVL_DEBUG, tag(policy),\n                   \"Optimization: entries with %s later than %lu cannot match \"\n                   \"any policy condition. Stop retrieving DB entries.\",\n                   sort_attr_name(policy), last_time);\n        rb = true;\n    } else {\n        rb = false;\n    }\n\n    ListMgr_FreeAttrs(&void_attr);\n\n    return rb;\n}\n\n/**\n *  Sum the number of acks from a status tab\n */\nstatic inline unsigned int ack_count(const unsigned int *status_tab)\n{\n    unsigned int i, sum;\n    sum = 0;\n\n    for (i = 0; i < AS_ENUM_COUNT; i++)\n        sum += status_tab[i];\n\n    return sum;\n}\n\n/**\n*  Sum the number of skipped entries from a status tab\n*/\nstatic inline unsigned int skipped_count(const unsigned int *status_tab)\n{\n    int i;\n    unsigned int nb = 0;\n\n    /* skipped if it has been accessed, has changed, is whitelisted,\n     * matches no policy, is in use, already running, type not supported...\n     * i.e. 
status in AS_ACCESSED to AS_ALREADY\n     */\n    for (i = AS_ACCESSED; i <= AS_ALREADY; i++)\n        nb += status_tab[i];\n\n    return nb;\n}\n\n/**\n *  Sum the number of errors from a status tab\n */\nstatic inline unsigned int error_count(const unsigned int *status_tab)\n{\n    int i;\n    unsigned int nb = 0;\n\n    /* next status are errors */\n    for (i = AS_MISSING_MD; i <= AS_ERROR; i++)\n        nb += status_tab[i];\n\n    return nb;\n}\n\n/**\n * Convert queue statistics to counters\n */\nstatic void queue_stats2counters(const unsigned long long *feedback_before,\n                                 const unsigned long long *feedback_after,\n                                 const unsigned int *status_tab_before,\n                                 const unsigned int *status_tab_after,\n                                 counters_t *ctr_ok, counters_t *ctr_nok,\n                                 unsigned int *ack, unsigned int *errors,\n                                 unsigned int *skipped)\n{\n    if (ctr_ok)\n        memset(ctr_ok, 0, sizeof(*ctr_ok));\n    if (ctr_nok)\n        memset(ctr_nok, 0, sizeof(*ctr_nok));\n    *ack = 0;\n    *errors = 0;\n    *skipped = 0;\n\n    if (feedback_before != NULL && feedback_after != NULL) {\n        if (ctr_ok) {\n            ctr_ok->count =\n                feedback_after[AF_NBR_OK] - feedback_before[AF_NBR_OK];\n            ctr_ok->vol =\n                feedback_after[AF_VOL_OK] - feedback_before[AF_VOL_OK];\n            ctr_ok->blocks =\n                feedback_after[AF_BLOCKS_OK] -\n                feedback_before[AF_BLOCKS_OK];\n            ctr_ok->targeted =\n                feedback_after[AF_TARGETED_OK] -\n                feedback_before[AF_TARGETED_OK];\n        }\n        if (ctr_nok) {\n            ctr_nok->count =\n                feedback_after[AF_NBR_NOK] - feedback_before[AF_NBR_NOK];\n            ctr_nok->vol =\n                feedback_after[AF_VOL_NOK] - feedback_before[AF_VOL_NOK];\n            
ctr_nok->blocks =\n                feedback_after[AF_BLOCKS_NOK] -\n                feedback_before[AF_BLOCKS_NOK];\n            ctr_nok->targeted =\n                feedback_after[AF_TARGETED_NOK] -\n                feedback_before[AF_TARGETED_NOK];\n        }\n    }\n    if (status_tab_before != NULL && status_tab_after != NULL) {\n        *ack = ack_count(status_tab_after) - ack_count(status_tab_before);\n        *skipped =\n            skipped_count(status_tab_after) -\n            skipped_count(status_tab_before);\n        *errors =\n            error_count(status_tab_after) - error_count(status_tab_before);\n    }\n}\n\n/**\n * Test if the policy run limit has been reached,\n * or if the max error rate is reached.\n */\nstatic bool check_limit(policy_info_t *policy,\n                        const counters_t *ctr_ok,\n                        unsigned int errors, const counters_t *limit)\n{\n    unsigned int total;\n\n    /* --no-limit option specified? */\n    if (no_limit(policy))\n        return false;\n\n    /* counter of successful actions reached the limit? 
*/\n    if (counter_reached_limit(ctr_ok, limit))\n        return true;\n\n    total = ctr_ok->count + errors;\n    if (total == 0)\n        return false;\n\n    /* stop if too many error occurred */\n    if ((policy->config->suspend_error_pct > 0.0)\n        && (policy->config->suspend_error_min > 0)\n        && (errors >= policy->config->suspend_error_min)) {\n        /* total >= errors >= suspend_error_min  > 0\n         * => total != 0 */\n        double pct = 100.0 * (float)errors / (float)total;\n        if (pct >= policy->config->suspend_error_pct) {\n            DisplayLog(LVL_EVENT, tag(policy),\n                       \"error count %u >= %u, error rate %.2f%% >= %.2f => suspending policy run\",\n                       errors, policy->config->suspend_error_min,\n                       pct, policy->config->suspend_error_pct);\n            return true;\n        }\n    }\n    return false;\n}\n\n/**\n * Compute an adaptive delay (in microseconds) to check if in flight requests exceed the limit.\n * The computed value is 10% of the estimated time to process the current queue.\n */\n#define USEC_PER_MSEC 1000  /* 1ms */\n#define USEC_PER_SEC  1000000   /* 1s */\n#define MIN_CHECK_DELAY   (10 * USEC_PER_MSEC)  /* 10ms */\n#define MAX_CHECK_DELAY   USEC_PER_SEC\nstatic unsigned long adaptive_check_delay_us(time_t policy_start,\n                                             unsigned long long nb_processed,\n                                             unsigned long long nb_in_flight)\n{\n    unsigned long check_delay;\n    unsigned long spent_us = USEC_PER_SEC * (time(NULL) - policy_start);\n    unsigned long us_per_ent;\n\n    /* compute check_delay depending on past processing speed */\n    if (spent_us == 0)\n        spent_us = 100 * USEC_PER_MSEC; /* default to 100ms */\n\n    if (nb_processed > 0) {\n        /* how much time to process these entries? */\n        us_per_ent = spent_us / nb_processed;\n        /* how much to process 10% of current queue? 
*/\n        check_delay = (us_per_ent * nb_in_flight) / 10;\n        DisplayLog(LVL_FULL, __func__,\n                   \"%llu entries processed @ %.2f ms/ent, \"\n                   \"%llu in flight => check delay = 10%% x %llu ms = %lu ms\",\n                   nb_processed, (float)us_per_ent / USEC_PER_MSEC,\n                   nb_in_flight, (us_per_ent * nb_in_flight) / USEC_PER_MSEC,\n                   check_delay / USEC_PER_MSEC);\n    } else {\n        /* nothing was done so far (check again in 10% x spent) */\n        check_delay = spent_us / 10;\n        DisplayLog(LVL_FULL, __func__,\n                   \"No entry processed, %llu in flight \"\n                   \"=> check delay = 10%% x %lu ms\",\n                   nb_in_flight, spent_us / USEC_PER_MSEC);\n    }\n\n    if (check_delay > MAX_CHECK_DELAY)\n        check_delay = MAX_CHECK_DELAY;\n    else if (check_delay < MIN_CHECK_DELAY)\n        check_delay = MIN_CHECK_DELAY;\n\n    return check_delay;\n}\n\n/**\n * Check if enqueued entries reach the limit.\n * If so, wait a while to recheck after some entries have been processed.\n * return if no more entries are in flight,\n *     or if the limit is not reached\n *     or if the limit is definitely reached.\n * \\retval true if policy run must stop\n * \\retval false if policy run can continue\n */\n/* @TODO Implement over-provisioning? 
*/\nstatic bool check_queue_limit(policy_info_t *pol,\n                              const counters_t *pushed,\n                              const unsigned long long *feedback_before,\n                              const unsigned int *status_before,\n                              const counters_t *target_ctr)\n{\n    unsigned long long feedback_after[AF_ENUM_COUNT];\n    unsigned int status_after[AS_ENUM_COUNT];\n\n    do {\n        counters_t ctr_ok, ctr_nok, ctr_in_flight, ctr_pot;\n        unsigned int errors, skipped, ack;\n\n        RetrieveQueueStats(&pol->queue, NULL, NULL, NULL, NULL, NULL,\n                           status_after, feedback_after);\n\n        queue_stats2counters(feedback_before, feedback_after,\n                             status_before, status_after,\n                             &ctr_ok, &ctr_nok, &ack, &errors, &skipped);\n\n        /* compute in-flight conters (pushed - done) */\n        ctr_in_flight = *pushed;\n        ctr_in_flight.count -= ack;\n        ctr_in_flight.vol -= ctr_ok.vol + ctr_nok.vol;\n        ctr_in_flight.blocks -= ctr_ok.blocks + ctr_nok.blocks;\n        ctr_in_flight.targeted -= ctr_ok.targeted + ctr_nok.targeted;\n\n        /* compute total counter */\n        counters_add(&ctr_ok, &pol->progress.action_ctr);\n        skipped += pol->progress.skipped;\n        errors += pol->progress.errors;\n\n        /* check the limit of all acknowledged status */\n        if (check_limit(pol, &ctr_ok, errors, target_ctr))\n            return true;\n\n        /* 2) queue is empty and limit is not reached */\n        if (ctr_in_flight.count == 0) {\n            DisplayLog(LVL_FULL, tag(pol), \"queue is empty\");\n            return false;\n        }\n\n        /* check the potential limit of successful + in flight */\n        ctr_pot = ctr_in_flight;\n        counters_add(&ctr_pot, &ctr_ok);\n        DisplayLog(LVL_FULL, tag(pol), \"requests: OK + in flight = %llu\",\n                   ctr_pot.count);\n\n        if 
(check_limit(pol, &ctr_pot, errors, target_ctr)) {\n            unsigned long check_delay =\n                adaptive_check_delay_us(pol->progress.policy_start,\n                                        ctr_ok.count + errors + skipped,\n                                        ctr_in_flight.count);\n            DisplayLog(LVL_DEBUG, tag(pol),\n                       \"Limit potentially reached (%llu requests successful, \"\n                       \"%llu requests in queue, volume: %llu done, %llu in flight), \"\n                       \"waiting %lums before re-checking.\", ctr_ok.count,\n                       ctr_in_flight.count, ctr_ok.vol, ctr_in_flight.vol,\n                       check_delay / USEC_PER_MSEC);\n            rh_usleep(check_delay);\n            continue;\n        } else {\n            return false;\n        }\n    } while (1);\n\n    RBH_BUG(\"This line should not be reached\");\n}\n\n/**\n * Count how many rules can be translated to SQL query\n */\nstatic int count_valid_rules(policy_info_t *policy, const policy_rules_t *rules)\n{\n    int valid_db_rules = 0;\n    int i;\n\n    for (i = 0; i < rules->rule_count; i++) {\n        int j, tc = 0;\n        /**\n         * Rule can be translated to SQL statement if any condition\n         * can be translated\n         * or targets class with report = yes\n         */\n        if (cond2sql_ok(&rules->rules[i].condition,\n                        policy->descr->status_mgr,\n                        policy->time_modifier))\n            valid_db_rules++;\n        else if (!policy->config->recheck_ignored_entries) {\n            for (j = 0; j < rules->rules[i].target_count; j++)\n                if (rules->rules[i].target_list[j]->matchable)\n                    tc++;\n            if (tc > 0)\n                valid_db_rules++;\n        }\n    }\n    return valid_db_rules;\n}\n\n/**\n * build a filter from from policy rules for DB query\n */\nstatic void set_rule_filters(policy_info_t *policy,\n                  
           lmgr_filter_t *p_filter)\n{\n    policy_rules_t *rules = &policy->descr->rules;\n    int valid_db_rules;\n    int i;\n    int actual_rules = 0;\n\n    /* each rule is a set of target fileclasses and conditions */\n    /* 'AND' with previous filters */\n    /* 'OR' between rules */\n    /* 'OR' between rule targets */\n    /* 'AND' rule targets and condition */\n\n    valid_db_rules = count_valid_rules(policy, rules);\n\n    if (valid_db_rules == 0)\n        return;\n\n    /* Always add opening/closing parenthesis, no matter if there is a single\n     * expression. This adds \"AND\" before the block.\n     * It make the code simpler and easier to follow. */\n    lmgr_simple_filter_add_block(p_filter, FILTER_FLAG_BEGIN_BLOCK);\n\n    /* rules are \"ORed\" together */\n    for (i = 0; i < rules->rule_count; i++) {\n        int j, tc = 0;\n        rule_item_t *rule = &rules->rules[i];\n        bool cond_valid = cond2sql_ok(&rule->condition,\n                                      policy->descr->status_mgr,\n                                      policy->time_modifier);\n\n        /* count matchables fileclasses for current rule */\n        if (!policy->config->recheck_ignored_entries) {\n            for (j = 0; j < rule->target_count; j++) {\n                if (rule->target_list[j]->matchable)\n                    tc++;\n            }\n        }\n\n        if (cond_valid == 0 && tc == 0)\n            continue;\n\n        /* The condition is valid or the fileclass is matchable:\n         * start a new block.\n         * 'OR' with previous blocks if is is not the first. */\n        lmgr_simple_filter_add_block(p_filter, actual_rules == 0 ?\n            FILTER_FLAG_BEGIN_BLOCK :\n            FILTER_FLAG_BEGIN_BLOCK | FILTER_FLAG_OR);\n        actual_rules ++;\n\n        /* If the condition is valid, add its SQL. We are in a dedicated sub-block\n         * and we want to \"AND\" with fileclass expression (if any). 
*/\n        if (cond_valid) {\n            if (convert_boolexpr_to_simple_filter(&rule->condition,\n                                            p_filter, policy->descr->status_mgr,\n                                            policy->time_modifier,\n                                            policy->descr->manage_deleted ?\n                                                FILTER_FLAG_ALLOW_NULL : 0,\n                                            BOOL_AND)) {\n                DisplayLog(LVL_DEBUG, tag(policy),\n                           \"Could not convert condition of rule '%s' to \"\n                           \"simple filter.\", rule->rule_id);\n                if (tc == 0) {\n                    /* drop the last begin block */\n                    actual_rules --;\n                    p_filter->filter_simple.filter_count --;\n                    continue;\n                }\n            }\n        }\n\n        /* AND with fileclass criteria */\n        if (tc > 0) {\n            filter_value_t fval;\n\n            /* if tc > 1, make a subblock with ORed fielclasses */\n            if (tc > 1)\n                lmgr_simple_filter_add_block(p_filter, FILTER_FLAG_BEGIN_BLOCK);\n\n            memset(&fval, 0, sizeof(fval));\n\n            bool first = 1;\n            for (j = 0; j < rule->target_count; j++) {\n                if (rule->target_list[j]->matchable) {\n                    fval.value.val_str =\n                        rule->target_list[j]->fileset_id;\n                    lmgr_simple_filter_add(p_filter, ATTR_INDEX_fileclass,\n                                           EQUAL, fval,\n                                           first ? 
0 : FILTER_FLAG_OR);\n                    first = 0;\n                }\n            }\n            if (tc > 1)\n                lmgr_simple_filter_add_block(p_filter, FILTER_FLAG_END_BLOCK);\n        }\n        lmgr_simple_filter_add_block(p_filter, FILTER_FLAG_END_BLOCK);\n    }\n    if (actual_rules > 0)\n        lmgr_simple_filter_add_block(p_filter, FILTER_FLAG_END_BLOCK);\n    else\n        /* drop last begin block */\n        p_filter->filter_simple.filter_count--;\n}\n\n/** Add DB filters according to 'ignore_fileclass' and 'ignore' statements */\nstatic void set_ignore_filters(policy_info_t *policy, lmgr_filter_t *filter)\n{\n    policy_rules_t *rules = &policy->descr->rules;\n    int i;\n\n    /* force checking ignored entries and fileclasses */\n    if (policy->config->recheck_ignored_entries)\n        return;\n\n    /* don't select files in ignored classes */\n    for (i = 0; i < rules->ignore_count; i++) {\n        filter_value_t fval;\n        int flags = 0;\n\n        fval.value.val_str = rules->ignore_list[i]->fileset_id;\n        if (i == 0)\n            /* XXX why \"NOT_END\" only for the first ignore_fileclass? */\n            flags = FILTER_FLAG_NOT_BEGIN | FILTER_FLAG_NOT_END;\n        else\n            flags = FILTER_FLAG_NOT;\n        lmgr_simple_filter_add(filter, ATTR_INDEX_fileclass, EQUAL,\n                               fval, flags);\n    }\n\n    /* don't select entries maching 'ignore' statements */\n    for (i = 0; i < rules->whitelist_count; i++) {\n        if (convert_boolexpr_to_simple_filter(\n            &rules->whitelist_rules[i].bool_expr, filter,\n            policy->descr->status_mgr, policy->time_modifier,\n            /* XXX Only ALLOW_NULL once (?) 
*/\n            (policy->descr->manage_deleted && i == 0) ?\n             FILTER_FLAG_ALLOW_NULL | FILTER_FLAG_NOT\n             : FILTER_FLAG_NOT, BOOL_AND)) {\n            DisplayLog(LVL_DEBUG, tag(policy),\n                       \"Could not convert 'ignore' rule to simple filter.\");\n            DisplayLog(LVL_EVENT, tag(policy),\n                       \"Warning: 'ignore' rule is too complex and may \"\n                       \"affect policy run performance\");\n        }\n    }\n}\n\n/**\n * Return the required comparator to filter next entries, depending on the\n * sort order.\n */\nstatic filter_comparator_t policy_order_to_listmgr_comp(sort_order_t order)\n{\n    switch (order) {\n    case SORT_ASC:\n        return MORETHAN;\n    case SORT_DESC:\n        return LESSTHAN;\n    default:\n        RBH_BUG(\"Invalid policy order\");\n    }\n}\n\n/**\n * Add time filter based on sort order and last checked entry\n */\nstatic void set_optim_filter(policy_info_t *policy, lmgr_filter_t *filter)\n{\n    filter_value_t fval;\n    char datestr[128];\n    struct tm ts;\n    const char *sort_char;\n\n    /* no sort order or no previous filter: do nothing */\n    if (policy->config->lru_sort_attr == LRU_ATTR_NONE\n        || policy->first_eligible == 0)\n        return;\n\n    /* avoid re-checking all old whitelisted entries at the beginning\n     * of the list, so start from the first non-whitelisted file.\n     * restart from initial file when no migration could be done. */\n    fval.value.val_uint = policy->first_eligible;\n    lmgr_simple_filter_add(filter, policy->config->lru_sort_attr,\n                           policy_order_to_listmgr_comp(\n                                policy->config->lru_sort_order),\n                           fval, 0);\n\n    sort_char = policy->config->lru_sort_order == SORT_ASC ? 
\">=\" : \"<=\";\n\n    if (policy->config->lru_sort_attr == ATTR_INDEX_size)\n        snprintf(datestr, 128, \"%lu\", policy->first_eligible);\n    else\n        strftime(datestr, 128, \"%Y/%m/%d %T\",\n                 localtime_r(&policy->first_eligible, &ts));\n\n    DisplayLog(LVL_EVENT, tag(policy),\n               \"Optimization: considering entries with %s %s %s\",\n               sort_attr_name(policy), sort_char, datestr);\n}\n\n/**\n * report the current policy run progress at regular interval.\n */\nstatic void report_progress(policy_info_t *policy,\n                            const unsigned long long *pass_begin,\n                            const unsigned long long *pass_current,\n                            const unsigned int *status_tab_begin,\n                            const unsigned int *status_tab_current)\n{\n    counters_t curr_ctr;\n    unsigned int ack, nb_errors, nb_skipped;\n\n    /* get current pass counters */\n    queue_stats2counters(pass_begin, pass_current, status_tab_begin,\n                         status_tab_current, &curr_ctr, NULL, &ack,\n                         &nb_errors, &nb_skipped);\n    /* add counters of previous passes */\n    counters_add(&curr_ctr, &policy->progress.action_ctr);\n    nb_skipped += policy->progress.skipped;\n    nb_errors += policy->progress.errors;\n\n    /* say hello every runtime interval */\n    if (time(NULL) - policy->progress.last_report >=\n        policy->config->report_interval) {\n        char buf1[128];\n        char buf2[128];\n        char buf3[128];\n        unsigned int spent = time(NULL) - policy->progress.policy_start;\n        if (spent == 0)\n            return;\n        FormatDuration(buf1, 128, spent);\n        FormatFileSize(buf2, 128, curr_ctr.vol);\n        FormatFileSize(buf3, 128, curr_ctr.vol / spent);\n\n        DisplayLog(LVL_MAJOR, tag(policy),\n                   \"Policy is running (started %s ago): \"\n                   \"%llu actions succeeded (%.2f/sec); volume: 
%s (%s/sec); \"\n                   \"skipped: %u; errors: %u\", buf1, curr_ctr.count,\n                   (float)curr_ctr.count / (float)spent, buf2, buf3,\n                   nb_skipped, nb_errors);\n        policy->progress.last_report = time(NULL);\n    }\n}\n\n/**\n * Wait until the queue is empty or migrations timed-out.\n * \\retval 0 when the queue is empty\n * \\retval ETIME on timeout.\n */\nstatic int wait_queue_empty(policy_info_t *policy,\n                            unsigned int nb_submitted,\n                            const unsigned long long *feedback_init,\n                            const unsigned int *status_tab_init,\n                            unsigned long long *feedback_after,\n                            unsigned int *status_tab_after,\n                            bool long_sleep)\n{\n    unsigned int nb_in_queue, nb_action_in_flight;\n\n    /* Wait for end of policy pass */\n    do {\n        time_t last_push, last_pop, last_ack, last_activity;\n\n        last_push = last_pop = last_ack = last_activity = 0;\n\n        RetrieveQueueStats(&policy->queue, NULL, &nb_in_queue,\n                           &last_push, &last_pop, &last_ack,\n                           status_tab_after, feedback_after);\n\n        /* the last time a request was pushed/poped/acknowledged */\n        last_activity = MAX3(last_push, last_pop, last_ack);\n\n        /* nb of operation in flight\n           = nb_enqueued - (nb ack after - nb ack before) */\n        nb_action_in_flight = nb_submitted + ack_count(status_tab_init)\n                            - ack_count(status_tab_after);\n\n        if ((nb_in_queue > 0) || (nb_action_in_flight > 0)) {\n            /* abort this pass if the last action was done a too long time\n             * ago */\n            if ((policy->config->action_timeout != 0) &&\n                (time(NULL) - last_activity >\n                 policy->config->action_timeout)) {\n                DisplayLog(LVL_MAJOR, tag(policy), \"Policy run 
time-out: \"\n                           \"%u actions inactive for %us\", nb_action_in_flight,\n                           (unsigned int)(time(NULL) - last_activity));\n                /* don't wait for current actions to end, continue with\n                 * other entries */\n                return ETIME;\n            }\n\n            report_progress(policy, feedback_init, feedback_after,\n                            status_tab_init, status_tab_after);\n\n            DisplayLog(LVL_DEBUG, tag(policy),\n                       \"Waiting for the end of current pass: \"\n                       \"still %u entries in flight (%u in queue, %u being processed). \"\n                       \"Last action %us ago.\",\n                       nb_action_in_flight, nb_in_queue,\n                       nb_action_in_flight - nb_in_queue,\n                       (unsigned int)(time(NULL) - last_activity));\n\n            if (long_sleep)\n                rh_sleep(CHECK_QUEUE_INTERVAL);\n            else\n                rh_usleep(1000);\n        } else\n            DisplayLog(LVL_DEBUG, tag(policy), \"End of current pass\");\n\n    } while ((nb_in_queue != 0) || (nb_action_in_flight != 0));\n\n    return 0;\n}\n\n/** set the mask of attributes to be retrieved from db */\nstatic attr_mask_t db_attr_mask(policy_info_t *policy,\n                                const policy_param_t *param)\n{\n    attr_mask_t mask = { 0 };\n    attr_mask_t tmp;\n\n/* TODO depends on the prototype of the action to be taken + fileset mask\n*       + condition mask... 
*/\n\n    /* needed for ListMgr_Remove() operations */\n#ifdef _HAVE_FID\n    mask.std |= ATTR_MASK_name | ATTR_MASK_parent_id;\n#endif\n    /* needed for posix operations, and for display */\n    mask.std |= ATTR_MASK_fullpath;\n\n    /* md_update and path_update are not present in SOFT_RM table */\n    if (!policy->descr->manage_deleted) {\n        /* needed if update params != never */\n        if (updt_params.md.when != UPDT_NEVER &&\n            updt_params.md.when != UPDT_ALWAYS)\n            mask.std |= ATTR_MASK_md_update;\n\n#ifdef _HAVE_FID\n        if (updt_params.path.when != UPDT_NEVER &&\n            updt_params.path.when != UPDT_ALWAYS)\n            mask.std |= ATTR_MASK_path_update;\n#endif\n    }\n\n    /* needed to check the entry order didn't change */\n    if (policy->config->lru_sort_attr != LRU_ATTR_NONE)\n        attr_mask_set_index(&mask, policy->config->lru_sort_attr);\n\n    /* needed for size counters and logging, or to verify the entry\n     * didn't change */\n    mask.std |= ATTR_MASK_size;\n    /* depends on policy params (limits) */\n    if (param->target_ctr.blocks != 0 || param->target_ctr.targeted != 0)\n        mask.std |= ATTR_MASK_blocks;\n#ifdef _LUSTRE\n    if (param->target == TGT_POOL || param->target == TGT_OST)\n        mask.std |= ATTR_MASK_stripe_info | ATTR_MASK_stripe_items;\n#endif\n\n    /* Get attrs to match policy scope */\n    mask = attr_mask_or(&mask, &policy->descr->scope_mask);\n\n    /* needed (cached) attributes to check status from scope */\n    tmp = attrs_for_status_mask(mask.status, false);\n    mask = attr_mask_or(&mask, &tmp);\n\n    /* needed attributes to check policy rules */\n    /* Note: mask for schedulers is part of policy's run_attr_mask */\n    mask = attr_mask_or(&mask, &policy->descr->rules.run_attr_mask);\n\n    /* this is needed to call match_classes */\n    mask = attr_mask_or(&mask, &policies.global_fileset_mask);\n\n    /* if the policy manages deleted entries, get all\n     * SOFTRM 
attributes for the current status manager */\n    if (policy->descr->manage_deleted\n        && (policy->descr->status_mgr != NULL))\n        mask =\n            attr_mask_or(&mask,\n                         &policy->descr->status_mgr->softrm_table_mask);\n\n    /* if depth is needed, need fullpath to compute it */\n    if (mask.std & ATTR_MASK_depth)\n        mask.std |= ATTR_MASK_fullpath;\n\n    return mask;\n}\n\n/** mask of fresh attributes to be retrieved */\nstatic attr_mask_t updt_attr_mask(const policy_info_t *policy)\n{\n    attr_mask_t mask = { 0 };\n    attr_mask_t tmp;\n\n    /* needed to check the entry order didn't change */\n    if (policy->config->lru_sort_attr != LRU_ATTR_NONE)\n        attr_mask_set_index(&mask, policy->config->lru_sort_attr);\n\n    /* Needed attrs to double check policy scope */\n    mask = attr_mask_or(&mask, &policy->descr->scope_mask);\n\n    /* Needed fresh attributes to check status from scope */\n    tmp = attrs_for_status_mask(mask.status, true);\n    mask = attr_mask_or(&mask, &tmp);\n\n    /* Needed attributes to check policy rules */\n    /* Note: mask for schedulers is part of policy's run_attr_mask */\n    mask = attr_mask_or(&mask, &policy->descr->rules.run_attr_mask);\n\n    /* if depth is needed, need fullpath to compute it */\n    if (mask.std & ATTR_MASK_depth)\n        mask.std |= ATTR_MASK_fullpath;\n\n    return mask;\n}\n\n/**\n * Compute the target amount for an entry.\n */\nstatic int entry2tgt_amount(const policy_param_t *p_param,\n                            const attr_set_t *attrs, counters_t *p_ctr)\n{\n    memset(p_ctr, 0, sizeof(*p_ctr));\n\n    p_ctr->count = 1;\n    if (ATTR_MASK_TEST(attrs, size))\n        p_ctr->vol = ATTR(attrs, size);\n    else if (ATTR_MASK_TEST(attrs, blocks))\n        p_ctr->vol = ATTR(attrs, blocks) * DEV_BSIZE;\n\n    if (ATTR_MASK_TEST(attrs, blocks))\n        p_ctr->blocks = ATTR(attrs, blocks);\n\n    if (p_param->target_ctr.targeted != 0) {\n        /* When the target 
amount is not count, vol or blocks\n         * This is the case for OST: the target is only a subset of the\n         * blocks.\n         */\n#ifdef _LUSTRE\n        /* When targeting a pool, the selected files are only striped on this\n         * pool, so takes the whole file into account */\n        if (p_param->target == TGT_POOL) {\n            p_ctr->targeted = ATTR(attrs, blocks);\n            return 0;\n        }\n\n        /* FIXME what about pool? */\n        if (p_param->target != TGT_OST && p_param->target)\n#else\n        if (p_param->target)\n#endif\n        {\n            DisplayLog(LVL_CRIT, \"PolicyRun\",\n                       \"unsupported targeted limit != OST\");\n            return -1;\n        }\n#ifdef _LUSTRE\n        p_ctr->targeted =\n            BlocksOnOST(p_ctr->blocks, p_param->optarg_u.index,\n                        &ATTR(attrs, stripe_info), &ATTR(attrs,\n                                                         stripe_items));\n#endif\n    }\n\n    return 0;\n}\n\n#ifdef HAVE_CHANGELOGS\n#define BUILD_LIST_MSG \"Building policy list - last full FS Scan:\"\n#else\n#define BUILD_LIST_MSG \"Building policy list from last full FS Scan:\"\n#endif\n\n/**\n * Check if a filesystem scan has ever been done.\n * \\retval ENOENT if no scan has been done (no complete filesystem list is\n *          available).\n */\nstatic int check_scan_done(const policy_info_t *pol, lmgr_t *lmgr)\n{\n    char timestamp[1024];\n\n    if (force_run(pol)) /* no check in that case */\n        return 0;\n\n    if (ListMgr_GetVar(lmgr, LAST_SCAN_END_TIME, timestamp,\n                       sizeof(timestamp)) != DB_SUCCESS) {\n        DisplayLog(LVL_MAJOR, tag(pol),\n                   \"Full FS Scan has never been done. 
Policy ordering would be done on a partial list \"\n                   \"(use --force to apply the policy anyway).\");\n        return ENOENT;\n    } else {\n        time_t last_scan = atoi(timestamp);\n        struct tm date;\n\n        localtime_r(&last_scan, &date);\n        DisplayLog(LVL_EVENT, tag(pol),\n                   BUILD_LIST_MSG \" %.4d/%.2d/%.2d %.2d:%.2d:%.2d\",\n                   1900 + date.tm_year, date.tm_mon + 1, date.tm_mday,\n                   date.tm_hour, date.tm_min, date.tm_sec);\n    }\n    return 0;\n}\n\n/**\n * Set a DB filter and attr mask depending on the specified target.\n * By the way, log target information for this run.\n * @return 0 on success, another value on error.\n */\nstatic int set_target_filter(const policy_info_t *pol,\n                             const policy_param_t *p_param,\n                             lmgr_filter_t *filter,\n                             attr_mask_t *attr_mask)\n{\n    filter_value_t fval;\n\n    switch (p_param->target) {\n    case TGT_FS:   /* apply policies to the filesystem */\n        DisplayLog(LVL_MAJOR, tag(pol), \"Starting policy run\");\n        return 0;\n\n#ifdef _LUSTRE\n    case TGT_OST:  /* apply policies to the specified OST */\n        DisplayLog(LVL_MAJOR, tag(pol), \"Starting policy run on OST #%d\",\n                   p_param->optarg_u.index);\n\n        /* retrieve stripe info and stripe items */\n        attr_mask->std |= ATTR_MASK_stripe_info | ATTR_MASK_stripe_items;\n\n        /* retrieve files from this OST */\n        fval.value.val_uint = p_param->optarg_u.index;\n        return lmgr_simple_filter_add(filter, ATTR_INDEX_stripe_items,\n                                      EQUAL, fval, 0);\n\n    case TGT_POOL: /* apply policies to the specified pool of OSTs */\n        DisplayLog(LVL_MAJOR, tag(pol), \"Starting policy run on pool '%s'\",\n                   p_param->optarg_u.name);\n\n        attr_mask->std |= ATTR_MASK_stripe_info | ATTR_MASK_stripe_items;\n\n    
    /* retrieve files from this pool */\n        fval.value.val_str = p_param->optarg_u.name;\n        return lmgr_simple_filter_add(filter, ATTR_INDEX_stripe_info,\n                                      WILDCARDS_IN(fval.value.\n                                                   val_str) ? LIKE : EQUAL,\n                                      fval, 0);\n\n    case TGT_PROJID:    /* apply policies to the specified projid */\n        DisplayLog(LVL_MAJOR, tag(pol),\n                   \"Starting policy run on project #%u files\",\n                   p_param->optarg_u.index);\n\n        attr_mask->std |= ATTR_MASK_projid;\n\n        /* retrieve files for this projid */\n        fval.value.val_uint = p_param->optarg_u.index;\n        return lmgr_simple_filter_add(filter, ATTR_INDEX_projid, EQUAL,\n                                      fval, 0);\n#endif\n\n    case TGT_USER: /* apply policies to the specified user */\n        DisplayLog(LVL_MAJOR, tag(pol),\n                   \"Starting policy run on '%s' user files\",\n                   p_param->optarg_u.name);\n\n        attr_mask->std |= ATTR_MASK_uid;\n\n        /* retrieve files for this owner */\n        if (set_uid_val(p_param->optarg_u.name, &fval.value))\n            return EINVAL;\n        return lmgr_simple_filter_add(filter, ATTR_INDEX_uid,\n                                      WILDCARDS_IN(p_param->optarg_u.\n                                                   name) ? 
LIKE : EQUAL,\n                                      fval, 0);\n\n    case TGT_GROUP:    /* apply policies to the specified group */\n        DisplayLog(LVL_MAJOR, tag(pol),\n                   \"Starting policy run on '%s' group files\",\n                   p_param->optarg_u.name);\n\n        attr_mask->std |= ATTR_MASK_gid;\n\n        /* retrieve files for this group */\n        if (set_gid_val(p_param->optarg_u.name, &fval.value))\n            return EINVAL;\n        return lmgr_simple_filter_add(filter, ATTR_INDEX_gid,\n                                      WILDCARDS_IN(p_param->optarg_u.\n                                                   name) ? LIKE : EQUAL,\n                                      fval, 0);\n\n    case TGT_CLASS:    /* apply policies to the specified fileclass */\n        DisplayLog(LVL_MAJOR, tag(pol),\n                   \"Starting policy run on fileclass '%s'\",\n                   p_param->optarg_u.name);\n\n        attr_mask->std |= ATTR_MASK_fileclass;\n\n        fval.value.val_str = p_param->optarg_u.name;\n        return lmgr_simple_filter_add(filter, ATTR_INDEX_fileclass,\n                                      WILDCARDS_IN(fval.value.\n                                                   val_str) ? 
LIKE : EQUAL,\n                                      fval, 0);\n\n    case TGT_FILE:\n        /* this is supposed to be handled by specific code:\n         * single_file_run() */\n        RBH_BUG(\"ERROR: file target type is supposed to be handled in \"\n                \"a different function\");\n        return ENOTSUP;\n\n    default:\n        DisplayLog(LVL_CRIT, tag(pol), \"ERROR: unhandled target type %u\\n\",\n                   p_param->target);\n        return EINVAL;\n    }\n}\n\n/** reset stats before a policy pass */\nstatic void init_pass_stats(policy_info_t *pol, counters_t *pushed_ctr,\n                            unsigned int *status_tab_before,\n                            unsigned int *status_tab_after,\n                            unsigned long long *feedback_before,\n                            unsigned long long *feedback_after)\n{\n    /* Retrieve stats before starting policy,\n     * for computing a delta later.\n     */\n    RetrieveQueueStats(&pol->queue, NULL, NULL, NULL, NULL, NULL,\n                       status_tab_before, feedback_before);\n\n    /* set pushed entries counter = 0 */\n    memset(pushed_ctr, 0, sizeof(*pushed_ctr));\n\n    /* reset after's */\n    memset(feedback_after, 0, AF_ENUM_COUNT * sizeof(*feedback_after));\n    memset(status_tab_after, 0, AS_ENUM_COUNT * sizeof(*status_tab_after));\n}\n\nstatic void update_pass_stats(policy_info_t *pol,\n                              unsigned int *status_tab_before,\n                              unsigned int *status_tab_after,\n                              unsigned long long *feedback_before,\n                              unsigned long long *feedback_after)\n{\n    /* how much has been processed, errors, skipped... 
*/\n    pol->progress.action_ctr.count += feedback_after[AF_NBR_OK]\n        - feedback_before[AF_NBR_OK];\n    pol->progress.action_ctr.vol += feedback_after[AF_VOL_OK]\n        - feedback_before[AF_VOL_OK];\n    pol->progress.action_ctr.blocks += feedback_after[AF_BLOCKS_OK]\n        - feedback_before[AF_BLOCKS_OK];\n    pol->progress.action_ctr.targeted += feedback_after[AF_TARGETED_OK]\n        - feedback_before[AF_TARGETED_OK];\n    pol->progress.skipped += skipped_count(status_tab_after)\n        - skipped_count(status_tab_before);\n    pol->progress.errors += error_count(status_tab_after)\n        - error_count(status_tab_before);\n}\n\n/* these types allow generic iteration on std entries or removed entries */\n\ntypedef enum { IT_LIST, IT_RMD } it_type_e;\n\nstruct policy_iter {\n    it_type_e it_type;\n    union {\n        struct lmgr_iterator_t *std_iter;\n        struct lmgr_rm_list_t *rmd_iter;\n    } it;\n};\n\nstatic inline int iter_next(struct policy_iter *it, entry_id_t *p_id,\n                            attr_set_t *p_attrs)\n{\n    switch (it->it_type) {\n    case IT_LIST:\n        return ListMgr_GetNext(it->it.std_iter, p_id, p_attrs);\n    case IT_RMD:\n        return ListMgr_GetNextRmEntry(it->it.rmd_iter, p_id, p_attrs);\n    }\n    return DB_INVALID_ARG;\n}\n\nstatic inline void iter_close(struct policy_iter *it)\n{\n    switch (it->it_type) {\n    case IT_LIST:\n        if (it->it.std_iter == NULL)\n            return;\n        ListMgr_CloseIterator(it->it.std_iter);\n        it->it.std_iter = NULL;\n        break;\n    case IT_RMD:\n        if (it->it.rmd_iter == NULL)\n            return;\n        ListMgr_CloseRmList(it->it.rmd_iter);\n        it->it.rmd_iter = NULL;\n        break;\n    }\n}\n\nstatic inline int iter_open(lmgr_t *lmgr,\n                            it_type_e type,\n                            struct policy_iter *it,\n                            lmgr_filter_t *filter,\n                            const lmgr_sort_type_t 
*sort_type,\n                            const lmgr_iter_opt_t *opt)\n{\n    it->it_type = type;\n    switch (type) {\n    case IT_LIST:\n        it->it.std_iter = ListMgr_Iterator(lmgr, filter, sort_type, opt);\n        if (it->it.std_iter == NULL)\n            return DB_REQUEST_FAILED;\n        break;\n\n    case IT_RMD:\n        it->it.rmd_iter = ListMgr_RmList(lmgr, filter, sort_type);\n        if (it->it.rmd_iter == NULL)\n            return DB_REQUEST_FAILED;\n        break;\n    }\n    return DB_SUCCESS;\n}\n\n/** return codes of fill_workers_queue() */\ntypedef enum {\n    PASS_EOL,\n    PASS_LIMIT,\n    PASS_ABORTED,\n    PASS_ERROR,\n} pass_status_e;\n\n/**\n* Get entries from the DB and push them to the workers queue until:\n* - end of list is reached\n* or:\n* - the policy limit is potentially reached.\n* @param attr_mask  Mask of attrs to be retrieved from the DB,\n*                   to be able to match policy rules, scope...\n*/\nstatic pass_status_e fill_workers_queue(policy_info_t *pol,\n                                        const policy_param_t *p_param,\n                                        lmgr_t *lmgr,\n                                        struct policy_iter *it,\n                                        const lmgr_iter_opt_t *req_opt,\n                                        const lmgr_sort_type_t *sort_type,\n                                        lmgr_filter_t *filter,\n                                        attr_mask_t attr_mask,\n                                        int *last_sort_time,\n                                        unsigned int *db_current_list_count,\n                                        unsigned int *db_total_list_count)\n{\n    int rc;\n    pass_status_e st;\n    attr_set_t attr_set;\n    entry_id_t entry_id;\n    counters_t pushed_ctr;\n    filter_value_t fval;\n    unsigned long long feedback_before[AF_ENUM_COUNT];\n    unsigned long long feedback_after[AF_ENUM_COUNT];\n    unsigned int 
status_tab_before[AS_ENUM_COUNT];\n    unsigned int status_tab_after[AS_ENUM_COUNT];\n\n    init_pass_stats(pol, &pushed_ctr, status_tab_before, status_tab_after,\n                    feedback_before, feedback_after);\n\n    /* by default, exit the loop when check_queue_limit is true */\n    st = PASS_LIMIT;\n\n    /* List entries for policy */\n    do {\n        counters_t entry_amount;\n\n        /* reset attr_mask, if it was altered by last ListMgr_GetNext() */\n        memset(&attr_set, 0, sizeof(attr_set_t));\n        attr_set.attr_mask = attr_mask;\n\n        memset(&entry_id, 0, sizeof(entry_id_t));\n\n        rc = iter_next(it, &entry_id, &attr_set);\n\n        if (aborted(pol) || stopping(pol)) {\n            /* free the last returned entry */\n            if (rc == 0)\n                ListMgr_FreeAttrs(&attr_set);\n\n            DisplayLog(LVL_MAJOR, tag(pol),\n                       \"Policy run %s, stop enqueuing requests.\",\n                       pol->aborted ? \"aborted\" : \"stopping\");\n            st = pol->aborted ? 
PASS_ABORTED : PASS_EOL;\n            break;\n        } else if (rc == DB_END_OF_LIST) {\n            *db_total_list_count += *db_current_list_count;\n\n            if (/* no entries returned => END OF LIST */\n                (*db_current_list_count == 0)\n                /* if limit = inifinite => END OF LIST */\n                || (req_opt->list_count_max == 0)\n                /* if returned count < limit => END OF LIST */\n                || ((req_opt->list_count_max > 0) &&\n                    (*db_current_list_count < req_opt->list_count_max))) {\n                DisplayLog(LVL_FULL, tag(pol), \"End of list \"\n                           \"(%u entries returned)\", *db_total_list_count);\n                st = PASS_EOL;\n                break;\n            }\n\n            /* no new useless request when entries are sorted\n             * and the max time is reached */\n            if ((pol->config->lru_sort_attr != LRU_ATTR_NONE)\n                && heuristic_end_of_list(pol, *last_sort_time)) {\n                st = PASS_EOL;\n                break;\n            }\n\n            /* Free previous iterator */\n            iter_close(it);\n\n            /* we must wait that migr. 
queue is empty,\n             * to prevent from processing the same entry twice\n             * (not safe until their md_update has not been updated).\n             */\n            wait_queue_empty(pol, pushed_ctr.count, feedback_before,\n                             status_tab_before, feedback_after,\n                             status_tab_after, false);\n\n            /* perform a new request with next entries */\n\n            /* /!\\ if there is already a filter on <sort_attr> or md_update\n             * only replace it, do not add a new filter.\n             */\n\n            /* no md_update filed in SOFT_RM */\n            if (!pol->descr->manage_deleted) {\n                /* don't retrieve just-updated entries\n                 * (update>=first_request_time) */\n                fval.value.val_int = pol->progress.policy_start;\n                rc = lmgr_simple_filter_add_or_replace(filter,\n                                                       ATTR_INDEX_md_update,\n                                                       LESSTHAN_STRICT, fval,\n                                                       FILTER_FLAG_ALLOW_NULL);\n                if (rc)\n                    return PASS_ERROR;\n            }\n\n            /* filter on <sort_time> */\n            if (pol->config->lru_sort_attr != LRU_ATTR_NONE) {\n                const char * sort_char;\n\n                fval.value.val_int = *last_sort_time;\n                rc = lmgr_simple_filter_add_or_replace(filter,\n                                               pol->config->lru_sort_attr,\n                                               policy_order_to_listmgr_comp(\n                                                   pol->config->lru_sort_order),\n                                               fval, FILTER_FLAG_ALLOW_NULL);\n                if (rc)\n                    return PASS_ERROR;\n\n                sort_char = pol->config->lru_sort_order == SORT_ASC ? 
\">=\" :\n                                                                      \"<=\";\n\n                DisplayLog(LVL_DEBUG, tag(pol),\n                           \"Performing new request with a limit of %u entries\"\n                           \" and %s %s %d and md_update < %ld \",\n                           req_opt->list_count_max, sort_attr_name(pol),\n                           sort_char, *last_sort_time,\n                           pol->progress.policy_start);\n            } else {\n                DisplayLog(LVL_DEBUG, tag(pol),\n                           \"Performing new request with a limit of %u entries\"\n                           \" and md_update < %ld \", req_opt->list_count_max,\n                           pol->progress.policy_start);\n            }\n\n            *db_current_list_count = 0;\n            rc = iter_open(lmgr, it->it_type, it, filter, sort_type,\n                           req_opt);\n            if (rc != DB_SUCCESS) {\n                DisplayLog(LVL_CRIT, tag(pol),\n                           \"Error %d retrieving list of candidates from \"\n                           \"database. 
Policy run cancelled.\", rc);\n                return PASS_ERROR;\n            }\n            continue;\n        } else if (rc != 0) {\n            DisplayLog(LVL_CRIT, tag(pol),\n                       \"Error %d getting next entry of iterator\", rc);\n            st = PASS_ERROR;\n            break;\n        }\n\n        (*db_current_list_count)++;\n\n        rc = get_sort_attr(pol, &attr_set);\n        if (rc != -1)\n            *last_sort_time = rc;\n\n        rc = entry2tgt_amount(p_param, &attr_set, &entry_amount);\n        if (rc == -1) {\n            DisplayLog(LVL_MAJOR, tag(pol),\n                       \"Failed to determine target amount for entry \" DFID,\n                       PFID(&entry_id));\n            /* handle next entries */\n            continue;\n        }\n\n        /* Insert candidate to workers queue */\n        rc = Queue_Insert(&pol->queue,\n                          entry2queue_item(&entry_id, &attr_set,\n                                           entry_amount.targeted));\n        if (rc)\n            return PASS_ERROR;\n\n        counters_add(&pushed_ctr, &entry_amount);\n\n    /* Enqueue entries to workers queue as long as the specified limit is\n     * not reached */\n    } while (!check_queue_limit(pol, &pushed_ctr, feedback_before,\n                                status_tab_before, &p_param->target_ctr));\n\n    /* Make sure the processing queue is empty. 
*/\n    wait_queue_empty(pol, pushed_ctr.count, feedback_before,\n                     status_tab_before, feedback_after, status_tab_after,\n                     true);\n\n    update_pass_stats(pol, status_tab_before, status_tab_after,\n                      feedback_before, feedback_after);\n\n    return st;\n}\n\n/* forward declaration */\nstatic void process_entry(policy_info_t *pol, lmgr_t *lmgr,\n                          queue_item_t *p_item, bool free_item);\n/**\n* Apply policy to a single file.\n*/\nstatic int single_file_run(policy_info_t *pol, lmgr_t *lmgr,\n                           const policy_param_t *p_param,\n                           action_summary_t *p_summary)\n{\n    queue_item_t item;\n    int rc;\n    unsigned int status_before[AS_ENUM_COUNT];\n    unsigned int status_after[AS_ENUM_COUNT];\n    unsigned long long feedback_before[AF_ENUM_COUNT];\n    unsigned long long feedback_after[AF_ENUM_COUNT];\n\n    memset(&item, 0, sizeof(item));\n\n    RetrieveQueueStats(&pol->queue, NULL, NULL, NULL, NULL, NULL,\n                       status_before, feedback_before);\n\n    pol->progress.policy_start = pol->progress.last_report = time(NULL);\n\n    /* resolve the fid of the target */\n    rc = path2id(p_param->optarg_u.name, &item.entry_id, NULL);\n    if (rc)\n        return rc;\n\n    /* needed attributes to apply the policy */\n    item.entry_attr.attr_mask = db_attr_mask(pol, p_param);\n\n    /* get fid from DB */\n    rc = ListMgr_Get(lmgr, &item.entry_id, &item.entry_attr);\n    if (rc) {\n        if (rc == DB_NOT_EXISTS)\n            DisplayLog(LVL_MAJOR, tag(pol),\n                       \"%s: this entry is not known in database\",\n                       p_param->optarg_u.name);\n        /* expect a posix error code */\n        rc = EINVAL;\n        return rc;\n    }\n\n    /* apply the policy to the entry */\n    process_entry(pol, lmgr, &item, false);\n\n    ListMgr_FreeAttrs(&item.entry_attr);\n\n    RetrieveQueueStats(&pol->queue, 
NULL, NULL, NULL, NULL, NULL,\n                       status_after, feedback_after);\n    update_pass_stats(pol, status_before, status_after,\n                      feedback_before, feedback_after);\n\n    if (p_summary)\n        *p_summary = pol->progress;\n\n    return 0;\n}\n\n/** Execute pre/post policy command */\nstatic int execute_prepost_run_command(const policy_info_t *policy,\n                                       char **command,\n                                       const char *pre)\n{\n    char *descr = NULL;\n    char **cmd;\n    char *log_cmd;\n    int rc;\n\n    if (command == NULL)\n        /* nothing to do */\n        return 0;\n\n    if (asprintf(&descr, \"%s_run_command '%s'\", pre, command[0]) < 0) {\n        DisplayLog(LVL_CRIT, tag(policy),\n                   \"Could not allocate string for %s_run_command '%s'\",\n                   pre, command[0]);\n        return -ENOMEM;\n    }\n\n    /* replaces placeholders in command */\n    rc = subst_shell_params(command, descr, NULL, NULL, NULL, NULL,\n                            policy->descr->status_mgr, true, &cmd);\n    free(descr);\n\n    if (rc) {\n        log_cmd = concat_cmd(command);\n        DisplayLog(LVL_MAJOR, tag(policy),\n                   \"Invalid %s_run_command: %s\", pre, log_cmd);\n        free(log_cmd);\n        return -EINVAL;\n    }\n\n    log_cmd = concat_cmd(cmd);\n    DisplayLog(LVL_MAJOR, tag(policy),\n               \"Executing %s_run_command: %s\", pre, log_cmd);\n    free(log_cmd);\n\n    rc = execute_shell_command(cmd, cb_stderr_to_log, (void *)LVL_EVENT);\n    g_strfreev(cmd);\n    if (rc)\n        DisplayLog(LVL_CRIT, tag(policy), \"%s_run_command failed: %s\",\n                   pre, strerror(-rc));\n\n    return rc;\n}\n\n/**\n * Convert policy scope to filter.\n * @param filter    Initialized listmgr filter\n */\nstatic void add_scope_filter(policy_info_t *pol, lmgr_filter_t *filter)\n{\n    DisplayLog(LVL_FULL, tag(pol), \"Converting scope to DB 
filter...\");\n    if (convert_boolexpr_to_simple_filter(&pol->descr->scope, filter,\n                                          pol->descr->status_mgr,\n                                          pol->time_modifier,\n                                          pol->descr->manage_deleted ?\n                                            FILTER_FLAG_ALLOW_NULL : 0,\n                                          BOOL_AND)) {\n        DisplayLog(LVL_DEBUG, tag(pol),\n                   \"Could not convert policy scope to simple filter.\");\n        DisplayLog(LVL_EVENT, tag(pol), \"Warning: scope definition is too \"\n                   \"complex and may affect policy run performance\");\n    }\n}\n\n/**\n* This is called by triggers (or manual policy runs) to run a pass of a policy.\n* @param[in,out] p_pol_info   policy information and resources\n* @param[in]     p_param      parameters of this run (target, limit, ...)\n* @param[out]    p_summary    summary of the policy run\n* @param[in]     lmgr         connection to the database\n*  \\return 0 on success, a POSIX error code else, -1 for internal failure.\n*  \\retval ENOENT if no file list is available.\n*/\nint run_policy(policy_info_t *p_pol_info, const policy_param_t *p_param,\n               action_summary_t *p_summary, lmgr_t *lmgr)\n{\n    struct policy_iter it = { 0 };\n    int rc;\n    pass_status_e st;\n    lmgr_filter_t filter;\n    filter_value_t fval;\n    lmgr_sort_type_t sort_type;\n    int last_sort_time = 0;\n    /* XXX first_request_start = policy_start */\n    attr_mask_t attr_mask;\n    unsigned int nb_returned, total_returned;\n    int i;\n\n    lmgr_iter_opt_t opt = LMGR_ITER_OPT_INIT;\n\n    if (!p_pol_info)\n        RBH_BUG(\"p_pol_info argument is NULL\");\n    if (!p_param)\n        RBH_BUG(\"p_param argument is NULL\");\n\n    p_pol_info->time_modifier = p_param->time_mod;\n    p_pol_info->trigger_action_params = p_param->action_params;\n    p_pol_info->aborted = false;\n    p_pol_info->stopping = 
false;\n\n    memset(&p_pol_info->progress, 0, sizeof(p_pol_info->progress));\n    if (p_summary)\n        memset(p_summary, 0, sizeof(*p_summary));\n\n    /* XXX previously here: interpreting target type and amount */\n\n    /* special case: apply policy on a single file */\n    if (p_param->target == TGT_FILE)\n        return single_file_run(p_pol_info, lmgr, p_param, p_summary);\n\n    /* record policy start time */\n    p_pol_info->progress.policy_start = time(NULL);\n\n    /* Do nothing if no previous scan was done\n     * (except if --force is specified). */\n    rc = check_scan_done(p_pol_info, lmgr);\n    if (rc)\n        return rc;\n\n    /* set attributes to be retrieved from DB */\n    attr_mask = db_attr_mask(p_pol_info, p_param);\n\n    /* sort by last access */\n    sort_type.attr_index = p_pol_info->config->lru_sort_attr;\n    sort_type.order = p_pol_info->config->lru_sort_order;\n\n    rc = lmgr_simple_filter_init(&filter);\n    if (rc)\n        return -1;\n\n    /* filter entries in the policy scope */\n    add_scope_filter(p_pol_info, &filter);\n\n    if (!p_pol_info->descr->manage_deleted) {\n        /* do not retrieve 'invalid' entries */\n        fval.value.val_bool = false;\n        rc = lmgr_simple_filter_add(&filter, ATTR_INDEX_invalid, EQUAL,\n                                    fval, FILTER_FLAG_ALLOW_NULL);\n        if (rc)\n            return -1;\n    }\n\n    /* set target filter and attr mask */\n    rc = set_target_filter(p_pol_info, p_param, &filter, &attr_mask);\n    if (rc)\n        return -1;\n\n    /* Flushing messages before performing the long DB sort query */\n    FlushLogs();\n\n    /* add optimisation filters based on policies */\n    if (!ignore_policies(p_pol_info)) {\n        /* set filters based on 'ignore' and 'ignore_fileclass' statements */\n        set_ignore_filters(p_pol_info, &filter);\n\n        /* Convert policy rules to filter */\n        set_rule_filters(p_pol_info, &filter);\n    }\n\n    
set_optim_filter(p_pol_info, &filter);\n\n    /* Do not retrieve all entries at once, as the result may exceed\n     * the client memory! */\n    /* Except for SOFT_RM: we can't split the result as it has no md_update field. */\n    if (!p_pol_info->descr->manage_deleted)\n        opt.list_count_max = p_pol_info->config->db_request_limit;\n    nb_returned = 0;\n    total_returned = 0;\n\n    rc = iter_open(lmgr,\n                   p_pol_info->descr->manage_deleted ? IT_RMD : IT_LIST,\n                   &it, &filter, &sort_type, &opt);\n    if (rc != DB_SUCCESS) {\n        lmgr_simple_filter_free(&filter);\n        DisplayLog(LVL_CRIT, tag(p_pol_info),\n                   \"Error retrieving list of candidates from database. \"\n                   \"Policy run cancelled.\");\n        return -1;\n    }\n\n    /* Set last_report time after first DB query returns,\n     * makese sense to wait for first query to complete before\n     * printing any progress stat */\n    p_pol_info->progress.last_report = time(NULL);\n\n    /* reinit schedulers */\n    for (i = 0; i < p_pol_info->config->sched_count; i++) {\n        rc = sched_reinit(&p_pol_info->sched_res[i]);\n        if (rc) {\n            DisplayLog(LVL_CRIT, tag(p_pol_info),\n                       \"Failed to reinitialize scheduler #%d\", i);\n            if (p_summary)\n                *p_summary = p_pol_info->progress;\n            return rc;\n        }\n    }\n\n    /* execute pre_run_command before running the policy */\n    rc = execute_prepost_run_command(p_pol_info,\n                                     p_pol_info->config->pre_run_command,\n                                     \"pre\");\n    if (rc) {\n        DisplayLog(LVL_CRIT, tag(p_pol_info),\n                   \"Aborting policy run because pre_run_commmand failed\");\n        if (p_summary)\n            *p_summary = p_pol_info->progress;\n        return ECANCELED;\n    }\n\n    /* start alert batching in case the policy trigger alerts */\n    
Alert_StartBatching();\n\n    /* loop on all policy passes */\n    do {\n        /* check if progress must be reported  */\n        report_progress(p_pol_info, NULL, NULL, NULL, NULL);\n\n        /* feed workers until the specified limit is reached or\n         * end of list is reached */\n        st = fill_workers_queue(p_pol_info, p_param, lmgr, &it, &opt,\n                                &sort_type, &filter, attr_mask,\n                                &last_sort_time, &nb_returned,\n                                &total_returned);\n        switch (st) {\n        case PASS_EOL:\n            rc = 0;\n            break;\n        case PASS_ABORTED:\n            rc = ECANCELED;\n            break;\n        case PASS_LIMIT:\n            rc = 0;\n            break;\n        case PASS_ERROR:\n            rc = -1;\n            break;\n        }\n\n    /* exit in all cases except pass_limit (double check the limit in this\n     * case): check the real amount of performed actions\n     * (progress.action_ctr) */\n    } while ((st == PASS_LIMIT) &&\n             !check_limit(p_pol_info, &p_pol_info->progress.action_ctr,\n                          p_pol_info->progress.errors,\n                          &p_param->target_ctr));\n\n    lmgr_simple_filter_free(&filter);\n    /* iterator may have been closed in fill_workers_queue() */\n    iter_close(&it);\n\n    /* flush pending alerts */\n    Alert_EndBatching();\n\n    if (p_summary)\n        *p_summary = p_pol_info->progress;\n\n    /* execute pre_run_command after running the policy */\n    execute_prepost_run_command(p_pol_info,\n                                p_pol_info->config->post_run_command,\n                                \"post\");\n\n    return rc;\n}\n\n/* If entries are accessed by FID, we can always get their status.\n* This is not the case for POSIX, because they may have moved.\n* In this case, the entry is tagged as 'invalid' in the DB\n* until we find it again during a next scan.\n*/\nstatic inline int 
invalidate_entry(const policy_info_t *pol, lmgr_t *lmgr,\n                                   entry_id_t *p_entry_id)\n{\n    attr_set_t new_attr_set = ATTR_SET_INIT;\n    int rc;\n\n    ATTR_MASK_INIT(&new_attr_set);\n    ATTR_MASK_SET(&new_attr_set, invalid);\n    ATTR(&new_attr_set, invalid) = true;\n\n    /* update the entry */\n    rc = ListMgr_Update(lmgr, p_entry_id, &new_attr_set);\n    if (rc)\n        DisplayLog(LVL_CRIT, tag(pol),\n                   \"Error %d tagging entry as invalid in database.\", rc);\n    return rc;\n}\n\nstatic inline int update_entry(lmgr_t *lmgr, const entry_id_t *p_entry_id,\n                               const attr_set_t *p_attr_set)\n{\n    int rc;\n    attr_set_t tmp_attrset = *p_attr_set;\n\n    /* update classes according to new attributes */\n    match_classes(p_entry_id, &tmp_attrset, NULL);\n\n    /* /!\\ do not update stripe info */\n    /* @TODO actually, the best operation would be to update only\n     * attributes that changed */\n    ATTR_MASK_UNSET(&tmp_attrset, stripe_info);\n    ATTR_MASK_UNSET(&tmp_attrset, stripe_items);\n\n    /* also unset read only attrs */\n    attr_mask_unset_readonly(&tmp_attrset.attr_mask);\n\n    /* never update creation time */\n    ATTR_MASK_UNSET(&tmp_attrset, creation_time);\n\n    /* update DB and skip the entry */\n    rc = ListMgr_Update(lmgr, p_entry_id, &tmp_attrset);\n    if (rc)\n        DisplayLog(LVL_CRIT, TAG, \"Error %d updating entry in database.\",\n                   rc);\n\n    return rc;\n}\n\nstatic inline bool need_update(match_source_t check_method, uint32_t stdattr)\n{\n    return check_method == MS_FORCE_UPDT\n           || (check_method == MS_AUTO_ALL && stdattr != 0)\n           || (check_method == MS_AUTO_ATTRS &&\n                ((stdattr & ~(ATTR_MASK_fullpath | ATTR_MASK_name\n                              | ATTR_MASK_parent_id)) != 0));\n}\n\n/**\n * Returns the path to stat an entry (depending on posix/Lustre)\n * or NULL in case or error.\n * @param 
id        Points to the identifier of the entry.\n * @param id        Points entry attributes.\n * @param buffer    Buffer of RBH_PATH_MAX to store a path if needed.\n */\nstatic const char *check_stat_path(const entry_id_t *id,\n                                   const attr_set_t *attrs, char *buffer)\n{\n#ifdef _HAVE_FID\n    /* return path to <fs>/.lustre/fid/<fid> */\n    BuildFidPath(id, buffer);\n    return buffer;\n#else\n    /* Check if fullpath is set (if no fid support) */\n    if (!ATTR_MASK_TEST(attrs, fullpath))\n        return NULL;\n\n    return ATTR(attrs, fullpath);\n#endif\n}\n\n/** check that POSIX id is consistent.\n * @return true if it is not consistent.\n * */\nstatic bool check_posix_id(const entry_id_t *id, struct stat *md,\n                           const char *path)\n{\n#ifdef _HAVE_FID\n    return false; /* always OK */\n#else\n    /* Check entry id and fskey */\n    if ((md->st_ino != id->inode) || (get_fskey() != id->fs_key)) {\n        /* If it has changed, invalidate the entry (fullpath does not match\n         * entry_id, it will be updated or removed at next FS scan). */\n        DisplayLog(LVL_DEBUG, __func__,\n                   \"Inode of %s changed: old=<%llu,%llu>, \"\n                   \"new=<%llu,%llu>. 
Tagging it invalid.\",\n                   path,\n                   (unsigned long long)id->inode,\n                   (unsigned long long)id->fs_key,\n                   (unsigned long long)md->st_ino,\n                   (unsigned long long)get_fskey());\n        return true;\n    }\n    return false;\n#endif\n}\n\n/**\n* Check that entry still exists\n* @param new_attr_set   Updated entry MD if entry is valid.\n* @param check_method   Indicates what information is to be matched.\n*/\nstatic int check_entry(const policy_info_t *policy, lmgr_t *lmgr,\n                       queue_item_t *p_item, attr_set_t *new_attr_set,\n                       match_source_t check_method)\n{\n    char path_buff[RBH_PATH_MAX];\n    struct stat entry_md;\n    const char *stat_path;\n    int rc;\n    sm_instance_t *smi = policy->descr->status_mgr;\n    attr_mask_t    updt_mask = updt_attr_mask(policy);\n    bool           updated = false;\n\n    if (check_method == MS_NONE || check_method == MS_CACHE_ONLY)\n        return AS_OK;\n\n    DisplayLog(LVL_FULL, tag(policy), \"Updating info about \" DFID,\n               PFID(&p_item->entry_id));\n\n    /* 1) Build a path to access it */\n    stat_path = check_stat_path(&p_item->entry_id, &p_item->entry_attr,\n                                path_buff);\n    if (stat_path == NULL) {\n        DisplayLog(LVL_DEBUG, tag(policy),\n                   \"No path to access entry. 
Tagging it invalid.\");\n        /* File has been moved, might still be accessible with fid*/\n        invalidate_entry(policy, lmgr, &p_item->entry_id);\n        /* not enough metadata */\n        return AS_MISSING_MD;\n    }\n\n    /* creation time from DB has the priority on filesystem stat,\n     * whatever next operations */\n    if (ATTR_MASK_TEST(&p_item->entry_attr, creation_time)) {\n        ATTR_MASK_SET(new_attr_set, creation_time);\n        ATTR(new_attr_set, creation_time) =\n            ATTR(&p_item->entry_attr, creation_time);\n    }\n\n    /* lstat only if POSIX attrs are to be updated */\n    if (need_update(check_method, updt_mask.std & POSIX_ATTR_MASK)) {\n        DisplayLog(LVL_FULL, tag(policy), \"Updating POSIX info of \"DFID,\n                   PFID(&p_item->entry_id));\n        if (lstat(stat_path, &entry_md) != 0) {\n            rc = errno;\n            /* If lstat returns an error, skip the entry */\n            DisplayLog(LVL_DEBUG, tag(policy),\n                       \"lstat() failed on %s. Skipping it.\", stat_path);\n            invalidate_entry(policy, lmgr, &p_item->entry_id);\n\n            /* This entry has been processed and has probably removed */\n            if (rc == ENOENT)\n                return AS_MOVED;\n            else\n                return AS_STAT_FAILURE;\n        }\n\n        if (check_posix_id(&p_item->entry_id, &entry_md, stat_path)) {\n            /* If entry id is inconsistent, tag it invalid.\n             * it will be updated or removed at next FS scan). 
*/\n            invalidate_entry(policy, lmgr, &p_item->entry_id);\n            /* This entry has been processed and has probably moved */\n            return AS_MOVED;\n        }\n\n        /* convert posix attributes to attr structure */\n        stat2rbh_attrs(&entry_md, new_attr_set, true);\n        updated = true;\n\n#ifdef _LUSTRE\n        if (global_config.lustre_projid) {\n            /* Lustre project id */\n            DisplayLog(LVL_FULL, tag(policy), \"Updating lustre projid of \"DFID,\n                       PFID(&p_item->entry_id));\n            rc = lustre_project_get_id(stat_path);\n            if (rc < 0)  {\n                DisplayLog(LVL_MAJOR, tag(policy),\n                           \"Failed to get lustre projid for %s: error %d\",\n                           stat_path, rc);\n            } else {\n                DisplayLog(LVL_FULL, tag(policy), \"Updating projid of \"DFID\": projid=%u\",\n                           PFID(&p_item->entry_id), rc);\n                ATTR_MASK_SET(new_attr_set, projid);\n                ATTR(new_attr_set, projid) = rc;\n            }\n        }\n#endif\n    }\n\n    /* get fullpath or name, if they are needed to apply the policy */\n    if (need_update(check_method, updt_mask.std &\n                        (ATTR_MASK_fullpath | ATTR_MASK_name))) {\n        DisplayLog(LVL_FULL, tag(policy), \"Updating path info of \"DFID,\n                   PFID(&p_item->entry_id));\n        switch (path_check_update(&p_item->entry_id, stat_path, new_attr_set,\n                                  updt_mask)) {\n        case PCR_UPDATED:\n            updated = true;\n            break;\n\n        case PCR_NO_CHANGE:\n            break;\n\n        case PCR_ORPHAN:\n            /* no path to access it, handle it as if it had been moved */\n            return AS_MOVED;\n        }\n    }\n\n    /* status matching must be done with up-to-date attrbutes,\n     * and missing attrs from DB */\n    ListMgr_MergeAttrSets(new_attr_set, 
&p_item->entry_attr, false);\n\n    /* retrieve up-to-date status from status manager if the scope relies\n     * on it */\n    if (smi != NULL && smi->sm->get_status_func != NULL\n        && need_update(check_method, updt_mask.status &\n                            SMI_MASK(smi->smi_index))) {\n        DisplayLog(LVL_FULL, tag(policy), \"Updating status info of \"DFID,\n                   PFID(&p_item->entry_id));\n        /* update entry status */\n        rc = smi->sm->get_status_func(smi, &p_item->entry_id, new_attr_set,\n                                      new_attr_set);\n        if (rc == -ENOTSUP) {\n            /* Entry is ignore for this policy: skipping it */\n            DisplayLog(LVL_DEBUG, tag(policy), \"Entry \"DFID\" ignored by %s \"\n                       \"status manager: skipping it.\", PFID(&p_item->entry_id),\n                       smi->sm->name);\n            return AS_BAD_TYPE;\n        } else if (rc != 0) {\n            DisplayLog(LVL_MAJOR, tag(policy),\n                       \"Failed to get status for \" DFID\n                       \" (%s status manager): error %d\",\n                       PFID(&p_item->entry_id), smi->sm->name, rc);\n            return AS_ERROR;\n        }\n        updated = true;\n    }\n\n    if (updated) {\n        /* generate virtual fields, if needed */\n        if (ListMgr_GenerateFields(new_attr_set, updt_mask))\n            DisplayLog(LVL_DEBUG, tag(policy),\n                       \"Failed to compute generated fields\");\n\n        /* set update time of the structure */\n        ATTR_MASK_SET(new_attr_set, md_update);\n        ATTR(new_attr_set, md_update) = time(NULL);\n    }\n\n    /* entry is valid */\n    return AS_OK;\n}\n\n/** check that time ordering did not change and that time attributes\n* are consistent. 
*/\nstatic action_status_t check_entry_times(policy_info_t *pol, lmgr_t *lmgr,\n                                         const entry_id_t *p_id,\n                                         const attr_set_t *p_attrs_old,\n                                         const attr_set_t *p_attrs_new)\n{\n    if (pol->descr->manage_deleted) {\n        /* deleted entry: no new attrs */\n\n        /* if lru sort order is rmtime and rmtime is not set: missing MD */\n        if ((pol->config->lru_sort_attr == ATTR_INDEX_rm_time)\n            && !ATTR_MASK_TEST(p_attrs_old, rm_time)) {\n            /* cannot determine if sort criteria has changed */\n            DisplayLog(LVL_VERB, tag(pol),\n                       \"rm_time attribute is not set for deleted entry: skipping it\");\n            return AS_MISSING_MD;\n        }\n        return AS_OK;\n    }\n\n    /* If the policy application is ordered, make sure the value used for\n     * ordering did not change. If so, update the entry so it will be\n     * correctly ordered for the next pass. */\n    if (pol->config->lru_sort_attr != LRU_ATTR_NONE) {\n        int val1, val2;\n\n        val1 = get_sort_attr(pol, p_attrs_old);\n        val2 = get_sort_attr(pol, p_attrs_new);\n\n        if ((val1 == -1) || (val2 == -1)) {\n            /* cannot determine if sort criteria has changed */\n            DisplayLog(LVL_VERB, tag(pol),\n                       \"Cannot determine if sort criteria value\"\n                       \" changed (missing attribute '%s'): skipping entry.\",\n                       sort_attr_name(pol));\n            if (!pol->descr->manage_deleted)\n                update_entry(lmgr, p_id, p_attrs_new);\n            return AS_MISSING_MD;\n        } else if (val1 != val2) {\n            DisplayLog(LVL_DEBUG, tag(pol),\n                       \"%s has been accessed/modified since last md update. 
Skipping entry.\",\n                       ATTR(p_attrs_old, fullpath));\n            if (!pol->descr->manage_deleted)\n                update_entry(lmgr, p_id, p_attrs_new);\n            return AS_ACCESSED;\n        }\n\n        /* LRU on access/modification: size change detected? */\n        if ((pol->config->lru_sort_attr == ATTR_INDEX_last_access\n             || pol->config->lru_sort_attr == ATTR_INDEX_last_mod)\n            && ATTR_MASK_TEST(p_attrs_old, size)\n            && ATTR_MASK_TEST(p_attrs_new, size)\n            && (ATTR(p_attrs_old, size) != ATTR(p_attrs_new, size))) {\n            DisplayLog(LVL_DEBUG, tag(pol),\n                       \"%s has been modified since last md update (size changed). Skipping entry.\",\n                       ATTR(p_attrs_old, fullpath));\n            if (!pol->descr->manage_deleted)\n                update_entry(lmgr, p_id, p_attrs_new);\n            return AS_ACCESSED;\n        }\n    }\n    return AS_OK;\n}\n\n/** Display action success to log and report. */\nstatic void log_action_success(const policy_info_t *pol,\n                               const attr_set_t *attrs,\n                               const rule_item_t *rule,\n                               const fileset_item_t *fileset,\n                               int sort_time)\n{\n    GString *str = NULL;\n    GString *str_stripe = NULL;\n    char strsize[256];\n\n    /* display needed? 
*/\n    if (log_config.debug_level < LVL_DEBUG && !pol->config->report_actions)\n        return;\n\n    str = g_string_new(NULL);\n    g_string_printf(str, \"%s success for '%s', matching rule '%s'\",\n                    tag(pol), ATTR(attrs, fullpath), rule->rule_id);\n\n    if (fileset)\n        g_string_append_printf(str, \" (fileset=%s)\", fileset->fileset_id);\n\n    if (pol->config->lru_sort_attr != LRU_ATTR_NONE) {\n        if (sort_time > 0) {\n            char strtime[256];\n\n            FormatDurationFloat(strtime, sizeof(strtime),\n                                time(NULL) - sort_time);\n            g_string_append_printf(str, \", %s %s ago\", sort_attr_name(pol),\n                                   strtime);\n        } else\n            g_string_append_printf(str, \", %s <none>\", sort_attr_name(pol));\n    }\n\n    FormatFileSize(strsize, sizeof(strsize), ATTR(attrs, size));\n\n#ifdef _LUSTRE\n    /* Only needed if trace level is DEBUG or if report_action\n     * is enabled */\n    if ((log_config.debug_level >= LVL_DEBUG || pol->config->report_actions)\n        && ATTR_MASK_TEST(attrs, stripe_items)) {\n        str_stripe = g_string_new(\"\");\n        append_stripe_list(str_stripe, &ATTR(attrs, stripe_items), false);\n    }\n#endif\n\n    DisplayLog(LVL_DEBUG, tag(pol), \"%s, size=%s%s%s\", str->str, strsize,\n               str_stripe ? \" stored on \" : \"\",\n               str_stripe ? 
str_stripe->str : \"\");\n\n    if (pol->config->report_actions) {\n        g_string_append_printf(str, \" | size=%\" PRI_SZ, ATTR(attrs, size));\n\n        if (pol->config->lru_sort_attr != LRU_ATTR_NONE)\n            g_string_append_printf(str, \", %s=%u\", sort_attr_name(pol),\n                                   sort_time);\n\n        if (str_stripe)\n            g_string_append_printf(str, \", stripes=%s\", str_stripe->str);\n\n        DisplayReport(\"%s\", str->str);\n    }\n    if (str_stripe != NULL)\n        g_string_free(str_stripe, TRUE);\n\n    g_string_free(str, TRUE);\n}\n\n/* acknowledging helper */\n#define policy_ack(_q, _status, _pattrs, _tgt)  do {                 \\\n            unsigned long long feedback[AF_ENUM_COUNT];              \\\n            memset(feedback, 0, sizeof(feedback));   \\\n            if ((_status) == AS_OK) {             \\\n                feedback[AF_NBR_OK] = 1;        \\\n                feedback[AF_VOL_OK] = ATTR_MASK_TEST(_pattrs, size) ? \\\n                                      ATTR(_pattrs, size) : 0;        \\\n                feedback[AF_TARGETED_OK] = _tgt;\\\n                feedback[AF_BLOCKS_OK] = ATTR_MASK_TEST(_pattrs, blocks) ? \\\n                                         ATTR(_pattrs, blocks) : 0; \\\n            } else {                            \\\n                feedback[AF_NBR_NOK] = 1;        \\\n                feedback[AF_VOL_NOK] = ATTR_MASK_TEST(_pattrs, size) ? \\\n                                       ATTR(_pattrs, size) : 0;        \\\n                feedback[AF_TARGETED_NOK] = _tgt;\\\n                feedback[AF_BLOCKS_NOK] = ATTR_MASK_TEST(_pattrs, blocks) ? 
\\\n                                          ATTR(_pattrs, blocks) : 0; \\\n            }                                   \\\n            Queue_Acknowledge(_q, _status, feedback, AF_ENUM_COUNT); \\\n       } while (0)\n\nstatic void free_entry_context(entry_context_t *ectx)\n{\n    rbh_params_free(&ectx->params);\n\n    ListMgr_FreeAttrs(&ectx->fresh_attrs);\n\n    if (ectx->free_item)\n        free_queue_item(ectx->item);\n\n    free(ectx);\n}\n\n/**\n * Finilize action processing after an action has been executed.\n * Update entry status in DB and release resources.\n */\nstatic void action_fini(int action_rc, lmgr_t *lmgr,\n                        entry_context_t *ectx)\n{\n    policy_info_t *pol = ectx->policy;\n    bool lastrm;\n    int  rc;\n\n    if (action_rc != 0) {\n        const char *err_str;\n\n        if (action_rc < 0)\n            err_str = strerror(-action_rc);\n        else\n            err_str = \"command execution failed\";\n\n        DisplayLog(LVL_DEBUG, tag(pol), \"Error applying action on entry %s: %s\",\n                   ATTR(&ectx->fresh_attrs, fullpath), err_str);\n\n        /* no update for deleted entries */\n        if (!pol->descr->manage_deleted)\n            update_entry(lmgr, &ectx->item->entry_id, &ectx->fresh_attrs);\n\n        policy_ack(&pol->queue, AS_ERROR, &ectx->item->entry_attr,\n                   ectx->item->targeted);\n        goto out_free;\n    }\n\n    log_action_success(pol, &ectx->prev_attrs, ectx->rule, ectx->fileset,\n                       ectx->time_save);\n\n    if (pol->descr->manage_deleted) {\n        if  (ectx->after_action == PA_RM_ONE\n             || ectx->after_action == PA_RM_ALL) {\n            rc = ListMgr_SoftRemove_Discard(lmgr, &ectx->item->entry_id);\n            if (rc)\n                DisplayLog(LVL_CRIT, tag(pol),\n                           \"Error %d removing entry from database.\", rc);\n        } /* else: ignore other actions about removed entries */\n    } else {\n        switch 
(ectx->after_action) {\n        case PA_NONE:\n            break;\n        case PA_UPDATE:\n            update_entry(lmgr, &ectx->item->entry_id, &ectx->fresh_attrs);\n            break;\n\n        case PA_RM_ONE:\n            lastrm = ATTR_MASK_TEST(&ectx->prev_attrs, nlink) ?\n                     (ATTR(&ectx->prev_attrs, nlink) <= 1) : 0;\n\n            rc = ListMgr_Remove(lmgr, &ectx->item->entry_id,\n                /* must be based on the DB content = old attrs */\n                                &ectx->item->entry_attr, lastrm);\n            if (rc)\n                DisplayLog(LVL_CRIT, tag(pol),\n                           \"Error %d removing entry from database.\", rc);\n            break;\n\n        case PA_RM_ALL:\n            rc = ListMgr_Remove(lmgr, &ectx->item->entry_id,\n                 /* must be based on the DB content = old attrs */\n                                &ectx->item->entry_attr, 1);\n            if (rc)\n                DisplayLog(LVL_CRIT, tag(pol),\n                           \"Error %d removing entry from database.\", rc);\n            break;\n        }\n    }\n\n    /* TODO update targeted info */\n    policy_ack(&pol->queue, AS_OK, &ectx->fresh_attrs,\n               ectx->item->targeted);\n\nout_free:\n    free_entry_context(ectx);\n}\n\nstatic inline const char *match_source2str(match_source_t check_method)\n{\n    switch (check_method) {\n    case MS_NONE:\n        return \"none\";\n    case MS_CACHE_ONLY:\n        return \"cache_only\";\n    case MS_AUTO_ATTRS:\n        return \"auto_update_attrs\";\n    case MS_AUTO_ALL:\n        return \"auto_update_all\";\n    case MS_FORCE_UPDT:\n        return \"force_update\";\n    case MS_INVALID:\n        return \"?\";\n    }\n    return \"?\";\n}\n\n/**\n * Refresh entry attributes and match policy rules.\n */\nstatic int refresh_and_match_entry(lmgr_t *lmgr, entry_context_t *ectx,\n                                   match_source_t check_method)\n{\n    policy_match_t  match;\n    
int             rc;\n    policy_info_t  *pol = ectx->policy;\n    const char     *path;\n\n    DisplayLog(LVL_FULL, tag(pol),\n               \"Checking if entry %s matches policy rules (mode=%s)\",\n               ATTR(&ectx->item->entry_attr, fullpath),\n               match_source2str(check_method));\n\n    if (!pol->descr->manage_deleted) {\n        rc = check_entry(pol, lmgr, ectx->item, &ectx->fresh_attrs,\n                         check_method);\n        if (rc != AS_OK)\n            return rc;\n    }\n    /* In any case, complete with missing attrs from database */\n    ListMgr_MergeAttrSets(&ectx->fresh_attrs, &ectx->item->entry_attr, false);\n    path = ATTR(&ectx->fresh_attrs, fullpath);\n\n#ifdef ATTR_INDEX_invalid\n    /* From here, assume that entry is valid */\n    ATTR_MASK_SET(&ectx->fresh_attrs, invalid);\n    ATTR(&ectx->fresh_attrs, invalid) = false;\n#endif\n\n    /* check the entry still matches the policy scope */\n    switch (match_scope(pol->descr, &ectx->item->entry_id, &ectx->fresh_attrs,\n                        !pol->descr->manage_deleted)) {\n    case POLICY_MATCH:\n        /* OK */\n        break;\n\n    case POLICY_NO_MATCH:\n        DisplayLog(LVL_DEBUG, tag(pol),\n                   \"Entry %s doesn't match scope of policy '%s'.\",\n                   path, tag(pol));\n        if (!pol->descr->manage_deleted)\n            update_entry(lmgr, &ectx->item->entry_id, &ectx->fresh_attrs);\n\n        return AS_OUT_OF_SCOPE;\n\n    default:\n        if (!pol->descr->manage_deleted) {\n            DisplayLog(LVL_MAJOR, tag(pol),\n                       \"Warning: cannot determine if entry %s matches the \"\n                       \"scope of policy '%s': skipping it.\", path, tag(pol));\n\n            update_entry(lmgr, &ectx->item->entry_id, &ectx->fresh_attrs);\n            return AS_MISSING_MD;\n        } else {\n            /* For deleted entries, we expect missing attributes.\n             * so, continue anyway. 
*/\n            DisplayLog(LVL_DEBUG, tag(pol),\n                       \"Cannot determine if entry %s matches the \"\n                       \"scope of policy '%s'. Continuing anyway.\",\n                       path, tag(pol));\n        }\n    }\n\n    /* if ignore-policies flag is specified:\n     * - don't check rules\n     * - don't care about recent atime etc...\n     */\n    if (!ignore_policies(pol) && (check_method != MS_NONE)) {\n        /* 4) check whitelist rules */\n        match = is_whitelisted(pol->descr, &ectx->item->entry_id,\n                               &ectx->fresh_attrs, &ectx->fileset);\n\n        if (match == POLICY_MATCH) {\n            DisplayLog(LVL_DEBUG, tag(pol),\n                       \"Entry %s matches ignored target %s.\", path,\n                       ectx->fileset ? ectx->fileset->fileset_id :\n                           \"(ignore rule)\");\n\n            if (!pol->descr->manage_deleted)\n                update_entry(lmgr, &ectx->item->entry_id, &ectx->fresh_attrs);\n\n            return AS_WHITELISTED;\n        } else if (match != POLICY_NO_MATCH) {\n            /* Cannot determine if entry is whitelisted: skip it\n             * (do nothing in database) */\n            DisplayLog(LVL_MAJOR, tag(pol),\n                       \"Warning: cannot determine if entry %s is whitelisted: \"\n                       \"skipping it.\", path);\n\n            if (!pol->descr->manage_deleted)\n                update_entry(lmgr, &ectx->item->entry_id, &ectx->fresh_attrs);\n\n            return AS_MISSING_MD;\n        }\n\n        /* check that time ordering did not change and that time attributes\n         * are consistent. 
*/\n        rc = check_entry_times(pol, lmgr, &ectx->item->entry_id,\n                               &ectx->item->entry_attr, &ectx->fresh_attrs);\n        if (rc != AS_OK)\n            /* check_entry_times already updates the entry */\n            return rc;\n    } /* end if 'don't ignore policies' */\n\n    /* get policy rule for the entry */\n    ectx->rule = policy_case(pol->descr, &ectx->item->entry_id, &ectx->fresh_attrs,\n                             &ectx->fileset);\n    if (!ectx->rule) {\n        DisplayLog(LVL_DEBUG, tag(pol), \"Entry %s matches no policy rule\",\n                   path);\n\n        if (!pol->descr->manage_deleted)\n            update_entry(lmgr, &ectx->item->entry_id, &ectx->fresh_attrs);\n\n        return AS_NO_POLICY;\n    }\n\n    /* don't care about policy condition if 'ignore-policies' flag is\n     * specified */\n    if (ignore_policies(pol) || (check_method == MS_NONE))\n        return AS_OK;\n\n    /* check if the entry matches the policy condition */\n    match = entry_matches(&ectx->item->entry_id, &ectx->fresh_attrs,\n                          &ectx->rule->condition, pol->time_modifier,\n                          pol->descr->status_mgr);\n\n    switch (match) {\n    case POLICY_MATCH:\n        /* OK, entry matches */\n        DisplayLog(LVL_DEBUG, tag(pol),\n                   \"Entry %s matches the condition for policy rule '%s'.\",\n                   path, ectx->rule->rule_id);\n        break;\n\n    case POLICY_NO_MATCH:\n        /* entry is not eligible now */\n        DisplayLog(LVL_DEBUG, tag(pol),\n                   \"Entry %s doesn't match condition for policy rule '%s'\",\n                   path, ectx->rule->rule_id);\n\n        if (!pol->descr->manage_deleted)\n            update_entry(lmgr, &ectx->item->entry_id, &ectx->fresh_attrs);\n\n        return AS_WHITELISTED;\n\n    default:\n        /* Cannot determine if entry matches the policy condition */\n        DisplayLog(LVL_MAJOR, tag(pol),\n                  
 \"Warning: cannot determine if entry %s matches the \"\n                   \"condition for policy rule '%s': skipping it.\",\n                   path, ectx->rule->rule_id);\n\n        if (!pol->descr->manage_deleted)\n            update_entry(lmgr, &ectx->item->entry_id, &ectx->fresh_attrs);\n\n        return AS_MISSING_MD;\n    }\n\n    return AS_OK;\n}\n\nstatic __thread lmgr_t *sched_db_conn = NULL;\n\nstatic int init_sched_thread_conn(void)\n{\n    int rc;\n\n    if (sched_db_conn != NULL)\n        return 0;\n\n    sched_db_conn = calloc(1, sizeof(*sched_db_conn));\n    if (!sched_db_conn)\n        return -ENOMEM;\n\n    rc = ListMgr_InitAccess(sched_db_conn);\n    if (rc)\n        DisplayLog(LVL_CRIT, __func__,\n                   \"Could not connect to database (error %d).\",\n                   rc);\n    else\n        DisplayLog(LVL_FULL, __func__, \"Initialized DB connection for \"\n                   \"thread %Lx\", (ull_t)pthread_self());\n\n    return rc;\n}\n\n/**\n * This function is called by schedulers, when then schedule\n * the processing of an entry, or skip them, etc...\n * In case multiple schedulers are defined, it passes\n * the entry from one scheduler to the next one.\n */\nstatic void run_sched_cb(void *udata, sched_status_e st)\n{\n    entry_context_t *ectx = udata;\n    policy_info_t *pol = ectx->policy;\n    int last_stop_sched = -1; /* not set */\n    int rc;\n    int i;\n\n    DisplayLog(LVL_DEBUG, tag(pol), \"Received callback from scheduler %d,\"\n               \" status = %d\", ectx->curr_sched, st);\n\n    init_sched_thread_conn();\n\n    rc = st;\n    if (rc == SCHED_OK) {\n        /* push to the next scheduler or run the action */\n        ectx->curr_sched++;\n\n        /* if this was the last scheduler, now run the action */\n        if (pol->config->sched_count == ectx->curr_sched) {\n            /* Final rule check before running the action */\n            rc = refresh_and_match_entry(sched_db_conn, ectx,\n                   
                      pol->config->post_sched_match);\n            if (rc != AS_OK) {\n                policy_ack(&pol->queue, rc, &ectx->fresh_attrs,\n                           ectx->item->targeted);\n                free_entry_context(ectx);\n                return;\n            }\n            rc = policy_action(ectx, pol->config->post_sched_match);\n            action_fini(rc, sched_db_conn, ectx);\n            return;\n        }\n        /* else, call the next scheduler */\n        rc = sched_push(&pol->sched_res[ectx->curr_sched],\n                        &ectx->item->entry_id, &ectx->fresh_attrs,\n                        run_sched_cb, ectx);\n    }\n\n    switch (rc) {\n    case SCHED_OK:\n        /* OK, submitted to scheduler: nothing more to do here */\n        return;\n\n    case SCHED_SKIP_ENTRY:\n        /* no particular processing */\n        break;\n\n    case SCHED_STOP_RUN:\n        pol->stopping = true;\n        /* kill previous schedulers only */\n        last_stop_sched = ectx->curr_sched - 1;\n        break;\n\n    case SCHED_KILL_RUN:\n        pol->stopping = true;\n        /* kill all schedulers */\n        last_stop_sched = pol->config->sched_count - 1;\n        break;\n\n    case SCHED_DELAY:\n        /* The scheduling layer is not supposed to return that to the policy\n         * run. 
These return codes are supposed to be handled in policy_sched.c\n         */\n        RBH_BUG(\"Scheduling layer is not supposed to return DELAY\");\n        break;\n    }\n\n    /* finalize current entry processing */\n    if (!pol->descr->manage_deleted)\n        update_entry(sched_db_conn, &ectx->item->entry_id,\n                     &ectx->fresh_attrs);\n    policy_ack(&pol->queue, AS_NOT_SCHEDULED, &ectx->item->entry_attr,\n               ectx->item->targeted);\n\n    free_entry_context(ectx);\n\n    /* nothing more to do */\n    if (last_stop_sched < 0)\n        return;\n\n    DisplayLog(LVL_DEBUG, tag(pol),\n               \"Flushing schedulers up to #%d\\n\", last_stop_sched);\n    for (i = 0 ; i <= last_stop_sched; i++) {\n        rc = sched_flush(&pol->sched_res[i]);\n        if (rc) {\n            DisplayLog(LVL_MAJOR, tag(pol),\n                       \"Failed to flush queues of scheduler #%d (error %d)\",\n                       i, rc);\n        }\n    }\n}\n\nstatic void update_first_eligible(policy_info_t *pol, int val)\n{\n    if ((!pol->first_eligible) ||\n        (pol->config->lru_sort_order == SORT_ASC\n         && val < pol->first_eligible) ||\n        (pol->config->lru_sort_order == SORT_DESC\n         && val > pol->first_eligible))\n        pol->first_eligible = val;\n}\n\n/**\n* Manage an entry by path or by fid, depending on FS\n*/\nstatic void process_entry(policy_info_t *pol, lmgr_t *lmgr,\n                          queue_item_t *p_item, bool free_item)\n{\n    entry_context_t  *ectx;\n    int               rc;\n    match_source_t    check_method;\n\n    ectx = calloc(1, sizeof(entry_context_t));\n    if (!ectx) {\n        rc = AS_ERROR;\n        goto out_free;\n    }\n\n    ectx->policy = pol;\n    ectx->item = p_item;\n    ectx->free_item = free_item;\n\n    if (aborted(pol) || stopping(pol)) {\n        /* policy run has to stop, doesn't submit new migrations */\n        DisplayLog(LVL_FULL, tag(pol),\n                   \"Policy run %s: 
skipping pending requests\",\n                   pol->aborted ? \"aborted\" : \"stopping\");\n        policy_ack(&pol->queue, pol->aborted ? AS_ABORT : AS_NOT_SCHEDULED,\n                   &p_item->entry_attr, p_item->targeted);\n        rc = AS_ABORT;\n        goto out_free;\n    }\n\n    /* If there are schedulers, this is a prematching.\n     * Else, use the highest check level between pre/post-matching */\n    if (pol->config->sched_count > 0)\n        check_method = pol->config->pre_sched_match;\n    else\n        check_method = MAX(pol->config->pre_sched_match,\n                           pol->config->post_sched_match);\n\n    /* Refresh entry info and match policy rules.\n     * This is a precheck if there are schedulers */\n    rc = refresh_and_match_entry(lmgr, ectx, check_method);\n    if (rc != AS_OK) {\n        policy_ack(&pol->queue, rc, &p_item->entry_attr,\n                   p_item->targeted);\n        goto out_free;\n    }\n\n    /* it is the first matching entry? */\n    rc = get_sort_attr(pol, &p_item->entry_attr);\n    if (rc != -1)\n        update_first_eligible(pol, rc);\n\n    ectx->time_save = rc;\n\n    /* build action parameters */\n    rc = build_action_params(ectx);\n    if (rc) {\n        if (!pol->descr->manage_deleted)\n            update_entry(lmgr, &p_item->entry_id, &ectx->fresh_attrs);\n\n        policy_ack(&pol->queue, AS_ERROR, &p_item->entry_attr,\n                   p_item->targeted);\n        goto out_free;\n    }\n\n    /* save attributes before doing the action */\n    /* @FIXME this only save scalar value, not values in allocated structures */\n    ectx->prev_attrs = ectx->fresh_attrs;\n\n    /* if there is no scheduler, run the action directly */\n    if (pol->config->sched_count == 0) {\n\n        /* apply action to the entry! 
*/\n        rc = policy_action(ectx, check_method);\n\n        action_fini(rc, lmgr, ectx);\n        return;\n    }\n\n    /* push entry to the first scheduler of the stack */\n    ectx->curr_sched = 0;\n    rc = sched_push(pol->sched_res, &ectx->item->entry_id, &ectx->fresh_attrs,\n                    run_sched_cb, ectx);\n    if (rc)\n        run_sched_cb(ectx, rc);\n    return;\n\nout_free:\n    free_entry_context(ectx);\n}\n\n/**\n*  Main routine of policy thread\n*/\nstatic void *thr_policy_run(void *arg)\n{\n    int rc;\n    lmgr_t lmgr;\n    void *p_queue_entry;\n    policy_info_t *pol = (policy_info_t *) arg;\n\n    rc = ListMgr_InitAccess(&lmgr);\n    if (rc) {\n        DisplayLog(LVL_CRIT, tag(pol),\n                   \"Could not connect to database (error %d). Exiting.\",\n                   rc);\n        exit(rc);\n    }\n\n    while (Queue_Get(&pol->queue, &p_queue_entry) == 0)\n        process_entry(pol, &lmgr, (queue_item_t *) p_queue_entry, true);\n\n    /* Error occurred in queue management... */\n    DisplayLog(LVL_CRIT, tag(pol),\n               \"An error occurred in policy run queue management. 
Exiting.\");\n    exit(-1);\n    return NULL;    /* for avoiding compiler warnings */\n}\n\nint start_worker_threads(policy_info_t *pol)\n{\n    unsigned int i;\n\n    pol->threads = (pthread_t *) MemCalloc(pol->config->nb_threads,\n                                           sizeof(pthread_t));\n    if (!pol->threads) {\n        DisplayLog(LVL_CRIT, tag(pol), \"Memory error in %s\", __func__);\n        return ENOMEM;\n    }\n\n    for (i = 0; i < pol->config->nb_threads; i++) {\n        if (pthread_create(&pol->threads[i], NULL, thr_policy_run, pol) !=\n            0) {\n            int rc = errno;\n            DisplayLog(LVL_CRIT, tag(pol),\n                       \"Error %d creating policy threads in %s: %s\", rc,\n                       __func__, strerror(rc));\n            return rc;\n        }\n    }\n    return 0;\n}\n\n/**\n* Update the status of outstanding actions\n* \\param lmgr          [IN] connexion to database\n* \\param p_nb_reset    [OUT] number of actions reset\n* \\param p_nb_total    [OUT] total number of actions checked\n*\n* Note:   the timeout is in pol->config\n*/\nint check_current_actions(policy_info_t *pol, lmgr_t *lmgr,\n                          unsigned int *p_nb_reset,\n                          unsigned int *p_nb_total)\n{\n    int rc;\n    struct lmgr_iterator_t *it = NULL;\n\n    lmgr_filter_t filter;\n    filter_value_t fval;\n\n    queue_item_t q_item;\n\n    unsigned int nb_returned = 0;\n    unsigned int nb_aborted = 0;\n    attr_mask_t attr_mask_sav = { 0 };\n    attr_mask_t tmp;\n\n    /* do nothing if this policy applies to deleted entries */\n    if (pol->descr->manage_deleted)\n        return 0;\n\n    /* attributes to be retrieved */\n    ATTR_MASK_INIT(&q_item.entry_attr);\n    ATTR_MASK_SET(&q_item.entry_attr, fullpath);\n    ATTR_MASK_SET(&q_item.entry_attr, path_update);\n\n    /* Add attrs to match policy scope */\n    q_item.entry_attr.attr_mask = attr_mask_or(&q_item.entry_attr.attr_mask,\n                           
                    &pol->descr->scope_mask);\n\n    /* needed attributes from DB */\n    tmp = attrs_for_status_mask(q_item.entry_attr.attr_mask.status, false);\n    q_item.entry_attr.attr_mask =\n        attr_mask_or(&q_item.entry_attr.attr_mask, &tmp);\n\n    attr_mask_sav = q_item.entry_attr.attr_mask;\n\n    rc = lmgr_simple_filter_init(&filter);\n    if (rc)\n        return rc;\n\n    /* if timeout is > 0, only select entries whose last update\n     * is old enough (last_update <= now - timeout) or NULL*/\n    if (pol->config->action_timeout > 0) {\n        fval.value.val_int = time(NULL) - pol->config->action_timeout;\n        rc = lmgr_simple_filter_add(&filter, ATTR_INDEX_md_update, LESSTHAN,\n                                    fval, FILTER_FLAG_ALLOW_NULL);\n        if (rc)\n            return rc;\n    }\n\n    /* filter by status of current actions */\n    fval.value.val_str = pol->descr->status_current;\n    rc = lmgr_simple_filter_add(&filter,\n                                smi_status_index(pol->descr->status_mgr),\n                                EQUAL, fval, 0);\n    if (rc)\n        return rc;\n\n#ifdef ATTR_INDEX_invalid\n    /* don't retrieve invalid entries (allow entries with invalid == NULL) */\n    fval.value.val_int = true;\n    rc = lmgr_simple_filter_add(&filter, ATTR_INDEX_invalid, NOTEQUAL, fval,\n                                FILTER_FLAG_ALLOW_NULL);\n    if (rc)\n        return rc;\n#endif\n\n    it = ListMgr_Iterator(lmgr, &filter, NULL, NULL);\n\n    if (it == NULL) {\n        lmgr_simple_filter_free(&filter);\n        DisplayLog(LVL_CRIT, tag(pol),\n                   \"Error retrieving the list of current actions.\"\n                   \"Recovery cancelled.\");\n        return -1;\n    }\n\n    memset(&q_item, 0, sizeof(queue_item_t));\n    q_item.entry_attr.attr_mask = attr_mask_sav;\n\n    while ((rc = ListMgr_GetNext(it, &q_item.entry_id, &q_item.entry_attr))\n           == DB_SUCCESS) {\n        nb_returned++;\n\n        if 
(ATTR_MASK_TEST(&q_item.entry_attr, fullpath))\n            DisplayLog(LVL_VERB, tag(pol), \"Updating status of '%s'...\",\n                       ATTR(&q_item.entry_attr, fullpath));\n\n        /* check entry (force retrieving fresh attributes) */\n        if (check_entry(pol, lmgr, &q_item, &q_item.entry_attr, MS_FORCE_UPDT)\n            == AS_OK) {\n            int smi_index = pol->descr->status_mgr->smi_index;\n\n            if (ATTR_MASK_STATUS_TEST(&q_item.entry_attr, smi_index)) {\n                if (strcmp(STATUS_ATTR(&q_item.entry_attr, smi_index),\n                           pol->descr->status_current)) {\n                    DisplayLog(LVL_EVENT, tag(pol),\n                               \"status of '%s' changed: now '%s'\",\n                               ATTR(&q_item.entry_attr, fullpath),\n                               STATUS_ATTR(&q_item.entry_attr, smi_index));\n                    nb_aborted++;\n                } else\n                    DisplayLog(LVL_EVENT, tag(pol),\n                               \"status of '%s' is still '%s'\",\n                               ATTR(&q_item.entry_attr, fullpath),\n                               STATUS_ATTR(&q_item.entry_attr, smi_index));\n            }\n\n            /* update entry status */\n            update_entry(lmgr, &q_item.entry_id, &q_item.entry_attr);\n        }\n\n        /* reset attr_mask, if it was altered by last ListMgr_GetNext() call */\n        memset(&q_item, 0, sizeof(queue_item_t));\n        q_item.entry_attr.attr_mask = attr_mask_sav;\n    }\n\n    lmgr_simple_filter_free(&filter);\n    ListMgr_CloseIterator(it);\n\n    if (p_nb_total)\n        *p_nb_total = nb_returned;\n\n    if (p_nb_reset)\n        *p_nb_reset = nb_aborted;\n\n    /* check rc */\n    if (rc != DB_END_OF_LIST) {\n        DisplayLog(LVL_CRIT, tag(pol),\n                   \"Error %d getting next entry of iterator\", rc);\n        return -1;\n    }\n\n    return 0;\n}\n"
  },
  {
    "path": "src/policies/policy_run_cfg.c",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2009-2014 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n#ifdef HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n\n#include \"rbh_cfg_helpers.h\"\n#include \"run_policies.h\"\n#include \"rbh_misc.h\"\n#include \"Memory.h\"\n#include \"rbh_modules.h\"\n#include <errno.h>\n#include <ctype.h>\n\n#define PARAM_SUFFIX   \"_parameters\"\n#define TRIGGER_SUFFIX       \"_trigger\"\n\n#define TAG       \"PolicyRunCfg\"\n\n/* contains all run configs */\npolicy_run_config_list_t run_cfgs = { NULL, 0 };\n\nstatic int polrun_set_default(const policy_descr_t *pol,\n                              policy_run_config_t *cfg)\n{\n    memset(cfg, 0, sizeof(*cfg));\n\n    cfg->nb_threads = 4;\n    cfg->queue_size = 4096;\n    cfg->db_request_limit = 100000;\n    cfg->max_action_nbr = 0;    /* unlimited */\n    cfg->max_action_vol = 0;    /* unlimited */\n\n    cfg->trigger_list = NULL;\n    cfg->trigger_count = 0;\n\n    cfg->report_interval = 10 * 60; /* 10 min */\n\n    cfg->pre_maintenance_window = 0;    /* disabled */\n    cfg->maint_min_apply_delay = 30 * 60;   /* 30 min */\n\n    cfg->suspend_error_pct = 0.0;   /* disabled */\n    cfg->suspend_error_min = 0; /* disabled */\n\n    /* attr index of the sort order (e.g. last_mod, creation_time, ...) 
*/\n    cfg->lru_sort_attr = pol->default_lru_sort_attr;\n    cfg->lru_sort_order = pol->default_lru_sort_order;\n\n    cfg->action = pol->default_action;\n    cfg->action_params.param_set = NULL;\n    cfg->run_attr_mask = null_mask;\n\n    cfg->check_action_status_on_startup = false;\n    cfg->check_action_status_delay = 0; /* no check */\n\n    cfg->action_timeout = 2 * 3600; /* 2h */\n\n    cfg->recheck_ignored_entries = false;\n    cfg->report_actions = true;\n\n    cfg->reschedule_delay_ms = 100; /* 100 ms */\n\n    cfg->sched_count = 0;\n    cfg->schedulers = NULL;\n    cfg->sched_cfg = NULL;\n\n    /* precheck with cached info only */\n    cfg->pre_sched_match = MS_CACHE_ONLY;\n    /* final check using required info (from cache+FS).\n     * Use path from cache */\n    cfg->post_sched_match = MS_AUTO_ATTRS;\n\n    cfg->pre_run_command = NULL;\n    cfg->post_run_command = NULL;\n\n    return 0;\n}\n\nstatic void policy_run_cfg_set_default(void *module_config)\n{\n    int i;\n    policy_run_config_list_t *cfg = (policy_run_config_list_t *) module_config;\n\n    for (i = 0; i < cfg->count; i++)\n        polrun_set_default(&policies.policy_list[i], &cfg->configs[i]);\n}\n\nstatic void *policy_run_cfg_new(void)\n{\n    policy_run_config_list_t *cfg;\n\n    cfg = calloc(1, sizeof(policy_run_config_list_t));\n    if (cfg == NULL)\n        return NULL;\n\n    /* safe because policies configuration is always parsed before\n     * policy run config */\n    cfg->count = policies.policy_count;\n    cfg->configs =\n        (policy_run_config_t *) calloc(cfg->count, sizeof(policy_run_config_t));\n    if (cfg->configs == NULL)\n        return NULL;\n\n    return cfg;\n}\n\nstatic void policy_run_cfg_write_default(FILE *output)\n{\n    print_begin_block(output, 0, \"<policy>\" PARAM_SUFFIX, NULL);\n    print_line(output, 1,\n               \"lru_sort_attr           : default_lru_sort_attr (from 'define_policy' block)\");\n    print_line(output, 1, \"schedulers              : 
none\");\n    print_line(output, 1, \"max_action_count        : 0 (unlimited)\");\n    print_line(output, 1, \"max_action_volume       : 0 (unlimited)\");\n    print_line(output, 1, \"pre_run_command         : none\");\n    print_line(output, 1, \"post_run_command        : none\");\n    print_line(output, 1, \"suspend_error_pct       : disabled (0)\");\n    print_line(output, 1, \"suspend_error_min       : disabled (0)\");\n    print_line(output, 1, \"report_interval         : 10min\");\n    print_line(output, 1, \"check_actions_on_startup: no\");\n    print_line(output, 1, \"check_actions_interval  : 0 (disabled)\");\n    print_line(output, 1, \"action_timeout          : 2h\");\n    print_line(output, 1, \"recheck_ignored_entries : no\");\n    print_line(output, 1, \"report_actions          : yes\");\n    print_line(output, 1, \"nb_threads              : 4\");\n    print_line(output, 1, \"reschedule_delay_ms     : 100\");\n    print_line(output, 1, \"queue_size              : 4096\");\n    print_line(output, 1, \"db_result_size_max      : 100000\");\n    print_line(output, 1, \"pre_maintenance_window  : 0 (disabled)\");\n    print_line(output, 1, \"maint_min_apply_delay   : 30min\");\n    print_line(output, 1, \"pre_sched_match         : cache_only\");\n    print_line(output, 1, \"post_sched_match        : auto_update\");\n    print_end_block(output, 0);\n    fprintf(output, \"\\n\");\n}\n\nstatic void policy_run_cfg_write_template(FILE *output)\n{\n    print_line(output, 0, \"#<policy>\" PARAM_SUFFIX \" {\");\n    print_line(output, 1, \"# sort order for applying the policy (overrides \");\n    print_line(output, 1, \"# default_lru_sort_attr from policy definition)\");\n    print_line(output, 1, \"#lru_sort_attr = last_access ;\");\n    fprintf(output, \"\\n\");\n    print_line(output, 1,\n               \"# maximum number of actions per policy run (default: no limit)\");\n    print_line(output, 1, \"#max_action_count = 100000 ;\");\n    print_line(output, 1,\n   
            \"# maximum volume of processed files per policy run (default: no limit)\");\n    print_line(output, 1, \"#max_action_volume = 10TB ;\");\n    fprintf(output, \"\\n\");\n    print_line(output, 1, \"# nbr of threads to execute policy actions\");\n    print_line(output, 1, \"#nb_threads = 8;\");\n    fprintf(output, \"\\n\");\n    print_line(output, 1,\n               \"# suspend current run if 50%% of actions fail (after 100 errors):\");\n    print_line(output, 1, \"#suspend_error_pct = 50%% ;\");\n    print_line(output, 1, \"#suspend_error_min = 100 ;\");\n    print_line(output, 1, \"# interval to report policy run progress:\");\n    print_line(output, 1, \"#report_interval = 10min;\");\n    print_line(output, 1, \"# cancel an action after a given time:\");\n    print_line(output, 1, \"#action_timeout = 2h;\");\n    print_line(output, 1, \"# interval to check the status of started actions\");\n    print_line(output, 1, \"#check_actions_interval = 30min;\");\n    print_line(output, 1,\n               \"# check the status of previously started actions on startup:\");\n    print_line(output, 1, \"#check_actions_on_startup = yes;\");\n    fprintf(output, \"\\n\");\n    print_line(output, 1,\n               \"# When applying policies, recheck entries that were previously\");\n    print_line(output, 1,\n               \"# ignored. 
Enable it after changing fileclass definitions,\");\n    print_line(output, 1, \"# or if entries move from one class to another.\");\n    print_line(output, 1,\n               \"# This can significantly slow down policy application.\");\n    print_line(output, 1, \"#recheck_ignored_entries = no;\");\n    fprintf(output, \"\\n\");\n    print_line(output, 1, \"# report actions to report log file?\");\n    print_line(output, 1, \"# report_actions = yes;\");\n    fprintf(output, \"\\n\");\n    print_line(output, 1, \"# pre-maintenance feature parameters\");\n    print_line(output, 1, \"# 0 to disable this feature\");\n    print_line(output, 1, \"#pre_maintenance_window = 24h;\");\n    print_line(output, 1, \"#maint_min_apply_delay = 30min;\");\n    fprintf(output, \"\\n\");\n    print_line(output, 1, \"# delay for rescheduling a delayed entry\");\n    print_line(output, 1, \"#reschedule_delay_ms = 100;\");\n    fprintf(output, \"\\n\");\n    print_line(output, 1, \"# Command to execute before each run:\");\n    print_line(output, 1, \"# pre_run_command = \\\"/path/to/script.sh -f {cfg} \"\n                          \"-p {fspath}\\\" ;\");\n    print_line(output, 1, \"# Command to execute after each run:\");\n    print_line(output, 1, \"# post_run_command = \\\"/path/to/script.sh -f {cfg} \"\n                          \"-p {fspath}\\\" ;\");\n    fprintf(output, \"\\n\");\n    print_line(output, 1, \"# internal/tuning parameters\");\n    print_line(output, 1, \"#queue_size = 4096;\");\n    print_line(output, 1, \"#db_result_size_max = 100000;\");\n    fprintf(output, \"\\n\");\n    print_line(output, 1, \"# Indicate what attributes are used to match policy rules\");\n    print_line(output, 1, \"# before the scheduling step.\");\n    print_line(output, 1, \"# Possible values are 'none' (no rule check), 'cache_only', \");\n    print_line(output, 1, \"# 'auto_update', 'force_update'\");\n    print_line(output, 1, \"#pre_sched_match = cache_only;\");\n    print_line(output, 
1, \"# Same as previous parameter, for final check before running\");\n    print_line(output, 1, \"# the policy action\");\n    print_line(output, 1, \"#post_sched_match = auto_update;\");\n    print_line(output, 0, \"#}\");\n    fprintf(output, \"\\n\");\n\n    /* @TODO Add template triggers */\n}\n\n# if 0  // @TODO RBHv3: write template triggers\n\nfprintf(output, \"\\n\");\n#ifdef _LUSTRE\nprint_line(output, 0, \"# Trigger purge on individual OST usage\");\nprint_begin_block(output, 0, TRIGGER_BLOCK, NULL);\nprint_line(output, 1, \"trigger_on         = OST_usage ;\");\nprint_line(output, 1, \"high_threshold_pct = 85%% ;\");\nprint_line(output, 1, \"low_threshold_pct  = 80%% ;\");\nprint_line(output, 1, \"check_interval     = 5min ;\");\nprint_end_block(output, 0);\nfprintf(output, \"\\n\");\n#endif\n\nprint_line(output, 0, \"# Trigger purge on filesystem usage\");\nprint_begin_block(output, 0, TRIGGER_BLOCK, NULL);\nprint_line(output, 1, \"trigger_on         = global_usage ;\");\nprint_line(output, 1, \"high_threshold_pct = 90%% ;\");\nprint_line(output, 1, \"low_threshold_pct  = 85%% ;\");\nprint_line(output, 1, \"check_interval     = 5min ;\");\nprint_line(output, 1, \"# raise an alert when the high threshold is reached\");\nprint_line(output, 1, \"alert_high         = yes ;\");\nprint_line(output, 1, \"# raise an alert if not enough data can be purged\");\nprint_line(output, 1, \"# to reach the low threshold\");\nprint_line(output, 1, \"alert_low          = yes ;\");\nprint_end_block(output, 0);\n\nfprintf(output, \"\\n\");\n\n/** @TODO not implemented yet */\n\n#if 0\n#ifdef _LUSTRE\nprint_line(output, 1, \"# Trigger purge on pool usage\");\nprint_begin_block(output, 1, TRIGGER_BLOCK);\nprint_line(output, 2, \"trigger_on         = pool_usage(pool1,pool2) ;\");\nprint_line(output, 2, \"high_threshold_pct = 85%% ;\");\nprint_line(output, 2, \"low_threshold_pct  = 80%% ;\");\nprint_line(output, 2, \"check_interval     = 5min ;\");\nprint_end_block(output, 
1);\nfprintf(output, \"\\n\");\n#endif\n\n#endif\n\nprint_line(output, 0, \"# Trigger purge of charlie's or foo's files\");\nprint_line(output, 0, \"# if they use more than a TB (check twice a day)\");\nprint_begin_block(output, 0, TRIGGER_BLOCK, NULL);\nprint_line(output, 1, \"trigger_on         = user_usage(charlie,foo) ;\");\nprint_line(output, 1, \"high_threshold_vol = 1TB ;\");\nprint_line(output, 1, \"low_threshold_vol  = 950GB ;\");\nprint_line(output, 1, \"check_interval     = 12h ;\");\nprint_line(output, 1, \"# send an alert when the quota is reached\");\nprint_line(output, 1, \"alert_high         = yes ;\");\nprint_end_block(output, 0);\n\n#endif\n\nstruct trig_target_def {\n    const char *name;\n    trigger_type_t type;\n    policy_target_t target;\n    bool allow_args;\n};\n\n/** list of possible triggers and their caracteristics */\nstatic const struct trig_target_def target_defs[] = {\n    /* periodic and scheduled are synonymes */\n    {\"periodic\", TRIG_ALWAYS, TGT_FS, false},\n    {\"scheduled\", TRIG_ALWAYS, TGT_FS, false},\n\n    {\"global_usage\", TRIG_CONDITION, TGT_FS, false},\n    {\"user_usage\", TRIG_CONDITION, TGT_USER, true},\n    {\"group_usage\", TRIG_CONDITION, TGT_GROUP, true},\n#ifdef _LUSTRE\n    {\"ost_usage\", TRIG_CONDITION, TGT_OST, false},\n    {\"pool_usage\", TRIG_CONDITION, TGT_POOL, true},\n#endif\n\n    {NULL, 0, 0, false} /* terminal element */\n};\n\nstatic const struct trig_target_def *str2trigger_def(const char *str)\n{\n    const struct trig_target_def *def;\n\n    for (def = target_defs; def->name != NULL; def++) {\n        if (!strcasecmp(def->name, str))\n            return def;\n    }\n    return NULL;\n}\n\n/** fills the target fields of a trigger item */\nstatic int set_trigger_target(trigger_item_t *p_trigger,\n                              const struct trig_target_def *def,\n                              char **args, unsigned int arg_count,\n                              char *msg_out)\n{\n    int i;\n\n    
p_trigger->trigger_type = def->type;\n    p_trigger->target_type = def->target;\n\n    /* default: alert enabled if LW cannot be reached.\n     * No thresholds for TRIG_ALWAYS. */\n    p_trigger->alert_lw = ((def->type == TRIG_ALWAYS) ? false : true);\n\n    /* optional arguments: target list (user list, group list, pool list) */\n    if (arg_count > 0) {\n        if (!def->allow_args) {\n            sprintf(msg_out, \"No extra argument expected for trigger type\"\n                    \" '%s': %u argument(s) found.\", def->name, arg_count);\n            return EINVAL;\n        }\n\n        p_trigger->list = (char **)calloc(arg_count, sizeof(char *));\n        p_trigger->list_size = arg_count;\n        for (i = 0; i < arg_count; i++) {\n            p_trigger->list[i] = strdup(args[i]);\n            if (p_trigger->list[i] == NULL) {\n                sprintf(msg_out, \"Failed to allocate memory: %s\",\n                        strerror(errno));\n                return ENOMEM;\n            }\n        }\n    }\n    return 0;\n}\n\nstatic inline const char *mk_threshold_param(const char *prefix,\n                                             const char *suffix, char *buff)\n{\n    sprintf(buff, \"%s_threshold_%s\", prefix, suffix);\n    return buff;\n}\n\n/** read thresholds params, check their consistency and fills the trigger item.\n *  @param[in] prefix \"high\" or \"low\"\n *  @param[in] p_trigger to check compatibility with trigger type and target.\n */\nstatic int read_threshold_params(config_item_t config_blk,\n                                 const char *block_name, const char *prefix,\n                                 const trigger_item_t *p_trigger,\n                                 trigger_value_type_t *type, threshold_u *val,\n                                 char *msg_out)\n{\n    unsigned int cnt = 0;\n    uint64_t tmpval;\n    int rc;\n    char buff[128]; /* oversized for param name */\n\n    rc = GetFloatParam(config_blk, block_name,\n                       
mk_threshold_param(prefix, \"pct\", buff),\n                       PFLG_POSITIVE | PFLG_ALLOW_PCT_SIGN, &val->percent,\n                       NULL, NULL, msg_out);\n    if ((rc != 0) && (rc != ENOENT))    /* real error */\n        return rc;\n    if (rc == 0) {\n        *type = PCT_THRESHOLD;\n        cnt++;\n    }\n\n    rc = GetSizeParam(config_blk, block_name,\n                      mk_threshold_param(prefix, \"vol\", buff),\n                      PFLG_POSITIVE, &val->volume, NULL, NULL, msg_out);\n    if ((rc != 0) && (rc != ENOENT))\n        return rc;\n    if (rc == 0) {\n        *type = VOL_THRESHOLD;\n        cnt++;\n    }\n\n    rc = GetFloatParam(config_blk, block_name,\n                       mk_threshold_param(prefix, \"cntpct\", buff),\n                       PFLG_POSITIVE | PFLG_ALLOW_PCT_SIGN, &val->percent,\n                       NULL, NULL, msg_out);\n    if ((rc != 0) && (rc != ENOENT))    /* real error */\n        return rc;\n    if (rc == 0) {\n        *type = CNTPCT_THRESHOLD;\n        cnt++;\n    }\n\n    rc = GetInt64Param(config_blk, block_name,\n                       mk_threshold_param(prefix, \"cnt\", buff),\n                       PFLG_POSITIVE, &tmpval, NULL, NULL, msg_out);\n    if ((rc != 0) && (rc != ENOENT))\n        return rc;\n    if (rc == 0) {\n        *type = COUNT_THRESHOLD;\n        /* unsigned long long to uint64_t */\n        val->count = (unsigned long long)tmpval;\n        cnt++;\n    }\n\n    /* check params consistency */\n    if (p_trigger->trigger_type == TRIG_ALWAYS) {\n        if (cnt > 0) {\n            /* in case of 'periodic' triggers, no thresholds are expected */\n            strcpy(msg_out,\n                   \"No high/low threshold expected for 'periodic' trigger\");\n            return EINVAL;\n        }\n        /* no extra check needed */\n        return 0;\n    }\n\n    if (cnt > 1) {\n        sprintf(msg_out, \"Multiple %s_threshold parameters in trigger\", prefix);\n        return EINVAL;\n    }\n\n  
  if (cnt == 0) {\n        sprintf(msg_out, \"No %s_threshold found in trigger (mandatory): \"\n                \" '%s_threshold_pct', '%s_threshold_vol', '%s_threshold_cntpct\"\n                \"or '%s_threshold_cnt' expected\", prefix, prefix,\n                prefix, prefix, prefix);\n        return ENOENT;\n    }\n\n    return 0;\n}\n\n/** parse a trigger block from configuration and fills a trigger item */\nstatic int parse_trigger_block(config_item_t config_blk, const char *block_name,\n                               trigger_item_t *p_trigger_item, char *msg_out)\n{\n    int rc;\n    char tmpstr[1024];\n    char **arg_tab;\n    unsigned int arg_count;\n    config_item_t params_block;\n    bool unique;\n\n    const struct trig_target_def *def;\n\n    static const char *trigger_expect[] = {\n        \"trigger_on\", \"check_interval\",\n        \"high_threshold_pct\", \"low_threshold_pct\",\n        \"high_threshold_vol\", \"low_threshold_vol\",\n        \"high_threshold_cnt\", \"low_threshold_cnt\",\n        \"high_threshold_cntpct\", \"low_threshold_cntpct\",\n        \"alert_high\", \"alert_low\", \"post_trigger_wait\",\n        \"action_params\", \"max_action_count\", \"max_action_volume\",\n        NULL\n    };\n\n    const cfg_param_t cfg_params[] = {\n        {\"max_action_count\", PT_INT, PFLG_POSITIVE,\n         &p_trigger_item->max_action_nbr, 0},\n        {\"max_action_volume\", PT_SIZE, PFLG_POSITIVE,\n         &p_trigger_item->max_action_vol, 0},\n        {\"check_interval\", PT_DURATION,\n         PFLG_POSITIVE | PFLG_NOT_NULL | PFLG_MANDATORY,\n         &p_trigger_item->check_interval, 0},\n        {\"alert_high\", PT_BOOL, 0,\n         &p_trigger_item->alert_hw, 0},\n        {\"alert_low\", PT_BOOL, 0,\n         &p_trigger_item->alert_lw, 0},\n        {\"post_trigger_wait\", PT_DURATION, 0,\n         &p_trigger_item->post_trigger_wait, 0},\n        END_OF_PARAMS\n    };\n\n    memset(p_trigger_item, 0, sizeof(*p_trigger_item));\n\n    /* 
retrieve special parameters */\n    rc = GetStringParam(config_blk, block_name, \"trigger_on\",\n                        PFLG_MANDATORY | PFLG_NO_WILDCARDS, tmpstr,\n                        sizeof(tmpstr), &arg_tab, &arg_count, msg_out);\n    if (rc) /* even ENOENT retruns an error because trigger_on is mandatory */\n        return rc;\n\n    /* initialize list of optional args */\n    p_trigger_item->list = NULL;\n    p_trigger_item->list_size = 0;\n\n    def = str2trigger_def(tmpstr);\n    if (def == NULL) {\n        sprintf(msg_out, \"Unexpected value for 'trigger_on' parameter: %s.\",\n                tmpstr);\n        return EINVAL;\n    }\n    rc = set_trigger_target(p_trigger_item, def, arg_tab, arg_count, msg_out);\n    if (rc)\n        return rc;\n\n    /* retrieve high and low thresholds params and check their compatibility */\n    rc = read_threshold_params(config_blk, block_name, \"high\", p_trigger_item,\n                               &p_trigger_item->hw_type, &p_trigger_item->hw_u,\n                               msg_out);\n    if (rc)\n        return rc;\n\n    rc = read_threshold_params(config_blk, block_name, \"low\", p_trigger_item,\n                               &p_trigger_item->lw_type, &p_trigger_item->lw_u,\n                               msg_out);\n    if (rc)\n        return rc;\n\n    if ((p_trigger_item->trigger_type != TRIG_ALWAYS)\n        && (p_trigger_item->hw_type != p_trigger_item->lw_type)) {\n        strcpy(msg_out, \"Incompatible high/low threshold types\");\n        return EINVAL;\n    }\n    /** FIXME RBHv3 count threshold for HSM systems should only match online\n     *  files (not released) */\n\n    /* retrieve other scalar parameters */\n    rc = read_scalar_params(config_blk, block_name, cfg_params, msg_out);\n    if (rc)\n        return rc;\n\n    /* get action_params subblock */\n    unique = true;\n    params_block = rh_config_GetItemByName(config_blk, \"action_params\",\n                                           
/** Advance *c past any leading whitespace characters. */
static void skip_spaces(const char **c)
{
    /* cast to unsigned char: passing a plain (possibly negative) char
     * to isspace() is undefined behavior per the C standard */
    while (isspace((unsigned char)**c)) {
        (*c)++;
    }
}
funcs->module_name);\n\n    rc = funcs->read_from_block(parent_block, cfg, msg_out);\n    if (rc)\n        goto err_free;\n\n    *pcfg = cfg;\n    return 0;\n\nerr_free:\n    funcs->free(cfg);\n    return rc;\n}\n\n/** parse scheduler config and load their configuration */\nstatic int parse_schedulers(config_item_t cfg_block, char *param_value,\n                            policy_run_config_t *conf, char *msg_out)\n{\n    const char *curr;\n    char *ptr = NULL;\n    int i;\n\n    for (curr = strtok_r(param_value, \",\", &ptr);\n         curr != NULL;\n         curr = strtok_r(NULL, \",\", &ptr)) {\n\n        skip_spaces(&curr);\n        if (*curr == '\\0')\n            break;\n\n        /* extend scheduler arrays */\n        conf->sched_count++;\n        conf->schedulers = realloc(conf->schedulers, conf->sched_count *\n                                        sizeof(action_scheduler_t));\n        conf->sched_cfg = realloc(conf->sched_cfg, conf->sched_count *\n                                        sizeof(void *));\n        if (conf->sched_cfg == NULL || conf->schedulers == NULL)\n            return ENOMEM;\n\n        i = conf->sched_count - 1;\n        /* load the related scheduler */\n        DisplayLog(LVL_DEBUG, TAG, \"Loading scheduler '%s'\", curr);\n\n        conf->schedulers[i] = module_get_scheduler(curr);\n        if (conf->schedulers[i] == NULL) {\n            DisplayLog(LVL_CRIT, TAG, \"Failed to load scheduler '%s'\",\n                       curr);\n            return EINVAL;\n        }\n\n        /* add mask of needed attributes to policy run mask */\n        conf->run_attr_mask = attr_mask_or(&conf->run_attr_mask,\n                                         &conf->schedulers[i]->sched_attr_mask);\n\n        /* now load its configuration */\n        /** TODO manage/check reload case */\n        if (load_sched_cfg(conf->schedulers[i]->sched_cfg_funcs,\n                           cfg_block, &conf->sched_cfg[i],\n                           msg_out) != 0)\n 
           return EINVAL;\n    }\n    return 0;\n}\n\nstatic match_source_t str2match_source(const char *tmp)\n{\n    if (!strcasecmp(tmp, \"none\"))\n        return MS_NONE;\n    if (!strcasecmp(tmp, \"cache_only\"))\n        return MS_CACHE_ONLY;\n    /* auto_update for backward compat */\n    if (!strcasecmp(tmp, \"auto_update\")\n        || !strcasecmp(tmp, \"auto_update_attrs\"))\n        return MS_AUTO_ATTRS;\n    if (!strcasecmp(tmp, \"auto_update_all\"))\n        return MS_AUTO_ALL;\n    if (!strcasecmp(tmp, \"force_update\"))\n        return MS_FORCE_UPDT;\n\n    return MS_INVALID;\n}\n\n#define critical_err_check(_ptr_, _blkname_) do { if (!(_ptr_)) {\\\n          sprintf(msg_out, \"Internal error reading %s block in config file\",\\\n                  (_blkname_)); \\\n          return EFAULT; \\\n        }\\\n    } while (0)\n\nstatic int polrun_read_config(config_file_t config, const char *policy_name,\n                              const struct sm_instance *smi,\n                              policy_run_config_t *conf, char *msg_out)\n{\n    int rc;\n    char block_name[1024];\n    char tmp[1024];\n    config_item_t param_block, action_params_block;\n    char **extra = NULL;\n    unsigned int extra_cnt = 0;\n\n    /* parameter for CheckUnknownParams() */\n    static const char *allowed[] = {\n        \"lru_sort_attr\", \"max_action_count\",\n        \"max_action_volume\", \"nb_threads\", \"suspend_error_pct\",\n        \"suspend_error_min\", \"report_interval\", \"action_timeout\",\n        \"check_actions_interval\", \"check_actions_on_startup\",\n        \"recheck_ignored_entries\", \"report_actions\",\n        \"pre_maintenance_window\", \"maint_min_apply_delay\", \"queue_size\",\n        \"db_result_size_max\", \"action_params\", \"action\", SCHED_PARAM_NAME,\n        \"pre_sched_match\", \"post_sched_match\", \"reschedule_delay_ms\",\n        \"pre_run_command\", \"post_run_command\",\n        \"recheck_ignored_classes\",  /* for compat */\n      
  NULL\n    };\n\n    /* parameter for read_scalar_params() */\n    const cfg_param_t cfg_params[] = {\n        {\"max_action_count\", PT_INT, PFLG_POSITIVE,\n         &conf->max_action_nbr, 0},\n        {\"max_action_volume\", PT_SIZE, PFLG_POSITIVE,\n         &conf->max_action_vol, 0},\n        {\"nb_threads\", PT_INT, PFLG_POSITIVE | PFLG_NOT_NULL,\n         &conf->nb_threads, 0},\n        {\"suspend_error_pct\", PT_FLOAT, PFLG_POSITIVE | PFLG_ALLOW_PCT_SIGN,\n         &conf->suspend_error_pct, 0},\n        {\"suspend_error_min\", PT_INT, PFLG_POSITIVE,\n         &conf->suspend_error_min, 0},\n        {\"report_interval\", PT_DURATION, PFLG_POSITIVE | PFLG_NOT_NULL,\n         &conf->report_interval, 0},\n        {\"action_timeout\", PT_DURATION, PFLG_POSITIVE,\n         &conf->action_timeout, 0},\n        {\"check_actions_interval\", PT_DURATION, PFLG_POSITIVE,\n         &conf->check_action_status_delay, 0},\n        {\"check_actions_on_startup\", PT_BOOL, 0,\n         &conf->check_action_status_on_startup, 0},\n        {\"recheck_ignored_entries\", PT_BOOL, 0,\n         &conf->recheck_ignored_entries, 0},\n        {\"report_actions\", PT_BOOL, 0, &conf->report_actions, 0},\n        {\"pre_maintenance_window\", PT_DURATION, PFLG_POSITIVE,\n         &conf->pre_maintenance_window, 0},\n        {\"maint_min_apply_delay\", PT_DURATION, PFLG_POSITIVE,\n         &conf->maint_min_apply_delay, 0},\n        {\"queue_size\", PT_INT, PFLG_POSITIVE | PFLG_NOT_NULL,\n         &conf->queue_size, 0},\n        {\"db_result_size_max\", PT_INT, PFLG_POSITIVE,\n         &conf->db_request_limit, 0},\n        {\"reschedule_delay_ms\", PT_INT, PFLG_POSITIVE,\n         &conf->reschedule_delay_ms, 0},\n        {\"pre_run_command\", PT_CMD, 0, &conf->pre_run_command, 0},\n        {\"post_run_command\", PT_CMD, 0, &conf->post_run_command, 0},\n\n        {NULL, 0, 0, NULL, 0}\n    };\n    snprintf(block_name, sizeof(block_name), \"%s\" PARAM_SUFFIX, policy_name);\n\n    /* get 
<policy>_parameters block */\n    rc = get_cfg_block(config, block_name, &param_block, msg_out);\n    if (rc)\n        return rc == ENOENT ? 0 : rc;   /* not mandatory */\n\n    /* check deprecated parameters */\n    rc = GetBoolParam(param_block, block_name, \"recheck_ignored_classes\", 0,\n                      &conf->recheck_ignored_entries, NULL, NULL, msg_out);\n    if (rc == 0)\n        DisplayLog(LVL_CRIT, TAG, \"WARNING: parameter %s::%s' is deprecated. \"\n                   \"Use 'recheck_ignored_entries' instead.\",\n                   block_name, \"recheck_ignored_classes\");\n\n    /* read all scalar params */\n    rc = read_scalar_params(param_block, block_name, cfg_params, msg_out);\n    if (rc)\n        return rc;\n\n    /* read specific parameters */\n\n    extra = NULL;\n    extra_cnt = 0;\n    /* 'lru_sort_attr' overrides 'default_lru_sort_attr' from 'define_policy' */\n    rc = GetStringParam(param_block, block_name, \"lru_sort_attr\",\n                        PFLG_NO_WILDCARDS, tmp, sizeof(tmp), &extra, &extra_cnt,\n                        msg_out);\n    if ((rc != 0) && (rc != ENOENT))\n        return rc;\n    if (rc != ENOENT) {\n        /* is it a sortable attribute? */\n        rc = str2lru_attr(tmp, smi);\n        if (rc == LRU_ATTR_INVAL) {\n            strcpy(msg_out, \"sortable attribute expected for 'lru_sort_attr': \"\n                   ALLOWED_LRU_ATTRS_STR \"...\");\n            return EINVAL;\n        }\n        conf->lru_sort_attr = rc;\n\n        /* check extra parameter (allowed values are \"asc\" or \"desc\"). */\n        /* Leave unchanged (default to \"asc\") if no parameter is specified. 
*/\n        if (extra_cnt > 1) {\n            sprintf(msg_out, \"Too many parameters found for lru_sort_attr = \"\n                    \" '%s' in block '%s': '(asc)' or '(desc)' expected\",\n                    tmp, block_name);\n            return EINVAL;\n        }\n        if (extra_cnt == 1) {\n            rc = str2sort_order(extra[0]);\n            if (rc < 0) {\n                sprintf(msg_out, \"Invalid sort order '%s' in block '%s':\"\n                        \" 'asc' or 'desc' expected\", extra[0], block_name);\n                return EINVAL;\n            }\n            conf->lru_sort_order = rc;\n        }\n    }\n    /* if sort attr = NONE, set sort_type = NONE */\n    if (conf->lru_sort_attr == LRU_ATTR_NONE)\n        conf->lru_sort_order = SORT_NONE;\n\n    /* 'action' overrides 'default_action' from 'define_policy' */\n    rc = GetStringParam(param_block, block_name, \"action\",\n                        0, tmp, sizeof(tmp), &extra, &extra_cnt, msg_out);\n    if ((rc != 0) && (rc != ENOENT))\n        return rc;\n    if (rc != ENOENT) {\n        rc = parse_policy_action(\"action\", tmp, extra, extra_cnt,\n                                 &conf->action, &conf->run_attr_mask, msg_out);\n        if (rc)\n            return rc;\n    }\n\n    /* get the list of schedulers */\n    rc = GetStringParam(param_block, block_name, SCHED_PARAM_NAME,\n                        PFLG_NO_WILDCARDS, tmp, sizeof(tmp), NULL, NULL,\n                        msg_out);\n    if ((rc != 0) && (rc != ENOENT))\n        return rc;\n    if (rc != ENOENT) {\n        rc = parse_schedulers(param_block, tmp, conf, msg_out);\n        if (rc)\n            return rc;\n    }\n\n    /* parse the value of pre_sched_match */\n    rc = GetStringParam(param_block, block_name, \"pre_sched_match\",\n                        PFLG_NO_WILDCARDS, tmp, sizeof(tmp), NULL, NULL,\n                        msg_out);\n    if ((rc != 0) && (rc != ENOENT))\n        return rc;\n    if (rc != ENOENT) {\n        
match_source_t ms;\n\n        /* interpret its value */\n        ms = str2match_source(tmp);\n        if (ms == MS_INVALID) {\n            strcpy(msg_out, \"Wrong value for pre_sched_match. Expected: 'none', \"\n                   \"'cache_only', 'auto_update', 'force_update'.\");\n            return EINVAL;\n        }\n        conf->pre_sched_match = ms;\n    }\n\n    /* parse the value of  post_sched_match */\n    rc = GetStringParam(param_block, block_name, \"post_sched_match\",\n                        PFLG_NO_WILDCARDS, tmp, sizeof(tmp), NULL, NULL,\n                        msg_out);\n    if ((rc != 0) && (rc != ENOENT))\n        return rc;\n    if (rc != ENOENT) {\n        match_source_t ms;\n\n        /* interpret its value */\n        ms = str2match_source(tmp);\n        if (ms == MS_INVALID) {\n            strcpy(msg_out, \"Wrong value for post_sched_match. Expected: 'none', \"\n                   \"'cache_only', 'auto_update', 'force_update'.\");\n            return EINVAL;\n        }\n        conf->post_sched_match = ms;\n    }\n\n    /* get subblock */\n    bool unique = true;\n    action_params_block =\n        rh_config_GetItemByName(param_block, \"action_params\", &unique);\n    if (action_params_block != NULL) {\n        if (!unique) {\n            sprintf(msg_out, \"Found duplicate block '%s' in '%s' line %d.\",\n                    \"action_params\", block_name,\n                    rh_config_GetItemLine(action_params_block));\n            return EEXIST;\n        }\n        if (rh_config_ItemType(action_params_block) != CONFIG_ITEM_BLOCK) {\n            sprintf(msg_out,\n                    \"A block is expected for configuration item '%s::action_params', line %d.\",\n                    block_name, rh_config_GetItemLine(action_params_block));\n            return EINVAL;\n        }\n#ifdef _DEBUG_POLICIES\n        fprintf(stderr, \"processing parameters in '%s'\\n\", block_name);\n#endif\n        rc = read_action_params(action_params_block, 
&conf->action_params,\n                                &conf->run_attr_mask, msg_out);\n        if (rc)\n            return rc;\n    }\n\n    /* warn for unknown parameters */\n    CheckUnknownParameters(param_block, block_name, allowed);\n\n    return 0;\n}\n\nstatic int polrun_read_triggers(config_file_t config, const char *policy_name,\n                                policy_run_config_t *conf, char *msg_out)\n{\n    int rc;\n    unsigned int blk_index;\n    char block_name[1024];\n\n    /* get TRIGGER blocks */\n    snprintf(block_name, sizeof(block_name), \"%s\" TRIGGER_SUFFIX, policy_name);\n\n    for (blk_index = 0; blk_index < rh_config_GetNbBlocks(config);\n         blk_index++) {\n        char *curr_bname;\n\n        config_item_t curr_item = rh_config_GetBlockByIndex(config, blk_index);\n        critical_err_check(curr_item, \"root\");\n\n        if (rh_config_ItemType(curr_item) != CONFIG_ITEM_BLOCK)\n            continue;\n\n        curr_bname = rh_config_GetBlockName(curr_item);\n        critical_err_check(curr_bname, \"root\");\n\n        if (!strcasecmp(curr_bname, block_name)) {\n            conf->trigger_count++;\n            /* realloc behaves as malloc when trigger_list is NULL */\n            conf->trigger_list = (trigger_item_t *) realloc(conf->trigger_list,\n                                                            conf->\n                                                            trigger_count *\n                                                            sizeof\n                                                            (trigger_item_t));\n            if (conf->trigger_list == NULL)\n                return ENOMEM;\n\n            /* analyze trigger block */\n            rc = parse_trigger_block(curr_item, curr_bname,\n                                     &conf->trigger_list[conf->trigger_count -\n                                                         1], msg_out);\n            if (rc)\n                return rc;\n        }\n    }\n\n   
 return 0;\n}\n\n/* read the run cfg for all policies */\nstatic int policy_run_cfg_read(config_file_t config, void *module_config,\n                               char *msg_out)\n{\n    int i, rc = 0;\n    policy_run_config_list_t *allconf =\n        (policy_run_config_list_t *) module_config;\n\n    /* allconf->count is supposed to be set by set_default and configs must\n     * be allocated.\n     * double check by comparing policy count and policy_run count */\n    if (allconf->count != policies.policy_count)\n        RBH_BUG(\"Unexpected policy_run_cfg count != policy count\");\n\n    for (i = 0; i < allconf->count; i++) {\n        rc = polrun_read_config(config, policies.policy_list[i].name,\n                                policies.policy_list[i].status_mgr,\n                                &allconf->configs[i], msg_out);\n        if (rc)\n            return rc;\n\n        rc = polrun_read_triggers(config, policies.policy_list[i].name,\n                                  &allconf->configs[i], msg_out);\n        if (rc)\n            return rc;\n    }\n    return 0;\n}\n\nstatic inline void no_trig_updt_msg(const char *what)\n{\n    DisplayLog(LVL_MAJOR, TAG, \"%s changed in config file but cannot be \"\n               \"modified dynamically: trigger update cancelled\", what);\n}\n\nstatic void update_triggers(trigger_item_t *trigger_tgt,\n                            unsigned int count_tgt,\n                            trigger_item_t *trigger_new,\n                            unsigned int count_new, bool *check_interval_chgd)\n{\n    unsigned int i;\n    *check_interval_chgd = false;\n\n    if (count_new != count_tgt) {\n        /* skip trigger checking & update */\n        no_trig_updt_msg(\"Trigger count\");\n        return;\n    }\n\n    /* check trigger types */\n    for (i = 0; i < count_new; i++) {\n        if (trigger_new[i].trigger_type != trigger_tgt[i].trigger_type\n            || trigger_new[i].target_type != trigger_tgt[i].target_type) {\n            
no_trig_updt_msg(\"Trigger type\");\n            return;\n        }\n        if ((trigger_new[i].trigger_type != TRIG_ALWAYS) &&\n                   (trigger_new[i].hw_type != trigger_tgt[i].hw_type)) {\n            no_trig_updt_msg(\"High threshold type\");\n            return;\n        }\n        if ((trigger_new[i].trigger_type != TRIG_ALWAYS) &&\n                   (trigger_new[i].lw_type != trigger_tgt[i].lw_type)) {\n            no_trig_updt_msg(\"Low threshold type\");\n            return;\n        }\n    }\n\n    /* triggers have the same type: update simple parameters:\n     * max_action_count, max_action_volume, check_interval, alert_high,\n     * alert_low, post_trigger_wait */\n    for (i = 0; i < count_new; i++) {\n        char tname[256];\n\n        snprintf(tname, sizeof(tname), \"#%u (%s): \", i,\n                 trigger2str(&trigger_tgt[i]));\n\n        if (trigger_new[i].check_interval != trigger_tgt[i].check_interval) {\n            DisplayLog(LVL_EVENT, TAG,\n                       \"check_interval updated for trigger %s: %lu->%lu\", tname,\n                       trigger_tgt[i].check_interval,\n                       trigger_new[i].check_interval);\n            trigger_tgt[i].check_interval = trigger_new[i].check_interval;\n            *check_interval_chgd = true;\n        }\n\n        if (trigger_new[i].max_action_nbr != trigger_tgt[i].max_action_nbr) {\n            DisplayLog(LVL_EVENT, TAG,\n                       \"max_action_count updated for trigger %s: %u entries ->%u entries\",\n                       tname, trigger_tgt[i].max_action_nbr,\n                       trigger_new[i].max_action_nbr);\n            trigger_tgt[i].max_action_nbr = trigger_new[i].max_action_nbr;\n        }\n\n        if (trigger_new[i].max_action_vol != trigger_tgt[i].max_action_vol) {\n            DisplayLog(LVL_EVENT, TAG,\n                       \"max_action_volume updated for trigger %s: %llu bytes->%llu bytes\",\n                       tname, 
trigger_tgt[i].max_action_vol,\n                       trigger_new[i].max_action_vol);\n            trigger_tgt[i].max_action_vol = trigger_new[i].max_action_vol;\n        }\n\n        if (trigger_new[i].post_trigger_wait !=\n            trigger_tgt[i].post_trigger_wait) {\n            DisplayLog(LVL_EVENT, TAG,\n                       \"post_trigger_wait updated for trigger %s: %lu->%lu\",\n                       tname, trigger_tgt[i].post_trigger_wait,\n                       trigger_new[i].post_trigger_wait);\n            trigger_tgt[i].post_trigger_wait = trigger_new[i].post_trigger_wait;\n        }\n\n        if (trigger_new[i].alert_hw != trigger_tgt[i].alert_hw) {\n            DisplayLog(LVL_EVENT, TAG,\n                       \"alert_high updated for trigger %s: %s->%s\", tname,\n                       bool2str(trigger_tgt[i].alert_hw),\n                       bool2str(trigger_new[i].alert_hw));\n            trigger_tgt[i].alert_hw = trigger_new[i].alert_hw;\n        }\n\n        if (trigger_new[i].alert_lw != trigger_tgt[i].alert_lw) {\n            DisplayLog(LVL_EVENT, TAG,\n                       \"alert_low updated for trigger %s: %s->%s\", tname,\n                       bool2str(trigger_tgt[i].alert_lw),\n                       bool2str(trigger_new[i].alert_lw));\n            trigger_tgt[i].alert_lw = trigger_new[i].alert_lw;\n        }\n\n        if (trigger_new[i].trigger_type == TRIG_ALWAYS)\n            /* no threshold for 'periodic' triggers */\n            continue;\n\n        switch (trigger_new[i].hw_type) {\n        case PCT_THRESHOLD:\n            if (trigger_new[i].hw_u.percent != trigger_tgt[i].hw_u.percent) {\n                DisplayLog(LVL_EVENT, TAG,\n                           \"High threshold updated for trigger %s: \"\n                           \"%.2f%%->%.2f%%\", tname, trigger_tgt[i].hw_u.percent,\n                           trigger_new[i].hw_u.percent);\n                trigger_tgt[i].hw_u.percent = trigger_new[i].hw_u.percent;\n  
          }\n            break;\n\n        case VOL_THRESHOLD:\n            if (trigger_new[i].hw_u.volume != trigger_tgt[i].hw_u.volume) {\n                DisplayLog(LVL_EVENT, TAG,\n                           \"High threshold updated for trigger %s: %llu bytes->%llu bytes\",\n                           tname, trigger_tgt[i].hw_u.volume,\n                           trigger_new[i].hw_u.volume);\n                trigger_tgt[i].hw_u.volume = trigger_new[i].hw_u.volume;\n            }\n            break;\n\n        case COUNT_THRESHOLD:\n            if (trigger_new[i].hw_u.count != trigger_tgt[i].hw_u.count) {\n                DisplayLog(LVL_EVENT, TAG,\n                           \"High threshold updated for trigger %s: %llu entries ->%llu entries\",\n                           tname, trigger_tgt[i].hw_u.count,\n                           trigger_new[i].hw_u.count);\n                trigger_tgt[i].hw_u.count = trigger_new[i].hw_u.count;\n            }\n            break;\n        case CNTPCT_THRESHOLD:\n            if (trigger_new[i].hw_u.percent != trigger_tgt[i].hw_u.percent) {\n                DisplayLog(LVL_EVENT, TAG,\n                           \"High threshold updated for trigger %s: \"\n                           \"%.2f%%->%.2f%%\", tname, trigger_tgt[i].hw_u.percent,\n                           trigger_new[i].hw_u.percent);\n                trigger_tgt[i].hw_u.percent = trigger_new[i].hw_u.percent;\n            }\n            break;\n        }\n\n        switch (trigger_new[i].lw_type) {\n        case PCT_THRESHOLD:\n            if (trigger_new[i].lw_u.percent != trigger_tgt[i].lw_u.percent) {\n                DisplayLog(LVL_EVENT, TAG,\n                           \"Low threshold updated for trigger %s: %.2f%%->%.2f%%\",\n                           tname, trigger_tgt[i].lw_u.percent,\n                           trigger_new[i].lw_u.percent);\n                trigger_tgt[i].lw_u.percent = trigger_new[i].lw_u.percent;\n            }\n            break;\n\n     
   case VOL_THRESHOLD:\n            if (trigger_new[i].lw_u.volume != trigger_tgt[i].lw_u.volume) {\n                DisplayLog(LVL_EVENT, TAG,\n                           \"Low threshold updated for trigger %s: %llu bytes->%llu bytes\",\n                           tname, trigger_tgt[i].lw_u.volume,\n                           trigger_new[i].lw_u.volume);\n                trigger_tgt[i].lw_u.volume = trigger_new[i].lw_u.volume;\n            }\n            break;\n\n        case COUNT_THRESHOLD:\n            if (trigger_new[i].lw_u.count != trigger_tgt[i].lw_u.count) {\n                DisplayLog(LVL_EVENT, TAG,\n                           \"Low threshold updated for trigger %s: %llu entries->%llu entries\",\n                           tname, trigger_tgt[i].lw_u.count,\n                           trigger_new[i].lw_u.count);\n                trigger_tgt[i].lw_u.count = trigger_new[i].lw_u.count;\n            }\n            break;\n        case CNTPCT_THRESHOLD:\n            if (trigger_new[i].lw_u.percent != trigger_tgt[i].lw_u.percent) {\n                DisplayLog(LVL_EVENT, TAG,\n                           \"Low threshold updated for trigger %s: %.2f%%->%.2f%%\",\n                           tname, trigger_tgt[i].lw_u.percent,\n                           trigger_new[i].lw_u.percent);\n                trigger_tgt[i].lw_u.percent = trigger_new[i].lw_u.percent;\n            }\n            break;\n        }\n    }\n\n    /* update global interval check (GCD of all check intervals) if one of\n     * them changed */\n// TODO move to caller\n//    if (check_interval_chgd)\n//        ResMon_UpdateCheckInterval();\n\n    /* triggers have been updated */\n    return;\n}\n\nstatic void free_triggers(trigger_item_t *p_triggers, unsigned int count)\n{\n    unsigned int i, j;\n    for (i = 0; i < count; i++) {\n        if ((p_triggers[i].list_size > 0) && (p_triggers[i].list != NULL)) {\n            /* free the strings */\n            for (j = 0; j < p_triggers[i].list_size; 
j++) {\n                if (p_triggers[i].list[j] != NULL)\n                    free(p_triggers[i].list[j]);\n            }\n\n            /* free the arg list */\n            free(p_triggers[i].list);\n\n            /* free action_params */\n            rbh_params_free(&p_triggers[i].action_params);\n        }\n    }\n\n    /* free the trigger list */\n    if ((count > 0) && (p_triggers != NULL))\n        free(p_triggers);\n}\n\nstatic inline void no_param_updt_msg(const char *blk, const char *name)\n{\n    DisplayLog(LVL_MAJOR, TAG, \"%s::%s changed in config file, but cannot be \"\n               \"modified dynamically\", blk, name);\n}\n\n#define PARAM_UPDT_MSG(_blk, _name, _format, _v1, _v2) \\\n        DisplayLog(LVL_EVENT, TAG, \"%s::%s updated: \"_format\"->\"_format, \\\n                   _blk, _name, _v1, _v2)\n\n/** reload parameters for a single policy */\nstatic int polrun_reload(const char *blkname, policy_run_config_t *cfg_tgt,\n                         policy_run_config_t *cfg_new,\n                         bool *recompute_interval)\n{\n    /* parameters that can't be modified dynamically */\n    if (cfg_tgt->nb_threads != cfg_new->nb_threads)\n        no_param_updt_msg(blkname, \"nb_threads\");\n\n    if (cfg_tgt->queue_size != cfg_new->queue_size)\n        no_param_updt_msg(blkname, \"queue_size\");\n\n// FIXME can change action functions, but not cmd string\n//    if (strcmp(cfg_new->default_action, cfg_tgt->default_action))\n//        no_param_updt_msg(blkname, \"default_action\");\n//  TODO parse action and action_params\n//       and set params_attr_mask accordingly.\n\n    if (cfg_tgt->lru_sort_attr != cfg_new->lru_sort_attr)\n        no_param_updt_msg(blkname, \"lru_sort_attr\");\n\n    /* dynamic parameters */\n    if (cfg_tgt->max_action_nbr != cfg_new->max_action_nbr) {\n        PARAM_UPDT_MSG(blkname, \"max_action_count\", \"%u\",\n                       cfg_tgt->max_action_nbr, cfg_new->max_action_nbr);\n        cfg_tgt->max_action_nbr 
= cfg_new->max_action_nbr;\n    }\n\n    if (cfg_tgt->max_action_vol != cfg_new->max_action_vol) {\n        PARAM_UPDT_MSG(blkname, \"max_action_volume\", \"%llu\",\n                       cfg_tgt->max_action_vol, cfg_new->max_action_vol);\n        cfg_tgt->max_action_vol = cfg_new->max_action_vol;\n    }\n\n    if (cfg_tgt->suspend_error_pct != cfg_new->suspend_error_pct) {\n        PARAM_UPDT_MSG(blkname, \"suspend_error_pct\", \"%.2f%%\",\n                       cfg_tgt->suspend_error_pct, cfg_new->suspend_error_pct);\n        cfg_tgt->suspend_error_pct = cfg_new->suspend_error_pct;\n    }\n\n    if (cfg_tgt->suspend_error_min != cfg_new->suspend_error_min) {\n        PARAM_UPDT_MSG(blkname, \"suspend_error_min\", \"%u\",\n                       cfg_tgt->suspend_error_min, cfg_new->suspend_error_min);\n        cfg_tgt->suspend_error_min = cfg_new->suspend_error_min;\n    }\n\n    if (cfg_tgt->report_interval != cfg_new->report_interval) {\n        PARAM_UPDT_MSG(blkname, \"report_interval\", \"%lu\",\n                       cfg_tgt->report_interval, cfg_new->report_interval);\n        cfg_tgt->report_interval = cfg_new->report_interval;\n    }\n\n    if (cfg_tgt->action_timeout != cfg_new->action_timeout) {\n        PARAM_UPDT_MSG(blkname, \"action_timeout\", \"%lu\",\n                       cfg_tgt->action_timeout, cfg_new->action_timeout);\n        cfg_tgt->action_timeout = cfg_new->action_timeout;\n    }\n\n    if (cfg_tgt->check_action_status_delay !=\n        cfg_new->check_action_status_delay) {\n        PARAM_UPDT_MSG(blkname, \"check_actions_interval\", \"%lu\",\n                       cfg_tgt->check_action_status_delay,\n                       cfg_new->check_action_status_delay);\n        cfg_tgt->check_action_status_delay = cfg_new->check_action_status_delay;\n    }\n\n    if (cfg_tgt->db_request_limit != cfg_new->db_request_limit) {\n        PARAM_UPDT_MSG(blkname, \"db_result_size_max\", \"%u\",\n                       cfg_tgt->db_request_limit, 
cfg_new->db_request_limit);\n        cfg_tgt->db_request_limit = cfg_new->db_request_limit;\n    }\n\n    if (cfg_tgt->pre_maintenance_window != cfg_new->pre_maintenance_window) {\n        PARAM_UPDT_MSG(blkname, \"pre_maintenance_window\", \"%lu\",\n                       cfg_tgt->pre_maintenance_window,\n                       cfg_new->pre_maintenance_window);\n        cfg_tgt->pre_maintenance_window = cfg_new->pre_maintenance_window;\n    }\n\n    if (cfg_tgt->maint_min_apply_delay != cfg_new->maint_min_apply_delay) {\n        PARAM_UPDT_MSG(blkname, \"maint_min_apply_delay\", \"%lu\",\n                       cfg_tgt->maint_min_apply_delay,\n                       cfg_new->maint_min_apply_delay);\n        cfg_tgt->maint_min_apply_delay = cfg_new->maint_min_apply_delay;\n    }\n\n    if (cfg_tgt->check_action_status_on_startup !=\n        cfg_new->check_action_status_on_startup) {\n        PARAM_UPDT_MSG(blkname, \"check_actions_on_startup\", \"%s\",\n                       bool2str(cfg_tgt->check_action_status_on_startup),\n                       bool2str(cfg_new->check_action_status_on_startup));\n        cfg_tgt->check_action_status_on_startup =\n            cfg_new->check_action_status_on_startup;\n    }\n\n    if (cfg_tgt->recheck_ignored_entries != cfg_new->recheck_ignored_entries) {\n        PARAM_UPDT_MSG(blkname, \"recheck_ignored_entries\", \"%s\",\n                       bool2str(cfg_tgt->recheck_ignored_entries),\n                       bool2str(cfg_new->recheck_ignored_entries));\n        cfg_tgt->recheck_ignored_entries = cfg_new->recheck_ignored_entries;\n    }\n\n    if (cfg_tgt->report_actions != cfg_new->report_actions) {\n        PARAM_UPDT_MSG(blkname, \"report_actions\", \"%s\",\n                       bool2str(cfg_tgt->report_actions),\n                       bool2str(cfg_new->report_actions));\n        cfg_tgt->report_actions = cfg_new->report_actions;\n    }\n\n    update_triggers(cfg_tgt->trigger_list, cfg_tgt->trigger_count,\n            
        cfg_new->trigger_list, cfg_new->trigger_count,\n                    recompute_interval);\n    return 0;\n}\n\n/** reload cfg for all policies */\nstatic int policy_run_cfg_reload(policy_run_config_list_t *conf)\n{\n    int i, rc;\n    int err = 0;\n\n    if (conf->count != run_cfgs.count) {\n        DisplayLog(LVL_MAJOR, TAG, \"New policy count doesn't match previous \"\n                   \"policy count (%u vs %u): skipping config update.\",\n                   conf->count, run_cfgs.count);\n        return 0;\n    }\n    for (i = 0; i < conf->count; i++) {\n        bool chgd = false;\n        char block_name[256];\n        const char *pname = policies.policy_list[i].name;\n        snprintf(block_name, sizeof(block_name), \"%s\" PARAM_SUFFIX, pname);\n\n        rc = polrun_reload(block_name, &run_cfgs.configs[i], &conf->configs[i],\n                           &chgd);\n        if (rc) {\n            DisplayLog(LVL_MAJOR, TAG,\n                       \"Failed to reload parameters for policy %s (rc=%d)\",\n                       pname, rc);\n            if (rc > err)\n                err = rc;\n        } else {\n            DisplayLog(LVL_DEBUG, TAG,\n                       \"Successfully reloaded config for policy %s\", pname);\n        }\n    }\n\n    /* policy runs may not be in the same order as policies and run_cfgs */\n//    FIXME RBHv3\n//    if (chgd && policy_runs.runs != NULL)\n//        policy_module_update_check_interval(&policy_runs.runs[i]);\n\n    return err;\n}\n\nstatic int policy_run_cfg_set(void *config, bool reload)\n{\n    policy_run_config_list_t *cfg = (policy_run_config_list_t *) config;\n\n    if (reload)\n        return policy_run_cfg_reload(cfg);\n\n    run_cfgs = *cfg;\n    return 0;\n}\n\nstatic void policy_run_cfg_free(void *config)\n{\n    policy_run_config_list_t *cfg = (policy_run_config_list_t *) config;\n\n    if (cfg != NULL) {\n        int i;\n\n        if (cfg->configs != NULL) {\n            for (i = 0; i < cfg->count; 
i++) {\n                if (cfg->configs[i].trigger_list != NULL)\n                    free_triggers(cfg->configs[i].trigger_list,\n                                  cfg->configs[i].trigger_count);\n                rbh_params_free(&cfg->configs[i].action_params);\n            }\n            free(cfg->configs);\n        }\n        free(cfg);\n    }\n}\n\nmod_cfg_funcs_t policy_run_cfg_hdlr = {\n    .module_name = \"policy run\",\n    .new = policy_run_cfg_new,\n    .free = policy_run_cfg_free,\n    .set_default = policy_run_cfg_set_default,\n    .read = policy_run_cfg_read,\n    .set_config = policy_run_cfg_set,\n    .write_default = policy_run_cfg_write_default,\n    .write_template = policy_run_cfg_write_template\n};\n"
  },
  {
    "path": "src/policies/policy_sched.c",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2016 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n#ifdef HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n\n#include \"policy_sched.h\"\n#include \"rbh_misc.h\"\n\nstruct sched_q_item {\n    const entry_id_t   *id;\n    const attr_set_t   *attrs;\n    sched_cb_t          cb;\n    void               *udata;\n};\n\n/** Thread to submit entries to the scheduler */\nstatic void *enqueue_thr(void *arg)\n{\n    struct sched_res_t *sched_res = arg;\n\n    while (1) {\n        struct sched_q_item *item;\n        int rc;\n\n        item = g_async_queue_pop(sched_res->sched_queue);\n\n        if (unlikely(item == NULL))\n            RBH_BUG(\"Dequeued item is not supposed to be NULL\");\n\nretry:\n        DisplayLog(LVL_DEBUG, __func__, \"Submitting entry to scheduler\");\n        /* push to the scheduler */\n        rc = sched_res->sched_desc->sched_schedule(sched_res->sched_data,\n                                                   item->id, item->attrs,\n                                                   item->cb, item->udata);\n        switch (rc) {\n        case SCHED_OK:\n            /* OK, continue enqueuing other entries */\n            break;\n\n        case SCHED_DELAY:\n            /* Wait a while before submitting any new entry */\n            if (sched_res->run_cfg->reschedule_delay_ms > 0) {\n                DisplayLog(LVL_DEBUG, __func__,\n                           \"Waiting %u ms before submitting new entries\",\n                           sched_res->run_cfg->reschedule_delay_ms);\n                rh_usleep(1000 * 
sched_res->run_cfg->reschedule_delay_ms);\n            }\n            goto retry;\n\n        case SCHED_SKIP_ENTRY:\n            /* skip the entry for the current run */\n            item->cb(item->udata, SCHED_SKIP_ENTRY);\n            break;\n\n        case SCHED_STOP_RUN:\n        case SCHED_KILL_RUN:\n            /* stop submitting entries:\n             * - acknowledge/free all entries\n             *   and signal 'STOP' of the policy run\n             * - free current item\n             */\n\n            /* no more push */\n            sched_res->terminate = true;\n\n            /* unqueue and acknowledge all */\n            do {\n                item->cb(item->udata, rc);\n                free(item);\n            } while ((item = g_async_queue_try_pop(sched_res->sched_queue)) != NULL);\n\n            DisplayLog(LVL_VERB, __func__, \"Stop submitting entries for \"\n                   \"current policy run\");\n            break;\n\n        default:\n            /* Error or unhandled return code */\n            DisplayLog(LVL_VERB, __func__, \"Unexpected error %d\", rc);\n            item->cb(item->udata, rc);\n            break;\n        }\n        free(item);\n    }\n    UNREACHED();\n}\n\n/**\n * Initialize scheduler resources.\n */\nint sched_init(struct sched_res_t *sched_res,\n               const action_scheduler_t *sched_desc,\n               void *sched_cfg, policy_run_config_t *run_cfg)\n{\n    int rc;\n\n    if (!sched_res)\n        return -EINVAL;\n\n    sched_res->sched_desc = sched_desc;\n    sched_res->sched_data = NULL;\n    sched_res->terminate = false;\n    sched_res->run_cfg = run_cfg;\n\n    /* initialize the scheduler */\n    if (sched_desc->sched_init_func != NULL) {\n        rc = sched_desc->sched_init_func(sched_cfg, &sched_res->sched_data);\n        if (rc)\n            return rc;\n    }\n\n    sched_res->sched_queue = g_async_queue_new_full(free);\n\n    /* start the enqueuer thread now everything is initialized */\n    if 
(pthread_create(&sched_res->sched_thread, NULL, enqueue_thr,\n                       sched_res) != 0) {\n        rc = -errno;\n        goto err_free;\n    }\n\n    return 0;\n\nerr_free:\n    /* TODO destroy scheduler resources */\n    g_async_queue_unref(sched_res->sched_queue);\n    return rc;\n}\n\n/** Reinitialize scheduling for a new policy run */\nint sched_reinit(struct sched_res_t *sched_res)\n{\n    int rc;\n\n    /* error if queue is not empty */\n    if (g_async_queue_length(sched_res->sched_queue) > 0) {\n        DisplayLog(LVL_MAJOR, __func__, \"Trying to reinitialize a non-empty queue\");\n        return -EINVAL;\n    }\n\n    /* reset scheduler */\n    if (sched_res->sched_desc->sched_reset_func != NULL) {\n        rc = sched_res->sched_desc->sched_reset_func(sched_res->sched_data);\n        if (rc)\n            return rc;\n    }\n\n    sched_res->terminate = false;\n\n    return 0;\n}\n\n/**\n * Submit a new entry for scheduling.\n */\nint sched_push(struct sched_res_t *sched_res,\n               const entry_id_t *id, const attr_set_t *attrs,\n               sched_cb_t cb, void *udata)\n{\n    struct sched_q_item *item;\n    int rc;\n\n    if (sched_res->terminate)\n    /* a stop in already pending, so don't trigger one again */\n    /* Simply skip the entry. 
*/\n        return SCHED_SKIP_ENTRY;\n\n    /* if the queue is empty, directly push to the scheduler */\n    if (g_async_queue_length(sched_res->sched_queue) <= 0) {\n        rc = sched_res->sched_desc->sched_schedule(sched_res->sched_data,\n                                                   id, attrs, cb, udata);\n\n        /* if the entry must be delayed, then we have to push it to the queue */\n        if (rc != SCHED_DELAY)\n            return rc;\n    }\n\n    /* if the entry is to be delayed, or if queue is not empty, push to the queue */\n    item = calloc(1, sizeof (*item));\n    if (!item)\n        return -ENOMEM;\n\n    item->id = id;\n    item->attrs = attrs;\n    item->cb = cb;\n    item->udata = udata;\n\n    DisplayLog(LVL_DEBUG, __func__, \"Entry waiting to be submitted to \"\n               \"the scheduler\");\n    g_async_queue_push(sched_res->sched_queue, item);\n    return 0;\n}\n\n/**\n * Flush all pending entries.\n */\nint sched_flush(struct sched_res_t *sched_res);\n\n/**\n * Drop any pending entry from the scheduler and the wait queue.\n */\nint sched_flush(struct sched_res_t *sched_res)\n{\n    struct sched_q_item *item;\n    int rc;\n\n    /* no more push */\n    sched_res->terminate = true;\n\n    /* unqueue and acknowledge all */\n    while ((item = g_async_queue_try_pop(sched_res->sched_queue)) != NULL) {\n        /* If we are called, a stopping process is already pending.\n         * don't trigger more of them. */\n        item->cb(item->udata, SCHED_SKIP_ENTRY);\n        free(item);\n    }\n\n    /* reset items in scheduler */\n    if (sched_res->sched_desc->sched_reset_func != NULL) {\n        rc = sched_res->sched_desc->sched_reset_func(sched_res->sched_data);\n        if (rc)\n            return rc;\n    }\n\n    return 0;\n}\n"
  },
  {
    "path": "src/policies/policy_sched.h",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2016 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n#include \"policy_run.h\"\n\n#include <glib.h>\n\n/**\n * Scheduler resources.\n */\nstruct sched_res_t {\n    /** Items waiting to be submitted to the schduler */\n    GAsyncQueue *sched_queue;\n    /** Thread to submit entries to the scheduler */\n    pthread_t    sched_thread;\n    /** Scheduler description */\n    const action_scheduler_t *sched_desc;\n    /** Scheduler private context */\n    void        *sched_data;\n    /** termination state */\n    bool         terminate;\n    /** pointer to policy run configuration */\n    policy_run_config_t *run_cfg;\n};\n\n/**\n * Initialize scheduler resources.\n */\nint sched_init(struct sched_res_t *sched_res,\n               const action_scheduler_t *sched_desc,\n               void *sched_cfg, policy_run_config_t *run_cfg);\n\n/** Reinitialize scheduling for a new policy run */\nint sched_reinit(struct sched_res_t *sched_res);\n\n/**\n * Submit a new entry for scheduling.\n */\nint sched_push(struct sched_res_t *sched_res,\n               const entry_id_t *id, const attr_set_t *attrs,\n               sched_cb_t cb, void *udata);\n\n/**\n * Drop any entry from the scheduler and the wait queue.\n */\nint sched_flush(struct sched_res_t *sched_res);\n"
  },
  {
    "path": "src/policies/policy_triggers.c",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2009, 2010 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n#ifdef HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n\n#define TAG \"Triggers\"\n#define tag(_p)             ((_p)->descr->name)\n\n#include \"rbh_cfg.h\"\n#include \"rbh_misc.h\"\n#include \"policy_run.h\"\n#include \"run_policies.h\"\n#include \"policy_sched.h\"\n#include \"queue.h\"\n#include \"Memory.h\"\n#include \"xplatform_print.h\"\n#include <errno.h>\n#include <pthread.h>\n#include <unistd.h>\n\n#ifdef __FreeBSD__\n#include <sys/param.h>\n#include <sys/mount.h>\n#else /* Linux */\n#include <sys/vfs.h>\n#endif\n\n/* ------------ Types and global variables ------------ */\n\n#define is_count_trigger(_t_) ((_t_)->hw_type == COUNT_THRESHOLD || \\\n                               (_t_)->hw_type == CNTPCT_THRESHOLD)\n#define check_only(_p) ((_p)->flags & RUNFLG_CHECK_ONLY)\n#define one_shot(_p) ((_p)->flags & RUNFLG_ONCE)\n\nstatic void update_trigger_status(policy_info_t *pol, int i,\n                                  trigger_status_t state)\n{\n    if (i < 0)\n        return;\n\n    pol->trigger_info[i].status = state;\n\n    if (state == TRIG_BEING_CHECKED)\n        pol->trigger_info[i].last_check = time(NULL);\n}\n\n/**\n * Function for checking that filesystem hasn't been unmounted\n */\nstatic bool CheckFSDevice(policy_info_t *pol)\n{\n    struct stat root_md;\n\n    /* retrieve device of filesystem, to compare it to initial device id */\n\n    if (stat(global_config.fs_path, &root_md) == -1) {\n        DisplayLog(LVL_CRIT, tag(pol), \"Stat on '%s' failed! 
Error %d: %s\",\n                   global_config.fs_path, errno, strerror(errno));\n        return false;\n    }\n    if (root_md.st_dev != pol->fs_dev) {\n        /* manage dev id change after umount/mount */\n        DisplayLog(LVL_MAJOR, tag(pol),\n                   \"WARNING: Filesystem device id changed (old=%\" PRI_DT\n                   \", new=%\" PRI_DT \"): \" \"checking if it has been remounted\",\n                   pol->fs_dev, root_md.st_dev);\n        if (ResetFS()) {\n            DisplayLog(LVL_CRIT, tag(pol),\n                       \"Filesystem was unmounted!!! EXITING!\");\n            Exit(1);\n        }\n        /* update current fsdev */\n        pol->fs_dev = get_fsdev();\n    }\n    return true;\n}\n\nstatic unsigned long long FSInfo2Blocs512(unsigned long long nb_blocks,\n                                          unsigned long long sz_blocks)\n{\n    uint64_t total_sz;\n    unsigned long long nb_blocks_512;\n    unsigned long long rest;\n\n    /* avoid useless computations */\n    if (sz_blocks == DEV_BSIZE)\n        return nb_blocks;\n\n    /* if block size is different from 512 */\n    total_sz = nb_blocks * sz_blocks;\n    nb_blocks_512 = total_sz / DEV_BSIZE;\n    rest = total_sz % DEV_BSIZE;\n\n    if (rest == 0)\n        return nb_blocks_512;\n    else\n        return nb_blocks_512 + 1;\n}\n\n/* ------------ Functions for checking each type of trigger ------------ */\n\nstatic inline int statfs2usage(const struct statfs *p_statfs,\n                               unsigned long long *used_vol,\n                               double *used_pct,\n                               unsigned long long *total_blocks,\n                               const char *storage_descr)\n{\n    /* check df consistency:\n     * used = total - free = f_blocks - f_bfree\n     * if used + available <= 0, there's something wrong\n     */\n    if (p_statfs->f_blocks + p_statfs->f_bavail - p_statfs->f_bfree <= 0) {\n        DisplayLog(LVL_CRIT, TAG,\n             
      \"ERROR: statfs on %s returned inconsistent values!!!\",\n                   storage_descr);\n        DisplayLog(LVL_CRIT, TAG,\n                   \"Detail: blks=%\" PRIu64 \" avail=%\" PRIu64 \" free=%\" PRIu64,\n                   p_statfs->f_blocks, p_statfs->f_bavail, p_statfs->f_bfree);\n        return -EIO;\n    }\n\n    /* number of blocks available to users */\n    *total_blocks = (p_statfs->f_blocks + p_statfs->f_bavail\n                     - p_statfs->f_bfree);\n    *used_pct = 100.0 * ((double)p_statfs->f_blocks - (double)p_statfs->f_bfree)\n        / ((double)(*total_blocks));\n    *used_vol = (p_statfs->f_blocks - p_statfs->f_bfree) * p_statfs->f_bsize;\n\n    return 0;\n}\n\n/** function for checking thresholds (for global FS, single OST,...)\n * @return negative value on error\n * @retval 0 on success (in this case, to_be_purged gives the number\n *                       of blocks to be purged)\n */\nstatic int check_blocks_thresholds(trigger_item_t *p_trigger,\n                                   const char *storage_descr,\n                                   const struct statfs *p_statfs,\n                                   unsigned long long *to_be_purged_512,\n                                   double *p_used_pct)\n{\n    unsigned long long total_user_blocks = 0, block_target = 0, used_vol = 0;\n    double used_pct = 0;\n    char tmp1[128];\n    char tmp2[128];\n    char buff[1024];\n    int rc;\n\n    *to_be_purged_512 = 0;  /* FIXME 'purged' is policy specific */\n\n    if ((rc = statfs2usage(p_statfs, &used_vol, &used_pct, &total_user_blocks,\n                           storage_descr)))\n        return rc;\n\n    /* return last usage */\n    if (p_used_pct)\n        *p_used_pct = used_pct;\n\n    /* is this a condition on volume or percentage ? 
*/\n    if (p_trigger->hw_type == VOL_THRESHOLD) {\n        FormatFileSize(tmp1, sizeof(tmp1), used_vol);\n        FormatFileSize(tmp2, sizeof(tmp2), p_trigger->hw_u.volume);\n\n        /* compare used volume to threshold */\n        DisplayLog(LVL_VERB, TAG, \"%s usage: %s / high threshold: %s\",\n                   storage_descr, tmp1, tmp2);\n\n        if (used_vol < p_trigger->hw_u.volume) {\n            DisplayLog(LVL_DEBUG, TAG,\n                       \"%s usage is under high threshold: nothing to do.\",\n                       storage_descr);\n            return 0;\n        } else if (p_trigger->alert_hw) {\n            snprintf(buff, sizeof(buff), \"High threshold reached on %s\",\n                     storage_descr);\n            RaiseAlert(buff, \"%s\\nspaced used: %s (%.2f%%), high threshold: %s\",\n                       buff, tmp1, used_pct, tmp2);\n        } else {\n            DisplayLog(LVL_MAJOR, TAG, \"High threshold reached on %s (%s): \"\n                       \"spaced used: %s (%.2f%%), high threshold: %s\",\n                       storage_descr, global_config.fs_path, tmp1, used_pct,\n                       tmp2);\n        }\n    } else if (p_trigger->hw_type == PCT_THRESHOLD) {\n        unsigned long long used_hw =\n            (unsigned long long)((p_trigger->hw_u.percent * total_user_blocks) /\n                                 100.0);\n\n        DisplayLog(LVL_VERB, TAG,\n                   \"%s usage: %.2f%% (%\" PRIu64\n                   \" blocks) / high threshold: %.2f%% (%llu blocks)\",\n                   storage_descr, used_pct,\n                   p_statfs->f_blocks - p_statfs->f_bfree,\n                   p_trigger->hw_u.percent, used_hw);\n\n        if (used_pct < p_trigger->hw_u.percent) {\n            DisplayLog(LVL_DEBUG, TAG, \"%s usage is under high threshold: \"\n                       \"nothing to do.\", storage_descr);\n            return 0;\n        } else if (p_trigger->alert_hw) {\n            FormatFileSize(tmp1, 
sizeof(tmp1), used_vol);\n            snprintf(buff, sizeof(buff), \"High threshold reached on %s\",\n                     storage_descr);\n            RaiseAlert(buff,\n                       \"%s\\nspaced used: %s (%.2f%%), high threshold: %.2f%%\",\n                       buff, tmp1, used_pct, p_trigger->hw_u.percent);\n        } else {\n            FormatFileSize(tmp1, sizeof(tmp1), used_vol);\n            DisplayLog(LVL_MAJOR, TAG, \"High threshold reached on %s (%s): \"\n                       \"spaced used: %s (%.2f%%), high threshold: %.2f%%\",\n                       storage_descr, global_config.fs_path, tmp1, used_pct,\n                       p_trigger->hw_u.percent);\n        }\n    }\n\n    /* if we reach this point, high threshold is exceeded.\n     * compute the amount of data for reaching low threshold */\n\n    if (p_trigger->lw_type == VOL_THRESHOLD) {\n        block_target = (p_trigger->lw_u.volume / p_statfs->f_bsize);\n        if (p_trigger->lw_u.volume % p_statfs->f_bsize)\n            block_target++;\n        DisplayLog(LVL_VERB, TAG, \"Target usage volume: %s (%llu blocks)\",\n                   FormatFileSize(tmp1, sizeof(tmp1), p_trigger->lw_u.volume),\n                   block_target);\n    } else if (p_trigger->lw_type == PCT_THRESHOLD) {\n        block_target = (unsigned long long)((p_trigger->lw_u.percent\n                       * (double)total_user_blocks) / 100.0);\n        DisplayLog(LVL_VERB, TAG,\n                   \"Target usage percentage: %.2f%% (%llu blocks)\",\n                   p_trigger->lw_u.percent, block_target);\n    } else {\n        DisplayLog(LVL_CRIT, TAG,\n                   \"Unexpected Low Threshold type %d. Trigger skipped.\",\n                   p_trigger->lw_type);\n        return -EINVAL;\n    }\n\n    if (p_statfs->f_blocks - p_statfs->f_bfree <= block_target) {\n        DisplayLog(LVL_EVENT, TAG,\n                   \"Usage is already under low threshold. 
Do nothing.\");\n        return 0;\n    }\n\n    /* to be purged= blocks used - block_target */\n    *to_be_purged_512 =\n        FSInfo2Blocs512((p_statfs->f_blocks - p_statfs->f_bfree) - block_target,\n                        p_statfs->f_bsize);\n\n    DisplayLog(LVL_EVENT, TAG,\n               \"%llu blocks (x%u) must be processed on %s (used=%\" PRIu64\n               \", target=%llu, block size=%zu)\", *to_be_purged_512, DEV_BSIZE,\n               storage_descr, p_statfs->f_blocks - p_statfs->f_bfree,\n               block_target, p_statfs->f_bsize);\n\n    return 0;\n}\n\n/** helper for checking inode LW/HW for both count and percentage triggers */\nstatic\nint check_inode_count(unsigned long long inode_used,\n                      unsigned long long inode_lw,\n                      unsigned long long inode_hw,\n                      unsigned long long inode_total,\n                      unsigned long long *to_purge,\n                      bool               alert_hw,\n                      const char*        descr)\n{\n    double inode_used_pct = 100.0 * ((double)inode_used)/((double)inode_total);\n    double inode_hw_pct = 100.0 * ((double)inode_hw)/((double)inode_total);\n\n    DisplayLog(LVL_EVENT, TAG,\n           \"%s entry count: %llu (%.2f%%) / high threshold: %llu (%.2f%%)\",\n           descr, inode_used, inode_used_pct, inode_hw, inode_hw_pct);\n\n    if (inode_used < inode_hw) {\n        DisplayLog(LVL_VERB, TAG,\n                   \"%s inode count is under high threshold: nothing to do.\",\n                   descr);\n        return 0;\n    }\n\n    if (alert_hw) {\n        char *msg = NULL;\n\n        if (asprintf(&msg, \"High threshold reached on %s\", descr) <= 0\n            || msg == NULL)\n            return -ENOMEM;\n\n        RaiseAlert(msg, \"%s\\nentry count: %llu (%.2f%%), \"\n                   \"high threshold: %llu (%.2f%%)\", msg, inode_used,\n                   inode_used_pct, inode_hw, inode_hw_pct);\n        free(msg);\n    
}\n\n    /* if we reach this point, high threshold is exceeded compute the amount\n    * of data for reaching low threshold */\n    DisplayLog(LVL_VERB, TAG, \"Target entry count: %llu\", inode_lw);\n    if (inode_used <= inode_lw) {\n        DisplayLog(LVL_EVENT, TAG,\n                 \"Inode count is already under low threshold. Doing nothing.\");\n        return 0;\n    }\n    *to_purge = inode_used - inode_lw;\n    DisplayLog(LVL_EVENT, TAG,\n               \"%llu entries must be processed in %s (used=%llu (%.2f%%), \"\n               \"target=%llu)\", *to_purge, descr, inode_used,\n               inode_used_pct, inode_lw);\n    return 0;\n}\n\n/** function for checking inode count thresholds\n * @return negative value on error\n * @retval 0 on success (in this case, to_be_purged gives the number of\n *                       entries to be purged)\n */\nstatic int check_count_thresholds(trigger_item_t *p_trigger,\n                                  const char *storage_descr,\n                                  const struct statfs *p_statfs,\n                                  unsigned long long *to_be_purged,\n                                  double *p_count_pct_used)\n{\n    unsigned long long lw_count, hw_count;\n\n    *to_be_purged = 0;\n\n    /* check df consistency: free < total */\n    if (p_statfs->f_ffree > p_statfs->f_files) {\n        DisplayLog(LVL_CRIT, TAG,\n                   \"ERROR: statfs on %s returned inconsistent values!!!\",\n                   storage_descr);\n        DisplayLog(LVL_CRIT, TAG, \"Detail: total=%\" PRIu64 \", free=%\" PRIu64,\n                   p_statfs->f_files, p_statfs->f_ffree);\n        return -EIO;\n    }\n\n    /* expected to be != NULL */\n    if (p_count_pct_used == NULL)\n        RBH_BUG(\"Unexpected NULL pointer as parameter\");\n\n    /* check it is a condition on inode count or in count percentage */\n    if ((p_trigger->hw_type != COUNT_THRESHOLD\n           && p_trigger->hw_type != CNTPCT_THRESHOLD)) {\n        
DisplayLog(LVL_CRIT, TAG,\n                   \"Unexpected threshold type h=%d. Trigger skipped.\",\n                   p_trigger->hw_type);\n        return -EINVAL;\n    }\n\n    switch (p_trigger->lw_type) {\n    case COUNT_THRESHOLD:\n        lw_count = p_trigger->lw_u.count;\n        hw_count = p_trigger->hw_u.count;\n        break;\n    case CNTPCT_THRESHOLD:\n        lw_count = (p_trigger->lw_u.percent * (double)p_statfs->f_files)/100.0;\n        hw_count = (p_trigger->hw_u.percent * (double)p_statfs->f_files)/100.0;\n        break;\n    default:\n        DisplayLog(LVL_CRIT, TAG,\n                   \"Unexpected threshold type l=%d. Trigger skipped.\",\n                   p_trigger->lw_type);\n        return -EINVAL;\n    }\n\n    return check_inode_count(p_statfs->f_files - p_statfs->f_ffree,\n                             lw_count, hw_count, p_statfs->f_files,\n                             to_be_purged, p_trigger->alert_hw, storage_descr);\n}\n\n/** get the total number of usable blocks in the filesystem */\nstatic int total_blocks(unsigned long long *total_user_blocks,\n                        unsigned long long *bsize)\n{\n    struct statfs stfs;\n    char traverse_path[RBH_PATH_MAX];\n    int rc;\n\n    rc = snprintf(traverse_path, RBH_PATH_MAX, \"%s/.\", global_config.fs_path);\n    if (rc >= RBH_PATH_MAX) {\n        DisplayLog(LVL_MAJOR, TAG, \"Path too long: %s/.\",\n                   global_config.fs_path);\n        return ENAMETOOLONG;\n    }\n\n    if (statfs(traverse_path, &stfs) != 0) {\n        int err = errno;\n\n        DisplayLog(LVL_CRIT, TAG, \"Could not make a 'df' on %s: error %d: %s\",\n                   global_config.fs_path, err, strerror(err));\n        return err;\n    }\n    /* On some buggy systems, statfs() reports no error but the returned\n     * structure is inconsistent or is filled with zeros... 
*/\n    if (stfs.f_blocks + stfs.f_bavail - stfs.f_bfree <= 0) {\n        DisplayLog(LVL_CRIT, TAG,\n                   \"ERROR: statfs on %s returned inconsistent values!!!\",\n                   global_config.fs_path);\n        DisplayLog(LVL_CRIT, TAG,\n                   \"Detail: blks=%\" PRIu64 \" avail=%\" PRIu64 \" free=%\" PRIu64,\n                   stfs.f_blocks, stfs.f_bavail, stfs.f_bfree);\n        return EIO;\n    }\n\n    /* number of blocks available to users */\n    *total_user_blocks = (stfs.f_blocks + stfs.f_bavail - stfs.f_bfree);\n    *bsize = stfs.f_bsize;\n    return 0;\n}\n\nstatic int get_fs_usage(policy_info_t *pol, struct statfs *stfs)\n{\n    char traverse_path[RBH_PATH_MAX];\n    int rc;\n\n    rc = snprintf(traverse_path, RBH_PATH_MAX, \"%s/.\", global_config.fs_path);\n    if (rc >= RBH_PATH_MAX) {\n        DisplayLog(LVL_MAJOR, tag(pol), \"Path too long: %s/.\",\n                   global_config.fs_path);\n        return ENAMETOOLONG;\n    }\n\n    if (!CheckFSDevice(pol))\n        return ENODEV;\n\n    /* retrieve filesystem usage info */\n    if (statfs(traverse_path, stfs) != 0) {\n        int err = errno;\n\n        DisplayLog(LVL_CRIT, tag(pol),\n                   \"Could not make a 'df' on %s: error %d: %s\",\n                   global_config.fs_path, err, strerror(err));\n        return err;\n    }\n    return 0;\n}\n\n/**\n * @return boolean to indicate if we are in a maintenance window.\n */\nstatic bool check_maintenance_mode(policy_info_t *pol, time_modifier_t *p_mod)\n{\n    struct tm dt;\n    time_t next_maint, now;\n    char varstr[128];\n    char datestr[128];\n    char leftstr[128];\n\n    if (pol->config->pre_maintenance_window == 0)\n        return false;\n\n    /* check maintenance mode */\n    if ((ListMgr_GetVar(&pol->lmgr, NEXT_MAINT_VAR, varstr, sizeof(varstr)) !=\n         DB_SUCCESS)\n        || EMPTY_STRING(varstr))\n        return false;\n\n    next_maint = str2int(varstr);\n    if (next_maint <= 0)    /* 
invalid value, or disabled */\n        return false;\n\n    /* build maintenance date */\n    strftime(datestr, sizeof(datestr), \"%Y/%m/%d %T\",\n             localtime_r(&next_maint, &dt));\n\n    now = time(NULL);\n    if (next_maint < now) {\n        DisplayLog(LVL_DEBUG, TAG, \"Maintenance time is in the past (%s): \"\n                   \"no time modifier\", datestr);\n        return false;\n    } else if (now < next_maint - pol->config->pre_maintenance_window) {\n        FormatDuration(leftstr, sizeof(leftstr), next_maint -\n                       pol->config->pre_maintenance_window - now);\n\n        DisplayLog(LVL_VERB, TAG, \"Maintenance time is set (%s): \"\n                   \"maintenance window will start in %s\", datestr, leftstr);\n        return false;\n    } else {    /* this is the pre maintenance window! */\n\n        /* linear function to compute time modifier */\n        p_mod->time_factor = ((double)(next_maint - now)) /\n            (double)pol->config->pre_maintenance_window;\n        p_mod->time_min = pol->config->maint_min_apply_delay;\n\n        FormatDuration(leftstr, sizeof(leftstr), next_maint - now);\n        DisplayLog(LVL_MAJOR, TAG, \"Currently in maintenance mode \"\n                   \"(maintenance is in %s): time modifier = %.2f%%\",\n                   leftstr, 100.0 * p_mod->time_factor);\n        return true;\n    }\n}\n\nstatic int check_trigger_type(trigger_item_t *t)\n{\n    if ((t->target_type == TGT_NONE || t->target_type == TGT_FS\n#ifdef _LUSTRE\n        /* XXX does not suport lists of target osts */\n        || t->target_type == TGT_OST\n#endif\n        ) && (t->list_size != 0))\n        RBH_BUG(\"Unexpected target for trigger\");\n    return 0;\n}\n\n/* not a function as it may be int or int64 */\n#define min_param(_param, _config) (_param == 0 ? 
_config : MIN(_param, _config))\n\n/* Set limits for the policy run, according to global policy config\n * and trigger config.\n * Only override the current info when it is too high or not set.\n */\nstatic void set_limits(const policy_info_t *pol, const trigger_item_t *trig,\n                       counters_t *limit)\n{\n    /* set policy run limits */\n    if ((trig != NULL) && (trig->max_action_nbr != 0))\n        limit->count = min_param(limit->count, trig->max_action_nbr);\n\n    if (pol->config->max_action_nbr != 0)\n        limit->count = min_param(limit->count, pol->config->max_action_nbr);\n\n    if ((trig != NULL) && (trig->max_action_vol != 0))\n        limit->vol = min_param(limit->vol, trig->max_action_vol);\n\n    if (pol->config->max_action_vol != 0)\n        limit->vol = min_param(limit->vol, pol->config->max_action_vol);\n}\n\n/**\n * Print policy target description to a string.\n * @return str\n */\nstatic const char *param2targetstr(const policy_param_t *param, char *str,\n                                   size_t len)\n{\n    switch (param->target) {\n    case TGT_FS:\n        /* snprintf is safer than strncpy as it null terminates string */\n        snprintf(str, len, \"all\");\n        return str;\n#ifdef _LUSTRE\n    case TGT_OST:\n        snprintf(str, len, \"OST#%u\", param->optarg_u.index);\n        return str;\n    case TGT_POOL:\n        snprintf(str, len, \"pool %s\", param->optarg_u.name);\n        return str;\n    case TGT_PROJID:\n        snprintf(str, len, \"projid#%u\", param->optarg_u.index);\n        return str;\n#endif\n    case TGT_USER:\n        snprintf(str, len, \"user %s\", param->optarg_u.name);\n        return str;\n    case TGT_GROUP:\n        snprintf(str, len, \"group %s\", param->optarg_u.name);\n        return str;\n    case TGT_FILE: /* only for manual actions */\n        snprintf(str, len, \"entry '%s'\", param->optarg_u.name);\n        return str;\n    case TGT_CLASS:    /* only for manual actions */\n        
snprintf(str, len, \"fileclass %s\", param->optarg_u.name);\n        return str;\n    default:\n        RBH_BUG(\"unexpected trigger target\");\n    }\n}\n\n#ifdef _LUSTRE\nstruct ost_list {\n    unsigned int *list;\n    unsigned int count;\n};\nstatic inline void ost_list_init(struct ost_list *l)\n{\n    l->list = NULL;\n    l->count = 0;\n}\n\nstatic inline int ost_list_add(struct ost_list *l, unsigned int ost_idx)\n{\n    l->list = MemRealloc(l->list, (l->count + 1) * sizeof(*l->list));\n    if (!l->list)\n        return ENOMEM;\n\n    l->list[l->count] = ost_idx;\n    l->count++;\n    return 0;\n}\n\nstatic inline void ost_list_free(struct ost_list *l)\n{\n    if (l->list)\n        MemFree(l->list);\n    l->list = NULL;\n    l->count = 0;\n}\n\nstatic inline bool ost_list_is_member(struct ost_list *l,\n                                      unsigned int test_member)\n{\n    int i;\n    for (i = 0; i < l->count; i++) {\n        if (l->list[i] == test_member)\n            return true;\n    }\n    return false;\n}\n\nstatic int get_ost_max(struct statfs *df, trigger_value_type_t tr_type,\n                       struct ost_list *excluded)\n{\n    int ost_index, rc = 0;\n    int ost_max = -1;\n    unsigned long long ost_blocks;\n    struct statfs stat_max, stat_tmp;\n    double max_pct = 0.0, curr_pct = 0.0;\n    unsigned long long max_vol = 0LL, curr_vol = 0, curr_inode_used = 0,\n                       max_cnt_inodes = 0;\n    char ostname[128];\n\n    for (ost_index = 0;; ost_index++) {\n        if (ost_list_is_member(excluded, ost_index))\n            continue;\n\n        rc = Get_OST_usage(global_config.fs_path, ost_index, &stat_tmp);\n        if (rc == ENODEV)   /* end of OST list */\n            break;\n        else if (rc != 0)\n            /* continue with next OSTs */\n            continue;\n\n        snprintf(ostname, sizeof(ostname), \"OST #%u\", ost_index);\n        if (statfs2usage(&stat_tmp, &curr_vol, &curr_pct, &ost_blocks, ostname))\n            /* 
continue with next OSTs */\n            continue;\n\n        switch (tr_type) {\n        case VOL_THRESHOLD:\n            if (curr_vol > max_vol) {\n                ost_max = ost_index;\n                max_vol = curr_vol;\n                stat_max = stat_tmp;\n            }\n            break;\n        case PCT_THRESHOLD:\n            if (curr_pct > max_pct) {\n                ost_max = ost_index;\n                max_pct = curr_pct;\n                stat_max = stat_tmp;\n            }\n            break;\n        case COUNT_THRESHOLD:\n            /* number of inodes used */\n            curr_inode_used = stat_tmp.f_files - stat_tmp.f_ffree;\n            if (curr_inode_used > max_cnt_inodes) {\n                ost_max = ost_index;\n                max_cnt_inodes = curr_inode_used;\n                stat_max = stat_tmp;\n            }\n            break;\n\n        case CNTPCT_THRESHOLD:\n            curr_inode_used = stat_tmp.f_files - stat_tmp.f_ffree;\n            curr_pct = 100.0 * (double)curr_inode_used/(double)stat_tmp.f_files;\n            if (curr_pct > max_pct) {\n                ost_max = ost_index;\n                max_pct = curr_pct;\n                stat_max = stat_tmp;\n            }\n            break;\n\n        default:\n            RBH_BUG(\"Unexpected OST trigger type\");\n        }\n    }\n\n    if (ost_max == -1)\n        /* none found */\n        return -ENOENT;\n\n    *df = stat_max;\n    return ost_max;\n}\n#endif\n\n/** build report argument for a user or group */\nstatic void build_user_report_descr(report_field_descr_t info[],\n                                    trigger_item_t *trig,\n                                    unsigned long long high_blk)\n{\n    info[0].attr_index = (trig->target_type == TGT_USER ? 
ATTR_INDEX_uid :\n                          ATTR_INDEX_gid);\n    info[0].report_type = REPORT_GROUP_BY;\n    info[0].sort_flag = SORT_NONE;\n    info[0].filter = false;\n\n    if (is_count_trigger(trig)) {\n        info[1].attr_index = 0;\n        info[1].report_type = REPORT_COUNT;\n        info[1].sort_flag = SORT_DESC;  /* start with top consumer */\n        info[1].filter = true;\n        info[1].filter_compar = MORETHAN_STRICT;\n        info[1].filter_value.value.val_biguint = trig->hw_u.count;\n    } else {    /* volume based trigger */\n\n        /* select users/groups having sum(blocks) > high_threshold (blocks) */\n        info[1].attr_index = ATTR_INDEX_blocks;\n        info[1].report_type = REPORT_SUM;\n        info[1].sort_flag = SORT_DESC;  /* start with top consumer */\n        info[1].filter = true;\n        info[1].filter_compar = MORETHAN_STRICT;\n        info[1].filter_value.value.val_biguint = high_blk;\n    }\n}\n\n/** build request filter for user or group triggers */\nstatic int build_user_report_filter(lmgr_filter_t *filter,\n                                    trigger_item_t *trig)\n{\n    int i;\n    filter_value_t fv;\n    int attr_index = (trig->target_type == TGT_USER ? ATTR_INDEX_uid :\n                      ATTR_INDEX_gid);\n\n    /* FIXME consider released entries in quota? */\n\n    /* if a specific set of users/groups is specified, make a filter for this */\n\n    /* 2 cases: if there is a single user/group, add a simple filter for it:\n     * AND owner LIKE ...\n     * If there are several users/groups, add a OR sequence:\n     * AND (owner LIKE ... 
OR owner LIKE ...)\n     */\n    for (i = 0; i < trig->list_size; i++) {\n        int flag = 0;\n\n        if (attr_index == ATTR_INDEX_uid) {\n            if (set_uid_val(trig->list[i], &fv.value))\n                return -EINVAL;\n        } else {\n            if (set_gid_val(trig->list[i], &fv.value))\n                return -EINVAL;\n        }\n\n        /* add parenthesis and 'OR' for lists of items */\n        if (trig->list_size > 1) {\n            if (i == 0) /* first item */\n                flag |= FILTER_FLAG_BEGIN;\n            else if (i == trig->list_size - 1)  /* last item */\n                flag |= FILTER_FLAG_END;\n\n            /* add OR (except for the first item) */\n            if (i > 0)\n                flag |= FILTER_FLAG_OR;\n        }\n        /* else: single value in list => flag = 0 */\n\n        lmgr_simple_filter_add(filter, attr_index,\n                               global_config.uid_gid_as_numbers ? EQUAL : LIKE,\n                               fv, flag);\n    }\n\n    return 0;\n}\n\n/** check thresholds for a given trigger target */\nstatic int check_statfs_thresholds(trigger_item_t *trig, const char *tgt_name,\n                                   struct statfs *stfs, counters_t *limit,\n                                   trigger_info_t *tinfo)\n{\n    int rc;\n    double tmp_usage = 0.0;\n    double tmp_count_pct = 0.0;\n\n    if (is_count_trigger(trig)) {\n        /* inode count */\n        rc = check_count_thresholds(trig, tgt_name, stfs, &limit->count,\n                                    &tmp_count_pct);\n        if (tmp_count_pct > tinfo->last_count)\n            tinfo->last_count = tmp_count_pct;\n    } else if (trig->target_type == TGT_FS) {\n        /* block threshold */\n        rc = check_blocks_thresholds(trig, tgt_name, stfs, &limit->blocks,\n                                     &tmp_usage);\n        if (rc == 0 && tmp_usage > tinfo->last_usage)\n            tinfo->last_usage = tmp_usage;\n    } else {\n        /* 
blocks on OST or pool */\n        rc = check_blocks_thresholds(trig, tgt_name, stfs, &limit->targeted,\n                                     &tinfo->last_usage);\n        if (tmp_usage > tinfo->last_usage)\n            tinfo->last_usage = tmp_usage;\n    }\n    return rc;\n}\n\n/* check threshold on DB report values */\nstatic int check_report_thresholds(trigger_item_t *p_trigger,\n                                   db_value_t *result, unsigned int res_count,\n                                   counters_t *limit, trigger_info_t *tinfo,\n                                   unsigned long long low_blk512,\n                                   unsigned long long high_blk512)\n{\n    const char *what = (p_trigger->target_type == TGT_USER ? \"user\" : \"group\");\n    char buff[1024];\n    char hw_str[128];\n    int rc;\n\n    if (res_count != 2) {\n        DisplayLog(LVL_MAJOR, TAG,\n                   \"Invalid DB result size %u (2 values expected)\", res_count);\n        return EINVAL;\n    }\n\n    if (is_count_trigger(p_trigger)) {\n        if (p_trigger->hw_type == COUNT_THRESHOLD)\n            FormatFileSize(hw_str, sizeof(hw_str), p_trigger->hw_u.count);\n        else if (p_trigger->hw_type == CNTPCT_THRESHOLD)\n            snprintf(hw_str, sizeof(hw_str), \"%.2f%%\", p_trigger->hw_u.percent);\n\n        DisplayLog(LVL_EVENT, TAG, \"%s '%s' exceeds high threshold: \"\n                   \"used: %llu inodes / high threshold: %llu inodes.\",\n                   what, id_as_str(&result[0].value_u),\n                   result[1].value_u.val_biguint, p_trigger->hw_u.count);\n\n        limit->count = result[1].value_u.val_biguint - p_trigger->lw_u.count;\n\n        DisplayLog(LVL_EVENT, TAG, \"%llu files to be processed for %s '%s' \"\n                   \"(used=%llu, target=%llu)\",\n                   limit->count, what, id_as_str(&result[0].value_u),\n                   result[1].value_u.val_biguint, p_trigger->lw_u.count);\n\n        if (p_trigger->alert_hw) {\n       
     rc = snprintf(buff, sizeof(buff),\n                         \"Inode quota exceeded for %s '%s' (in %s)\", what,\n                         id_as_str(&result[0].value_u), global_config.fs_path);\n            if (rc >= sizeof(buff)) {\n                DisplayLog(LVL_DEBUG, TAG, \"Alert title truncated for %s\",\n                           what);\n            }\n            RaiseAlert(buff,\n                       \"%s\\n\" \"%s:       %s\\n\" \"quota:      %s inodes\\n\"\n                       \"usage:      %llu inodes\", buff, what,\n                       id_as_str(&result[0].value_u), hw_str,\n                       result[1].value_u.val_biguint);\n        }\n    } else {\n        if (p_trigger->hw_type == VOL_THRESHOLD)\n            FormatFileSize(hw_str, sizeof(hw_str), p_trigger->hw_u.volume);\n        else if (p_trigger->hw_type == PCT_THRESHOLD)\n            snprintf(hw_str, sizeof(hw_str), \"%.2f%%\", p_trigger->hw_u.percent);\n\n        DisplayLog(LVL_EVENT, TAG,\n                   \"%s '%s' exceeds high threshold: used: %llu blocks \"\n                   \"/ high threshold: %llu blocks (x%u).\",\n                   what, id_as_str(&result[0].value_u),\n                   result[1].value_u.val_biguint, high_blk512, DEV_BSIZE);\n\n        limit->blocks = result[1].value_u.val_biguint - low_blk512;\n\n        DisplayLog(LVL_EVENT, TAG, \"%llu blocks (x%u) must be processed \"\n                   \"for %s '%s' (used=%llu, target=%llu)\",\n                   limit->blocks, DEV_BSIZE, what,\n                   id_as_str(&result[0].value_u), result[1].value_u.val_biguint,\n                   low_blk512);\n\n        if (p_trigger->alert_hw) {\n            char usage_str[128];\n\n            FormatFileSize(usage_str, sizeof(usage_str),\n                           result[1].value_u.val_biguint * 512);\n            rc = snprintf(buff, sizeof(buff),\n                         \"Volume quota exceeded for %s '%s' (in %s)\", what,\n                         
id_as_str(&result[0].value_u), global_config.fs_path);\n            if (rc >= sizeof(buff)) {\n                DisplayLog(LVL_DEBUG, TAG, \"Alert title truncated for %s\",\n                           what);\n            }\n            RaiseAlert(buff, \"%s\\n%s:       %s\\nquota:      %s\\nspace used: %s\",\n                       buff, what, id_as_str(&result[0].value_u), hw_str,\n                       usage_str);\n        }\n    }\n    return 0;\n}\n\ntypedef struct target_iterator_t {\n    trigger_item_t trig;\n    policy_info_t *pol;\n    union {\n        /* for FS usage */\n        unsigned int is_checked;\n        /* for DB report iterator */\n        struct lmgr_report_t *db_report;\n#ifdef _LUSTRE\n        /* for OST iterator */\n        struct ost_list ost_excl;\n        /* for pool iterator */\n        unsigned int next_pool_index;\n#endif\n    } info_u;\n    /* for user and groups vol/pct thresholds: save high and low values\n     * (in blocks) */\n    unsigned long long high_blk512;\n    unsigned long long low_blk512;\n} target_iterator_t;\n\n/** compute user blocks and save them into it structure */\nstatic int compute_user_blocks(trigger_item_t *trig, target_iterator_t *it)\n{\n    int rc;\n    unsigned long long tb = 0, bs = 0;\n\n    /* check users or groups (possible filter on specified users) */\n    /* build the DB report iterator */\n    if ((trig->hw_type == PCT_THRESHOLD) || (trig->lw_type == PCT_THRESHOLD)) {\n        rc = total_blocks(&tb, &bs);\n        if (rc)\n            return rc;\n    }\n    if (trig->hw_type == VOL_THRESHOLD)\n        it->high_blk512 = trig->hw_u.volume / DEV_BSIZE;\n    else if (trig->hw_type == PCT_THRESHOLD)\n        it->high_blk512 =\n            FSInfo2Blocs512((unsigned long)((trig->hw_u.percent * tb) / 100.0),\n                            bs);\n\n    if (trig->lw_type == VOL_THRESHOLD)\n        it->low_blk512 = trig->lw_u.volume / DEV_BSIZE;\n    else if (trig->lw_type == PCT_THRESHOLD)\n        it->low_blk512 
=\n            FSInfo2Blocs512((unsigned long)((trig->lw_u.percent * tb) / 100.0),\n                            bs);\n    return 0;\n}\n\n/** Create an iterator on trigger targets */\nstatic int trig_target_it(target_iterator_t *it, policy_info_t *pol,\n                          trigger_item_t *trig)\n{\n    /* FIXME \"scheduled\" triggers may accept a target too. */\n\n    int rc;\n    /* Iterate on:\n     * each specified pool\n     * each specified file\n     * each OST,user or group, fileclass over the specified limit.\n     */\n    it->trig = *trig;\n    it->pol = pol;\n\n    if (trig->trigger_type == TRIG_ALWAYS) {\n        it->info_u.is_checked = 0;\n        return 0;\n    }\n\n    switch (trig->target_type) {\n    case TGT_FS:\n        /* no iteration, just check the FS usage */\n        it->info_u.is_checked = 0;\n        break;\n#ifdef _LUSTRE\n    case TGT_OST:\n        /* get and check the max OST */\n        ost_list_init(&it->info_u.ost_excl);\n        break;\n    case TGT_POOL:\n        /* check listed pools */\n        it->info_u.next_pool_index = 0;\n        break;\n    case TGT_PROJID:\n        RBH_BUG(\"No trigger expected on projid: only for manual actions\");\n#endif\n    case TGT_USER:\n    case TGT_GROUP:\n        {\n            /* check users or groups (possible filter on a set of users) */\n            report_field_descr_t info[2];   /* [0]user/group: [1]nb_blocks */\n            lmgr_filter_t filter;\n\n            /* get the nbr of FS blocks and convert them to high/low block\n             * thresholds */\n            rc = compute_user_blocks(trig, it);\n            if (rc)\n                return rc;\n            build_user_report_descr(info, trig, it->high_blk512);\n\n            lmgr_simple_filter_init(&filter);\n            rc = build_user_report_filter(&filter, trig);\n            if (rc)\n                return rc;\n\n            it->info_u.db_report =\n                ListMgr_Report(&pol->lmgr, info, 2, NULL, &filter, NULL);\n      
      lmgr_simple_filter_free(&filter);\n\n            if (it->info_u.db_report == NULL)\n                return -1;\n            break;\n        }\n    case TGT_FILE:\n        RBH_BUG(\"No trigger expected on files: only for manual actions\");\n    case TGT_CLASS:\n        RBH_BUG(\"No trigger expected on fileclass: only for manual actions\");\n    case TGT_NONE:\n        RBH_BUG(\"Unexpected trigger type TGT_NONE\");\n    }\n\n    return 0;\n}\n\n/** Get the next target from an iterator */\nstatic int trig_target_next(target_iterator_t *it, target_u *tgt,\n                            counters_t *limit, trigger_info_t *tinfo)\n{\n    struct statfs stfs;\n    int rc;\n#ifdef _LUSTRE\n    char tgtname[128];\n#endif\n\n    memset(limit, 0, sizeof(*limit));\n\n    if (it->trig.trigger_type == TRIG_ALWAYS) {\n        if (it->info_u.is_checked)\n            return ENOENT;  /* end of list */\n\n        /* no limit due to usage level */\n        it->info_u.is_checked = 1;\n        return 0;\n    }\n\n    switch (it->trig.target_type) {\n    case TGT_FS:\n        if (it->info_u.is_checked)\n            return ENOENT;  /* end of list */\n\n        /* check FS usage */\n        rc = get_fs_usage(it->pol, &stfs);\n        if (rc)\n            return rc;\n        rc = check_statfs_thresholds(&it->trig, \"Filesystem\", &stfs, limit,\n                                     tinfo);\n        if (rc)\n            return rc;\n\n        it->info_u.is_checked = 1;\n\n        if (!counter_is_set(limit))\n            return ENOENT;\n        else\n            return 0;\n        break;\n\n#ifdef _LUSTRE\n    case TGT_OST:\n        {\n            int ost_index;\n            /* get and check the max OST */\n            while ((ost_index =\n                    get_ost_max(&stfs, it->trig.hw_type, &it->info_u.ost_excl))\n                   != -ENOENT) {\n                if (ost_index < 0)\n                    return -ost_index;\n                snprintf(tgtname, sizeof(tgtname), \"OST #%u\", 
ost_index);\n                /* check thresholds */\n                rc = check_statfs_thresholds(&it->trig, tgtname, &stfs, limit,\n                                             tinfo);\n                if (rc)\n                    return rc;\n                if (!counter_is_set(limit)) {\n                    DisplayLog(LVL_DEBUG, TAG,\n                               \"Top OSTs are all under high threshold: \"\n                               \"skipping check of other OSTs\");\n                    return ENOENT;\n                } else {\n                    tgt->index = ost_index;\n                    /* exclude this OST for next loops */\n                    if ((rc = ost_list_add(&it->info_u.ost_excl, ost_index)))\n                        return rc;\n                    return 0;   /* something is to be done */\n                }\n            }\n            return ENOENT;\n        }\n        break;\n\n    case TGT_POOL:\n        for (; it->info_u.next_pool_index < it->trig.list_size;\n             it->info_u.next_pool_index++) {\n            /* check listed pools */\n            const char *pool = it->trig.list[it->info_u.next_pool_index];\n\n            rc = Get_pool_usage(pool, &stfs);\n            if (rc) {\n                DisplayLog(LVL_CRIT, TAG,\n                           \"Could not retrieve usage info for pool '%s': %s\",\n                           pool, strerror(rc));\n                continue;\n            }\n            snprintf(tgtname, sizeof(tgtname), \"pool '%s'\", pool);\n            rc = check_statfs_thresholds(&it->trig, tgtname, &stfs, limit,\n                                         tinfo);\n            if (rc)\n                return rc;\n            if (!counter_is_set(limit))\n                continue;\n            else {\n                it->info_u.next_pool_index++;\n                tgt->name = pool;\n                return 0;   /* something is to be done */\n            }\n        }\n        return ENOENT;\n        break;\n#endif\n    
case TGT_USER:\n    case TGT_GROUP:\n#ifdef _LUSTRE\n    case TGT_PROJID:\n#endif\n        {\n            db_value_t result[2];\n            unsigned int result_count = 2;\n\n            while ((rc = ListMgr_GetNextReportItem(it->info_u.db_report,\n                                                   result, &result_count,\n                                                   NULL)) == DB_SUCCESS) {\n                rc = check_report_thresholds(&it->trig, result, result_count,\n                                             limit, tinfo, it->low_blk512,\n                                             it->high_blk512);\n                if (rc)\n                    return rc;\n\n                /* reset result count before continuing */\n                result_count = 2;\n\n                if (!counter_is_set(limit))\n                    continue;\n                else {\n                    tgt->name = result[0].value_u.val_str;\n                    return 0;   /* something is to be done */\n                }\n            }\n            return ENOENT;\n            break;\n        }\n    case TGT_FILE:\n        RBH_BUG(\"No trigger expected on files: only for manual actions\");\n    case TGT_CLASS:\n        RBH_BUG(\"No trigger expected on fileclass: only for manual actions\");\n    case TGT_NONE:\n        RBH_BUG(\"Unexpected trigger type TGT_NONE\");\n    }\n    return -1;\n}\n\n/** Close the iterator */\nstatic void trig_target_end(target_iterator_t *it)\n{\n    if (it->trig.trigger_type == TRIG_ALWAYS)\n        return;\n\n    switch (it->trig.target_type) {\n#ifdef _LUSTRE\n    case TGT_OST:\n        ost_list_free(&it->info_u.ost_excl);\n        break;\n#endif\n    case TGT_USER:\n    case TGT_GROUP:\n#ifdef _LUSTRE\n    case TGT_PROJID:\n#endif\n        ListMgr_CloseReport(it->info_u.db_report);\n        break;\n    default:\n        /* nothing to do */\n        return;\n    }\n    return;\n}\n\nstatic void sprint_ctr(char *str, int size,\n                       const 
counters_t *ctr, policy_target_t tgt_type)\n{\n    char buff[256];\n\n    if (!counter_is_set(ctr)) {\n        strncpy(str, \"none\", size);\n        return;\n    }\n\n    FormatFileSize(buff, sizeof(buff), ctr->vol);\n\n#ifdef _LUSTRE\n    if (tgt_type == TGT_OST || tgt_type == TGT_POOL) {\n        snprintf(str, size, \"%llu entries, total volume %s \"\n                 \"(%llu blocks, %llu in target devices)\",\n                 ctr->count, buff, ctr->blocks, ctr->targeted);\n    } else\n#endif\n    {\n        snprintf(str, size, \"%llu entries, total volume %s \"\n                 \"(%llu blocks)\", ctr->count, buff, ctr->blocks);\n    }\n}\n\nstatic void print_ctr(int level, const char *tag, const char *header,\n                      const counters_t *ctr, policy_target_t tgt_type)\n{\n    char buff[256];\n\n    if (!counter_is_set(ctr)) {\n        DisplayLog(level, tag, \"%s: none\", header);\n        return;\n    }\n\n    FormatFileSize(buff, sizeof(buff), ctr->vol);\n\n#ifdef _LUSTRE\n    if (tgt_type == TGT_OST || tgt_type == TGT_POOL) {\n        DisplayLog(level, tag, \"%s: %llu entries, total volume %s \"\n                   \"(%llu blocks, %llu in target devices)\", header,\n                   ctr->count, buff, ctr->blocks, ctr->targeted);\n    } else\n#endif\n    {\n        DisplayLog(level, tag, \"%s: %llu entries, total volume %s \"\n                   \"(%llu blocks)\", header, ctr->count, buff, ctr->blocks);\n    }\n}\n\nstatic void print_done_vs_target(char *str, int size, const counters_t *done,\n                                 const counters_t *target)\n{\n    int rc = 0;\n    char *curr = str;\n\n    str[0] = '\\0';\n\n    if (target->count != 0 && done->count < target->count) {\n        rc = snprintf(curr, size, \"%Lu entries/%Lu targeted\", done->count,\n                      target->count);\n        size -= rc;\n        curr += rc;\n    }\n\n    if (target->vol != 0 && done->vol < target->vol) {\n        rc = snprintf(curr, size, \"%s%Lu 
bytes/%Lu targeted\",\n                      (curr == str) ? \"\" : \", \", done->vol, target->vol);\n        size -= rc;\n        curr += rc;\n    }\n\n    if (target->blocks != 0 && done->blocks < target->blocks) {\n        rc = snprintf(curr, size, \"%s%Lu blocks/%Lu targeted\",\n                      (curr == str) ? \"\" : \", \", done->blocks, target->blocks);\n        size -= rc;\n        curr += rc;\n    }\n\n    if (target->targeted != 0 && done->targeted < target->targeted) {\n        snprintf(curr, size, \"%s%Lu blocks in target device/%Lu targeted\",\n                 (curr == str) ? \"\" : \", \", done->targeted, target->targeted);\n    }\n}\n\n/** store policy run stats to DB */\nstatic void store_policy_run_stats(policy_info_t *pol, time_t start,\n                                   time_t end, const char *trigger_info,\n                                   const char *status_info)\n{\n    char var_name[POLICY_NAME_LEN + 128]; /* policy name + suffix (oversized) */\n    char val_buff[RBH_PATH_MAX];\n\n    /* clear values for current run */\n    snprintf(var_name, sizeof(var_name), \"%s\" CURR_POLICY_START_SUFFIX,\n             tag(pol));\n    ListMgr_SetVar(&pol->lmgr, var_name, NULL);\n    snprintf(var_name, sizeof(var_name), \"%s\" CURR_POLICY_TRIGGER_SUFFIX,\n             tag(pol));\n    ListMgr_SetVar(&pol->lmgr, var_name, NULL);\n\n    /* store last run times */\n    snprintf(var_name, sizeof(var_name), \"%s\" LAST_POLICY_START_SUFFIX,\n             tag(pol));\n    snprintf(val_buff, sizeof(val_buff), \"%lu\", (unsigned long)start);\n    ListMgr_SetVar(&pol->lmgr, var_name, val_buff);\n\n    snprintf(var_name, sizeof(var_name), \"%s\" LAST_POLICY_END_SUFFIX, tag(pol));\n    snprintf(val_buff, sizeof(val_buff), \"%lu\", (unsigned long)end);\n    ListMgr_SetVar(&pol->lmgr, var_name, val_buff);\n\n    /* store trigger info */\n    snprintf(var_name, sizeof(var_name), \"%s\" LAST_POLICY_TRIGGER_SUFFIX,\n             tag(pol));\n    
ListMgr_SetVar(&pol->lmgr, var_name, trigger_info);\n\n    /* store status info */\n    snprintf(var_name, sizeof(var_name), \"%s\" LAST_POLICY_STATUS_SUFFIX,\n             tag(pol));\n    ListMgr_SetVar(&pol->lmgr, var_name, status_info);\n}\n\nstatic void store_policy_start_stats(policy_info_t *pol, time_t start,\n                                     const char *trigger_info)\n{\n    char var_name[POLICY_NAME_LEN + 16];\n    char val_buff[RBH_PATH_MAX];\n\n    /* store current run times */\n    snprintf(var_name, sizeof(var_name), \"%s\" CURR_POLICY_START_SUFFIX,\n             tag(pol));\n    snprintf(val_buff, sizeof(val_buff), \"%lu\", (unsigned long)start);\n    ListMgr_SetVar(&pol->lmgr, var_name, val_buff);\n\n    /* store trigger info */\n    snprintf(var_name, sizeof(var_name), \"%s\" CURR_POLICY_TRIGGER_SUFFIX,\n             tag(pol));\n    ListMgr_SetVar(&pol->lmgr, var_name, trigger_info);\n}\n\n/** \\param trigger_index -1 if this is a manual run */\nstatic void report_policy_run(policy_info_t *pol, policy_param_t *param,\n                              action_summary_t *summary, lmgr_t *lmgr,\n                              int trigger_index, int policy_rc)\n{\n    char buff[1024];\n    char *trigger_buff = NULL;\n    char *status_buff = NULL;\n    char time_buff[128];\n    char vol_buff[128];\n    char bw_buff[128];\n    unsigned int spent;\n    time_t time_end = time(NULL);\n    int rc;\n\n    print_ctr(LVL_DEBUG, tag(pol), \"target\", &param->target_ctr, param->target);\n    print_ctr(LVL_DEBUG, tag(pol), \"done\", &summary->action_ctr, param->target);\n\n    if (trigger_index != -1) {\n        /* save the summary to trigger_info */\n        pol->trigger_info[trigger_index].last_ctr = summary->action_ctr;\n        counters_add(&pol->trigger_info[trigger_index].total_ctr,\n                     &summary->action_ctr);\n        if (asprintf(&trigger_buff, \"trigger: %s (%s), target: %s\",\n                     
trigger2str(&pol->config->trigger_list[trigger_index]),\n                     one_shot(pol) ? \"one-shot command\" : \"daemon\",\n                     param2targetstr(param, buff, sizeof(buff))) < 0) {\n            DisplayLog(LVL_CRIT, tag(pol),\n                       \"Could not allocate string: %s (%s), target: %s\",\n                       trigger2str(&pol->config->trigger_list[trigger_index]),\n                       one_shot(pol) ? \"one-shot command\" : \"daemon\",\n                       param2targetstr(param, buff, sizeof(buff)));\n            return;\n        }\n    } else {\n        if (asprintf(&trigger_buff, \"manual run, target: %s\",\n                     param2targetstr(param, buff, sizeof(buff))) < 0) {\n            DisplayLog(LVL_CRIT, tag(pol),\n                       \"Could not allocate string: manual run, target: %s\",\n                       param2targetstr(param, buff, sizeof(buff)));\n            return;\n        }\n    }\n\n    spent = time_end - summary->policy_start;\n    if (spent == 0)\n        spent = 1;\n\n    FormatDuration(time_buff, sizeof(time_buff), spent);\n    FormatFileSize(vol_buff, sizeof(vol_buff), summary->action_ctr.vol);\n    FormatFileSize(bw_buff, sizeof(bw_buff), summary->action_ctr.vol / spent);\n\n    if (policy_rc == 0) {\n        DisplayLog(LVL_MAJOR, tag(pol),\n                   \"Policy run summary: time=%s; target=%s; %llu successful actions (%.2f/sec); \"\n                   \"volume: %s (%s/sec); %u entries skipped; %u errors.\",\n                   time_buff, param2targetstr(param, buff, sizeof(buff)),\n                   summary->action_ctr.count,\n                   (float)summary->action_ctr.count / (float)spent,\n                   vol_buff, bw_buff, summary->skipped, summary->errors);\n\n        if (asprintf(&status_buff,\n                     \"%llu successful actions, volume: %s; %u entries skipped; %u errors\",\n                     summary->action_ctr.count, vol_buff, summary->skipped,\n           
          summary->errors) < 0) {\n            DisplayLog(LVL_CRIT, tag(pol),\n                       \"Could not allocate string: %llu successful actions, volume: %s; %u entries skipped; %u errors\",\n                       summary->action_ctr.count, vol_buff, summary->skipped,\n                       summary->errors);\n            free(trigger_buff);\n            return;\n        }\n\n        if (counter_not_reached(&summary->action_ctr, &param->target_ctr)) {\n            trigger_item_t *trig = NULL;\n\n            print_done_vs_target(buff, sizeof(buff), &summary->action_ctr,\n                                 &param->target_ctr);\n\n            if (trigger_index != -1) {\n                trig = &pol->config->trigger_list[trigger_index];\n\n                DisplayLog(LVL_CRIT, tag(pol),\n                           \"Warning: could not reach the specified policy target for trigger #%u (%s): %s\",\n                           trigger_index, trigger2str(trig), buff);\n\n                if (trig->alert_lw) {\n                    char title[1024];\n                    char ctr1[1024];\n                    char ctr2[1024];\n\n                    rc = snprintf(title, sizeof(title),\n                                  \"%s on %s: could not reach policy target\",\n                                  tag(pol), global_config.fs_path);\n                    if (rc >= sizeof(title)) {\n                        DisplayLog(LVL_DEBUG, tag(pol),\n                                   \"alert title truncated\");\n                    }\n\n                    sprint_ctr(ctr1, sizeof(ctr1), &summary->action_ctr,\n                               param->target);\n                    sprint_ctr(ctr2, sizeof(ctr2), &param->target_ctr,\n                               param->target);\n\n                    RaiseAlert(title, \"Could not reach the specified target \"\n                               \"for policy '%s', trigger #%u (%s)\\n\"\n                               \"%s\\nTargeted: %s\\nDone: 
%s\", tag(pol),\n                               trigger_index, trigger2str(trig),\n                               buff, ctr2, ctr1);\n                }\n            } else {\n                DisplayLog(LVL_CRIT, tag(pol),\n                           \"Warning: could not reach the specified policy target: %s\",\n                           buff);\n            }\n\n            update_trigger_status(pol, trigger_index, TRIG_NOT_ENOUGH);\n        } else\n            update_trigger_status(pol, trigger_index, TRIG_OK);\n\n    } else if (policy_rc == ENOENT) {\n        update_trigger_status(pol, trigger_index, TRIG_NO_LIST);\n        DisplayLog(LVL_EVENT, tag(pol),\n                   \"Could not run policy on %s: no list is available.\",\n                   param2targetstr(param, buff, sizeof(buff)));\n\n        status_buff = strdup(\"Could not run policy: no list is available\");\n    } else if (policy_rc == ECANCELED) {\n        update_trigger_status(pol, trigger_index, TRIG_ABORTED);\n        DisplayLog(LVL_CRIT, tag(pol),\n                   \"Policy run aborted after %s; target=%s; %llu successful actions (%.2f/sec); \"\n                   \"volume: %s (%s/sec); %u entries skipped; %u errors.\",\n                   time_buff, param2targetstr(param, buff, sizeof(buff)),\n                   summary->action_ctr.count,\n                   (float)summary->action_ctr.count / (float)spent,\n                   vol_buff, bw_buff, summary->skipped, summary->errors);\n\n        if (asprintf(&status_buff,\n                     \"Policy run aborted after %llu successful actions, volume: %s; %u entries skipped; %u errors\",\n                     summary->action_ctr.count, vol_buff, summary->skipped,\n                     summary->errors) < 0) {\n            DisplayLog(LVL_CRIT, tag(pol),\n                       \"Could not allocate string for status buffer\");\n            free(trigger_buff);\n            return;\n        }\n    } else {\n        update_trigger_status(pol, 
trigger_index, TRIG_CHECK_ERROR);\n        DisplayLog(LVL_CRIT, tag(pol), \"Error running policy on %s. \"\n                   \"%llu successful actions; volume: %s; %u entries skipped; %u errors.\",\n                   param2targetstr(param, buff, sizeof(buff)),\n                   summary->action_ctr.count, vol_buff,\n                   summary->skipped, summary->errors);\n\n        if (asprintf(&status_buff,\n                     \"Fatal error running policy after %llu successful actions, volume: %s; %u entries skipped; %u errors\",\n                     summary->action_ctr.count, vol_buff, summary->skipped,\n                     summary->errors) < 0) {\n            DisplayLog(LVL_CRIT, tag(pol),\n                       \"Could not allocate string for status buffer\");\n            free(trigger_buff);\n            return;\n        }\n    }\n\n    store_policy_run_stats(pol, summary->policy_start, time_end,\n                           trigger_buff, status_buff);\n    free(trigger_buff);\n    free(status_buff);\n\n    FlushLogs();\n}\n\n/** generic function to check a trigger (TODO to be completed) */\nstatic int check_trigger(policy_info_t *pol, unsigned trigger_index)\n{\n    policy_param_t param;\n    int rc;\n    action_summary_t summary;\n    trigger_item_t *trig = &pol->config->trigger_list[trigger_index];\n    time_modifier_t tmod;\n    target_iterator_t it;\n    char buff[1024];\n\n    if (!CheckFSDevice(pol))\n        return ENODEV;\n\n    memset(&param, 0, sizeof(param));\n\n    rc = check_trigger_type(trig);\n    if (rc)\n        return rc;\n    param.target = trig->target_type;\n\n    update_trigger_status(pol, trigger_index, TRIG_BEING_CHECKED);\n\n    // FIXME, for now, does not check start condition */\n\n    /* iteration on targets over the limit */\n    rc = trig_target_it(&it, pol, trig);\n    if (rc) {\n        update_trigger_status(pol, trigger_index, TRIG_CHECK_ERROR);\n        return rc;\n    }\n\n    while (!pol->aborted\n           && (rc = 
trig_target_next(&it, &param.optarg_u, &param.target_ctr,\n                                     &pol->trigger_info[trigger_index])) == 0\n           /* recheck condition as trig_target_next() can be long */\n           && !pol->aborted) {\n\n        /* check is done and logged in trig_target_next() */\n        if (check_only(pol))\n            continue;\n\n        /* complete computed limits with policy and trigger limits */\n        set_limits(pol, trig, &param.target_ctr);\n\n        if (check_maintenance_mode(pol, &tmod))\n            param.time_mod = &tmod;\n\n        param.action_params = &trig->action_params;\n\n        /* run actions! */\n        param2targetstr(&param, buff, sizeof(buff));\n\n        DisplayLog(LVL_EVENT, tag(pol), \"Checking policy rules for %s\", buff);\n        update_trigger_status(pol, trigger_index, TRIG_RUNNING);\n\n        /* insert info to DB about current trigger\n         * (for rbh-report --activity) */\n        char *trigger_buff;\n        if (asprintf(&trigger_buff, \"trigger: %s (%s), target: %s\",\n                     trigger2str(trig), one_shot(pol) ?\n                     \"one-shot command\" : \"daemon\", buff) < 0) {\n            DisplayLog(LVL_CRIT, tag(pol),\n                       \"Could not allocate string: trigger: %s (%s), target: %s\",\n                       trigger2str(trig), one_shot(pol) ?\n                       \"one-shot command\" : \"daemon\", buff);\n            rc = ENOMEM;\n            break;\n        }\n        store_policy_start_stats(pol, time(NULL), trigger_buff);\n        free(trigger_buff);\n\n        /* make sure to reset first eligible file for each target */\n        pol->first_eligible = 0;\n\n        memset(&summary, 0, sizeof(summary));\n        /* run the policy */\n        rc = run_policy(pol, &param, &summary, &pol->lmgr);\n\n        report_policy_run(pol, &param, &summary, &pol->lmgr, trigger_index, rc);\n\n        /* post apply sleep? 
*/\n        if (!pol->aborted && counter_is_set(&summary.action_ctr) &&\n            trig->post_trigger_wait > 0) {\n            DisplayLog(LVL_EVENT, tag(pol),\n                       \"Waiting %lus before checking other trigger targets.\",\n                       trig->post_trigger_wait);\n            rh_sleep(trig->post_trigger_wait);\n        }\n    }\n    trig_target_end(&it);\n\n    if (pol->aborted)\n        update_trigger_status(pol, trigger_index, TRIG_ABORTED);\n    else if (rc != ENOENT && rc != 0)\n        update_trigger_status(pol, trigger_index, TRIG_CHECK_ERROR);\n    else {\n        update_trigger_status(pol, trigger_index, TRIG_OK);\n        rc = 0;\n    }\n    return rc;\n}\n\nstatic inline void mk_fake_trigger(trigger_item_t *trig, policy_target_t tgt,\n                                   double usage_val)\n{\n    trig->trigger_type = TRIG_CONDITION;\n    trig->target_type = tgt;\n    trig->list = NULL;\n    trig->list_size = 0;\n    trig->check_interval = 0;\n    /* HW=LW=target */\n    trig->hw_type = PCT_THRESHOLD;\n    trig->lw_type = PCT_THRESHOLD;\n    trig->hw_u.percent = usage_val;\n    trig->lw_u.percent = usage_val;\n}\n\n/* force running policies on a specific target */\nstatic int targeted_run(policy_info_t *pol, const policy_opt_t *opt)\n{\n    policy_param_t param;\n    int rc;\n    action_summary_t summary;\n    char buff[1024];\n\n    rc = ListMgr_InitAccess(&pol->lmgr);\n    if (rc) {\n        DisplayLog(LVL_CRIT, tag(pol),\n                   \"Could not connect to database (error %d). 
Trigger checking cannot be started.\",\n                   rc);\n        return rc;\n    }\n\n    if (!CheckFSDevice(pol)) {\n        rc = ENODEV;\n        goto out;\n    }\n\n    memset(&param, 0, sizeof(param));\n\n    param.target = opt->target;\n    param.optarg_u = opt->optarg_u;\n    param.target_ctr.count = opt->max_action_nbr;\n    param.target_ctr.vol = opt->max_action_vol;\n\n    if ((param.target == TGT_FS\n#ifdef _LUSTRE\n         || param.target == TGT_OST || param.target == TGT_POOL\n#endif\n        ) && opt->usage_pct != -1.0) {\n        trigger_item_t trig;\n        trigger_info_t info;\n        struct statfs stfs;\n        char tgtname[256];\n\n        memset(&trig, 0, sizeof(trig));\n        memset(&info, 0, sizeof(info));\n\n        /* convert target level to target counter */\n\n        /* build fake trigger */\n        mk_fake_trigger(&trig, param.target, opt->usage_pct);\n\n        if (param.target == TGT_FS) {\n            strcpy(tgtname, \"filesystem\");\n            rc = get_fs_usage(pol, &stfs);\n        }\n#ifdef _LUSTRE\n        else if (param.target == TGT_OST) {\n            snprintf(tgtname, sizeof(tgtname), \"OST #%u\", param.optarg_u.index);\n            rc = Get_OST_usage(global_config.fs_path, param.optarg_u.index,\n                               &stfs);\n        } else if (param.target == TGT_POOL) {\n            snprintf(tgtname, sizeof(tgtname), \"pool '%s'\",\n                     param.optarg_u.name);\n            rc = Get_pool_usage(param.optarg_u.name, &stfs);\n        }\n#endif\n        else\n            RBH_BUG(\"Unexpected target type in targeted_run()\");\n\n        if (rc) {\n            DisplayLog(LVL_CRIT, tag(pol), \"Cannot retrieve usage for %s: %s\",\n                       tgtname, strerror(abs(rc)));\n            goto out;\n        }\n\n        rc = check_statfs_thresholds(&trig, tgtname, &stfs, &param.target_ctr,\n                                     &info);\n        if (rc)\n            goto out;\n\n        if 
(check_only(pol)) {\n            rc = 0;\n            goto out;\n        }\n\n        if (!counter_is_set(&param.target_ctr)) {\n            DisplayLog(LVL_CRIT, tag(pol),\n                       \"%s is already under the given threshold\", tgtname);\n            rc = 0;\n            goto out;\n        }\n    }\n\n    if (!pol->aborted) {\n        time_modifier_t tmod;\n\n        /* complete computed limits with policy global limits */\n        set_limits(pol, NULL, &param.target_ctr);\n\n        if (check_maintenance_mode(pol, &tmod))\n            param.time_mod = &tmod;\n\n        /* run actions! */\n        param2targetstr(&param, buff, sizeof(buff));\n        DisplayLog(LVL_EVENT, tag(pol), \"Checking policy rules for %s\", buff);\n\n        /* insert info to DB about current trigger\n         * (for rbh-report --activity) */\n        char *trigger_buff;\n        if (asprintf(&trigger_buff, \"manual run, target: %s\", buff) < 0) {\n            DisplayLog(LVL_CRIT, tag(pol),\n                       \"Could not allocate trigger string for target: %s\",\n                       buff);\n            rc = -ENOMEM;\n            goto out;\n        }\n        store_policy_start_stats(pol, time(NULL), trigger_buff);\n        free(trigger_buff);\n\n        /* make sure to reset first eligible file for each target */\n        pol->first_eligible = 0;\n\n        memset(&summary, 0, sizeof(summary));\n        /* run the policy */\n        rc = run_policy(pol, &param, &summary, &pol->lmgr);\n\n        report_policy_run(pol, &param, &summary, &pol->lmgr, -1, rc);\n\n        /* Manual run: no post action delay */\n    }\n\n out:\n    ListMgr_CloseAccess(&pol->lmgr);\n    return rc;\n}\n\nstruct targeted_run_arg {\n    policy_info_t *policy;\n    const policy_opt_t *options;\n};\nstatic void *targeted_run_thr(void *arg)\n{\n    struct targeted_run_arg *targ = (struct targeted_run_arg *)arg;\n    int rc;\n\n    rc = targeted_run(targ->policy, targ->options);\n    /* this was 
allocated by the thread starter */\n    DisplayLog(LVL_DEBUG, tag(targ->policy),\n               \"Policy run terminated with status %d\", rc);\n    MemFree(arg);\n    pthread_exit(NULL);\n    return NULL;\n}\n\n/**\n * Main loop for checking triggers periodically (1 per policy).\n */\nstatic void *trigger_check_thr(void *thr_arg)\n{\n    unsigned int i;\n    int rc;\n    unsigned int nb_reset = 0;\n    unsigned int nb_total = 0;\n    double max_usage;\n    char tmpstr[128];\n    policy_info_t *pol = (policy_info_t *) thr_arg;\n    time_t last_action_check = time(NULL);\n\n    rc = ListMgr_InitAccess(&pol->lmgr);\n    if (rc) {\n        DisplayLog(LVL_CRIT, tag(pol),\n                   \"Could not connect to database (error %d). Trigger checking cannot be started.\",\n                   rc);\n        return NULL;\n    }\n\n    if (pol->config->check_action_status_on_startup) {\n        if (pol->descr->status_current == NULL) {\n            DisplayLog(LVL_MAJOR, tag(pol),\n                       \"'check_action_on_startup' is enabled, but no 'status_current'\"\n                       \" is defined for this policy: skipping action check (check_action_on_startup=no).\");\n            pol->config->check_action_status_on_startup = false;\n        } else {\n            DisplayLog(LVL_EVENT, tag(pol),\n                       \"Checking status of outstanding actions...\");\n            rc = check_current_actions(pol, &pol->lmgr, &nb_reset, &nb_total);\n\n            if (rc != 0)\n                DisplayLog(LVL_CRIT, tag(pol),\n                           \"Error checking outstanding action status\");\n            else\n                DisplayLog(LVL_EVENT, tag(pol),\n                           \"%u actions finished / %u total\", nb_reset,\n                           nb_total);\n        }\n    }\n\n    do {\n        max_usage = 0.0;\n\n        /* check every trigger */\n        for (i = 0; i < pol->config->trigger_count; i++) {\n            const char *tname = 
trigger2str(&pol->config->trigger_list[i]);\n\n            if (pol->aborted) {\n                DisplayLog(LVL_MAJOR, tag(pol),\n                           \"Stop requested: aborting trigger check\");\n                break;\n            }\n\n            if (time(NULL) - pol->trigger_info[i].last_check >=\n                pol->config->trigger_list[i].check_interval) {\n                if (pol->trigger_info[i].last_check != 0)\n                    DisplayLog(LVL_DEBUG, tag(pol),\n                               \"Checking trigger #%u (%s), last check %lus ago\",\n                               i, tname,\n                               time(NULL) - pol->trigger_info[i].last_check);\n                else\n                    DisplayLog(LVL_DEBUG, tag(pol),\n                               \"Checking trigger #%u (%s), never checked\", i,\n                               tname);\n\n                rc = check_trigger(pol, i);\n\n                /* don't update last_check if trigger check failed */\n                if (rc != 0) {\n                    if (rc == ECANCELED)\n                        DisplayLog(LVL_CRIT, tag(pol),\n                                   \"Trigger #%u (%s): aborted.\", i, tname);\n                    else\n                        DisplayLog(LVL_CRIT, tag(pol), \"Trigger #%u (%s): \"\n                                   \"returned error %d... 
Will retry later\",\n                                   i, tname, rc);\n                } else\n                    pol->trigger_info[i].last_check = time(NULL);\n            }\n\n            /* in any case compute max usage */\n            if (pol->trigger_info[i].last_usage > max_usage)\n                max_usage = pol->trigger_info[i].last_usage;\n        }\n\n        /* Finally update max_usage in persistent stats */\n        if (max_usage > 0.0) {\n            snprintf(tmpstr, sizeof(tmpstr), \"%.2f\", max_usage);\n            if (ListMgr_SetVar(&pol->lmgr, USAGE_MAX_VAR, tmpstr) != DB_SUCCESS)\n                DisplayLog(LVL_CRIT, tag(pol),\n                           \"Error updating value of \" USAGE_MAX_VAR\n                           \" variable (value = %s)\", tmpstr);\n            DisplayLog(LVL_EVENT, tag(pol), \"Current usage max is %.2f%%\",\n                       max_usage);\n        }\n\n        if (!one_shot(pol) && !pol->aborted) {\n            rh_intr_sleep(pol->gcd_interval, pol->aborted);\n            if (pol->aborted)\n                goto out;\n        } else\n            goto out;\n\n        /* cancel old actions */\n        if ((pol->config->check_action_status_delay != 0)\n            && (time(NULL) - last_action_check >=\n                pol->config->check_action_status_delay)) {\n            if (pol->descr->status_current == NULL) {\n                DisplayLog(LVL_MAJOR, tag(pol),\n                           \"'check_actions_interval' is enabled but no 'status_current'\"\n                           \" is defined for this policy: disabling action check (check_actions_interval=0).\");\n                pol->config->check_action_status_delay = 0;\n            } else {\n                DisplayLog(LVL_EVENT, tag(pol),\n                           \"Checking status of outstanding actions...\");\n                rc = check_current_actions(pol, &pol->lmgr, &nb_reset,\n                                           &nb_total);\n\n                if 
(rc != 0)\n                    DisplayLog(LVL_CRIT, tag(pol),\n                               \"Error checking outstanding action status\");\n                else\n                    DisplayLog(LVL_EVENT, tag(pol),\n                               \"%u actions finished / %u total\", nb_reset,\n                               nb_total);\n            }\n        }\n    } while (1);\n\n out:\n    ListMgr_CloseAccess(&pol->lmgr);\n    pthread_exit(NULL);\n    return NULL;\n}\n\n/* ------------ Exported functions ------------ */\n\n/** Recompute trigger check interval as the GCD of all triggers\n *  (required after reloading config)\n */\nvoid policy_module_update_check_interval(policy_info_t *policy)\n{\n    unsigned int i;\n\n    policy->gcd_interval = 1;\n\n    if (policy->config->trigger_count == 0)\n        return;\n\n    /* compute GCD of trigger check intervals */\n    if (policy->config->trigger_count == 1)\n        policy->gcd_interval = policy->config->trigger_list[0].check_interval;\n    else if (policy->config->trigger_count > 1) {\n        policy->gcd_interval =\n            gcd(policy->config->trigger_list[0].check_interval,\n                policy->config->trigger_list[1].check_interval);\n        for (i = 2; i < policy->config->trigger_count; i++)\n            policy->gcd_interval =\n                gcd(policy->gcd_interval,\n                    policy->config->trigger_list[i].check_interval);\n    }\n\n    DisplayLog(LVL_DEBUG, tag(policy), \"GCD of trigger check intervals is %us\",\n               (unsigned int)policy->gcd_interval);\n}\n\n/**\n * Initialize module and start checker threads\n */\nint policy_module_start(policy_info_t *policy, /* out */\n                        policy_descr_t *policy_descr,  /* in */\n                        policy_run_config_t *p_config, /* in */\n                        const policy_opt_t *options)\n{   /* in */\n    unsigned int i;\n    int rc;\n\n    if (!policy || !policy_descr || !p_config || !options)\n        
RBH_BUG(\"Unexpected NULL argument\");\n\n    memset(policy, 0, sizeof(*policy));\n\n    policy->descr = policy_descr;\n    policy->config = p_config;\n\n    policy->fs_dev = get_fsdev();\n    policy->flags = options->flags;\n\n    /* initialize schedulers */\n    policy->sched_res = calloc(p_config->sched_count,\n                               sizeof(struct sched_res_t));\n    if (policy->sched_res == NULL)\n        return ENOMEM;\n\n    for (i = 0; i < p_config->sched_count; i++) {\n        const char *sched_name = policy->config->schedulers[i]->sched_name;\n        DisplayLog(LVL_DEBUG, tag(policy), \"Initializing scheduler '%s'\",\n                   sched_name);\n        rc = sched_init(&policy->sched_res[i], p_config->schedulers[i],\n                        p_config->sched_cfg[i], p_config);\n        if (rc) {\n            DisplayLog(LVL_CRIT, tag(policy),\n                       \"Failed to initialize scheduler '%s'\", sched_name);\n            return rc;\n        }\n    }\n\n    /* policy-> progress, first_eligible, time_modifier, threads\n     * are initialized in policy_run (for internal use in policy_run).\n     */\n\n    /* check there is at least 1 trigger */\n    if ((options->target == TGT_NONE) && (p_config->trigger_count == 0)) {\n        DisplayLog(LVL_CRIT, tag(policy),\n                   \"No trigger defined in configuration file, and no target \"\n                   \"specified on command line. Disabling action scheduling.\");\n        return ENOENT;\n    } else if (NO_POLICY(&policy_descr->rules)\n               && !(options->flags & RUNFLG_IGNORE_POL)) {\n        DisplayLog(LVL_CRIT, tag(policy),\n                   \"No policy rules defined in configuration file... 
\"\n                   \"Disabling action scheduling.\");\n        return ENOENT;\n    }\n\n    /* Display an info message if no default policy is specified */\n    if (!has_default_policy(&policy_descr->rules))\n        DisplayLog(LVL_EVENT, tag(policy),\n                   \"Notice: no 'default' policy rule is defined. \"\n                   \"Unmatched entries will be ignored.\");\n\n    /* intervals must only be computed for daemon mode */\n    if (!one_shot(policy))\n        policy_module_update_check_interval(policy);\n    else\n        policy->gcd_interval = 1;\n\n    /* initialize worker queue */\n    rc = CreateQueue(&policy->queue, p_config->queue_size, AS_ENUM_COUNT - 1,\n                     AF_ENUM_COUNT);\n    if (rc) {\n        DisplayLog(LVL_CRIT, tag(policy),\n                   \"Error %d initializing action queue\", rc);\n        return rc;\n    }\n\n    /* start worker threads */\n    rc = start_worker_threads(policy);\n    if (rc)\n        /* don't care about leaks here, as the program is going to exit */\n        return rc;\n\n    /**  @TODO take max-count and max-vol parameters into account */\n\n    /* Allocate and initialize trigger_info array\n     * (only if there is no a specific target)\n     */\n    if (options->target == TGT_NONE) {\n        policy->trigger_info =\n            (trigger_info_t *) MemCalloc(p_config->trigger_count,\n                                         sizeof(trigger_info_t));\n        if (policy->trigger_info == NULL) {\n            DisplayLog(LVL_CRIT, tag(policy), \"Memory Error in %s\", __func__);\n            return ENOMEM;\n        }\n\n        for (i = 0; i < p_config->trigger_count; i++)\n            policy->trigger_info[i].status = TRIG_NOT_CHECKED;\n\n        /* start trigger check thread */\n        rc = pthread_create(&policy->trigger_thr, NULL, trigger_check_thr,\n                            (void *)policy);\n    } else {    /* targeted run */\n\n        /* This structure is to be released by the 
thread, once it used its\n         * contents */\n        struct targeted_run_arg *thr_arg =\n            MemAlloc(sizeof(struct targeted_run_arg));\n\n        if (thr_arg == NULL) {\n            DisplayLog(LVL_CRIT, tag(policy), \"Memory Error in %s\", __func__);\n            return ENOMEM;\n        }\n        thr_arg->policy = policy;\n        thr_arg->options = options;\n\n        rc = pthread_create(&policy->trigger_thr, NULL, targeted_run_thr,\n                            (void *)thr_arg);\n    }\n\n    if (rc != 0) {\n        rc = errno;\n        DisplayLog(LVL_CRIT, tag(policy),\n                   \"Error %d starting trigger thread: %s\", rc, strerror(rc));\n    }\n    return rc;\n}\n\nint policy_module_stop(policy_info_t *policy)\n{\n    policy->aborted = true;    /* seen by all components, from triggers to worker\n                                * threads in policy_run */\n    return 0;\n}\n\nint policy_module_wait(policy_info_t *policy)\n{\n    void *returned;\n    int rc = 0;\n\n    /* /!\\ pb: 2 threads cannot join the same other thread.\n     * In one_shot mode, the main thread is already waiting\n     * for main thread to end. 
Thus, the signal manager thread\n     * would get an error when trying to join it after abort.\n     */\n    if (!policy->waiting) {\n        /* Ensure SIGTERM is not simultaneous with module start */\n        if (policy->trigger_thr != 0) {\n            policy->waiting = true;\n            rc = pthread_join(policy->trigger_thr, &returned);\n            if (rc != 0)\n                DisplayLog(LVL_MAJOR, tag(policy),\n                           \"pthread_join() returned error %d: %s\", rc,\n                           strerror(rc));\n            else\n                policy->waiting = false;\n        }\n    } else {\n        /* the second thread that needs to join polls the 'waiting' variable */\n        while (policy->waiting)\n            rh_sleep(1);\n    }\n    return rc;\n}\n\nvoid policy_module_dump_stats(policy_info_t *policy)\n{\n    unsigned int status_tab[AS_ENUM_COUNT];\n    unsigned long long feedback_tab[AF_ENUM_COUNT];\n\n    unsigned int nb_waiting, nb_items;\n    time_t last_submitted, last_started, last_ack;\n\n    char tmp_buff[256];\n    char trigstr[256];\n    time_t now = time(NULL);\n    int i;\n    struct tm paramtm;\n\n    /* Stats about triggers */\n    DisplayLog(LVL_MAJOR, \"STATS\", \"======= %s policy: trigger stats ======\",\n               tag(policy));\n\n    /* sanity check */\n    if ((policy->config->trigger_list != NULL)\n        && (policy->trigger_info != NULL)) {\n        for (i = 0; i < policy->config->trigger_count; i++) {\n            snprintf(trigstr, sizeof(trigstr), \"Trigger #%u (%s)\", i,\n                     trigger2str(&policy->config->trigger_list[i]));\n\n            switch (policy->trigger_info[i].status) {\n            case TRIG_NOT_CHECKED: /* not checked yet */\n                DisplayLog(LVL_MAJOR, \"STATS\", \"%-30s: not checked yet.\",\n                           trigstr);\n                break;\n            case TRIG_BEING_CHECKED:   /* currently beeing checked */\n                DisplayLog(LVL_MAJOR, 
\"STATS\", \"%-30s: being checked.\",\n                           trigstr);\n                break;\n            case TRIG_RUNNING: /* purge running for this trigger */\n                DisplayLog(LVL_MAJOR, \"STATS\", \"%-30s: running.\", trigstr);\n                break;\n            case TRIG_OK:  /* no purge is needed */\n                strftime(tmp_buff, sizeof(tmp_buff), \"%Y/%m/%d %T\",\n                         localtime_r(&policy->trigger_info[i].last_check,\n                                     &paramtm));\n                DisplayLog(LVL_MAJOR, \"STATS\", \"%-30s: OK (last check: %s).\",\n                           trigstr, tmp_buff);\n                break;\n            case TRIG_NO_LIST: /* no list available */\n                strftime(tmp_buff, sizeof(tmp_buff), \"%Y/%m/%d %T\",\n                         localtime_r(&policy->trigger_info[i].last_check,\n                                     &paramtm));\n                DisplayLog(LVL_MAJOR, \"STATS\",\n                           \"%-30s: no list available (last check: %s).\",\n                           trigstr, tmp_buff);\n                break;\n            case TRIG_NOT_ENOUGH:  /* not enough candidates */\n                strftime(tmp_buff, sizeof(tmp_buff), \"%Y/%m/%d %T\",\n                         localtime_r(&policy->trigger_info[i].last_check,\n                                     &paramtm));\n                DisplayLog(LVL_MAJOR, \"STATS\",\n                           \"%-30s: last run (%s) was incomplete: not enough candidate entries.\",\n                           trigstr, tmp_buff);\n                break;\n\n            case TRIG_CHECK_ERROR: /* Misc Error */\n                strftime(tmp_buff, sizeof(tmp_buff), \"%Y/%m/%d %T\",\n                         localtime_r(&policy->trigger_info[i].last_check,\n                                     &paramtm));\n                DisplayLog(LVL_MAJOR, \"STATS\", \"%-30s: last check failed (%s).\",\n                           trigstr, tmp_buff);\n    
            break;\n\n            case TRIG_ABORTED: /*  */\n                strftime(tmp_buff, sizeof(tmp_buff), \"%Y/%m/%d %T\",\n                         localtime_r(&policy->trigger_info[i].last_check,\n                                     &paramtm));\n                DisplayLog(LVL_MAJOR, \"STATS\", \"%-30s: last run aborted (%s)\",\n                           trigstr, tmp_buff);\n                break;\n\n            case TRIG_UNSUPPORTED: /* Trigger not supported in this mode */\n                DisplayLog(LVL_MAJOR, \"STATS\", \"%-30s: not supported.\",\n                           trigstr);\n                break;\n            }\n\n            print_ctr(LVL_MAJOR, \"STATS\", \"    last run\",\n                      &policy->trigger_info[i].last_ctr,\n                      policy->config->trigger_list[i].target_type);\n            if (!one_shot(policy))\n                print_ctr(LVL_MAJOR, \"STATS\", \"    total   \",\n                          &policy->trigger_info[i].total_ctr,\n                          policy->config->trigger_list[i].target_type);\n        }\n    }\n\n    /* Policy stats */\n    RetrieveQueueStats(&policy->queue, &nb_waiting, &nb_items, &last_submitted,\n                       &last_started, &last_ack, status_tab, feedback_tab);\n\n    DisplayLog(LVL_MAJOR, \"STATS\", \"======= %s policy: action stats ======\",\n               tag(policy));\n    DisplayLog(LVL_MAJOR, \"STATS\", \"idle threads       = %u\", nb_waiting);\n    DisplayLog(LVL_MAJOR, \"STATS\", \"queued entries     = %u\", nb_items);\n    DisplayLog(LVL_MAJOR, \"STATS\", \"action status:\");\n\n    for (i = 0; i < AS_ENUM_COUNT; i++) {\n        /* always display AS_OK and display error only if they have occurred */\n        if ((status_tab[i] > 0) || (i == AS_OK))\n            DisplayLog(LVL_MAJOR, \"STATS\", \"    %-30s = %u\",\n                       action_status_descr[i], status_tab[i]);\n    }\n\n    if (feedback_tab[AF_TARGETED_OK] > 0)\n        DisplayLog(LVL_MAJOR, 
\"STATS\",\n                   \"%llu actions successful/%llu, %s (%llu blocks, %llu in target devices)\",\n                   feedback_tab[AF_NBR_OK],\n                   feedback_tab[AF_NBR_OK] + feedback_tab[AF_NBR_NOK],\n                   FormatFileSize(tmp_buff, sizeof(tmp_buff),\n                                  feedback_tab[AF_VOL_OK]),\n                   feedback_tab[AF_BLOCKS_OK], feedback_tab[AF_TARGETED_OK]);\n    else\n        DisplayLog(LVL_MAJOR, \"STATS\",\n                   \"%llu actions successful/%llu, %s (%llu blocks)\",\n                   feedback_tab[AF_NBR_OK],\n                   feedback_tab[AF_NBR_OK] + feedback_tab[AF_NBR_NOK],\n                   FormatFileSize(tmp_buff, sizeof(tmp_buff),\n                                  feedback_tab[AF_VOL_OK]),\n                   feedback_tab[AF_BLOCKS_OK]);\n\n    if (last_submitted)\n        DisplayLog(LVL_MAJOR, \"STATS\", \"last action queued    %2d s ago\",\n                   (int)(now - last_submitted));\n\n    if (last_started)\n        DisplayLog(LVL_MAJOR, \"STATS\", \"last action started   %2d s ago\",\n                   (int)(now - last_started));\n\n    if (last_ack)\n        DisplayLog(LVL_MAJOR, \"STATS\", \"last action completed %2d s ago\",\n                   (int)(now - last_ack));\n}\n"
  },
  {
    "path": "src/policies/run_policies.h",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2009-2014 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n/**\n * \\file run_policies.h\n * \\brief This module triggers migrations to HSM or external storage.\n */\n#ifndef _RUN_POLICIES_H\n#define _RUN_POLICIES_H\n\n#include \"policy_run.h\"\n\ntypedef struct policy_runs_t {\n    policy_info_t *runs;\n    unsigned int count;\n} policy_runs_t;\n\n/* defined in policy_triggers.c */\nextern policy_runs_t policy_runs;\n\n/* Action status */\ntypedef enum {\n    AS_OK = 0,  /* action successful */\n\n/* skipped */\n    AS_ACCESSED,    /* entry has been accessed recently */\n    AS_MOVED,   /* entry has been moved or deleted */\n    AS_WHITELISTED, /* entry is whitelisted  */\n    AS_OUT_OF_SCOPE,    /* entry is no longer in policy scope */\n    AS_NO_POLICY,   /* entry matches no policy */\n    AS_BAD_TYPE,    /* policy does not apply to this type of entry */\n    AS_BUSY,    /* entry is is use */\n    AS_ALREADY, /* action is already running */\n    AS_NOT_SCHEDULED, /* skipped by scheduler */\n\n/* errors */\n    AS_MISSING_MD,  /* entry metadata is incomplete */\n    AS_STAT_FAILURE,  /* stat failure */\n    AS_ERROR,   /* action failed */\n    AS_ABORT,   /* action aborted by termination signal */\n\n    AS_ENUM_COUNT   /* last status index + 1 */\n} action_status_t;\n\n/* Action status description */\nstatic const char __attribute__ ((__unused__))\n    *action_status_descr[AS_ENUM_COUNT] = {\n        \"action successful\", \"accessed since last update\",\n        \"moved or deleted since last update\", \"whitelisted/ignored\",\n        \"out of scope\", \"no 
matching rule\", \"entry type out of scope\",\n        \"entry is in use/busy\", \"action already running\",\n        \"skipped by scheduler\",\n        \"incomplete metadata\", \"stat failure\", \"action error\",\n        \"action aborted\"};\n\n/* feedback from action queue (count, volume, ...) */\ntypedef enum {\n    AF_NBR_OK,\n    AF_NBR_NOK,\n\n    AF_VOL_OK,\n    AF_VOL_NOK,\n\n    AF_TARGETED_OK,\n    AF_TARGETED_NOK,\n\n    AF_BLOCKS_OK,\n    AF_BLOCKS_NOK,\n\n    AF_ENUM_COUNT   /* last status index + 1 */\n} action_feedback_t;\n\ntypedef struct policy_param_t {\n    policy_target_t target;\n    target_u optarg_u;\n    counters_t target_ctr;\n    time_modifier_t *time_mod;\n\n    const action_params_t *action_params;\n\n} policy_param_t;\n\nint run_policy(policy_info_t *p_pol_info, const policy_param_t *p_param,\n               action_summary_t *p_summary, lmgr_t *lmgr);\n\n/* Note: the number of threads is in p_pol_info->config */\nint start_worker_threads(policy_info_t *p_pol_info);\n\n/* Note: the timeout is in p_pol_info->config */\nint check_current_actions(policy_info_t *p_pol_info, lmgr_t *lmgr,\n                          unsigned int *p_nb_reset, unsigned int *p_nb_total);\n\n#endif\n"
  },
  {
    "path": "src/policies/status_manager.c",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2009, 2010 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n#ifdef HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n\n#include \"status_manager.h\"\n#include \"rbh_misc.h\"\n#include \"rbh_logs.h\"\n#include \"rbh_modules.h\"\n#include \"Memory.h\"\n\n/** list of status manager instances */\nstatic sm_instance_t **sm_inst = NULL;\nunsigned int sm_inst_count = 0; /* must be available from other modules\n                                 * to handle attribute masks */\n\n/** list of status manager info */\nstruct _sm_attr_info *sm_attr_info = NULL;\nunsigned int sm_attr_count;\n\nvoid sm_status_ensure_alloc(char const ***p_tab)\n{\n    if (*p_tab != NULL || sm_inst_count == 0)\n        return;\n\n    *p_tab = MemCalloc(sm_inst_count, sizeof(char *));\n}\n\nvoid sm_status_free(char const ***p_tab)\n{\n    if (*p_tab == NULL)\n        return;\n\n    MemFree(*p_tab);\n    *p_tab = NULL;\n}\n\n/** allocate sm_info array */\nvoid sm_info_ensure_alloc(void ***p_tab)\n{\n    if (*p_tab != NULL || sm_attr_count == 0)\n        return;\n\n    *p_tab = MemCalloc(sm_attr_count, sizeof(void *));\n}\n\n/** free info array */\nvoid sm_info_free(void ***p_tab)\n{\n    int i;\n\n    if (*p_tab == NULL || sm_attr_count == 0)\n        return;\n\n    for (i = 0; i < sm_attr_count; i++)\n        free((*p_tab)[i]);  /* strdup -> free */\n\n    MemFree(*p_tab);\n    *p_tab = NULL;\n}\n\nint set_sm_info(const sm_instance_t *smi, attr_set_t *pattrs,\n                unsigned int attr_index, void *val)\n{\n    void **info;\n\n    /* check allocation of sm_info array */\n    
sm_info_ensure_alloc(&pattrs->attr_values.sm_info);\n    if (pattrs->attr_values.sm_info == NULL)\n        return -ENOMEM;\n\n    assert(attr2sminfo_index(smi_info_index(smi, attr_index))\n           < sizeof(pattrs->attr_mask.sm_info) * CHAR_BIT);\n\n    info = &SMI_INFO(pattrs, smi, attr_index);\n\n    if (*info != NULL)\n        /* free the previous value */\n        free(*info);\n\n    *info = val;\n    ATTR_MASK_INFO_SET(pattrs, smi, attr_index);\n\n    return 0;\n}\n\n/** build a string with the list of statuses in the given mask */\nchar *name_status_mask(uint32_t status_mask, char *buf, int sz)\n{\n    int i = 0;\n    uint32_t m = 1;\n    buf[0] = '\\0';\n    char *cur = buf;\n\n    for (i = 0; i < sm_inst_count; i++, m <<= 1) {\n        if (status_mask & m) {\n            sm_instance_t *smi = get_sm_instance(i);\n            /* append smi name */\n            if (!EMPTY_STRING(buf)) {\n                *cur = ',';\n                cur++;\n            }\n            rh_strncpy(cur, smi->instance_name, sz - (ptrdiff_t) (cur - buf));\n            cur += strlen(cur);\n        }\n    }\n    return buf;\n}\n\n/** retrieve a status manager from its name */\nsm_instance_t *smi_by_name(const char *smi_name)\n{\n    int i;\n\n    for (i = 0; i < sm_inst_count; i++) {\n        sm_instance_t *smi = sm_inst[i];\n\n        if (!strcmp(smi->instance_name, smi_name))\n            return smi;\n    }\n    /* not found */\n    return NULL;\n}\n\nstatic const sm_info_def_t status_def = {\n    .user_name = \"status\",\n    .db_name = \"status\",\n    .db_type = DB_TEXT, /* not used? */\n    .db_type_size = 0,\n    .crit_type = PT_STRING,\n};\n\n/** helper for sm_attr_get. Assume smi is set. 
*/\nstatic int get_smi_attr(const sm_instance_t *smi, const attr_set_t *p_attrs,\n                        const char *attr_name, void **val,\n                        const sm_info_def_t **ppdef, unsigned int *attr_index)\n{\n    int i;\n\n    assert(smi != NULL);\n\n    if (!strcasecmp(attr_name, \"status\")) {\n        *ppdef = &status_def;\n\n        *attr_index = smi_status_index(smi);\n        if (val == NULL)\n            /* caller doesn't care about the value, it just want to know if\n             * the attribute exists (+ the attribute index)*/\n            return 0;\n\n        /* XXX NULL or empty string? */\n        if (!ATTR_MASK_STATUS_TEST(p_attrs, smi->smi_index))\n            return -ENODATA;\n        if (p_attrs->attr_values.sm_status == NULL)\n            return -ENODATA;\n\n        *val = (char *)STATUS_ATTR(p_attrs, smi->smi_index);\n        return *val != NULL ? 0 : -ENODATA;\n    }\n\n    /* other attrs */\n    for (i = 0; i < smi->sm->nb_info; i++) {\n        if (!strcasecmp(attr_name, smi->sm->info_types[i].user_name)) {\n            *ppdef = &smi->sm->info_types[i];\n            *attr_index = smi_info_index(smi, i);\n\n            if (val == NULL)\n                /* caller doesn't care about the value, it just want to know if\n                 * the attribute exists (and the smi info index) */\n                return 0;\n\n            if (!ATTR_MASK_INFO_TEST(p_attrs, smi, i))\n                return -ENODATA;\n            if (p_attrs->attr_values.sm_info == NULL)\n                return -ENODATA;\n\n            *val = SMI_INFO(p_attrs, smi, i);\n            return *val != NULL ? 
0 : -ENODATA;\n        }\n    }\n    return -EINVAL;\n}\n\n/* -EINVAL: invalid argument.\n * -ENODATA: attribute exists, but is missing.\n */\nint sm_attr_get(const sm_instance_t *smi, const attr_set_t *p_attrs,\n                const char *name, void **val, const sm_info_def_t **ppdef,\n                unsigned int *attr_index)\n{\n    const char *dot = strchr(name, '.');\n\n    /* if there is no smi in context, and no dot is found:\n     * nothing can't match */\n    if (!dot && !smi)\n        return -EINVAL;\n\n    if (dot) {\n        char *smi_name = strndup(name, (ptrdiff_t) dot - (ptrdiff_t) name);\n        sm_instance_t *smi2;\n\n        /* get the status manager with the given name */\n        smi2 = smi_by_name(smi_name);\n        if (smi2 == NULL) {\n            DisplayLog(LVL_CRIT, __func__,\n                       \"ERROR: unknown status manager '%s' in parameter '%s'\",\n                       smi_name, name);\n            free(smi_name);\n            return -EINVAL;\n        }\n        free(smi_name);\n\n        return get_smi_attr(smi2, p_attrs, dot + 1, val, ppdef, attr_index);\n    } else {\n        return get_smi_attr(smi, p_attrs, name, val, ppdef, attr_index);\n    }\n}\n\n/* contents of status_manager:\nname, flags, status_enum, status_count, status_needs_attrs_cached,\nstatus_needs_attrs_fresh, get_status_func, changelog_cb.\n\nFor status manager that handle removed entries the 2 masks are:\n    - to determine if the entry is to be saved in softrm table\n    - fields to save in softrm table (needed for undelete or recovery)\n*/\n\n/* -------------- managing status managers ---------- */\n\nstatic status_manager_t *load_status_manager(const char *name)\n{\n    return module_get_status_manager(name);\n}\n\n/** check if an instance of shared status manager exists */\nstatic bool sm_instance_exists(bool is_shared, const char *sm_name,\n                               const char *pol_name, sm_instance_t **smi_ptr)\n{\n    int i;\n\n    for (i = 0; i 
< sm_inst_count; i++) {\n        if ((is_shared && !strcasecmp(sm_inst[i]->sm->name, sm_name)) ||\n            (!is_shared && !strcasecmp(sm_inst[i]->sm->name, sm_name) &&\n             !strcasecmp(sm_inst[i]->instance_name, pol_name))) {\n            *smi_ptr = sm_inst[i];\n            return true;\n        }\n    }\n    return false;\n}\n\n/**\n * As status managers don't know their index instance by advance,\n * they provide generic masks as if there were only their own status and\n * attributes.\n * This function translates generic masks to the actual ones.\n */\nstatic attr_mask_t actual_mask(sm_instance_t *smi, attr_mask_t mask)\n{\n    uint64_t gen_info;\n    uint32_t gen_status;\n\n    /* generic attribute mask */\n    gen_info = mask.sm_info & bit_range(GENERIC_INFO_OFFSET, smi->sm->nb_info);\n    /* generic status mask */\n    gen_status = mask.status & SMI_MASK(0);\n\n    /* clean generic info */\n    mask.sm_info &= ~gen_info;\n    mask.status &= ~gen_status;\n\n    /* replace with real info */\n    if (gen_info)\n        /* shift gen_info by real offset - GENERIC_INFO_OFFSET */\n        mask.sm_info |= (gen_info << smi->sm_info_offset);\n\n    if (gen_status)\n        mask.status |= SMI_MASK(smi->smi_index);\n\n    return mask;\n}\n\n/** translate a generic mask SMI_MASK(0) and GENERIC_INFO_OFFSET to all status\n *  and info masks */\nattr_mask_t translate_all_status_mask(attr_mask_t mask)\n{\n    uint64_t gen_info;\n    uint32_t gen_status;\n\n    /* generic status mask */\n    gen_status = mask.status & SMI_MASK(0);\n    /* generic attribute mask */\n    gen_info = mask.sm_info & bit_range(GENERIC_INFO_OFFSET, sm_attr_count);\n\n    /* clean generic bits */\n    mask.status &= ~gen_status;\n    mask.sm_info &= ~gen_info;\n\n    /* replace with real bits */\n    if (gen_info)\n        mask.sm_info |= all_sm_info_mask();\n\n    if (gen_status)\n        mask.status |= all_status_mask();\n\n    return mask;\n}\n\n/** create a status manager instance (if 
it does not already exist) */\nsm_instance_t *create_sm_instance(const char *pol_name, const char *sm_name)\n{\n    const status_manager_t *sm = load_status_manager(sm_name);\n    sm_instance_t *smi = NULL;\n    int i;\n\n    /* load_status_manager() checks that the status manager exists and load it\n     * if necessary. NULL means that it really isn't available. */\n    if (sm == NULL)\n        return NULL;\n\n    /* if it is shared, check if it is already instanciated */\n    if (sm_instance_exists(sm->flags & SM_SHARED, sm_name, pol_name, &smi))\n        return smi;\n\n    /* create an instance */\n    smi = calloc(1, sizeof(sm_instance_t));\n    if (smi == NULL)\n        return NULL;\n\n    smi->sm = sm;\n    smi->smi_index = sm_inst_count;\n\n    /* compute the offset of specific policy info in attribute structure */\n    if (sm_inst_count == 0)\n        smi->sm_info_offset = 0;\n    else\n        /* offset of smi info: previous attr count */\n        smi->sm_info_offset = sm_attr_count;\n\n    if (sm->flags & SM_SHARED)\n        /* If the status manager is shared between policies,\n         * it just consists of the status manager name. 
*/\n        smi->instance_name = strdup(sm->name);\n    else    /* private status manager (1 instance per policy) */\n        /* same as <policy name>\\0 */\n        smi->instance_name = strdup(pol_name);\n\n    if (smi->instance_name == NULL)\n        goto out_free;\n\n    /* <instance_name>_status */\n    if (asprintf(&smi->db_field, \"%s_status\", smi->instance_name) < 0)\n        goto out_free;\n\n    if (asprintf(&smi->user_name, \"%s.status\", smi->instance_name) < 0)\n        goto out_free;\n\n    /* @TODO load its configuration */\n    /* @TODO initialize it */\n\n    /* check it fits into the status mask */\n    if (sm_inst_count + 1 >= MEMBER_SIZE(attr_mask_t, status) * CHAR_BIT) {\n        DisplayLog(LVL_CRIT, \"smi_create\",\n                   \"Too many status managers: max %lu supported\",\n                   MEMBER_SIZE(attr_mask_t, status) * CHAR_BIT);\n        goto out_free;\n    }\n\n    /* check it fits into the sm_info mask */\n    if (sm_attr_count + sm->nb_info >=\n        MEMBER_SIZE(attr_mask_t, sm_info) * CHAR_BIT) {\n        DisplayLog(LVL_CRIT, \"smi_create\",\n                   \"Too many policy-specific attributes: max %lu supported\",\n                   MEMBER_SIZE(attr_mask_t, sm_info) * CHAR_BIT);\n        goto out_free;\n    }\n\n    /* add it the the list of SMIs */\n    sm_inst_count++;\n    sm_inst = realloc(sm_inst, sm_inst_count * sizeof(sm_instance_t *));\n    if (sm_inst == NULL)\n        goto out_free;\n    sm_inst[sm_inst_count - 1] = smi;\n\n    /* register sm specific info */\n    sm_attr_count += sm->nb_info;\n    sm_attr_info =\n        realloc(sm_attr_info, sm_attr_count * sizeof(struct _sm_attr_info));\n    if (sm_attr_info == NULL)\n        goto out_free;\n\n    /* <instance_name>_<attr_name> */\n    for (i = 0; i < sm->nb_info; i++) {\n        int tgt_idx = sm_attr_count - sm->nb_info + i;\n\n        if (asprintf((char **)&sm_attr_info[tgt_idx].db_attr_name, \"%s_%s\",\n                     smi->instance_name, 
smi->sm->info_types[i].db_name) < 0)\n            goto out_free_attrs;\n        if (asprintf((char **)&sm_attr_info[tgt_idx].user_attr_name, \"%s.%s\",\n                     smi->instance_name, smi->sm->info_types[i].user_name) < 0)\n            goto out_free_attrs_db;\n        sm_attr_info[tgt_idx].def = &smi->sm->info_types[i];\n        sm_attr_info[tgt_idx].smi = smi;\n    }\n\n    return smi;\n out_free_attrs_db:\n    free((char **)sm_attr_info[sm_attr_count - sm->nb_info + i].db_attr_name);\n out_free_attrs:\n    /* start freeing from where we failed */\n    for (i--; i >= 0; i--) {\n        int tgt_idx = sm_attr_count - sm->nb_info + i;\n\n        free((char **)sm_attr_info[tgt_idx].db_attr_name);\n        free((char **)sm_attr_info[tgt_idx].user_attr_name);\n    }\n out_free:\n    if (smi) {\n        free(smi->user_name);\n        free(smi->db_field);\n        free(smi->instance_name);\n        free(smi);\n    }\n    return NULL;\n}\n\n/** get the Nth status manager instance */\nsm_instance_t *get_sm_instance(unsigned int n)\n{\n    if (n >= sm_inst_count)\n        return NULL;\n    else\n        return sm_inst[n];\n}\n\n/** get the constant string that match the input string */\nconst char *get_status_str(const status_manager_t *sm, const char *in_str)\n{\n    int i;\n\n    if (in_str == NULL || EMPTY_STRING(in_str))\n        return NULL;\n\n    for (i = 0; i < sm->status_count; i++) {\n        if (!strcmp(sm->status_enum[i], in_str))\n            return sm->status_enum[i];\n    }\n    /* not found */\n    return NULL;\n}\n\n/** return the list of allowed status for a status manager */\nchar *allowed_status_str(const status_manager_t *sm, char *buf, int sz)\n{\n    int i;\n    char *cur = buf;\n    buf[0] = '\\0';\n\n    rh_strncpy(cur, \"\\\"\\\"(empty)\", sz);\n    cur += strlen(cur);\n\n    for (i = 0; i < sm->status_count; i++) {\n        if (!EMPTY_STRING(buf)) {\n            *cur = ',';\n            cur++;\n        }\n        rh_strncpy(cur, 
sm->status_enum[i], sz - (ptrdiff_t) (cur - buf));\n        cur += strlen(cur);\n    }\n    return buf;\n}\n\n#ifdef HAVE_CHANGELOGS\nint run_all_cl_cb(const CL_REC_TYPE *logrec, const entry_id_t *id,\n                  const attr_set_t *attrs, attr_set_t *refreshed_attrs,\n                  attr_mask_t *status_need, uint32_t status_mask,\n                  proc_action_e *rec_action)\n{\n    int rc, err_max = 0;\n    int i = 0;\n    sm_instance_t *smi;\n\n    *rec_action = PROC_ACT_NONE;\n\n    for (i = 0, smi = get_sm_instance(i); smi != NULL;\n         i++, smi = get_sm_instance(i)) {\n        bool getstatus = false;\n        proc_action_e curr_action = PROC_ACT_NONE;\n\n        if (smi->sm->changelog_cb == NULL)\n            continue;\n\n        /* entry not in policy scope */\n        if ((SMI_MASK(i) & status_mask) == 0)\n            continue;\n\n        rc = smi->sm->changelog_cb(smi, logrec, id, attrs, refreshed_attrs,\n                                   &getstatus, &curr_action);\n        DisplayLog(LVL_DEBUG, __func__, \"changelog callback for \"\n                   \"status manager '%s' => rc=%d, action=%d\",\n                   smi->instance_name, rc, curr_action);\n\n        if (err_max == 0 || rc > err_max)\n            err_max = rc;\n\n        if (rc == 0) {\n            if (getstatus)\n                status_need->status |= SMI_MASK(i);\n\n            /* keep the action with the highest priority */\n            if (curr_action > *rec_action)\n                *rec_action = curr_action;\n        }\n    }\n    return err_max;\n}\n#endif\n\n/** When an entry is deleted, this function indicates what action is to be taken\n * by querying all status manager (remove from DB, move to softrm, ...)\n */\nproc_action_e match_all_softrm_filters(const entry_id_t *id,\n                                       const attr_set_t *attrs)\n{\n    int i = 0;\n    proc_action_e pa = PROC_ACT_RM_ALL; /* default is rm */\n    sm_instance_t *smi;\n\n    while ((smi = 
get_sm_instance(i)) != NULL) {\n        if (smi_manage_deleted(smi) && smi->sm->softrm_filter_func != NULL) {\n            proc_action_e curr_pa;\n\n            curr_pa = smi->sm->softrm_filter_func(smi, id, attrs);\n\n            /* keep the action with the highest priority */\n            if (curr_pa > pa)\n                pa = curr_pa;\n        }\n        i++;\n    }\n    return pa;\n}\n\n/** set status and attribute masks of status manager instances,\n * once they are all loaded */\nvoid smi_update_masks(void)\n{\n#define MASK_TAG \"smi_masks\"\n    int i = 0;\n    sm_instance_t *smi;\n\n    for (i = 0, smi = get_sm_instance(i); smi != NULL;\n         i++, smi = get_sm_instance(i)) {\n        /* now that all smi are loaded sm_inst_count is known.\n         * so we can compute the real attribute masks */\n        smi->status_mask_fresh =\n            actual_mask(smi, smi->sm->status_needs_attrs_fresh);\n        smi->status_mask_cached =\n            actual_mask(smi, smi->sm->status_needs_attrs_cached);\n\n        if (smi->sm->flags & SM_DELETED) {\n            smi->softrm_table_mask =\n                actual_mask(smi, smi->sm->softrm_table_mask);\n            smi->softrm_filter_mask =\n                actual_mask(smi, smi->sm->softrm_filter_mask);\n        }\n    }\n}\n\n/** initialize all status managers having init function */\nint smi_init_all(run_flags_t flags)\n{\n#define INIT_TAG \"smi_init\"\n    int rc;\n    int i = 0;\n    sm_instance_t *smi;\n\n    for (i = 0, smi = get_sm_instance(i); smi != NULL;\n         i++, smi = get_sm_instance(i)) {\n        if (smi->sm->init_func == NULL)\n            continue;\n\n        rc = smi->sm->init_func(smi, flags);\n        if (rc != 0) {\n            DisplayLog(LVL_CRIT, INIT_TAG,\n                       \"Failed to initialize status manager %s: error=%d\",\n                       smi->instance_name, rc);\n            return rc;\n        } else\n            DisplayLog(LVL_VERB, INIT_TAG,\n                       
\"Status manager %s successfully initialized\",\n                       smi->instance_name);\n    }\n\n    return 0;\n}\n\nstatic void *smi_cfg_new(void)\n{\n    void **smi_cfg_tab;\n    sm_instance_t *smi;\n    int i;\n\n    smi_cfg_tab = calloc(sm_inst_count, sizeof(void *));\n    if (smi_cfg_tab == NULL)\n        return NULL;\n\n    for (i = 0, smi = get_sm_instance(i); smi != NULL;\n         i++, smi = get_sm_instance(i)) {\n        if (smi->sm->cfg_funcs == NULL || smi->sm->cfg_funcs->new == NULL) {\n            smi_cfg_tab[i] = NULL;\n            continue;\n        }\n\n        smi_cfg_tab[i] = smi->sm->cfg_funcs->new();\n        if (smi_cfg_tab[i] == NULL)\n            goto reverse_free;\n    }\n\n    return smi_cfg_tab;\n\n reverse_free:\n    /* allocation failed for last 'i' (do nothing if it was the first) */\n    while (i != 0) {\n        i--;\n        smi = get_sm_instance(i);\n        if (smi->sm->cfg_funcs == NULL || smi->sm->cfg_funcs->free == NULL) {\n            smi_cfg_tab[i] = NULL;\n            continue;\n        }\n        smi->sm->cfg_funcs->free(smi_cfg_tab[i]);\n        smi_cfg_tab[i] = NULL;\n    }\n    free(smi_cfg_tab);\n\n    return NULL;\n}\n\nstatic void smi_cfg_free(void *arg)\n{\n    void **smi_cfg_tab = arg;\n    sm_instance_t *smi;\n    int i;\n\n    for (i = 0, smi = get_sm_instance(i); smi != NULL;\n         i++, smi = get_sm_instance(i)) {\n        if (smi->sm->cfg_funcs != NULL && smi->sm->cfg_funcs->free != NULL)\n            smi->sm->cfg_funcs->free(smi_cfg_tab[i]);\n        smi_cfg_tab[i] = NULL;\n    }\n    free(smi_cfg_tab);\n}\n\nstatic void smi_cfg_set_default(void *arg)\n{\n    void **smi_cfg_tab = arg;\n    sm_instance_t *smi;\n    int i;\n\n    for (i = 0, smi = get_sm_instance(i); smi != NULL;\n         i++, smi = get_sm_instance(i)) {\n        if (smi->sm->cfg_funcs != NULL\n            && smi->sm->cfg_funcs->set_default != NULL)\n            smi->sm->cfg_funcs->set_default(smi_cfg_tab[i]);\n    }\n}\n\nstatic int 
smi_cfg_read(config_file_t config, void *cfg, char *msg_out)\n{\n    void **smi_cfg_tab = cfg;\n    sm_instance_t *smi;\n    int i, rc;\n\n    for (i = 0, smi = get_sm_instance(i); smi != NULL;\n         i++, smi = get_sm_instance(i)) {\n        if (smi->sm->cfg_funcs == NULL || smi->sm->cfg_funcs->read == NULL)\n            continue;\n\n        DisplayLog(LVL_DEBUG, \"smi_cfg\", \"Loading status manager '%s' config\",\n                   smi->instance_name);\n\n        rc = smi->sm->cfg_funcs->read(config, smi_cfg_tab[i], msg_out);\n        if (rc != 0)\n            return rc;\n    }\n    return 0;\n}\n\nstatic int smi_cfg_set(void *cfg, bool reload)\n{\n    void **smi_cfg_tab = cfg;\n    sm_instance_t *smi;\n    int i, rc;\n\n    for (i = 0, smi = get_sm_instance(i); smi != NULL;\n         i++, smi = get_sm_instance(i)) {\n        if (smi->sm->cfg_funcs == NULL\n            || smi->sm->cfg_funcs->set_config == NULL)\n            continue;\n\n        rc = smi->sm->cfg_funcs->set_config(smi_cfg_tab[i], reload);\n        if (rc != 0)\n            return rc;\n    }\n    return 0;\n}\n\nstatic void smi_cfg_write_default(FILE *f)\n{\n    sm_instance_t *smi;\n    int i;\n\n    for (i = 0, smi = get_sm_instance(i); smi != NULL;\n         i++, smi = get_sm_instance(i)) {\n        if (smi->sm->cfg_funcs != NULL\n            && smi->sm->cfg_funcs->write_default != NULL)\n            smi->sm->cfg_funcs->write_default(f);\n    }\n}\n\nstatic void smi_cfg_write_template(FILE *f)\n{\n    sm_instance_t *smi;\n    int i;\n\n    for (i = 0, smi = get_sm_instance(i); smi != NULL;\n         i++, smi = get_sm_instance(i)) {\n        if (smi->sm->cfg_funcs != NULL\n            && smi->sm->cfg_funcs->write_template != NULL)\n            smi->sm->cfg_funcs->write_template(f);\n    }\n}\n\n/** wraps config handlers for all status managers */\nmod_cfg_funcs_t smi_cfg_hdlr = {\n    .module_name = \"status managers\",\n    .new = smi_cfg_new,\n    .free = smi_cfg_free,\n    .set_default = 
smi_cfg_set_default,\n    .read = smi_cfg_read,\n    .set_config = smi_cfg_set,\n    .write_default = smi_cfg_write_default,\n    .write_template = smi_cfg_write_template,\n};\n"
  },
  {
    "path": "src/robinhood/Makefile.am",
    "content": "AM_CFLAGS= $(CC_OPT) $(DB_CFLAGS) $(PURPOSE_CFLAGS)\nAM_LDFLAGS= -lpthread -rpath $(pkglibdir)\n\n#if LUSTRE\n#LVERSION=`rpm -qa \"lustre[-_]modules*\" --qf \"%{Version}\\n\" | tail -1`\n#FS_CFLAGS=-DLUSTRE_VERSION=\\\"$(LVERSION)\\\"\n#endif\n\nDATE=`date --utc --date @$${SOURCE_DATE_EPOCH:-$$(date +%s)} '+%F %T'`\nMISC_FLAGS=\"-DCOMPIL_DATE=\\\"$(DATE)\\\"\"\n\nnoinst_LTLIBRARIES=librbhhelpers.la\nlibrbhhelpers_la_SOURCES=cmd_helpers.h cmd_helpers.c\nlibrbhhelpers_la_LDFLAGS=-static\n\nall_libs=   ../cfg_parsing/librbhcfg.la         \\\n            ../fs_scan/libfsscan.la             \\\n            ../entry_processor/libentryproc.la  \\\n            ../policies/libpolicies.la\n\nif CHANGELOGS\nall_libs += ../chglog_reader/libchglog_rd.la\nendif\n\nall_libs += ./librbhhelpers.la ../list_mgr/liblistmgr.la \\\n            ../common/libcommontools.la ../cfg_parsing/libconfigparsing.la\n\n#sbin_PROGRAMS=robinhood rbh-report rbh-diff rbh-recov rbh-undelete rbh-import rbh-rebind\nsbin_PROGRAMS=robinhood rbh-report rbh-diff rbh-undelete rbh-rebind\nbin_PROGRAMS=rbh-find rbh-du\n\n# dependencies:\nrobinhood_DEPENDENCIES=$(all_libs)\nrbh_report_DEPENDENCIES=$(all_libs)\nrbh_du_DEPENDENCIES=$(all_libs)\nrbh_find_DEPENDENCIES=$(all_libs)\nrbh_diff_DEPENDENCIES=$(all_libs)\n#rbh_recov_DEPENDENCIES=$(all_libs)\nrbh_undelete_DEPENDENCIES=$(all_libs)\n#rbh_import_DEPENDENCIES=$(all_libs)\nrbh_rebind_DEPENDENCIES=$(all_libs)\n#\nrobinhood_SOURCES=rbh_daemon.c\nrobinhood_CFLAGS=$(AM_CFLAGS) $(FS_CFLAGS) $(MISC_FLAGS)\nrobinhood_LDFLAGS=-rdynamic $(all_libs) $(DB_LDFLAGS) $(FS_LDFLAGS) $(PURPOSE_LDFLAGS) $(AM_LDFLAGS)\n\nrbh_report_SOURCES=rbh_report.c\nrbh_report_CFLAGS=$(AM_CFLAGS) $(FS_CFLAGS) $(MISC_FLAGS)\nrbh_report_LDFLAGS=-rdynamic $(all_libs) $(DB_LDFLAGS) $(FS_LDFLAGS) $(PURPOSE_LDFLAGS) $(AM_LDFLAGS)\n\nrbh_find_SOURCES=rbh_find.c rbh_find_printf.c rbh_find.h\nrbh_find_CFLAGS=$(AM_CFLAGS) $(FS_CFLAGS) $(MISC_FLAGS)\nrbh_find_LDFLAGS=-rdynamic $(all_libs) 
$(DB_LDFLAGS) $(FS_LDFLAGS) $(PURPOSE_LDFLAGS) $(AM_LDFLAGS)\n\nrbh_du_SOURCES=rbh_du.c\nrbh_du_CFLAGS=$(AM_CFLAGS) $(FS_CFLAGS) $(MISC_FLAGS)\nrbh_du_LDFLAGS=-rdynamic $(all_libs) $(DB_LDFLAGS) $(FS_LDFLAGS) $(PURPOSE_LDFLAGS) $(AM_LDFLAGS)\n\nrbh_diff_SOURCES=rbh_diff.c\nrbh_diff_CFLAGS=$(AM_CFLAGS) $(FS_CFLAGS) $(MISC_FLAGS)\nrbh_diff_LDFLAGS=-rdynamic $(all_libs) $(DB_LDFLAGS) $(FS_LDFLAGS) $(PURPOSE_LDFLAGS) $(AM_LDFLAGS)\n\n#rbh_recov_SOURCES=rbh_recov.c\n#rbh_recov_CFLAGS=$(AM_CFLAGS) $(FS_CFLAGS) $(MISC_FLAGS)\n#rbh_recov_LDFLAGS=-rdynamic $(all_libs) $(DB_LDFLAGS) $(FS_LDFLAGS) $(PURPOSE_LDFLAGS) $(AM_LDFLAGS)\n#\nrbh_undelete_SOURCES=rbh_undelete.c\nrbh_undelete_CFLAGS=$(AM_CFLAGS) $(FS_CFLAGS) $(MISC_FLAGS)\nrbh_undelete_LDFLAGS=-rdynamic $(all_libs) $(DB_LDFLAGS) $(FS_LDFLAGS) $(PURPOSE_LDFLAGS) $(AM_LDFLAGS)\n#\n#rbh_import_SOURCES=rbh_import.c\n#rbh_import_CFLAGS=$(AM_CFLAGS) $(FS_CFLAGS) $(MISC_FLAGS)\n#rbh_import_LDFLAGS=-rdynamic $(all_libs) $(DB_LDFLAGS) $(FS_LDFLAGS) $(PURPOSE_LDFLAGS) $(AM_LDFLAGS)\n#\nrbh_rebind_SOURCES=rbh_rebind.c\nrbh_rebind_CFLAGS=$(AM_CFLAGS) $(FS_CFLAGS) $(MISC_FLAGS)\nrbh_rebind_LDFLAGS=-rdynamic $(all_libs) $(DB_LDFLAGS) $(FS_LDFLAGS) $(PURPOSE_LDFLAGS) $(AM_LDFLAGS)\n\nnew: clean all\n\nindent:\n\t$(top_srcdir)/scripts/indent.sh\n"
  },
  {
    "path": "src/robinhood/cmd_helpers.c",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2009, 2010 CEA/DAM\n * Copyright 2013 Cray Inc. All Rights Reserved.\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n/**\n * Command for recovering filesystem content after a disaster (backup flavor)\n */\n\n#ifdef HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n\n#include <glib.h>\n#include <unistd.h>\n\n#include \"uidgidcache.h\"\n#include \"cmd_helpers.h\"\n#include \"rbh_cfg.h\"\n#include \"rbh_logs.h\"\n#include \"rbh_misc.h\"\n#include \"Memory.h\"\n#include \"xplatform_print.h\"\n#include \"status_manager.h\"\n\n#define SCRUB_TAG \"Scrubber\"\n#define P2ID_TAG \"Path2Id\"\n\n/* Initially empty array. This is a LIFO array; oldest elements are\n * stacked from the last entry to the first. When element 0 is\n * occupied, it is time to increase the size of the array. */\nstatic wagon_t *dir_array = NULL;\nstatic unsigned int array_len;   /* number of elements in array. */\nstatic unsigned int array_first; /* index of first valid element in array. */\n#define array_used (array_len-array_first)\n\n#define LS_CHUNK    1  /* TODO - ListMgr_GetChild will get confused if not 1 */\n\nstatic size_t what_2_power(size_t s)\n{\n    size_t c = 1;\n    while (c < s)\n        c <<= 1;\n    return c;\n}\n\n/* Copy the ids and names array. 
*/\nstatic void copy_arrays(const wagon_t *src,\n                        wagon_t *dest, int dst_first, int count)\n{\n    int src_first = 0;\n\n    while (count) {\n        dest[dst_first].id = src[src_first].id;\n        dest[dst_first].fullname = strdup(src[src_first].fullname);\n\n        src_first++;\n        dst_first++;\n        count--;\n    }\n}\n\n/** add a list of ids to the scrubbing array */\nstatic int add_id_list(const wagon_t *list, unsigned int count)\n{\n    /* always add at the beginning to have LIFO behavior */\n\n    /* is there enough room before the first item ? */\n    if (count <= array_first) {\n        /* copy it just before the head (entries must be consecutive) */\n        copy_arrays(list, dir_array, array_first - count, count);\n\n        array_first -= count;\n\n#ifdef _DEBUG_ID_LIST\n        printf(\"1)...<new_ids:%u-%u><ids:%u-%u>...(len=%u)\\n\",\n               array_first, array_first + count - 1,\n               array_first + count, array_len - 1, array_len);\n#endif\n    }\n    /* is the array empty ? 
*/\n    else if ((array_used == 0) && (count <= array_len)) {\n        /* copy from the beginning */\n        copy_arrays(list, dir_array, array_len - count, count);\n        array_first = array_len - count;\n\n#ifdef _DEBUG_ID_LIST\n        printf(\"2) <new_ids:%u-%u>...(len=%u)\\n\",\n               array_first, array_len - 1, array_len);\n#endif\n    } else {    /* increase array size */\n\n        wagon_t *dir_array_new;\n        size_t new_len = what_2_power(array_len + count);\n\n        dir_array_new = MemAlloc(new_len * sizeof(wagon_t));\n        if (!dir_array_new)\n            return -ENOMEM;\n\n        /* First, transfer current ids and names */\n        if (dir_array) {\n            if (array_used)\n                memcpy(&dir_array_new[new_len - array_used],\n                       &dir_array[array_first], array_used * sizeof(wagon_t));\n            MemFree(dir_array);\n        }\n\n        /* update array info */\n        dir_array = dir_array_new;\n        array_first = new_len - array_used;\n        array_len = new_len;\n\n        /* Then copy new ids */\n        copy_arrays(list, dir_array, array_first - count, count);\n        array_first -= count;\n\n#ifdef _DEBUG_ID_LIST\n        printf(\"3)...<ids:%u-%u>...(len=%u)\\n\",\n               array_first, array_len - 1, array_len);\n#endif\n    }\n    return 0;\n}\n\n/** release a list of ids from the array */\nstatic inline void rbh_scrub_release_list(unsigned int first,\n                                          unsigned int count)\n{\n    free_wagon(dir_array, first, first + count);\n\n    if (first != array_first)\n        DisplayLog(LVL_CRIT, SCRUB_TAG,\n                   \"IMPLEMENTATION ISSUE: array_first was %u, is now %u\\n\",\n                   first, array_first);\n    array_first += count;\n\n#ifdef _DEBUG_ID_LIST\n    printf(\"released %u-%u\\n\", array_first - count, array_first - 1);\n#endif\n}\n\n/** scan sets of directories\n * \\param cb_func, callback function for each set of 
directory\n */\nint rbh_scrub(lmgr_t *p_mgr, const wagon_t *id_list,\n              unsigned int id_count, attr_mask_t dir_attr_mask,\n              scrub_callback_t cb_func, void *arg)\n{\n    wagon_t *curr_array;\n    unsigned int count;\n    lmgr_filter_t filter;\n    filter_value_t fv;\n    int i, rc;\n    int last_err = 0;\n\n    rc = add_id_list(id_list, id_count);\n    if (rc)\n        return rc;\n\n    /* only get subdirs (for scanning) */\n    fv.value.val_str = STR_TYPE_DIR;\n    lmgr_simple_filter_init(&filter);\n    lmgr_simple_filter_add(&filter, ATTR_INDEX_type, EQUAL, fv, 0);\n\n    /* while the array is not empty */\n    while (array_used > 0) {\n        unsigned int res_count = 0;\n        wagon_t *child_ids;\n        attr_set_t *child_attrs;\n\n        /* get a set of entry_ids */\n        curr_array = &dir_array[array_first];\n        if (array_used < LS_CHUNK) {\n            /* get all available dirs */\n            count = array_used;\n        } else {\n            /* get a constant chunk */\n            count = LS_CHUNK;\n        }\n\n#ifdef _DEBUG_ID_LIST\n        printf(\"processing %u-%u\\n\", array_first, array_first + count - 1);\n#endif\n\n        /* read childs */\n        res_count = 0;\n        child_ids = NULL;\n        child_attrs = NULL;\n\n        rc = ListMgr_GetChild(p_mgr, &filter, curr_array, count, dir_attr_mask,\n                              &child_ids, &child_attrs, &res_count);\n\n        if (rc) {\n            DisplayLog(LVL_CRIT, SCRUB_TAG,\n                       \"ListMgr_GetChild() terminated with error %d\", rc);\n            /* @TODO free allocated resources */\n            break;\n        }\n\n        /* Call the callback func for each listed dir */\n        rc = cb_func(child_ids, child_attrs, res_count, arg);\n        if (rc)\n            /* XXX break the scan? 
*/\n            last_err = rc;\n\n        /* can release the list of input ids */\n        rbh_scrub_release_list(array_first, count);\n\n        /* copy entry ids before freeing them */\n        /* TODO: we could transfer the pathname instead of strdup() them. */\n        add_id_list(child_ids, res_count);\n\n        /* attributes no longer needed */\n        /* release attrs */\n        if (child_attrs) {\n            for (i = 0; i < res_count; i++)\n                ListMgr_FreeAttrs(&child_attrs[i]);\n            MemFree(child_attrs);\n            child_attrs = NULL;\n        }\n\n        /* free the returned id array */\n        if (child_ids) {\n            free_wagon(child_ids, 0, res_count);\n            MemFree(child_ids);\n            child_ids = NULL;\n        }\n    }\n    lmgr_simple_filter_free(&filter);\n\n    return last_err;\n}\n\nint Path2Id(const char *path, entry_id_t *id)\n{\n    int rc;\n    unsigned int len;\n    char rpath[RBH_PATH_MAX];\n    const char *mnt;\n    char *tmp_path;\n\n    mnt = get_mount_point(&len);\n    tmp_path = realpath(path, NULL);\n\n    if (tmp_path == NULL) {\n        rc = -errno;\n        DisplayLog(LVL_CRIT, P2ID_TAG, \"Error in realpath(%s): %s\",\n                   path, strerror(-rc));\n        return rc;\n    }\n    if (strlen(tmp_path) >= RBH_PATH_MAX) {\n        free(tmp_path);\n        DisplayLog(LVL_CRIT, P2ID_TAG, \"Path length is too long!\");\n        return -ENAMETOOLONG;\n    }\n    /* safe because of previous check */\n    strcpy(rpath, tmp_path);\n    /* now can release tmp path */\n    free(tmp_path);\n\n    /* check that path is under FS root */\n    if (strncmp(mnt, rpath, len)) {\n        /* if path differs from realpath, display both */\n        if (strcmp(path, rpath))\n            DisplayLog(LVL_CRIT, P2ID_TAG,\n                       \"Error: %s (%s) is not under filesystem root %s\", path,\n                       rpath, mnt);\n        else\n            DisplayLog(LVL_CRIT, P2ID_TAG,\n         
              \"Error: %s is not under filesystem root %s\", path, mnt);\n        return -EINVAL;\n    }\n\n    rc = path2id(path, id, NULL);\n    return rc;\n}\n\nstruct __diffattr {\n    attr_mask_t mask;   /* 0 for last */\n    char *name; /* NULL for last */\n    int negate; /* negate the given mask */\n} diffattrs[] = {\n    {{.std = ATTR_MASK_fullpath | ATTR_MASK_parent_id | ATTR_MASK_name},\n      \"path\", 0},\n    {{.std = POSIX_ATTR_MASK | ATTR_MASK_link}, \"posix\", 0},\n#ifdef _LUSTRE\n    {{.std = ATTR_MASK_stripe_info | ATTR_MASK_stripe_items}, \"stripe\", 0},\n#endif\n    {{.std = ATTR_MASK_fullpath | ATTR_MASK_name | ATTR_MASK_parent_id\n#ifdef _LUSTRE\n                | ATTR_MASK_stripe_info | ATTR_MASK_stripe_items\n#endif\n                | POSIX_ATTR_MASK | ATTR_MASK_link | ATTR_MASK_creation_time,\n            .status = SMI_MASK(0), /* stands for all status */\n            .sm_info = GENERIC_INFO_BIT(0), /* stands for all policy specific\n                                                info */\n        },\"all\", 0},\n    {{.status = SMI_MASK(0)} /* stands for all status */ , \"status\", 0},\n    {{.std = ATTR_MASK_last_mod | ATTR_MASK_last_access\n             | ATTR_MASK_creation_time}, \"notimes\", 1},\n    {{.std = ATTR_MASK_last_access}, \"noatime\", 1},\n    {{0}, NULL, 0}\n};\n\n/* parse attrset for --diff option */\nint parse_diff_mask(const char *arg, attr_mask_t *diff_mask, char *msg)\n{\n    attr_mask_t mask_pos = null_mask;\n    attr_mask_t mask_neg = null_mask;\n    struct __diffattr *attr;\n    char buff[4096];\n    char *curr, *init;\n\n    /* tmp copy of argument */\n    rh_strncpy(buff, arg, 4096);\n    init = buff;\n\n    while ((curr = strtok(init, \",\")) != NULL) {\n        init = NULL;\n        int found = 0;\n        for (attr = diffattrs; attr->name != NULL; attr++) {\n            if (!strcasecmp(attr->name, curr)) {\n                found = 1;\n                if (attr->negate)\n                    mask_neg = 
attr_mask_or(&mask_neg, &attr->mask);\n                else\n                    mask_pos = attr_mask_or(&mask_pos, &attr->mask);\n            }\n        }\n        if (!found) {\n            sprintf(msg, \"invalid diff attr '%s'\", curr);\n            return -EINVAL;\n        }\n    }\n\n    *diff_mask = attr_mask_and_not(&mask_pos, &mask_neg);\n    return 0;\n}\n\nint parse_status_arg(const char *option, char *arg, char **p_st_name,\n                     char **p_st_val, bool mandatory_value)\n{\n    int   sn_len;\n    char *delim;\n\n    if (!arg) {\n        fprintf(stderr, \"Missing mandatory argument <status_name>%s for %s\\n\",\n                mandatory_value ? \":<status_value>\" : \"[:<status_value>]\",\n                option);\n        return EINVAL;\n    }\n\n    /* the expected argument is <status_name>:<status_value> */\n    delim = strchr(arg, ':');\n    if (delim == NULL && mandatory_value) {\n        fprintf(stderr, \"Invalid argument for %s: \"\n                \"<status_name>:<status_value> expected\\n\", option);\n        return EINVAL;\n    }\n    *p_st_name = arg;\n\n    if (delim != NULL) {\n        *delim = '\\0';\n        *p_st_val = delim + 1;\n    } else\n        *p_st_val = NULL;\n\n    if (EMPTY_STRING(*p_st_name)) {\n        fprintf(stderr, \"Invalid argument for %s: <status_name>%s expected\\n\",\n                option,\n                mandatory_value ? 
\":<status_value>\" : \"[:<status_value>]\");\n        return EINVAL;\n    }\n\n    /* if status_name ends with \"_status\", remove it */\n    sn_len = strlen(*p_st_name);\n    if ((sn_len > 7) && !strcmp((*p_st_name) + sn_len - 7, \"_status\"))\n        *((char *)((*p_st_name) + sn_len - 7)) = '\\0';\n\n    return 0;\n}\n\nint check_status_args(const char *status_name, const char *status_value,\n                      const char **str_val_new, sm_instance_t **p_smi)\n{\n    /* resolve the status name now, as config file has been parsed */\n    *str_val_new = status_value;\n    char buff[1024];\n\n    /* get status index by name */\n    *p_smi = smi_by_name(status_name);\n    if (*p_smi == NULL) {\n        int idx;\n        /* try with a policy name */\n        if (policy_exists(status_name, &idx)) {\n            (*p_smi) = policies.policy_list[idx].status_mgr;\n            if (*p_smi == NULL) {\n                fprintf(stderr, \"ERROR: policy '%s' doesn't manage status\\n\",\n                        status_name);\n                return -ENOENT;\n            }\n        } else {\n            fprintf(stderr,\n                    \"ERROR: status manager or policy '%s' is not defined\\n\",\n                    status_name);\n            return -EINVAL;\n        }\n    }\n\n    /* check status value */\n    if (status_value && !EMPTY_STRING(status_value)) {\n        *str_val_new = get_status_str((*p_smi)->sm, status_value);\n        if (*str_val_new == NULL) {\n            fprintf(stderr, \"ERROR: unexpected value for %s_status: '%s'.\"\n                    \" Expected values are: %s\\n\", status_name, status_value,\n                    allowed_status_str((*p_smi)->sm, buff, sizeof(buff)));\n            return -EINVAL;\n        }\n    }\n\n    return 0;\n}\n\nstatic const char *print_sm_attr(char *out, size_t out_sz, const void *pvalue,\n                                 cfg_param_type type, bool csv)\n{\n    switch (type) {\n    case PT_STRING:\n        return (char 
*)pvalue;\n\n    case PT_BOOL:\n        {\n            const bool *b = pvalue;\n            return bool2str(*b);\n        }\n    case PT_DURATION:\n        {\n            struct tm stm;\n            /* dates managed as 32bits */\n            time_t tt = *((unsigned int *)pvalue);\n            if (tt == 0)\n                return \"0\";\n            strftime(out, out_sz, \"%Y/%m/%d %T\", localtime_r(&tt, &stm));\n            return out;\n        }\n    case PT_SIZE:\n        {\n            const uint64_t *s = pvalue;\n            if (csv)\n                snprintf(out, out_sz, \"%\" PRIu64, *s);\n            else\n                FormatFileSize(out, out_sz, *s);\n            return out;\n        }\n    case PT_INT:\n        {\n            const int32_t *i = pvalue;\n            snprintf(out, out_sz, \"%d\", *i);\n            return out;\n        }\n    case PT_INT64:\n        {\n            const int64_t *l = pvalue;\n            snprintf(out, out_sz, \"%\" PRId64, *l);\n            return out;\n        }\n    case PT_FLOAT:\n        {\n            /* no such type in DB */\n            const float *f = pvalue;\n            snprintf(out, out_sz, \"%.2f\", *f);\n            return out;\n        }\n    default:\n        return \"unknown/unhandled type\";\n    }\n}\n\n/** print an attribute from attrs structure */\nconst char *attr2str(attr_set_t *attrs, const entry_id_t *id,\n                     unsigned int attr_index, int csv, name_func name_resolver,\n                     char *out, size_t out_sz)\n{\n    time_t tt;\n    struct tm stm;\n\n    /* if attr is not set in mask, print nothing */\n    if (attr_index != ATTR_INDEX_fullpath   /* specific case */\n        && attr_index != ATTR_INDEX_ID\n        && !attr_mask_test_index(&attrs->attr_mask, attr_index))\n        return \"\";\n\n    if (is_status(attr_index))\n        return STATUS_ATTR(attrs, attr2status_index(attr_index));\n    else if (is_sm_info(attr_index)) {\n        unsigned int idx = 
attr2sminfo_index(attr_index);\n\n        return print_sm_attr(out, out_sz, attrs->attr_values.sm_info[idx],\n                             sm_attr_info[idx].def->crit_type, csv);\n    }\n\n    switch (attr_index) {\n    case ATTR_INDEX_ID:\n        snprintf(out, out_sz, DFID, PFID(id));\n        return out;\n\n    case ATTR_INDEX_fullpath:\n        if (ATTR_MASK_TEST(attrs, fullpath))\n            return ATTR(attrs, fullpath);\n        else if (name_resolver != NULL)\n            return name_resolver(id, attrs, out);\n        else\n            return \"n/a\";   /* TODO fid2path if possible? */\n    case ATTR_INDEX_avgsize:\n        if (csv)\n            snprintf(out, out_sz, \"%\" PRIu64, ATTR(attrs, avgsize));\n        else\n            FormatFileSize(out, out_sz, ATTR(attrs, avgsize));\n        return out;\n    case ATTR_INDEX_dircount:\n        snprintf(out, out_sz, \"%u\", ATTR(attrs, dircount));\n        return out;\n    case ATTR_INDEX_parent_id:\n        snprintf(out, out_sz, DFID, PFID(&ATTR(attrs, parent_id)));\n        return out;\n\n    case ATTR_INDEX_link:\n        return ATTR(attrs, link);\n\n    case ATTR_INDEX_type:\n        return ATTR(attrs, type);\n    case ATTR_INDEX_nlink:\n        snprintf(out, out_sz, \"%u\", ATTR(attrs, nlink));\n        return out;\n\n    case ATTR_INDEX_depth:\n        snprintf(out, out_sz, \"%u\", ATTR(attrs, depth));\n        return out;\n\n    case ATTR_INDEX_name:\n        return ATTR(attrs, name);\n\n    case ATTR_INDEX_mode:\n        if (csv)\n            snprintf(out, out_sz, \"%#03o\", ATTR(attrs, mode));\n        else {\n            memset(out, 0, out_sz);\n            mode_string(ATTR(attrs, mode), out);\n        }\n        return out;\n\n    case ATTR_INDEX_uid:\n        if (global_config.uid_gid_as_numbers) {\n            snprintf(out, out_sz, \"%d\", ATTR(attrs, uid).num);\n            return out;\n        } else {\n            return ATTR(attrs, uid).txt;\n        }\n\n    case ATTR_INDEX_gid:\n        if 
(global_config.uid_gid_as_numbers) {\n            snprintf(out, out_sz, \"%d\", ATTR(attrs, gid).num);\n            return out;\n        } else {\n            return ATTR(attrs, gid).txt;\n        }\n\n    case ATTR_INDEX_blocks:\n        if (csv)\n            snprintf(out, out_sz, \"%\" PRIu64, ATTR(attrs, blocks) * DEV_BSIZE);\n        else\n            FormatFileSize(out, out_sz, ATTR(attrs, blocks) * DEV_BSIZE);\n        return out;\n\n    case ATTR_INDEX_size:\n        if (csv)\n            snprintf(out, out_sz, \"%\" PRIu64, ATTR(attrs, size));\n        else\n            FormatFileSize(out, out_sz, ATTR(attrs, size));\n        return out;\n\n    case ATTR_INDEX_last_access:\n        tt = ATTR(attrs, last_access);\n        strftime(out, out_sz, \"%Y/%m/%d %T\", localtime_r(&tt, &stm));\n        return out;\n\n    case ATTR_INDEX_last_mod:\n        tt = ATTR(attrs, last_mod);\n        strftime(out, out_sz, \"%Y/%m/%d %T\", localtime_r(&tt, &stm));\n        return out;\n\n    case ATTR_INDEX_last_mdchange:\n        tt = ATTR(attrs, last_mdchange);\n        strftime(out, out_sz, \"%Y/%m/%d %T\", localtime_r(&tt, &stm));\n        return out;\n\n    case ATTR_INDEX_creation_time:\n        tt = ATTR(attrs, creation_time);\n        strftime(out, out_sz, \"%Y/%m/%d %T\", localtime_r(&tt, &stm));\n        return out;\n\n    case ATTR_INDEX_rm_time:\n        tt = ATTR(attrs, rm_time);\n        strftime(out, out_sz, \"%Y/%m/%d %T\", localtime_r(&tt, &stm));\n        return out;\n\n    case ATTR_INDEX_md_update:\n        tt = ATTR(attrs, md_update);\n        strftime(out, out_sz, \"%Y/%m/%d %T\", localtime_r(&tt, &stm));\n        return out;\n\n    case ATTR_INDEX_path_update:\n        tt = ATTR(attrs, path_update);\n        strftime(out, out_sz, \"%Y/%m/%d %T\", localtime_r(&tt, &stm));\n        return out;\n\n    case ATTR_INDEX_fileclass:\n        return ATTR(attrs, fileclass);\n\n    case ATTR_INDEX_class_update:\n        tt = ATTR(attrs, class_update);\n        
strftime(out, out_sz, \"%Y/%m/%d %T\", localtime_r(&tt, &stm));\n        return out;\n\n#ifdef ATTR_INDEX_invalid\n    case ATTR_INDEX_invalid:\n        return ATTR(attrs, invalid) ? \"yes\" : \"no\";\n#endif\n\n#ifdef _LUSTRE\n    case ATTR_INDEX_stripe_info:\n        if (csv)\n            snprintf(out, out_sz, \"%10u, %11\" PRIu64 \", %9s\",\n                     ATTR(attrs, stripe_info).stripe_count,\n                     ATTR(attrs, stripe_info).stripe_size,\n                     ATTR(attrs, stripe_info).pool_name);\n        else {\n            char tmp[128];\n            FormatFileSize(tmp, sizeof(tmp),\n                           ATTR(attrs, stripe_info).stripe_size);\n            sprintf(out, \"%10u, %11s, %9s\",\n                    ATTR(attrs, stripe_info).stripe_count, tmp,\n                    ATTR(attrs, stripe_info).pool_name);\n        }\n        return out;\n\n    case ATTR_INDEX_stripe_items:\n        {\n            GString *osts = g_string_new(\"\");\n\n            append_stripe_list(osts, &ATTR(attrs, stripe_items), csv);\n            rh_strncpy(out, osts->str, out_sz);\n            g_string_free(osts, TRUE);\n\n            return out;\n        }\n#endif\n    }\n    return \"?\";\n}\n\n/** display helper type */\ntypedef const char *(*result2str_func) (const db_value_t *val, bool csv,\n                                        char *out, size_t out_sz);\n\n/** display helper functions */\nstatic const char *print_res_status(const db_value_t *val, bool csv,\n                                    char *out, size_t out_sz)\n{\n    return status_format(val->value_u.val_str);\n}\n\nstatic const char *print_res_sm_info(const db_value_t *val, bool csv,\n                                     char *out, size_t out_sz)\n{\n    GString *gs = g_string_new(\"\");\n\n    ListMgr_PrintAttr(gs, val->type, &val->value_u, \"\");\n    rh_strncpy(out, gs->str, out_sz);\n    g_string_free(gs, TRUE);\n\n    return out;\n}\n\nstatic const char *print_res_class(const 
db_value_t *val, bool csv,\n                                   char *out, size_t out_sz)\n{\n    return class_format(val->value_u.val_str);\n}\n\nstatic const char *print_res_count(const db_value_t *val, bool csv,\n                                   char *out, size_t out_sz)\n{\n    snprintf(out, out_sz, \"%llu\", val->value_u.val_biguint);\n    return out;\n}\n\nstatic const char *print_res_int(const db_value_t *val, bool csv,\n                                 char *out, size_t out_sz)\n{\n    snprintf(out, out_sz, \"%d\", val->value_u.val_int);\n    return out;\n}\n\nstatic const char *print_res_string(const db_value_t *val, bool csv,\n                                    char *out, size_t out_sz)\n{\n    rh_strncpy(out, val->value_u.val_str, out_sz);\n    return out;\n}\n\nstatic const char *print_res_size(const db_value_t *val, bool csv,\n                                  char *out, size_t out_sz)\n{\n    if (csv)\n        snprintf(out, out_sz, \"%llu\", val->value_u.val_biguint);\n    else\n        FormatFileSize(out, out_sz, val->value_u.val_biguint);\n\n    return out;\n}\n\nstatic const char *print_res_space(const db_value_t *val, bool csv,\n                                   char *out, size_t out_sz)\n{\n    if (csv)\n        snprintf(out, out_sz, \"%llu\", val->value_u.val_biguint * DEV_BSIZE);\n    else\n        FormatFileSize(out, out_sz, val->value_u.val_biguint * DEV_BSIZE);\n\n    return out;\n}\n\nstatic const char *print_res_empty(const db_value_t *val, bool csv,\n                                   char *out, size_t out_sz)\n{\n    out[0] = '\\0';\n    return out;\n}\n\n/** attribute display specification for reports */\nstatic struct attr_display_spec {\n    int attr_index;\n    const char *name;\n    unsigned int length_csv;\n    unsigned int length_full;\n    result2str_func result2str;\n} attr[] = {\n        {ATTR_INDEX_fullpath,  \"path\", 40, 40},\n        {ATTR_INDEX_name,      \"name\", 10, 10},\n        {ATTR_INDEX_depth,     \"depth\", 3, 
3},\n        {ATTR_INDEX_dircount,  \"dircount\", 8, 8},\n        {ATTR_INDEX_type,      \"type\", 8, 8, print_res_string},\n        {ATTR_INDEX_mode,      \"mode\", 4, 6},\n        {ATTR_INDEX_nlink,     \"nlink\", 5, 5},\n        {ATTR_INDEX_parent_id, \"parent_id\", 20, 20},\n        {ATTR_INDEX_uid,       \"user\", 10, 10,  print_res_string},\n        {ATTR_INDEX_gid,       \"group\", 10, 10, print_res_string},\n        {ATTR_INDEX_projid,    \"projid\", 10, 10, print_res_int},\n        {ATTR_INDEX_link,      \"link\", 20, 20},\n        {ATTR_INDEX_fileclass, \"fileclass\", 30, 30, print_res_class},\n        /* times */\n        {ATTR_INDEX_last_access,   \"last_access\", 20, 20},\n        {ATTR_INDEX_last_mod,      \"last_mod\", 20, 20},\n        {ATTR_INDEX_last_mdchange, \"last_mdchange\", 20, 20},\n        {ATTR_INDEX_creation_time, \"creation\", 20, 20},\n        {ATTR_INDEX_rm_time,       \"rm_time\", 20, 20},\n        {ATTR_INDEX_md_update,     \"md updt\", 20, 20},\n        {ATTR_INDEX_path_update,   \"path updt\", 20, 20},\n        {ATTR_INDEX_class_update,  \"class updt\", 20, 20},\n        /* sizes */\n        /* 15 digits for 999To, 10 chars for 1024.21 GB */\n        {ATTR_INDEX_blocks,    \"spc_used\", 15, 10, print_res_space},\n        {ATTR_INDEX_avgsize,   \"avgsize\", 15, 10},\n        {ATTR_INDEX_size,      \"size\", 15, 10, print_res_size},\n\n#ifdef ATTR_INDEX_invalid\n        {ATTR_INDEX_invalid, \"invalid\", 3, 3}, /* yes/no */\n#endif\n#define STRIPE_TITLE \"stripe_cnt, stripe_size,      pool\"\n        {ATTR_INDEX_stripe_info,  STRIPE_TITLE, sizeof(STRIPE_TITLE),\n                                                sizeof(STRIPE_TITLE)},\n        {ATTR_INDEX_stripe_items, \"stripes\", 30, 30},\n        {ATTR_INDEX_COUNT,        \"count\", 10, 10, print_res_count},\n        {ATTR_INDEX_ID,           \"id\", 25, 25},\n\n        {0, NULL, 0, 0}, /* final element */\n};\n\nstatic inline struct attr_display_spec *attr_info(int index)\n{\n    int 
i;\n    static struct attr_display_spec tmp_rec = { -3, \"?\", 1, 1, NULL };\n    static bool init = false;\n\n    if (!init) {\n        init = true;\n\n        if (global_config.uid_gid_as_numbers) {\n            /* Change the function to print the UID/GID, as the\n             * argument is a number, not a string. */\n            for (i = 0; attr[i].name != NULL; i++)\n                if (attr[i].attr_index == ATTR_INDEX_uid ||\n                    attr[i].attr_index == ATTR_INDEX_gid)\n                    attr[i].result2str = print_res_int;\n        }\n    }\n\n    if (is_status(index)) {\n        /* build a special descriptor (/!\\ not reentrant) */\n        tmp_rec.attr_index = index;\n        tmp_rec.name = get_sm_instance(attr2status_index(index))->user_name;\n        tmp_rec.length_csv = tmp_rec.length_full = 15;\n        tmp_rec.result2str = print_res_status;\n        return &tmp_rec;\n    } else if (is_sm_info(index)) {\n        /* build a special descriptor (/!\\ not reentrant) */\n        tmp_rec.attr_index = index;\n        tmp_rec.name = sm_attr_info[attr2sminfo_index(index)].user_attr_name;\n        tmp_rec.length_csv = tmp_rec.length_full = 15;\n        tmp_rec.result2str = print_res_sm_info;\n        return &tmp_rec;\n    }\n\n    for (i = 0; attr[i].name != NULL; i++)\n        if (attr[i].attr_index == index)\n            return &attr[i];\n\n    tmp_rec.attr_index = index;\n    tmp_rec.result2str = print_res_empty;\n    return &tmp_rec;\n}\n\nstatic inline int rec_len(struct attr_display_spec *rec, bool csv)\n{\n    return csv ? 
rec->length_csv : rec->length_full;\n}\n\nconst char *attrindex2name(unsigned int index)\n{\n    int i;\n\n    if (is_status(index))\n        return get_sm_instance(attr2status_index(index))->user_name;\n    else if (is_sm_info(index))\n        return sm_attr_info[attr2sminfo_index(index)].user_attr_name;\n\n    for (i = 0; attr[i].name != NULL; i++)\n        if (attr[i].attr_index == index)\n            return attr[i].name;\n\n    return \"?\";\n}\n\nunsigned int attrindex2len(unsigned int index, int csv)\n{\n    int i;\n\n    if (is_status(index))\n        return 15;\n    else if (is_sm_info(index))\n        return 30;\n\n    for (i = 0; attr[i].name != NULL; i++)\n        if (attr[i].attr_index == index)\n            return csv ? attr[i].length_csv : attr[i].length_full;\n\n    return 1;   /* for '?' */\n}\n\n#define PROF_CNT_LEN     8\n#define PROF_RATIO_LEN   7\n\n/** standard attribute display for reports */\nvoid print_attr_list_custom(int rank_field, unsigned int *attr_list,\n                            int attr_count, profile_field_descr_t *p_profile,\n                            bool csv, const char *custom_title, int custom_len)\n{\n    int i;\n    int coma = 0;\n    struct attr_display_spec *rec;\n\n    if (rank_field) {\n        printf(\"rank\");\n        coma = 1;\n    }\n    for (i = 0; i < attr_count; i++) {\n        rec = attr_info(attr_list[i]);\n        if (coma)\n            printf(\", %*s\", rec_len(rec, csv), rec->name);\n        else {\n            printf(\"%*s\", rec_len(rec, csv), rec->name);\n            coma = 1;\n        }\n    }\n    if (p_profile) {\n        if (p_profile->attr_index == ATTR_INDEX_size) {\n            for (i = 0; i < SZ_PROFIL_COUNT; i++) {\n                if (coma)\n                    printf(\", %*s\", PROF_CNT_LEN, size_range[i].title);\n                else {\n                    printf(\"%*s\", PROF_CNT_LEN, size_range[i].title);\n                    coma = 1;\n                }\n            }\n            if 
(p_profile->range_ratio_len > 0) {\n                char tmp[128];\n                char tmp1[40];\n                char tmp2[40];\n                if (p_profile->range_ratio_start + p_profile->range_ratio_len ==\n                    SZ_PROFIL_COUNT)\n                    sprintf(tmp, \"ratio(%s..inf)\",\n                            print_brief_sz(SZ_MIN_BY_INDEX\n                                           (p_profile->range_ratio_start),\n                                           tmp1));\n                else\n                    sprintf(tmp, \"ratio(%s..%s-)\",\n                            print_brief_sz(SZ_MIN_BY_INDEX\n                                           (p_profile->range_ratio_start),\n                                           tmp1),\n                            print_brief_sz(SZ_MIN_BY_INDEX\n                                           (p_profile->range_ratio_start +\n                                            p_profile->range_ratio_len), tmp2));\n\n                printf(\", %*s\", PROF_RATIO_LEN, tmp);\n            }\n        }\n    }\n    if (custom_title) {\n        if (coma)\n            printf(\", %*s\", custom_len, custom_title);\n        else\n            printf(\"%*s\", custom_len, custom_title);\n    }\n    printf(\"\\n\");\n}\n\nvoid print_attr_values_custom(int rank, unsigned int *attr_list, int attr_count,\n                              attr_set_t *attrs, const entry_id_t *id,\n                              bool csv, name_func name_resolver,\n                              const char *custom, int custom_len)\n{\n    int i, coma = 0;\n    char str[24576];\n    struct attr_display_spec *rec;\n\n    if (rank) {\n        printf(\"%4d\", rank);\n        coma = 1;\n    }\n\n    for (i = 0; i < attr_count; i++) {\n        rec = attr_info(attr_list[i]);\n        if (coma)\n            printf(\", %*s\", rec_len(rec, csv),\n                   attr2str(attrs, id, attr_list[i], csv, name_resolver, str,\n                            sizeof(str)));\n      
  else {\n            printf(\"%*s\", rec_len(rec, csv),\n                   attr2str(attrs, id, attr_list[i], csv, name_resolver, str,\n                            sizeof(str)));\n            coma = 1;\n        }\n    }\n    if (custom) {\n        if (coma)\n            printf(\", %*s\", custom_len, custom);\n        else\n            printf(\"%*s\", custom_len, custom);\n    }\n    printf(\"\\n\");\n}\n\n/* return attr name to be displayed */\nstatic inline const char *attrdesc2name(const report_field_descr_t *desc,\n                                        struct attr_display_spec *rec)\n{\n    switch (desc->attr_index) {\n    case ATTR_INDEX_COUNT:\n        if (desc->report_type == REPORT_COUNT)\n            return \"count\";\n        break;\n    case ATTR_INDEX_size:\n        if (desc->report_type == REPORT_MIN)\n            return \"min_size\";\n        else if (desc->report_type == REPORT_MAX)\n            return \"max_size\";\n        else if (desc->report_type == REPORT_AVG)\n            return \"avg_size\";\n        else if (desc->report_type == REPORT_SUM)\n            return \"volume\";\n        else\n            return \"size\";\n        /*default: */\n    }\n    return rec->name;\n}\n\n/**\n * Generic function to display a report\n */\nvoid display_report(const report_field_descr_t *descr,\n                    unsigned int field_count, const db_value_t *result,\n                    unsigned int result_count,\n                    const profile_field_descr_t *prof_descr,\n                    profile_u *p_prof, bool csv, bool header, int rank)\n{\n    unsigned int i;\n    struct attr_display_spec *rec;\n\n    if (header) {\n        if (rank)\n            printf(\"rank, \");\n\n        rec = attr_info(descr[0].attr_index);\n        printf(\"%*s\", rec_len(rec, csv), attrdesc2name(&descr[0], rec));\n\n        for (i = 1; i < field_count && i < result_count; i++) {\n            if (!result || !DB_IS_NULL(&result[i])) {\n                rec = 
attr_info(descr[i].attr_index);\n                printf(\", %*s\", rec_len(rec, csv),\n                       attrdesc2name(&descr[i], rec));\n            }\n        }\n        if (prof_descr) {\n            if (prof_descr->attr_index == ATTR_INDEX_size) {\n                for (i = 0; i < SZ_PROFIL_COUNT; i++)\n                    printf(\", %*s\", PROF_CNT_LEN, size_range[i].title);\n\n                if (prof_descr->range_ratio_len > 0) {\n                    char tmp[128];\n                    char tmp1[40];\n                    char tmp2[40];\n                    if (prof_descr->range_ratio_start +\n                        prof_descr->range_ratio_len == SZ_PROFIL_COUNT)\n                        sprintf(tmp, \"ratio(%s..inf)\",\n                                print_brief_sz(SZ_MIN_BY_INDEX\n                                               (prof_descr->range_ratio_start),\n                                               tmp1));\n                    else\n                        sprintf(tmp, \"ratio(%s..%s)\",\n                                print_brief_sz(SZ_MIN_BY_INDEX\n                                               (prof_descr->range_ratio_start),\n                                               tmp1),\n                                print_brief_sz(SZ_MIN_BY_INDEX\n                                               (prof_descr->range_ratio_start +\n                                                prof_descr->range_ratio_len) -\n                                               1, tmp2));\n\n                    printf(\", %*s\", PROF_RATIO_LEN, tmp);\n                }\n            }\n        }\n\n        printf(\"\\n\");\n    }\n\n    if (result) {\n        if (rank)\n            printf(\"%4d, \", rank);\n\n        char tmpstr[1024];\n        for (i = 0; i < field_count && i < result_count; i++) {\n            rec = attr_info(descr[i].attr_index);\n\n            if (!DB_IS_NULL(&result[i]) || i == 0)  /* tag first column */\n                printf(\"%s%*s\", i == 0 ? 
\"\" : \", \", rec_len(rec, csv),\n                       rec->result2str(&result[i], csv, tmpstr,\n                                       sizeof(tmpstr)));\n        }\n\n        if (prof_descr && p_prof) {\n            if (prof_descr->attr_index == ATTR_INDEX_size) {\n                uint64_t tot = 0;\n                uint64_t range = 0;\n\n                for (i = 0; i < SZ_PROFIL_COUNT; i++) {\n                    printf(\", %*\" PRIu64, PROF_CNT_LEN,\n                           p_prof->size.file_count[i]);\n                    tot += p_prof->size.file_count[i];\n                    if ((prof_descr->range_ratio_len > 0) &&\n                        (i >= prof_descr->range_ratio_start) &&\n                        (i <\n                         prof_descr->range_ratio_start +\n                         prof_descr->range_ratio_len))\n                        range += p_prof->size.file_count[i];\n                }\n\n                if (prof_descr->range_ratio_len > 0)\n                    printf(\", %.2f%%\", 100.0 * range / tot);\n            }\n        }\n\n        printf(\"\\n\");\n    }\n}\n\n/** initialize internal resources (glib, llapi, internal resources...) */\nint rbh_init_internals(void)\n{\n    int rc = 0;\n\n#if !(GLIB_CHECK_VERSION(2, 32, 0))\n    g_thread_init(NULL);\n#endif\n\n    /* Initialize global tools */\n#ifdef _LUSTRE\n    if ((rc = Lustre_Init()) != 0) {\n        fprintf(stderr, \"Error %d initializing liblustreapi\\n\", rc);\n        return rc;\n    }\n#endif\n\n    /* Initilize uidgid cache */\n    if (InitUidGid_Cache()) {\n        fprintf(stderr, \"Error initializing uid/gid cache\\n\");\n        return 1;\n    }\n\n    return rc;\n}\n\n/** convert a list of attribute indexes into a attribute mask. */\nattr_mask_t list2mask(unsigned int *attr_list, int attr_count)\n{\n    int i;\n    attr_mask_t mask = { 0 };\n\n    for (i = 0; i < attr_count; i++) {\n        /* skip special values (ID, ...) 
*/\n        if (attr_list[i] & ATTR_INDEX_FLG_UNSPEC)\n            continue;\n        attr_mask_set_index(&mask, attr_list[i]);\n    }\n\n    return mask;\n}\n\n/** template callback to display stdout and stderr */\nint cb_redirect_all(void *arg, char *line, size_t size, int stream)\n{\n    int len;\n\n    if (line == NULL)\n        return -EINVAL;\n\n    len = strnlen(line, size);\n    /* terminate the string */\n    if (len >= size)\n        line[len - 1] = '\\0';\n\n    /* remove '\\n' */\n    if ((len > 0) && (line[len - 1] == '\\n'))\n        line[len - 1] = '\\0';\n\n    switch (stream) {\n    case STDOUT_FILENO:\n        printf(\"%s\\n\", line);\n        break;\n    case STDERR_FILENO:\n        fprintf(stderr, \"%s\\n\", line);\n        break;\n    }\n\n    return 0;\n}\n\n/**\n * Check if there is a single status manager that supports\n * undelete, and load it.\n * \\retval 0 if a single status manager was found.\n * \\retval -EINVAL if more than 1 status managers implement 'undelete'.\n * \\retval -ENOENT if no status manager implements undelete.\n */\nint load_single_smi(sm_instance_t **smi)\n{\n    int i = 0;\n    sm_instance_t *smi_curr;\n\n    /** XXX based on policies or status managers? what about the scope? 
*/\n    while ((smi_curr = get_sm_instance(i)) != NULL) {\n        if (smi_curr->sm->undelete_func != NULL) {\n            if (*smi != NULL) {\n                DisplayLog(LVL_CRIT, __func__,\n                           \"ERROR: no status manager specified, but several of \"\n                           \"them implement 'undelete'\");\n                return -EINVAL;\n            }\n            *smi = smi_curr;\n        }\n        i++;\n    }\n\n    if (*smi == NULL) {\n        DisplayLog(LVL_CRIT, __func__,\n                   \"ERROR: no status manager implements 'undelete'\");\n        return -ENOENT;\n    }\n\n    return 0;\n}\n\n/**\n * Load the Status Manager Instance with the given name\n */\nint load_smi(const char *sm_name, sm_instance_t **smi)\n{\n    int rc;\n    const char *dummy;\n\n    rc = check_status_args(sm_name, NULL, &dummy, smi);\n    if (rc)\n        return rc;\n\n    if ((*smi)->sm->undelete_func == NULL) {\n        DisplayLog(LVL_CRIT, __func__,\n                   \"ERROR: the specified status manager '%s' doesn't \"\n                   \"implement 'undelete'\", sm_name);\n        return -EINVAL;\n    }\n\n    return 0;\n}\n"
  },
  {
    "path": "src/robinhood/cmd_helpers.h",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2004-2010 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n/**\n * \\file  RobinhoodMisc.h\n * \\brief Common tools for parsing, converting, checking FS status...\n */\n#ifndef _RBH_CMD_HELPERS_H\n#define _RBH_CMD_HELPERS_H\n\n#include \"list_mgr.h\"\n#include \"xplatform_print.h\"\n#include \"rbh_misc.h\"\n#include \"status_manager.h\"\n\n/* special character sequences for displaying help */\n\n/* Bold start character sequence */\n#define _B \"\u001b[1m\"\n/* Bold end character sequence */\n#define B_ \"\u001b[m\"\n\n/* Underline start character sequence */\n#define _U \"\u001b[4m\"\n/* Underline end character sequence */\n#define U_ \"\u001b[0m\"\n\n/** initialize internal resources (glib, llapi, internal resources...) */\nint rbh_init_internals(void);\n\n/** The caller's function to be called for scanned entries */\ntypedef int (*scrub_callback_t) (wagon_t *id_list,\n                                 attr_set_t *attr_list,\n                                 unsigned int entry_count, void *arg);\n\n/** scan sets of directories\n * \\param cb_func, callback function for each set of directory\n */\nint rbh_scrub(lmgr_t *p_mgr, const wagon_t *id_list,\n              unsigned int id_count, attr_mask_t dir_attr_mask,\n              scrub_callback_t cb_func, void *arg);\n\nint Path2Id(const char *path, entry_id_t *id);\n\n/** Free the content of a wagon list. 
*/\nstatic inline void free_wagon(wagon_t *ids, int first, int last)\n{\n    int i;\n\n    if (ids) {\n        for (i = first; i < last; i++) {\n            free(ids[i].fullname);\n        }\n    }\n}\n\n/** parse attrset for --diff option */\nint parse_diff_mask(const char *arg, attr_mask_t *diff_mask, char *msg);\n\n/** parse a status argument <status_name|policy_name>[:<status_value>] */\nint parse_status_arg(const char *option, char *arg, char **p_st_name,\n                     char **p_st_val, bool mandatory_value);\n\n/** check a status argument <status_name|policy_name>[:<status_value>],\n *  after the configuration has been loaded.\n */\nint check_status_args(const char *status_name, const char *status_value,\n                      const char **str_val_new, sm_instance_t **p_smi);\n\n#define KB  1024LL\n#define MB  (KB*KB)\n#define GB  (KB*MB)\n#define TB  (KB*GB)\n#define PB  (KB*TB)\n#define EB  (KB*PB)\n\nstatic inline char *print_brief_sz(uint64_t sz, char *buf)\n{\n    if (sz < KB)\n        sprintf(buf, \"%\" PRIu64, sz);\n    else if (sz < MB)\n        sprintf(buf, \"%lluK\", sz / KB);\n    else if (sz < GB)\n        sprintf(buf, \"%lluM\", sz / MB);\n    else if (sz < TB)\n        sprintf(buf, \"%lluG\", sz / GB);\n    else if (sz < PB)\n        sprintf(buf, \"%lluT\", sz / TB);\n    else if (sz < EB)\n        sprintf(buf, \"%lluP\", sz / PB);\n    else\n        sprintf(buf, \"%lluE\", sz / EB);\n    return buf;\n}\n\n/* special attr indexes for display functions */\n#define ATTR_INDEX_COUNT ATTR_INDEX_FLG_COUNT\n#define ATTR_INDEX_ID    (ATTR_INDEX_FLG_UNSPEC | 0x1)\n\nconst char *attrindex2name(unsigned int index);\nunsigned int attrindex2len(unsigned int index, int csv);\n\n/** function to try resolving the name from attributes and id */\ntypedef const char * (*name_func)(const entry_id_t *p_id, attr_set_t *attrs,\n                                  char *buff);\n\nconst char *attr2str(attr_set_t *attrs, const entry_id_t *id,\n                     
unsigned int attr_index, int csv, name_func name_resolver,\n                     char *out, size_t out_sz);\n\nvoid print_attr_list_custom(int rank_field, unsigned int *attr_list,\n                            int attr_count, profile_field_descr_t *p_profile,\n                            bool csv, const char *custom_title, int custom_len);\n\nvoid print_attr_values_custom(int rank, unsigned int *attr_list, int attr_count,\n                              attr_set_t *attrs, const entry_id_t *id,\n                              bool csv, name_func name_resolver,\n                              const char *custom, int custom_len);\n\nstatic inline void print_attr_list(int rank_field, unsigned int *attr_list,\n                                   int attr_count,\n                                   profile_field_descr_t *p_profile, bool csv)\n{\n    print_attr_list_custom(rank_field, attr_list, attr_count, p_profile, csv,\n                           NULL, 0);\n}\n\nstatic inline void print_attr_values(int rank, unsigned int *attr_list,\n                                     int attr_count, attr_set_t *attrs,\n                                     const entry_id_t *id, bool csv,\n                                     name_func name_resolver)\n{\n    print_attr_values_custom(rank, attr_list, attr_count, attrs, id, csv,\n                             name_resolver, NULL, 0);\n}\n\nvoid display_report(const report_field_descr_t *descr,\n                    unsigned int field_count, const db_value_t *result,\n                    unsigned int result_count,\n                    const profile_field_descr_t *prof_descr,\n                    profile_u * p_prof, bool csv, bool header, int rank);\n\n/** convert a list of attribute indexes into a attribute mask. 
*/\nattr_mask_t list2mask(unsigned int *attr_list, int attr_count);\n\nstatic inline const char *class_format(const char *class_name)\n{\n    if (class_name == NULL)\n        return \"[n/a]\";\n    else if (EMPTY_STRING(class_name))\n        return \"[none]\";\n\n    return class_name;\n}\n\nstatic inline const char *status_format(const char *name)\n{\n    if (name == NULL)\n        return \"[none]\";\n\n    return name;\n}\n\n/** callback to display stdout and stderr */\nint cb_redirect_all(void *arg, char *line, size_t size, int stream);\n\n/**\n * Check if there is a single status manager that supports\n * undelete, and load it.\n * \\retval 0 if a single status manager was found.\n * \\retval -EINVAL if more than 1 status managers implement 'undelete'.\n * \\retval -ENOENT if no status manager implements undelete.\n */\nint load_single_smi(sm_instance_t **smi);\n\n/**\n * Load the Status Manager Instance with the given name\n */\nint load_smi(const char *sm_name, sm_instance_t **smi);\n\n\n#endif\n"
  },
  {
    "path": "src/robinhood/rbh_daemon.c",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2009, 2010 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n/**\n *  Daemon statup functions\n */\n\n#ifdef HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n\n#include \"policy_run.h\"\n#include \"list_mgr.h\"\n#include \"rbh_cfg.h\"\n#include \"rbh_logs.h\"\n#include \"rbh_misc.h\"\n#include \"cmd_helpers.h\"\n#include \"rbh_basename.h\"\n\n/* needed to dump their stats */\n#include \"fs_scan_main.h\"\n#include \"chglog_reader.h\"\n#include \"entry_processor.h\"\n\n#include <unistd.h>\n#include <getopt.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <pthread.h>\n#include <fcntl.h>  /* for open flags */\n#include <signal.h>\n\n#ifdef _LUSTRE\n#include \"lustre_extended_types.h\"\n#endif\n\n#define MAIN_TAG    \"Main\"\n#define RELOAD_TAG \"ReloadConfig\"\n\nstatic time_t boot_time;\n\n/* values must be over max char index */\n#define DRY_RUN           260\n#define NO_LIMIT          261\n#define TEST_SYNTAX       262\n#define PARTIAL_SCAN      263\n#define SHOW_DIFF         264\n#define NO_GC             265\n#define RUN_POLICIES      266\n#define TGT_USAGE         267\n#define FORCE_ALL         268\n#define ALTER_DB          269\n\n/* deprecated params */\n#define FORCE_OST_PURGE   270\n#define FORCE_FS_PURGE    271\n#define FORCE_CLASS_PURGE 272\n\n#define FORCE_OST_MIGR    280\n#define FORCE_USER_MIGR   281\n#define FORCE_GROUP_MIGR  282\n#define FORCE_CLASS_MIGR  283\n#define MIGR_ONE_FILE     284\n\n#define DEPRECATED_WM     290\n\n#define ACTION_MASK_SCAN                0x00000001\n#define ACTION_MASK_PURGE               0x00000002\n#define 
ACTION_MASK_HANDLE_EVENTS       0x00000004\n#define ACTION_MASK_RUN_POLICIES        0x00000008\n\n/* currently running modules */\nstatic int running_mask = 0;\n/* selected modules (used for reloading config) */\nstatic int parsing_mask = 0;\n\n/* currently running policies mask */\nstatic uint64_t policy_run_mask = 0LL;\n\n/* info for started policy modules */\nstatic policy_info_t   *policy_run = NULL;\nstatic unsigned int     policy_run_cpt = 0;\n\n/* Array of options for getopt_long().\n * Each record consists of: {const char *name, int has_arg, int *flag, int val}\n */\n\nstatic struct option option_tab[] = {\n\n    /* Actions selectors */\n    {\"scan\", optional_argument, NULL, 'S'},\n    {\"diff\", required_argument, NULL, SHOW_DIFF},\n#ifdef HAVE_CHANGELOGS\n#ifdef HAVE_DNE\n    {\"readlog\", optional_argument, NULL, 'r'},\n    {\"read-log\", optional_argument, NULL, 'r'},\n#else\n    {\"readlog\", no_argument, NULL, 'r'},\n    {\"read-log\", no_argument, NULL, 'r'},\n    {\"handle-events\", no_argument, NULL, 'r'},  /* for backward compatibility */\n#endif\n#endif\n    {\"run\", optional_argument, NULL, RUN_POLICIES},\n    {\"check-thresholds\", optional_argument, NULL, 'C'},\n\n    /* specifies a policy target:\n     * ost, pool, all, class, user, group, file... 
*/\n    {\"target\", required_argument, NULL, 't'},\n    /* target usage for FS, OST or pool */\n    {\"target-usage\", required_argument, NULL, TGT_USAGE},\n\n    /* For policies, this forces to apply policy to files in policy scope,\n     * by ignoring condition of policy rules and 'ignore' statements.\n     */\n    {\"ignore-conditions\", no_argument, NULL, 'I'},\n    {\"force\", no_argument, NULL, 'F'},\n\n    /* behavior flags */\n    {\"dry-run\", no_argument, NULL, DRY_RUN},\n    {\"one-shot\", no_argument, NULL, 'O'},   /* for backward compatibility */\n    {\"once\", no_argument, NULL, 'O'},\n    {\"detach\", no_argument, NULL, 'd'},\n    {\"no-limit\", no_argument, NULL, NO_LIMIT},\n    {\"no-gc\", no_argument, NULL, NO_GC},\n    {\"alter-db\", no_argument, NULL, ALTER_DB},\n    {\"alterdb\", no_argument, NULL, ALTER_DB},\n    /* generic policies equivalent for --sync:\n     * alias to --once --no-limit --ignore-conditions --force */\n    {\"force-all\", no_argument, NULL, FORCE_ALL},\n    {\"forceall\", no_argument, NULL, FORCE_ALL},\n\n    /* config file options */\n    {\"config-file\", required_argument, NULL, 'f'},\n    {\"template\", optional_argument, NULL, 'T'},\n    {\"defaults\", no_argument, NULL, 'D'},\n    {\"test-syntax\", no_argument, NULL, TEST_SYNTAX},\n\n    /* override config file options */\n    {\"log-file\", required_argument, NULL, 'L'},\n    {\"log-level\", required_argument, NULL, 'l'},\n\n    /* miscellaneous options */\n    {\"help\", no_argument, NULL, 'h'},\n    {\"version\", no_argument, NULL, 'V'},\n    {\"pid-file\", required_argument, NULL, 'p'},\n\n    /* kept for compatibility */\n    {\"partial-scan\", required_argument, NULL, PARTIAL_SCAN},\n\n    /* deprecated params */\n    {\"purge\", no_argument, NULL, 'P'},\n    {\"release\", no_argument, NULL, 'P'},\n    {\"check-watermarks\", no_argument, NULL, DEPRECATED_WM},\n    {\"migrate\", no_argument, NULL, 'M'},\n    {\"archive\", no_argument, NULL, 'M'},\n    /* generic 
policies equivalent: --force-all */\n    {\"sync\", no_argument, NULL, 's'},\n    {\"hsm-remove\", no_argument, NULL, 'R'},\n    {\"hsm-rm\", no_argument, NULL, 'R'},\n    {\"rmdir\", no_argument, NULL, 'R'},\n    {\"purge-ost\", required_argument, NULL, FORCE_OST_PURGE},\n    {\"release-ost\", required_argument, NULL, FORCE_OST_PURGE},\n    {\"purge-fs\", required_argument, NULL, FORCE_FS_PURGE},\n    {\"release-fs\", required_argument, NULL, FORCE_FS_PURGE},\n    {\"purge-class\", required_argument, NULL, FORCE_CLASS_PURGE},\n    {\"release-class\", required_argument, NULL, FORCE_CLASS_PURGE},\n    {\"migrate-ost\", required_argument, NULL, FORCE_OST_MIGR},\n    {\"archive-ost\", required_argument, NULL, FORCE_OST_MIGR},\n    {\"migrate-user\", required_argument, NULL, FORCE_USER_MIGR},\n    {\"archive-user\", required_argument, NULL, FORCE_USER_MIGR},\n    {\"migrate-group\", required_argument, NULL, FORCE_GROUP_MIGR},\n    {\"archive-group\", required_argument, NULL, FORCE_GROUP_MIGR},\n    {\"migrate-class\", required_argument, NULL, FORCE_CLASS_MIGR},\n    {\"archive-class\", required_argument, NULL, FORCE_CLASS_MIGR},\n    {\"migrate-file\", required_argument, NULL, MIGR_ONE_FILE},\n    {\"archive-file\", required_argument, NULL, MIGR_ONE_FILE},\n    /* -i replaced by -I to allow confusions with rbh-report -i */\n\n    {NULL, 0, NULL, 0}\n};\n\n#define SHORT_OPT_STRING     \"SrCt:IOdf:T:DL:l:hVp:F\"\n#define SHORT_OPT_DEPRECATED \"PMRi\"\n\n#define MAX_OPT_LEN 1024\n#define MAX_TYPE_LEN 256\n\ntypedef struct rbh_options {\n    run_flags_t    flags;\n    bool           detach;\n    char           config_file[MAX_OPT_LEN];\n    char           template_file[MAX_OPT_LEN];\n    bool           write_template;\n    bool           write_defaults;\n    bool           pid_file;\n    char           pid_filepath[MAX_OPT_LEN];\n    bool           test_syntax;\n    bool           partial_scan;\n    char           partial_scan_path[RBH_PATH_MAX]; /* can be a deep path */\n 
   attr_mask_t    diff_mask;\n\n    char           policy_string[MAX_OPT_LEN];\n    char           target_string[RBH_PATH_MAX]; /* can be a deep file */\n    double         usage_target; /* set -1.0 if not set */\n\n    int            mdtidx;\n    enum lmgr_init_flags db_flags;\n} rbh_options;\n\n#define TGT_NOT_SET   -1.0\n\nstatic inline void zero_options(struct rbh_options *opts)\n{\n    /* default value is 0 for most options */\n    memset(opts, 0, sizeof(struct rbh_options));\n    opts->usage_target = TGT_NOT_SET;\n    opts->mdtidx = -1;  /* all MDTs */\n}\n\n/* program options from command line  */\nstatic struct rbh_options options;\n\n/* special character sequences for displaying help */\n\n/* Bold start character sequence */\n#define _B \"\u001b[1m\"\n/* Bold end character sequence */\n#define B_ \"\u001b[m\"\n\n/* Underline start character sequence */\n#define _U \"\u001b[4m\"\n/* Underline end character sequence */\n#define U_ \"\u001b[0m\"\n\nstatic const char *cmd_help = _B \"Usage:\" B_ \" %s [options]\\n\";\n\nstatic const char *action_help =\n    _B \"Actions:\" B_ \"\\n\"\n    \"    \" _B \"-S\" B_ \", \" _B \"--scan\" B_ \"[=\" _U \"dir\" U_ \"]\\n\"\n    \"        Scan the filesystem namespace. If \" _U \"dir\" U_ \" is specified, only scan the specified subdir.\\n\"\n#ifdef HAVE_CHANGELOGS\n    \"    \" _B \"-r\" B_ \", \" _B \"--read-log\" B_ \"[=\" _U \"mdt_idx\" U_ \"]\\n\"\n    \"        Read events from MDT ChangeLog.\\n\"\n    \"        If \" _U \"mdt_idx\" U_ \" is specified, only read ChangeLogs for the given MDT.\\n\"\n    \"        Else, start 1 changelog reader thread per MDT (with DNE).\\n\"\n#endif\n    \"    \" _B \"--run\" B_ \"[=all]\\n\"\n    \"        Run all polices (based on triggers).\\n\"\n    \"    \" _B \"--run\" B_ \"=\" _U \"policy1\" U_ \"(\" _U \"args\" U_ \"),\" _U \"policy2\" U_ \"(\" _U \"args\" U_ \")...\\n\"\n    \"        Run the given policies with the specified arguments. 
\\n\"\n    \"        See \\\"Policy run options\\\" for details about \" _U \"args\" U_ \".\\n\"\n    \"    \" _B \"-C\" B_ \" \" _U \"policy1,policy2...\" U_ \", \" _B \"--check-thresholds\" B_ \"[=\" _U \"policy1,policy2...\" U_ \"]\\n\"\n    \"        Only check trigger thresholds without applying policy actions.\\n\"\n    \"        If no policy is specified (or 'all'), check all triggers.\\n\";\n\nstatic const char *run_help =\n    _B \"Policy run options:\" B_ \"\\n\"\n    \"    \" _U \"args\" U_ \"\\n\"\n    \"       Comma-separated list of <param>=<value>.\\n\"\n    \"           e.g. --run=cleanup(target=user:foo,max-count=1000)\\n\"\n    \"       The following parameters are allowed:\\n\"\n    \"       \" _B \"target\" B_ \"=\" _U \"tgt\" U_ \"\\n\"\n    \"           Targeted subset of entries for the policy run.\\n\"\n    \"           \" _U \"tgt\" U_ \" can be one of:\\n\"\n    \"               \" _B \"all\" B_ \" (all entries), \" _B \"user\" B_ \":\" _U \"username\" U_ \", \" _B \"group\" B_ \":\" _U \"grpname\" U_ \",\\n\"\n    \"               \" _B \"file\" B_ \":\" _U \"path\" U_ \", \" _B \"class\" B_ \":\" _U \"fileclass\" U_\n#ifdef _LUSTRE\n    \", \" _B \"ost\" B_ \":\" _U \"ost_idx\" U_ \", \" _B \"pool\" B_ \":\" _U \"poolname\" U_\",\\n\"\n    \"               \" _B \"projid\" B_\":\"_U \"projid\" U_\n#endif\n    \".\\n\"\n    \"       \" _B \"max-count\" B_ \"=\" _U \"nbr\" U_ \"\\n\"\n    \"           Max number of actions to execute for a policy run.\\n\"\n    \"       \" _B \"max-vol\" B_ \"=\" _U \"size\" U_ \"\\n\"\n    \"           Max volume of entries impacted by a policy run.\\n\"\n    \"       \" _B \"target-usage\" B_ \"=\" _U \"pct\" U_ \"\\n\"\n    \"           Targeted filesystem or OST usage for a policy run, in percent.\\n\"\n    \"\\n\"\n    \"    \" _B \"-t\" B_ \" \" _U \"tgt\" U_ \", \" _B \"--target\" B_ \"=\" _U \"tgt\" U_ \"\\n\"\n    \"        Specify the default target for policy runs (see target syntax 
above).\\n\"\n    \"    \" _B \"--target-usage\" B_ \"=\" _U \"pct\" U_ \"\\n\"\n    \"       Specifies the default target disk usage (in pct) for 'all', 'ost' or 'pool' targets.\\n\"\n    \"    \" _B \"-I\" B_ \", \" _B \"--ignore-conditions\" B_ \"\\n\"\n    \"        Apply policy to all entries in policy scope, without checking policy rule conditions.\\n\"\n    \"    \" _B \"-F\" B_ \", \" _B \"--force\" B_ \"\\n\"\n    \"        Force applying policies even if no full scan has never been done (partial DB contents).\\n\"\n    \"    \" _B \"--no-limit\" B_ \"\\n\"\n    \"        Don't limit the maximum number/volume of policy actions per pass.\\n\"\n    \"    \" _B \"--dry-run\" B_ \"\\n\"\n    \"        Only report policy actions that would be performed without really doing them.\\n\"\n    \"        Note: Robinhood DB is impacted as if the reported actions were really done.\\n\"\n    \"    \" _B \"--force-all\" B_ \"\\n\"\n    \"        Force applying a policy to all eligible entries, without considering\\n\"\n    \"        policy limits and rule conditions.\\n\"\n    \"        This is equivalent to: --once --no-limit --ignore-conditions --force\\n\";\n\nstatic const char *scan_help =\n    _B \"Scanning options:\" B_ \"\\n\"\n    \"    \" _B \"--no-gc\" B_ \"\\n\"\n    \"        Garbage collection of entries in DB is a long operation when terminating\\n\"\n    \"        a scan. 
This skips this operation if you don't care about removed\\n\"\n    \"        entries (or don't expect entries to be removed).\\n\"\n    \"        This is also recommended for partial scanning (see -scan=dir option).\\n\";\n\nstatic const char *output_help =\n    _B \"Output options:\" B_ \"\\n\"\n    \"    \" _B \"--diff\" B_ \"=\" _U \"attrset\" U_ \"\\n\"\n    \"        When scanning or reading changelogs, display changes for the given set of attributes (to stdout).\\n\"\n    \"        \" _U \"attrset\" U_ \" is a list of values in: path,posix,stripe,all,status,notimes,noatime.\\n\";\n\nstatic const char *behavior_help =\n    _B \"Behavior options:\" B_ \"\\n\"\n    \"    \" _B \"-O\" B_ \", \" _B \"--once\" B_ \"\\n\"\n    \"        Perform only one pass of the specified action and exit.\\n\"\n    \"    \" _B \"-d\" B_ \", \" _B \"--detach\" B_ \"\\n\"\n    \"        Daemonize the process (detach from parent process).\\n\"\n    \"    \" _B \"--alter-db\" B_ \"\\n\"\n    \"        Allow database schema modifications (backup your DB before using this).\\n\";\n\nstatic const char *config_help =\n    _B \"Config file options:\" B_ \"\\n\"\n    \"    \" _B \"-f\" B_ \" \" _U \"cfg_file\" U_ \", \" _B \"--config-file=\" B_ _U \"cfg_file\" U_ \"\\n\"\n    \"        Path to configuration file (or short name).\\n\"\n    \"    \" _B \"-T\" B_ \" \" _U \"output_file\" U_ \", \" _B \"--template\" B_ \"[=\" _U \"output_file\" U_ \"]\\n\"\n    \"        Write a configuration file template to the specified file.\\n\"\n    \"    \" _B \"-D\" B_ \", \" _B \"--defaults\" B_ \"\\n\"\n    \"        Display default configuration values.\\n\" \"    \" _B \"--test-syntax\" B_ \"\\n\"\n    \"        Check configuration file and exit.\\n\";\n\nstatic const char *log_help =\n    _B \"Log options:\" B_ \"\\n\"\n    \"    \" _B \"-L\" B_ \" \" _U \"logfile\" U_ \", \" _B \"--log-file=\" B_ _U \"logfile\" U_ \"\\n\"\n    \"        Force the path to the log file (overrides configuration 
value).\\n\"\n    \"        Special values \\\"stdout\\\" and \\\"stderr\\\" can be used.\\n\" \"    \" _B\n    \"-l\" B_ \" \" _U \"loglevel\" U_ \", \" _B \"--log-level=\" B_ _U \"loglevel\" U_ \"\\n\"\n    \"        Force the log verbosity level (overrides configuration value).\\n\"\n    \"        Allowed values: CRIT, MAJOR, EVENT, VERB, DEBUG, FULL.\\n\";\n\nstatic const char *misc_help =\n    _B \"Miscellaneous options:\" B_ \"\\n\"\n    \"    \" _B \"-h\" B_ \", \" _B \"--help\" B_ \"\\n\"\n    \"        Display a short help about command line options.\\n\"\n    \"    \" _B \"-V\" B_ \", \" _B \"--version\" B_ \"\\n\"\n    \"        Display version info\\n\"\n    \"    \" _B \"-p\" B_ \" \" _U \"pidfile\" U_ \", \" _B \"--pid-file=\" B_ _U \"pidfile\" U_ \"\\n\"\n    \"         Pid file (used for service management).\\n\";\n\nstatic inline void display_help(const char *bin_name)\n{\n    printf(cmd_help, bin_name);\n    printf(\"\\n\");\n    printf(\"%s\\n\", action_help);\n    printf(\"%s\\n\", run_help);\n    printf(\"%s\\n\", scan_help);\n    printf(\"%s\\n\", output_help);\n    printf(\"%s\\n\", behavior_help);\n    printf(\"%s\\n\", config_help);\n    printf(\"%s\\n\", log_help);\n    printf(\"%s\", misc_help);\n}\n\nstatic inline void display_version(const char *bin_name)\n{\n    printf(\"\\n\");\n    printf(\"Product:         \" PACKAGE_NAME \"\\n\");\n    printf(\"Version:         \" PACKAGE_VERSION \"-\" RELEASE \"\\n\");\n    printf(\"Build:           \" COMPIL_DATE \"\\n\");\n    printf(\"\\n\");\n    printf(\"Compilation switches:\\n\");\n\n/* FS type */\n#ifdef _LUSTRE\n    printf(\"    Lustre filesystems\\n\");\n#ifdef LUSTRE_VERSION\n    printf(\"    Lustre Version: \" LUSTRE_VERSION \"\\n\");\n#endif\n\n#else\n    printf(\"    Posix filesystems\\n\");\n#endif\n/* Access by Fid ? 
*/\n#ifdef _HAVE_FID\n    printf(\"    Address entries by FID\\n\");\n#else\n    printf(\"    Address entries by path\\n\");\n#endif\n#ifdef HAVE_CHANGELOGS\n    printf(\"    MDT Changelogs supported\\n\");\n#ifndef HAVE_DNE\n    printf(\"    Support Changelogs from multiple MDT (DNE)\\n\");\n#endif\n#else\n    printf(\"    MDT Changelogs disabled\\n\");\n#endif\n    printf(\"\\n\");\n\n#ifdef _MYSQL\n    printf(\"Database binding: MySQL\\n\");\n#elif defined(_SQLITE)\n    printf(\"Database binding: SQLite\\n\");\n#else\n#error \"No database was specified\"\n#endif\n    printf(\"\\n\");\n    printf(\"Report bugs to: <\" PACKAGE_BUGREPORT \">\\n\");\n    printf(\"\\n\");\n}\n\nstatic pthread_t stat_thread;\n\n/* database connexion for updating stats */\nstatic lmgr_t   lmgr;\nstatic bool     lmgr_init = false;\nstatic char     boot_time_str[256];\n\nstatic void running_mask2str(int mask, uint64_t pol_mask, char *str)\n{\n    str[0] = '\\0';\n    if (mask & MODULE_MASK_FS_SCAN)\n        strcat(str, \"scan,\");\n#ifdef HAVE_CHANGELOGS\n    if (mask & MODULE_MASK_EVENT_HDLR)\n        strcat(str, \"log_reader,\");\n#endif\n    if (mask & MODULE_MASK_POLICY_RUN) {\n        bool first_pol = true;\n        int i;\n\n        strcat(str, \"policy_run(\");\n        for (i = 0; i < policy_run_cpt; i++) {\n            if ((pol_mask) & (1LL << i)) {\n                if (!first_pol)\n                    strcat(str, \",\");\n\n                strcat(str, policy_run[i].descr->name);\n                first_pol = false;\n            }\n        }\n        strcat(str, \"),\");\n    }\n\n    /* remove final ',' */\n    int len = strlen(str);\n    if ((len > 0) && str[len - 1] == ',')\n        str[len - 1] = '\\0';\n    return;\n}\n\n/** prevent from dumping module stats when the daemon is shutting down */\nstatic pthread_mutex_t shutdown_mtx = PTHREAD_MUTEX_INITIALIZER;\n\n/** signal flags */\nstatic int      terminate_sig = 0;\nstatic bool     reload_sig = false;\nstatic bool     
dump_sig = false;\n\n/** async signal handler */\nstatic pthread_t sig_thr;\n\n/** dump stats of all modules */\nstatic void dump_stats(lmgr_t *lmgr, const int *module_mask,\n                       const uint64_t *p_policy_mask)\n{\n    char   tmp_buff[256];\n    struct tm date;\n    time_t now;\n\n    if (pthread_mutex_trylock(&shutdown_mtx) != 0)\n        /* daemon is shutting down, don't dump stats */\n        return;\n\n    now = time(NULL);\n    strftime(tmp_buff, sizeof(tmp_buff), \"%Y/%m/%d %T\",\n             localtime_r(&now, &date));\n\n    DisplayLog(LVL_MAJOR, \"STATS\",\n               \"==================== Dumping stats at %s =====================\",\n               tmp_buff);\n    DisplayLog(LVL_MAJOR, \"STATS\", \"======== General statistics =========\");\n    DisplayLog(LVL_MAJOR, \"STATS\", \"Daemon start time: %s\", boot_time_str);\n    running_mask2str(*module_mask, *p_policy_mask, tmp_buff);\n    DisplayLog(LVL_MAJOR, \"STATS\", \"Started modules: %s\", tmp_buff);\n\n    if (*module_mask & MODULE_MASK_FS_SCAN) {\n        FSScan_DumpStats();\n        FSScan_StoreStats(lmgr);\n    }\n#ifdef HAVE_CHANGELOGS\n    if (*module_mask & MODULE_MASK_EVENT_HDLR) {\n        cl_reader_dump_stats();\n        cl_reader_store_stats(lmgr);\n    }\n#endif\n\n    if (*module_mask & MODULE_MASK_ENTRY_PROCESSOR) {\n        EntryProcessor_DumpCurrentStages();\n    }\n\n    if (*module_mask & MODULE_MASK_POLICY_RUN\n        && *p_policy_mask != 0LL && policy_run_cpt != 0 && policy_run != NULL) {\n        int i;\n\n        for (i = 0; i < policy_run_cpt; i++) {\n            if ((*p_policy_mask) & (1LL << i))\n                policy_module_dump_stats(&policy_run[i]);\n        }\n    }\n\n    pthread_mutex_unlock(&shutdown_mtx);\n\n    /* Flush stats */\n    FlushLogs();\n}\n\nstatic void *stats_thr(void *arg)\n{\n    struct tm date;\n\n    strftime(boot_time_str, 256, \"%Y/%m/%d %T\", localtime_r(&boot_time, &date));\n\n    if (!lmgr_init) {\n        if 
(ListMgr_InitAccess(&lmgr) != DB_SUCCESS)\n            return NULL;\n        lmgr_init = true;\n    }\n\n    DisplayLog(LVL_VERB, MAIN_TAG, \"Statistics thread started\");\n\n    WaitStatsInterval();\n    while (!terminate_sig) {\n        dump_stats(&lmgr, &running_mask, &policy_run_mask);\n        WaitStatsInterval();\n    }\n    return NULL;\n}\n\n#define SIGHDL_TAG  \"SigHdlr\"\n\nstatic void terminate_handler(int sig)\n{\n    terminate_sig = sig;\n    lmgr_cancel_retry = true;\n}\n\nstatic void reload_handler(int sig)\n{\n    reload_sig = true;\n}\n\nstatic void usr_handler(int sig)\n{\n    dump_sig = true;\n}\n\nstatic int action2parsing_mask(int act_mask)\n{\n    /* build config parsing mask */\n    int parse_mask = 0;\n    if (act_mask & ACTION_MASK_SCAN)\n        parse_mask |= MODULE_MASK_FS_SCAN | MODULE_MASK_ENTRY_PROCESSOR;\n    if (act_mask & ACTION_MASK_RUN_POLICIES)\n        parse_mask |= MODULE_MASK_POLICY_RUN;\n#ifdef HAVE_CHANGELOGS\n    if (act_mask & ACTION_MASK_HANDLE_EVENTS)\n        parse_mask |= MODULE_MASK_EVENT_HDLR | MODULE_MASK_ENTRY_PROCESSOR;\n#endif\n\n    return parse_mask;\n}\n\nstatic void *signal_handler_thr(void *arg)\n{\n\n    struct sigaction act_sighup;\n    struct sigaction act_sigterm;\n    struct sigaction act_sigusr;\n\n    /* create signal handlers */\n    memset(&act_sigterm, 0, sizeof(act_sigterm));\n    act_sigterm.sa_flags = 0;\n    act_sigterm.sa_handler = terminate_handler;\n    if (sigaction(SIGTERM, &act_sigterm, NULL) == -1\n        || sigaction(SIGINT, &act_sigterm, NULL) == -1) {\n        DisplayLog(LVL_CRIT, SIGHDL_TAG,\n                   \"Error while setting signal handlers for SIGTERM and SIGINT: %s\",\n                   strerror(errno));\n        exit(1);\n    } else\n        DisplayLog(LVL_VERB, SIGHDL_TAG,\n                   \"Signals SIGTERM and SIGINT (daemon shutdown) are ready to be used\");\n\n    memset(&act_sighup, 0, sizeof(act_sighup));\n    act_sighup.sa_flags = 0;\n    act_sighup.sa_handler 
= reload_handler;\n    if (sigaction(SIGHUP, &act_sighup, NULL) == -1) {\n        DisplayLog(LVL_CRIT, SIGHDL_TAG,\n                   \"Error while setting signal handlers for SIGHUP: %s\",\n                   strerror(errno));\n        exit(1);\n    } else\n        DisplayLog(LVL_VERB, SIGHDL_TAG,\n                   \"Signal SIGHUP (config reloading) is ready to be used\");\n\n    memset(&act_sigusr, 0, sizeof(act_sigusr));\n    act_sigusr.sa_flags = 0;\n    act_sigusr.sa_handler = usr_handler;\n    if (sigaction(SIGUSR1, &act_sigusr, NULL) == -1) {\n        DisplayLog(LVL_CRIT, SIGHDL_TAG,\n                   \"Error while setting signal handlers for SIGUSR1: %s\",\n                   strerror(errno));\n        exit(1);\n    } else\n        DisplayLog(LVL_VERB, SIGHDL_TAG,\n                   \"Signal SIGUSR1 (stats dump) is ready to be used\");\n\n    /* signal flag checking loop */\n    while (1) {\n        /* check for signal every second */\n        rh_sleep(1);\n\n        if (terminate_sig != 0) {\n            const struct timespec timeout = {.tv_sec = 1, .tv_nsec = 0 };\n\n            if (terminate_sig == SIGTERM)\n                DisplayLog(LVL_MAJOR, SIGHDL_TAG,\n                           \"SIGTERM received: performing clean daemon shutdown\");\n            else if (terminate_sig == SIGINT)\n                DisplayLog(LVL_MAJOR, SIGHDL_TAG,\n                           \"SIGINT received: performing clean daemon shutdown\");\n            FlushLogs();\n\n            /* wait up to 1s to get the termination mutex (avoids dumping stats\n             * while terminating the daemon) */\n            if (pthread_mutex_timedlock(&shutdown_mtx, &timeout) != 0) {\n                /* Something when wrong for an unexpected reason,\n                 * but we have to shutdown now!\n                 * => continuing */\n                DisplayLog(LVL_MAJOR, SIGHDL_TAG,\n                           \"WARNING: Failed to get termination mutex: %m\");\n            }\n\n       
     /* first ask policy consummers and feeders to stop\n             * (long operations first) */\n\n            /* 1- stop submitting policy actions */\n            if (running_mask & MODULE_MASK_POLICY_RUN\n                && policy_run_mask != 0LL\n                && policy_run_cpt != 0 && policy_run != NULL) {\n                int i;\n\n                for (i = 0; i < policy_run_cpt; i++) {\n                    if (policy_run_mask & (1LL << i)) {\n                        int rc = policy_module_stop(&policy_run[i]);\n                        if (rc)\n                            DisplayLog(LVL_CRIT, SIGHDL_TAG,\n                                       \"Failed to stop policy module '%s' (rc=%d).\",\n                                       policy_run[i].descr->name, rc);\n                        FlushLogs();\n                    }\n                }\n            }\n\n            /* 2 - stop feeding with changelogs */\n#ifdef HAVE_CHANGELOGS\n            if (running_mask & MODULE_MASK_EVENT_HDLR) {\n                /* stop changelog processing */\n                cl_reader_terminate();\n                FlushLogs();\n            }\n#endif\n            /* 2b - stop feeding from scan */\n            if (running_mask & MODULE_MASK_FS_SCAN) {\n                /* avoid stats thread to try dumping the stats while\n                 * terminating */\n                running_mask &= ~MODULE_MASK_FS_SCAN;\n\n                /* stop FS scan (blocking) */\n                FSScan_Terminate();\n                FlushLogs();\n            }\n\n            /* TODO 3) wait changelog reader (blocking) */\n\n            /* 4 - entry processor can be stopped */\n            if (running_mask & MODULE_MASK_ENTRY_PROCESSOR) {\n                /* avoid stats thread to try dumping the status while\n                 * terminating */\n                running_mask &= ~MODULE_MASK_ENTRY_PROCESSOR;\n\n                /* drop pipeline waiting operations and terminate threads */\n                
EntryProcessor_Terminate(false);\n\n#ifdef HAVE_CHANGELOGS\n                if (running_mask & MODULE_MASK_EVENT_HDLR) {\n                    /* Ack last changelog records. */\n                    cl_reader_done();\n                }\n#endif\n                FlushLogs();\n            }\n\n            /* 5 - wait policy consumers */\n            if (running_mask & MODULE_MASK_POLICY_RUN\n                && policy_run_mask != 0LL\n                && policy_run_cpt != 0 && policy_run != NULL) {\n                int i, rc;\n\n                running_mask &= ~MODULE_MASK_POLICY_RUN;\n\n                for (i = 0; i < policy_run_cpt; i++) {\n                    if (policy_run_mask & (1LL << i)) {\n                        policy_run_mask &= ~(1LL << i);\n                        rc = policy_module_wait(&policy_run[i]);\n                        if (rc)\n                            DisplayLog(LVL_CRIT, SIGHDL_TAG,\n                                       \"Failure while waiting for policy module '%s' to end (rc=%d).\",\n                                       policy_run[i].descr->name, rc);\n                        FlushLogs();\n                    }\n                }\n            }\n\n            if (lmgr_init) {\n                ListMgr_CloseAccess(&lmgr);\n                lmgr_init = false;\n            }\n\n            DisplayLog(LVL_MAJOR, SIGHDL_TAG, \"Exiting.\");\n            FlushLogs();\n\n            /* indicate the process terminated due to a signal */\n            exit(128 + terminate_sig);\n\n        } else if (reload_sig) {\n            DisplayLog(LVL_MAJOR, SIGHDL_TAG,\n                       \"SIGHUP received: reloading configuration\");\n            DisplayLog(LVL_EVENT, RELOAD_TAG,\n                       \"Reloading configuration from '%s'\", config_file_path());\n\n            if (rbh_cfg_reload(parsing_mask) != 0) {\n                DisplayLog(LVL_MAJOR, RELOAD_TAG,\n                           \"Failure reloading configuration from '%s'\", 
config_file_path());\n            }\n\n            reload_sig = false;\n            FlushLogs();\n        } else if (dump_sig) {\n            DisplayLog(LVL_MAJOR, SIGHDL_TAG,\n                       \"SIGUSR1 received: dumping stats\");\n\n            if (!lmgr_init) {\n                if (ListMgr_InitAccess(&lmgr) != DB_SUCCESS)\n                    return NULL;\n                lmgr_init = true;\n            }\n\n            dump_stats(&lmgr, &running_mask, &policy_run_mask);\n            dump_sig = false;\n        }\n    }\n}\n\nstatic inline int do_write_template(const char *file)\n{\n    int rc;\n    FILE *stream;\n\n    if (!EMPTY_STRING(file)) {\n        stream = fopen(file, \"w\");\n\n        if (stream == NULL) {\n            rc = errno;\n            fprintf(stderr, \"Error opening file '%s' for writing: %s.\\n\", file,\n                    strerror(rc));\n            return rc;\n        }\n    } else\n        stream = stdout;\n\n    rc = rbh_cfg_write_template(stream);\n    if (rc)\n        fprintf(stderr, \"Error writing configuration template: %s\\n\",\n                strerror(rc));\n    else if (stream != stdout)\n        fprintf(stderr,\n                \"Configuration template successfully written to '%s'.\\n\", file);\n\n    if (stream != stdout)\n        fclose(stream);\n\n    return rc;\n}\n\nstatic void create_pid_file(const char *pid_file)\n{\n    int fd = open(pid_file, O_CREAT | O_TRUNC | O_WRONLY, 0644);\n\n    if (fd < 0) {\n        DisplayLog(LVL_CRIT, MAIN_TAG,\n                   \"WARNING: Could not open pid file %s: %s\", pid_file,\n                   strerror(errno));\n    } else {\n        char pid_str[128];\n        ssize_t iolen;\n\n        snprintf(pid_str, 128, \"%lu\\n\", (unsigned long)getpid());\n        iolen = write(fd, pid_str, strlen(pid_str) + 1);\n\n        if (iolen == -1) {\n            DisplayLog(LVL_CRIT, MAIN_TAG, \"ERROR writing pid file %s: %s\",\n                       pid_file, strerror(errno));\n        }\n\n  
      close(fd);\n    }\n\n}\n\n/** parse a target-usage parameter (float)\n * @return 0 on success. errno value on failure.\n */\nstatic int parse_target_usage(const char *str, double *val)\n{\n    char extra_chr[MAX_OPT_LEN];\n    int n;\n\n    /* parse float argument */\n    n = sscanf(str, \"%lf%s\", val, extra_chr);\n    if (n != 1 && n != 2) {\n        fprintf(stderr, \"ERROR: invalid target-usage '%s'. Float expected.\\n\",\n                str);\n        return EINVAL;\n    }\n    if (n == 2 && strcmp(extra_chr, \"\\%\") != 0) {\n        fprintf(stderr, \"ERROR: unexpected suffix '%s' in target-usage. \"\n                \"Only '%%' is allowed.\\n\", extra_chr);\n        return EINVAL;\n    }\n\n    return 0;\n}\n\n/** parse options in robinhood command line */\nstatic int rh_read_parameters(const char *bin, int argc, char **argv,\n                              int *action_mask, struct rbh_options *opt)\n{\n    int c, option_index = 0;\n    char err_msg[4096];\n\n    /* no action, by default */\n    *action_mask = 0;\n\n    zero_options(&options);\n\n    /* parse command line options */\n    while ((c = getopt_long(argc, argv, SHORT_OPT_STRING SHORT_OPT_DEPRECATED,\n                            option_tab, &option_index)) != -1) {\n        switch (c) {\n        case PARTIAL_SCAN:\n            fprintf(stderr,\n                    \"Warning: --partial-scan is deprecated. 
Use '--scan=<dir>' instead.\\n\");\n            /* same as 'scan' with optarg != NULL\n             * => continue to -S:\n             */\n        case 'S':\n            *action_mask |= ACTION_MASK_SCAN;\n\n            if (optarg) {   /* optional argument => partial scan */\n                opt->flags |= RUNFLG_ONCE;\n                opt->partial_scan = true;\n                rh_strncpy(opt->partial_scan_path, optarg, RBH_PATH_MAX);\n                /* clean final slash */\n                if (FINAL_SLASH(opt->partial_scan_path))\n                    REMOVE_FINAL_SLASH(opt->partial_scan_path);\n            }\n            break;\n\n        case SHOW_DIFF:\n            if (parse_diff_mask(optarg, &opt->diff_mask, err_msg)) {\n                fprintf(stderr, \"Invalid argument for --diff: %s\\n\", err_msg);\n                return EINVAL;\n            }\n            break;\n\n        case DEPRECATED_WM:\n            fprintf(stderr,\n                    \"Warning: '--check-watermarks' is deprecated. 
Use '--check-thresholds' instead.\\n\");\n            optarg = NULL;\n            /* same as '--check-thresholds' with opt_arg=NULL.\n             * => continue to -C\n             */\n        case 'C':\n            *action_mask |= ACTION_MASK_RUN_POLICIES;\n            opt->flags |= RUNFLG_CHECK_ONLY;\n            /* 4 cases:\n             *  --run=foo,bar --check-thresholds=bah,boo\n             *      => reject if arguments are different\n             *  --run=foo,bar --check-thresholds\n             *      => user may want to check thresholds for the given policies\n             *  --run --check-thresholds=bah,boo\n             *      => user may want to check thresholds for the given policies\n             *  --run --check-thresholds\n             *      => user may want to check thresholds for all policies\n             */\n            /* first case: */\n            if (!EMPTY_STRING(opt->policy_string) && (optarg != NULL)\n                && !EMPTY_STRING(optarg)) {\n                if (strcasecmp(opt->policy_string, optarg) != 0) {\n                    fprintf(stderr,\n                            \"Incompatible arguments for --run and --check-thresholds ('%s' != '%s').\\n\"\n                            \"You can specify:\\n\" \"--check-thresholds=foo,bar\\n\"\n                            \"--run=foo,bar --check-thresholds\\n\",\n                            opt->policy_string, optarg);\n                    return EINVAL;\n                } else\n                    fprintf(stderr,\n                            \"Duplicate arguments for --run and --check-thresholds\\n\"\n                            \"Assuming --check-thresholds=%s\",\n                            opt->policy_string);\n            } else {    /* all other cases: global check flag. 
*/\n\n                /* copy specified policies (unless if specified by run) */\n                if (EMPTY_STRING(opt->policy_string) && optarg != NULL &&\n                    !EMPTY_STRING(optarg))\n                    rh_strncpy(opt->policy_string, optarg,\n                               sizeof(opt->policy_string));\n            }\n            break;\n\n        case 'r':\n#ifndef HAVE_CHANGELOGS\n            fprintf(stderr,\n                    \"-r | --read-log option is only supported in Lustre v2.x versions.\\n\");\n            return ENOTSUP;\n#else\n            *action_mask |= ACTION_MASK_HANDLE_EVENTS;\n#ifdef HAVE_DNE\n            if (optarg) {   /* optional argument => MDT index */\n                opt->mdtidx = str2int(optarg);\n                if (opt->mdtidx == -1) {\n                    fprintf(stderr,\n                            \"Invalid argument to --read-log: expected numerical value for <mdt_index>.\\n\");\n                    return EINVAL;\n                }\n            }\n#endif\n#endif\n            break;\n\n        case RUN_POLICIES:\n            /* avoid conflicts with check-policies */\n            if (opt->flags & RUNFLG_CHECK_ONLY) {\n                if (optarg != NULL && !EMPTY_STRING(optarg))\n                    fprintf(stderr,\n                            \"ERROR: --run is redundant with --check-thresholds\\n\"\n                            \"Did you mean: --check-thresholds=%s ?\\n\", optarg);\n                else if (!EMPTY_STRING(opt->policy_string))\n                    fprintf(stderr,\n                            \"ERROR: --run is redundant with --check-thresholds\\n\"\n                            \"Did you just mean: --check-thresholds=%s ?\\n\",\n                            opt->policy_string);\n                else    /* both empty */\n                    fprintf(stderr,\n                            \"ERROR: --run is redundant with --check-thresholds\\n\"\n                            \"Did you just mean: --check-thresholds 
?\\n\");\n                return EINVAL;\n            } else if (!EMPTY_STRING(opt->policy_string)) {\n                /* forbid using '--run' (without option)\n                 * together with '--run=policy1...' (with options) */\n                if (optarg == NULL) {\n                    fprintf(stderr, \"ERROR: multiple inconsistent '--run' \"\n                            \"parameters on command line.\\n\");\n                    return EINVAL;\n                }\n                /* Concatenate with previous 'run' parameters, to allow\n                 * specifying --run=policy1 --run=policy2,...\n                 */\n                strncat(opt->policy_string, \",\", sizeof(opt->policy_string)\n                        - strlen(opt->policy_string) - 1);\n                strncat(opt->policy_string, optarg, sizeof(opt->policy_string)\n                        - strlen(opt->policy_string) - 1);\n            } else if (optarg != NULL && !EMPTY_STRING(optarg)) {\n                /* was there a previous '--run' without policies? 
*/\n                if ((*action_mask) & ACTION_MASK_RUN_POLICIES) {\n                    fprintf(stderr,\n                            \"ERROR: multiple inconsistent '--run' or '--check-thresholds' \"\n                            \"parameters on command line.\\n\");\n                    return EINVAL;\n                }\n\n                rh_strncpy(opt->policy_string, optarg,\n                           sizeof(opt->policy_string));\n            }\n            /* Set it at the end, to check if a previous --run\n             * or --check-threshold was specified */\n            *action_mask |= ACTION_MASK_RUN_POLICIES;\n            break;\n\n        case 't':\n            if (!EMPTY_STRING(opt->target_string)) {\n                fprintf(stderr,\n                        \"ERROR: multiple target definition on command line: '%s' and '%s'.\\n\",\n                        opt->target_string, optarg);\n                return EINVAL;\n            }\n            rh_strncpy(opt->target_string, optarg, sizeof(opt->target_string));\n            break;\n        case TGT_USAGE:\n            if (opt->usage_target != TGT_NOT_SET) {\n                fprintf(stderr,\n                        \"ERROR: multiple target-usage specified on command line.\\n\");\n                return EINVAL;\n            }\n            /* parse float argument */\n            if (parse_target_usage(optarg, &opt->usage_target))\n                return EINVAL;\n            break;\n\n        case 's':\n            fprintf(stderr,\n                    \"ERROR: --sync option is deprecated. 
Instead, use --run=<policy_name> --force-all\\n\");\n            return EINVAL;\n            break;\n\n        case 'O':\n            opt->flags |= RUNFLG_ONCE;\n            break;\n        case NO_LIMIT:\n            opt->flags |= RUNFLG_NO_LIMIT;\n            break;\n        case NO_GC:\n            opt->flags |= RUNFLG_NO_GC;\n            break;\n        case DRY_RUN:\n            opt->flags |= RUNFLG_DRY_RUN;\n            break;\n        case 'I':\n            opt->flags |= RUNFLG_IGNORE_POL;\n            break;\n        case 'F':\n            opt->flags |= RUNFLG_FORCE_RUN;\n            break;\n        case FORCE_ALL:\n            opt->flags |= RUNFLG_ONCE | RUNFLG_NO_LIMIT | RUNFLG_IGNORE_POL\n                | RUNFLG_FORCE_RUN;\n            break;\n\n        case 'd':\n            opt->detach = true;\n            break;\n        case ALTER_DB:\n            opt->db_flags |= LIF_ALTER_DB;\n            break;\n        case 'f':\n            rh_strncpy(opt->config_file, optarg, MAX_OPT_LEN);\n            break;\n        case 'T':\n            if (optarg) /* optional argument */\n                rh_strncpy(opt->template_file, optarg, MAX_OPT_LEN);\n            opt->write_template = true;\n            break;\n        case TEST_SYNTAX:\n            opt->test_syntax = true;\n            break;\n        case 'D':\n            opt->write_defaults = true;\n            break;\n        case 'L':\n            force_log_file(optarg);\n            break;\n        case 'l':\n        {\n            int log_level = str2debuglevel(optarg);\n\n            if (log_level == -1) {\n                fprintf(stderr,\n                        \"Unsupported log level '%s'. 
CRIT, MAJOR, EVENT, VERB, DEBUG or FULL expected.\\n\",\n                        optarg);\n                return EINVAL;\n            }\n            /* mark it forced, so it is not overridden later by config */\n            force_debug_level(log_level);\n            break;\n        }\n        case 'p':\n            opt->pid_file = true;\n            rh_strncpy(opt->pid_filepath, optarg, MAX_OPT_LEN);\n            break;\n        case 'h':\n            display_help(bin);\n            return -1;\n            break;\n        case 'V':\n            display_version(bin);\n            return -1;\n            break;\n\n            /* Deprecated options */\n        case 'P':\n        case 'R':\n        case 'M':\n            fprintf(stderr,\n                    \"ERROR: option -%c is deprecated. Instead, use: --run=<policyname>\\n\",\n                    c);\n            return EINVAL;\n            break;\n        case 'i':\n            fprintf(stderr,\n                    \"ERROR: option '-i' is deprecated: use '-I' instead.\\n\");\n            return EINVAL;\n            break;\n        case FORCE_OST_PURGE:\n            fprintf(stderr,\n                    \"ERROR: option --%s is deprecated.\\nInstead, use: --run=<policyname> --target=ost:<idx> --target-usage=<pct>\\n\",\n                    option_tab[option_index].name);\n            return EINVAL;\n        case FORCE_FS_PURGE:\n            fprintf(stderr,\n                    \"ERROR: option --%s is deprecated.\\nInstead, use: --run=<policyname> --target=all --target-usage=<pct>\\n\",\n                    option_tab[option_index].name);\n            return EINVAL;\n        case FORCE_CLASS_PURGE:\n            fprintf(stderr,\n                    \"ERROR: option --%s is deprecated.\\nInstead, use: --run=<policyname> --target=class:<name>\\n\",\n                    option_tab[option_index].name);\n            return EINVAL;\n        case FORCE_OST_MIGR:\n            fprintf(stderr,\n                    \"ERROR: option 
--%s is deprecated.\\nInstead, use: --run=<policyname> --target=ost:<name>\\n\",\n                    option_tab[option_index].name);\n            return EINVAL;\n        case FORCE_USER_MIGR:\n            fprintf(stderr,\n                    \"ERROR: option --%s is deprecated.\\nInstead, use: --run=<policyname> --target=user:<name>\\n\",\n                    option_tab[option_index].name);\n            return EINVAL;\n        case FORCE_GROUP_MIGR:\n            fprintf(stderr,\n                    \"ERROR: option --%s is deprecated.\\nInstead, use: --run=<policyname> --target=group:<name>\\n\",\n                    option_tab[option_index].name);\n            return EINVAL;\n        case FORCE_CLASS_MIGR:\n            fprintf(stderr,\n                    \"ERROR: option --%s is deprecated.\\nInstead, use: --run=<policyname> --target=class:<name>\\n\",\n                    option_tab[option_index].name);\n            return EINVAL;\n        case MIGR_ONE_FILE:\n            fprintf(stderr,\n                    \"ERROR: option --%s is deprecated.\\nInstead, use: --run=<policyname> --target=file:<path>\\n\",\n                    option_tab[option_index].name);\n            return EINVAL;\n\n        case ':':\n        case '?':\n        default:\n            fprintf(stderr, \"Run '%s --help' for more details.\\n\", bin);\n            return EINVAL;\n            break;\n        }\n    }\n\n    /* check there is no extra arguments */\n    if (optind != argc) {\n        fprintf(stderr, \"Error: unexpected argument on command line: %s\\n\",\n                argv[optind]);\n        return EINVAL;\n    }\n\n    if (!attr_mask_is_null(opt->diff_mask) && (*action_mask != ACTION_MASK_SCAN)\n        && (*action_mask != ACTION_MASK_HANDLE_EVENTS)) {\n        fprintf(stderr,\n                \"Error: --diff option only applies to --scan and --readlog actions\\n\");\n        return EINVAL;\n    }\n\n    return 0;\n}   /* rh_read_parameters */\n\n#ifdef _LUSTRE\n#define TGT_HELP 
\"Allowed values: 'all', 'user:<username>', \"\\\n                 \"'group:<groupname>', 'file:<path>', 'class:<fileclass>', \"\\\n                 \"'ost:<ost_idx>', 'pool:<poolname>', 'projid:<numeric_id>'.\"\n#else\n#define TGT_HELP \"Allowed values: 'all', 'user:<username>', \"\\\n                 \"'group:<groupname>', 'file:<path>', 'class:<fileclass>'.\"\n#endif\n\n/** convert a target string option to a policy_opt_t structure */\nstatic int policyopt_set_target(char *opt_string, policy_opt_t *opt)\n{\n    char *next;\n    char *c;\n\n    if (EMPTY_STRING(opt_string)) {\n        opt->target = TGT_NONE;\n        return 0;\n    }\n\n    if (!strcasecmp(opt_string, \"all\")) {\n        opt->target = TGT_FS;\n        return 0;\n    }\n\n    if ((c = strchr(opt_string, ':')) == NULL) {\n        fprintf(stderr, \"Invalid target '%s'. \" TGT_HELP \"\\n\", opt_string);\n        return EINVAL;\n    }\n    *c = '\\0';\n    next = c + 1;\n    c = opt_string;\n\n    if (!strcasecmp(c, \"all\")) {\n        fprintf(stderr, \"No ':' expected after 'all' target.\\n\");\n        return EINVAL;\n    } else if (!strcasecmp(c, \"user\")) {\n        opt->target = TGT_USER;\n        opt->optarg_u.name = next;\n    } else if (!strcasecmp(c, \"group\")) {\n        opt->target = TGT_GROUP;\n        opt->optarg_u.name = next;\n    } else if (!strcasecmp(c, \"file\")) {\n        opt->target = TGT_FILE;\n        opt->optarg_u.name = next;\n    } else if (!strcasecmp(c, \"class\")) {\n        opt->target = TGT_CLASS;\n        opt->optarg_u.name = next;\n    }\n#ifdef _LUSTRE\n    else if (!strcasecmp(c, \"ost\")) {\n        char extra_chr[MAX_OPT_LEN];\n        opt->target = TGT_OST;\n        if (sscanf(next, \"%i%s\", &opt->optarg_u.index, extra_chr) != 1\n            || opt->optarg_u.index < 0) {\n            fprintf(stderr, \"Invalid ost target specification: index expected. \"\n                    \"E.g. 
--target=ost:42\\n\");\n            return EINVAL;\n        }\n    }\n    else if (!strcasecmp(c, \"projid\")) {\n        char extra_chr[MAX_OPT_LEN];\n        opt->target = TGT_PROJID;\n        if (sscanf(next, \"%i%s\", &opt->optarg_u.index, extra_chr) != 1\n            || opt->optarg_u.index < 0) {\n            fprintf(stderr, \"Invalid projid: numerical id expected. \"\n                    \"E.g. --target=projid:34\\n\");\n            return EINVAL;\n        }\n    } else if (!strcasecmp(c, \"pool\")) {\n        opt->target = TGT_POOL;\n        opt->optarg_u.name = next;\n    }\n#endif\n    else {\n        fprintf(stderr, \"Invalid target type '%s'. \" TGT_HELP \"\\n\", c);\n        return EINVAL;\n    }\n    return 0;\n}\n\n/** policy and options for each policy run */\ntypedef struct run_item {\n    int policy_index;\n    policy_opt_t run_opt;\n} run_item_t;\n\n/** add a policy run to the list */\nstatic int add_policy_run(run_item_t **runs, unsigned int *count,\n                          const char *name, const policy_opt_t *opts)\n{\n    int index = -1;\n    run_item_t *prun;\n    int i;\n\n    /* opts should be policy specific or the default */\n    assert(opts != NULL);\n\n    if (!policy_exists(name, &index)) {\n        fprintf(stderr, \"ERROR: policy '%s' is not declared in config file.\\n\",\n                name);\n        return EINVAL;\n    }\n\n    /** check duplicates */\n    for (i = 0; i < *count; i++) {\n        if ((*runs)[i].policy_index == index) {\n            fprintf(stderr, \"ERROR: policy '%s' is invoked multiple times \"\n                    \"in '--run' arguments\\n\", name);\n            return EINVAL;\n        }\n    }\n\n    (*count)++;\n    *runs = realloc(*runs, *count * sizeof(run_item_t));\n    if (*runs == NULL)\n        return ENOMEM;\n\n    prun = &((*runs)[(*count) - 1]);\n    prun->policy_index = index;\n    prun->run_opt = *opts;\n\n    /* If any of the policy runs in a once shot run, assume all actions are\n     * 
one-shot. */\n    if (prun->run_opt.target != TGT_NONE)\n        options.flags |= RUNFLG_ONCE;\n\n    return 0;\n}\n\n/** parse a single argument for a policy run\n * @param[in,out] opts  Policy options resulting from policy run arguments.\n * @param[in,out] arg   String of the argument to be parsed.\n * @param[in]     name  Policy name (for error messages).\n * @param[in]     implicit  true if the argument name is implicit\n *                          (policy target).\n */\nstatic int parse_policy_single_arg(policy_opt_t *opts, char *arg,\n                                   const char *name, bool implicit)\n{\n    char *val = strchr(arg, '=');\n\n    if (!val) {\n        if (!implicit) {\n            fprintf(stderr, \"Invalid '--run' argument: missing parameter name \"\n                    \"at '%s' (policy '%s')\\n\", arg, name);\n            return EINVAL;\n        } else {\n            /* implicit arg name => arg is a policy target */\n            return policyopt_set_target(arg, opts);\n        }\n    }\n    *val = '\\0';\n    val++;\n\n    if (!strcmp(arg, \"target\")) {\n        return policyopt_set_target(val, opts);\n    } else if (!strcmp(arg, \"target-usage\")) {\n        return parse_target_usage(val, &opts->usage_pct);\n    } else if (!strcmp(arg, \"max-count\")) {\n        /* support for 'KMG...' 
suffixes */\n        uint64_t tmp = str2size(val);\n\n        if (tmp == (uint64_t)-1LL || tmp > UINT_MAX) {\n            fprintf(stderr, \"ERROR: invalid value '%s' for max-count: \"\n                    \"integer (32 bits) expected.\\n\", val);\n            return EINVAL;\n        }\n        opts->max_action_nbr = tmp;\n    } else if (!strcmp(arg, \"max-volume\") || !strcmp(arg, \"max-vol\")) {\n        /* parse val */\n        uint64_t tmp;\n\n        tmp = str2size(val);\n        if (tmp == (uint64_t)-1LL) {\n            fprintf(stderr, \"ERROR: invalid value '%s' for max-vol: \"\n                    \"<int>[KMGTPE] expected.\\n\", val);\n            return EINVAL;\n        }\n        opts->max_action_vol = tmp;\n    } else {\n        /* error */\n        fprintf(stderr, \"ERROR: unexpected parameter name '%s' in run \"\n                \"arguments for policy '%s'.\\n\\t'target', 'target-usage', \"\n                \"'max-count', or 'max-vol' expected.\\n\", arg, name);\n        return EINVAL;\n    }\n\n    return 0;\n}\n\n/** parse a list of arguments for a policy run */\nstatic int parse_policy_args(policy_opt_t *opts, char *args, const char *name)\n{\n    char *curr = NULL;\n    char *param;\n    int rc;\n\n    param = strtok_r(args, \",\", &curr);\n    if (!param)\n        return 0;\n\n    /* allow implicit parameter name for first argument (policy target) */\n    rc = parse_policy_single_arg(opts, param, name, true);\n    if (rc)\n        return rc;\n\n    while ((param = strtok_r(NULL, \",\", &curr))) {\n        /* don't allow implicit parameter name for next arguments */\n        rc = parse_policy_single_arg(opts, param, name, false);\n        if (rc)\n            return rc;\n    }\n\n    return 0;\n}\n\n/** Extract a list of arguments from a string\n * like '(xxxxxxxxxxxx'),....\n * @param[out] next is set to the next policy run string\n *             or points to final '\\0' if end of string is reached.\n * @return an malloc'ated string that must be 
freed by the caller.\n * @retval NULL on error.\n */\nstatic char *extract_arg_list(const char *str, char **next)\n{\n    char *arg_end;\n    char *args_tmp;\n    int arg_len;\n\n    /*  The first char is a '(' */\n    assert(str != NULL && str[0] == '(');\n\n    /*  Match the next ')' */\n    arg_end = strchr(str + 1, ')');\n    if (arg_end == NULL) {\n        fprintf(stderr, \"Error in policy run specification: unmatched \"\n                \"'(' in '%s'\\n\", str);\n        return NULL;\n    }\n    /* then ',' or '\\0' is expected. */\n    if (arg_end[1] != ',' && arg_end[1] != '\\0') {\n        fprintf(stderr, \"Error in policy run specification: ',' or \"\n                \"end of string expected after ')', but '%s' found\\n\",\n                arg_end + 1);\n        return NULL;\n    }\n\n    /* first args char is str + 1, last args char is arg_end - 1,\n     * length = last - first + 1\n     */\n    arg_len = (arg_end - 1) - (str + 1) + 1;\n    args_tmp = strndup(str + 1, arg_len);\n\n    if (arg_end[1] == ',')\n        /* skip ',' */\n        *next = arg_end + 2;\n    else\n        /* end of string reached */\n        *next = arg_end + 1;\n\n    return args_tmp;\n}\n\n/**\n * Read the next policy in '--run' argument.\n * @return Pointer to the next policy run string.\n * @retvall NULL on error.\n */\nstatic const char *read_next_policy_run(run_item_t **runs, unsigned int *count,\n                                        const char *param_str,\n                                        const policy_opt_t *default_opt)\n{\n    const char *curr;\n    char *name = NULL;\n    char *args = NULL;\n\n    assert(runs != NULL);\n    assert(count != NULL);\n    assert(param_str != NULL);\n    assert(default_opt != NULL);\n\n    /* stop at the first '(' or ',' */\n    for (curr = param_str;; curr++) {\n        /* reached end of current run */\n        if (*curr == ',' || *curr == '\\0') {\n            name = strndup(param_str, curr - param_str);\n            if (!name)\n  
              return NULL;\n\n            if (add_policy_run(runs, count, name, default_opt))\n                goto err;\n\n            free(name);\n\n            /* return pointer to end-of-string or to the next policy run */\n            return (*curr == '\\0') ? curr : curr + 1;\n        }\n        /* starting argument list */\n        if (*curr == '(') {\n            char *next = NULL;\n            policy_opt_t opts = *default_opt;\n\n            name = strndup(param_str, curr - param_str);\n            if (!name)\n                return NULL;\n\n            args = extract_arg_list(curr, &next);\n            if (!args)\n                goto err;\n\n            /* parse arguments between parenthesis */\n            if (parse_policy_args(&opts, args, name))\n                goto err;\n\n            if (add_policy_run(runs, count, name, &opts))\n                goto err;\n\n            free(name);\n\n            return next;\n        }\n    }\n err:\n    free(name);\n    free(args);\n    return NULL;\n}\n\n/**\n * Parse policy (--run) parameters.\n * --run argument can be a list of comma-separated list of policy runs.\n *  Each policy run can include a list of arguments between parenthesis,\n *  and separated by commas.\n *  Argument names can be explicit e.g. 'target=user:foo'\n *  or implicit 'user:foo'. Arguments may depend on the target type.\n *  Example:\n *      --run=lhsm_archive(user:foo,max-count=100k),lhsm_release(ost:1,target-usage=85%)\n *\n *  Arguments from command line (e.g. --target=file:/x/y/z) are used\n *  as default for policy runs that have no specified target.\n */\nstatic int parse_policy_runs(run_item_t **runs, unsigned int *count,\n                             const char *param_str,\n                             const policy_opt_t *default_opt)\n{\n    const char *curr;\n    int i;\n\n    /* if no policy is specified (or \"all\") run them all. 
*/\n    if (EMPTY_STRING(param_str) || !strcasecmp(param_str, \"all\")) {\n        /* return the list of all policies */\n        *count = policies.policy_count;\n        *runs = calloc(*count, sizeof(run_item_t));\n        if (!runs) {\n            fprintf(stderr, \"ERROR: cannot allocate memory\\n\");\n            return ENOMEM;\n        }\n        for (i = 0; i < *count; i++) {\n            (*runs)[i].policy_index = i;\n            (*runs)[i].run_opt = *default_opt;\n            (*runs)[i].run_opt.flags = options.flags;\n        }\n        return 0;\n    }\n\n    /* split the string as: 'policy(args),policy(args),...' */\n    for (curr = param_str; curr != NULL && *curr != '\\0';\n         curr = read_next_policy_run(runs, count, curr, default_opt))\n        /* noop */ ;\n\n    if (curr == NULL)   /* error */\n        return EINVAL;\n\n    /* Copyback general flag to policies */\n    for (i = 0; i < *count; i++)\n        (*runs)[i].run_opt.flags = options.flags;\n\n    return 0;\n}\n\n/**\n * Main daemon routine\n */\nint main(int argc, char **argv)\n{\n    int rc;\n    bool chgd = false;\n    char badcfg[RBH_PATH_MAX];\n    int action_mask = 0;\n    char err_msg[4096];\n\n    /* policy runs */\n    run_item_t *runs = NULL;\n    unsigned int run_count = 0;\n\n    policy_opt_t default_policy_opt = {.target = TGT_NONE };\n    const char *bin;\n\n    bin = rh_basename(argv[0]);\n\n    boot_time = time(NULL);\n\n    rc = rh_read_parameters(bin, argc, argv, &action_mask, &options);\n    if (rc)\n        exit((rc == -1 ? 0 : rc));  /* -1 is returned for normal exit */\n\n    /* Template or Defaults options specified ? 
*/\n    if (options.write_template) {\n        rc = do_write_template(options.template_file);\n        exit(rc);\n    }\n\n    if (options.write_defaults) {\n        rc = rbh_cfg_write_default(stdout);\n        if (rc) {\n            fprintf(stderr, \"Error %d retrieving default configuration: %s\\n\",\n                    rc, strerror(rc));\n        }\n        exit(rc);\n    }\n\n    /* initialize internal resources (glib, llapi, internal resources...) */\n    rc = rbh_init_internals();\n    if (rc != 0)\n        exit(rc);\n\n    /* get default config file, if not specified */\n    if (SearchConfig(options.config_file, options.config_file, &chgd,\n                     badcfg, MAX_OPT_LEN) != 0) {\n        fprintf(stderr, \"No config file (or too many) found matching %s\\n\",\n                badcfg);\n        exit(ENOENT);\n    } else if (chgd)\n        fprintf(stderr, \"Using config file '%s'.\\n\", options.config_file);\n\n    /* build config parsing mask */\n    if (options.test_syntax)\n        /* parse all configs */\n        parsing_mask = 0xFFFFFFFF;\n    else\n        parsing_mask = action2parsing_mask(action_mask);\n\n    /* load and set modules configuration */\n    if (rbh_cfg_load(parsing_mask, options.config_file, err_msg)) {\n        fprintf(stderr, \"Error reading configuration file '%s': %s\\n\",\n                options.config_file, err_msg);\n        exit(1);\n    }\n\n    if (options.test_syntax) {\n        printf(\"Configuration file '%s' has been read successfully\\n\",\n               options.config_file);\n        exit(0);\n    }\n\n    /* log to stderr if the command runs in a tty */\n    if (!log_config.force_log_file\n        && isatty(fileno(stderr)) && !options.detach) {\n        force_log_file(\"stderr\");\n    }\n\n    if (action_mask & ACTION_MASK_RUN_POLICIES) {\n        /* Parse 'target' option, if any.\n         * The resulting policy_opt is used as default for policy runs. 
*/\n        rc = policyopt_set_target(options.target_string, &default_policy_opt);\n        if (rc)\n            exit(rc);\n\n        /* add usage target */\n        default_policy_opt.usage_pct = options.usage_target;\n\n        /* Parse 'run' arguments. */\n        rc = parse_policy_runs(&runs, &run_count, options.policy_string,\n                               &default_policy_opt);\n        if (rc)\n            exit(rc);\n    } else if (!EMPTY_STRING(options.target_string)) {\n        fprintf(stderr, \"Warning: --target option has no effect \"\n                \"without --run or --check-thresholds.\\n\");\n    }\n#ifdef HAVE_CHANGELOGS\n    /* Only enable changelog processing for Lustre filesystems */\n    if ((action_mask & ACTION_MASK_HANDLE_EVENTS)\n        && (strcmp(global_config.fs_type, \"lustre\") != 0)) {\n        DisplayLog(LVL_MAJOR, MAIN_TAG,\n                   \"Disabling ChangeLogs for this non-lustre filesystem\");\n        action_mask &= ~ACTION_MASK_HANDLE_EVENTS;\n    }\n\n/* if the filesystem supports changelogs and a scan is requested\n * and the once option is not set, display a warning */\n    if ((action_mask & ACTION_MASK_SCAN) && !(options.flags & RUNFLG_ONCE)\n        && !(action_mask & ACTION_MASK_HANDLE_EVENTS)\n        && strcmp(global_config.fs_type, \"lustre\") == 0) {\n        fprintf(stderr,\n                \"ADVICE: this filesystem is changelog-capable, you should use changelogs instead of scanning.\\n\");\n    }\n#endif\n\n    /* Initialize logging */\n    rc = InitializeLogs(bin);\n    if (rc) {\n        fprintf(stderr, \"Error opening log files: rc=%d, errno=%d: %s\\n\",\n                rc, errno, strerror(errno));\n        exit(rc);\n    }\n\n    /* deamonize program if detach flag is set */\n    if (options.detach) {\n        rc = daemon(0, 0);\n\n        if (rc) {\n            DisplayLog(LVL_CRIT, MAIN_TAG,\n                       \"Error detaching process from parent: %s\",\n                       strerror(errno));\n    
        fprintf(stderr, \"Error detaching process from parent: %s\\n\",\n                    strerror(errno));\n            exit(1);\n        }\n    }\n\n    if (options.pid_file)\n        create_pid_file(options.pid_filepath);\n\n    /* Initialize filesystem access */\n    rc = InitFS();\n    if (rc)\n        exit(rc);\n\n    /* Initialize status managers */\n    rc = smi_init_all(options.flags);\n    if (rc)\n        exit(rc);\n\n    /* create signal handling thread */\n    rc = pthread_create(&sig_thr, NULL, signal_handler_thr, NULL);\n    if (rc) {\n        DisplayLog(LVL_CRIT, MAIN_TAG,\n                   \"Error starting signal handler thread: %s\", strerror(errno));\n        exit(1);\n    } else\n        DisplayLog(LVL_VERB, MAIN_TAG,\n                   \"Signal handler thread started successfully\");\n\n    /* Initialize list manager */\n    rc = ListMgr_Init(options.db_flags);\n    if (rc) {\n        DisplayLog(LVL_CRIT, MAIN_TAG,\n                   \"Error initializing list manager: %s (%d)\", lmgr_err2str(rc),\n                   rc);\n        exit(rc);\n    } else\n        DisplayLog(LVL_VERB, MAIN_TAG, \"ListManager successfully initialized\");\n\n    if (CheckLastFS() != 0)\n        exit(1);\n\n    if (options.flags & RUNFLG_ONCE) {\n        /* used for dumping stats in one shot mode */\n        pthread_create(&stat_thread, NULL, stats_thr, NULL);\n    }\n\n    if (action_mask & (ACTION_MASK_SCAN | ACTION_MASK_HANDLE_EVENTS)) {\n        if (!attr_mask_is_null(options.diff_mask))\n            /* convert status[0] to all status flags */\n            options.diff_mask = translate_all_status_mask(options.diff_mask);\n\n        /* Initialize Pipeline */\n#ifdef _BENCH_PIPELINE\n        int nb_stages = 3;\n        rc = EntryProcessor_Init(0, options.flags, &nb_stages);\n#else\n        rc = EntryProcessor_Init(STD_PIPELINE, options.flags,\n                                 &options.diff_mask);\n#endif\n        if (rc) {\n            DisplayLog(LVL_CRIT, 
MAIN_TAG,\n                       \"Error %d initializing EntryProcessor pipeline\", rc);\n            exit(rc);\n        } else\n            DisplayLog(LVL_VERB, MAIN_TAG,\n                       \"EntryProcessor successfully initialized\");\n    }\n\n    /* Note: in 'one-shot' mode, we must take care of performing action in\n     * the correct order:\n     * first scan, then process changelogs, then migrate, then purge, etc.\n     */\n\n    if (!terminate_sig && action_mask & ACTION_MASK_SCAN) {\n\n        /* Start FS scan */\n        if (options.partial_scan)\n            rc = FSScan_Start(options.flags, options.partial_scan_path);\n        else\n            rc = FSScan_Start(options.flags, NULL);\n\n        if (rc) {\n            DisplayLog(LVL_CRIT, MAIN_TAG,\n                       \"Error %d initializing FS Scan module\", rc);\n            exit(rc);\n        } else\n            DisplayLog(LVL_VERB, MAIN_TAG,\n                       \"FS Scan module successfully initialized\");\n\n        /* Flush logs now, to have a trace in the logs */\n        FlushLogs();\n\n        if (options.flags & RUNFLG_ONCE)\n            running_mask = MODULE_MASK_FS_SCAN | MODULE_MASK_ENTRY_PROCESSOR;\n        else\n            running_mask |= MODULE_MASK_FS_SCAN | MODULE_MASK_ENTRY_PROCESSOR;\n\n        if (options.flags & RUNFLG_ONCE) {\n            FSScan_Wait();\n            DisplayLog(LVL_MAJOR, MAIN_TAG, \"FS Scan finished\");\n            /* Did it finish because of a termination signal?\n             * If so, don't continue unless we get the shutdown mutex */\n            if (terminate_sig)\n                pthread_mutex_lock(&shutdown_mtx);\n        }\n    }\n#ifdef HAVE_CHANGELOGS\n    if (!terminate_sig && action_mask & ACTION_MASK_HANDLE_EVENTS) {\n\n        /* Start reading changelogs */\n        rc = cl_reader_start(options.flags, options.mdtidx);\n        if (rc) {\n            DisplayLog(LVL_CRIT, MAIN_TAG,\n                       \"Error %d initializing ChangeLog 
Reader\", rc);\n            exit(rc);\n        } else\n            DisplayLog(LVL_VERB, MAIN_TAG,\n                       \"ChangeLog Reader successfully initialized\");\n\n        /* Flush logs now, to have a trace in the logs */\n        FlushLogs();\n\n        if (options.flags & RUNFLG_ONCE)\n            running_mask = MODULE_MASK_EVENT_HDLR | MODULE_MASK_ENTRY_PROCESSOR;\n        else\n            running_mask |=\n                MODULE_MASK_EVENT_HDLR | MODULE_MASK_ENTRY_PROCESSOR;\n\n        if (options.flags & RUNFLG_ONCE) {\n            cl_reader_wait();\n            DisplayLog(LVL_MAJOR, MAIN_TAG, \"Event Processing finished\");\n            /* Did it finish because of a termination signal?\n             * If so, don't continue unless we get the shutdown mutex */\n            if (terminate_sig)\n                pthread_mutex_lock(&shutdown_mtx);\n        }\n    }\n#endif\n\n    if ((options.flags & RUNFLG_ONCE)\n        && (action_mask & (ACTION_MASK_SCAN | ACTION_MASK_HANDLE_EVENTS))) {\n        /* Pipeline must be flushed */\n        EntryProcessor_Terminate(true);\n\n#ifdef HAVE_CHANGELOGS\n        if (action_mask & ACTION_MASK_HANDLE_EVENTS) {\n            /* Ack last changelog records. 
*/\n            cl_reader_done();\n        }\n#endif\n        running_mask = 0;\n    }\n\n    if (!terminate_sig && action_mask & ACTION_MASK_RUN_POLICIES) {\n        int i;\n        /* allocate policy_run structure */\n        policy_run = calloc(run_count, sizeof(policy_info_t));\n        if (!policy_run) {\n            DisplayLog(LVL_CRIT, MAIN_TAG, \"Cannot allocate memory\");\n            exit(1);\n        }\n        policy_run_cpt = run_count;\n\n        for (i = 0; i < run_count; i++) {\n            unsigned int pol_idx = runs[i].policy_index;\n\n            rc = policy_module_start(&policy_run[i],\n                                     &policies.policy_list[pol_idx],\n                                     &run_cfgs.configs[pol_idx],\n                                     &runs[i].run_opt);\n            if (rc == ENOENT) {\n                DisplayLog(LVL_CRIT, MAIN_TAG, \"Policy %s is disabled.\",\n                           policies.policy_list[pol_idx].name);\n                continue;\n            } else if (rc) {\n                fprintf(stderr, \"Error %d initializing Migration module\\n\", rc);\n                exit(rc);\n            } else {\n                DisplayLog(LVL_VERB, MAIN_TAG,\n                           \"Policy %s successfully initialized\",\n                           policies.policy_list[pol_idx].name);\n                /* Flush logs now, to have a trace in the logs */\n                FlushLogs();\n            }\n\n            /* For 'one-shot' mode, run policy after policy */\n            if (options.flags & RUNFLG_ONCE) {\n                running_mask = MODULE_MASK_POLICY_RUN;\n                policy_run_mask = (1LL << i);\n                rc = policy_module_wait(&policy_run[i]);\n                policy_run_mask = 0;\n                running_mask = 0;\n                DisplayLog(LVL_MAJOR, MAIN_TAG,\n                           \"%s: policy run terminated (rc = %d).\",\n                           policies.policy_list[pol_idx].name, 
rc);\n            } else\n                policy_run_mask |= (1LL << i);\n        }\n        if (!(options.flags & RUNFLG_ONCE) && (policy_run_mask != 0))\n            running_mask |= MODULE_MASK_POLICY_RUN;\n    }\n\n    if (!(options.flags & RUNFLG_ONCE)) {\n        char tmpstr[1024];\n\n        if (!running_mask) {\n            DisplayLog(LVL_MAJOR, MAIN_TAG, \"Nothing started.\");\n            exit(1);\n        }\n\n        running_mask2str(running_mask, policy_run_mask, tmpstr);\n        DisplayLog(LVL_MAJOR, MAIN_TAG, \"Daemon started (running modules: %s)\",\n                   tmpstr);\n        FlushLogs();\n\n        /* dump stats periodically */\n        stats_thr(&running_mask);\n\n        /* should never return */\n        exit(1);\n    } else {\n        DisplayLog(LVL_MAJOR, MAIN_TAG, \"All tasks done! Exiting.\");\n        exit(0);\n    }\n\n    return 0;   /* for compiler */\n\n}\n"
  },
  {
    "path": "src/robinhood/rbh_diff.c",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2009, 2010 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n#ifdef HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n\n#include \"policy_run.h\"\n#include \"list_mgr.h\"\n#include \"rbh_cfg.h\"\n#include \"rbh_logs.h\"\n#include \"rbh_misc.h\"\n#include \"cmd_helpers.h\"\n#include \"rbh_basename.h\"\n\n/* needed to dump their stats */\n#include \"fs_scan_main.h\"\n#include \"chglog_reader.h\"\n#include \"entry_processor.h\"\n\n#include <unistd.h>\n#include <getopt.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <pthread.h>\n#include <fcntl.h>\n#include <signal.h>\n\n#ifdef _LUSTRE\n#include \"lustre_extended_types.h\"\n#endif\n\n#define DIFF_TAG    \"diff\"\n\n#ifdef _HAVE_FID\n#ifndef _MDT_SPECIFIC_LOVEA\n#define LUSTRE_DUMP_FILES 1\n#define LOVEA_FNAME \"lovea\"\n#define FIDREMAP_FNAME \"fid_remap\"\n#endif\n#endif\n\nstatic time_t start_time;\n\n/* Array of options for getopt_long().\n * Each record consists of: {const char *name, int has_arg, int *flag, int val}\n */\n\nstatic struct option option_tab[] = {\n\n    /* diff options */\n    /* for partial scan */\n    {\"scan\", required_argument, NULL, 's'},\n    /* to apply on DB or FS */\n    {\"apply\", optional_argument, NULL, 'a'},\n    /* list of diff attrs (default is all) */\n    {\"diff\", required_argument, NULL, 'd'},\n    /* dry-run */\n    {\"dry-run\", no_argument, NULL, 'D'},\n#ifdef _HSM_LITE /** FIXME check policies */\n    /* recover lost files from backend */\n    {\"from-backend\", no_argument, NULL, 'b'},\n#endif\n#ifdef LUSTRE_DUMP_FILES\n    /* output directory to write information for 
MDT/OST rebuild */\n    {\"output-dir\", required_argument, NULL, 'o'},\n#endif\n\n    /* config file options */\n    {\"config-file\", required_argument, NULL, 'f'},\n\n    /* log options */\n    {\"log-level\", required_argument, NULL, 'l'},\n\n    /* miscellaneous options */\n    {\"help\", no_argument, NULL, 'h'},\n    {\"version\", no_argument, NULL, 'V'},\n\n    {NULL, 0, NULL, 0}\n};\n\n#define SHORT_OPT_STRING    \"s:a:d:f:l:hVDbo:\"\n\n#define MAX_OPT_LEN 1024\n#define MAX_TYPE_LEN 256\n\ntypedef struct diff_options {\n    run_flags_t    flags;\n    char           config_file[MAX_OPT_LEN];\n    char           partial_scan_path[RBH_PATH_MAX];\n    diff_arg_t     diff_arg;\n    char           output_dir[MAX_OPT_LEN];\n\n    /* bit field */\n    unsigned int   partial_scan:1;\n} diff_options;\n\nstatic inline void zero_options(struct diff_options *opts)\n{\n    /* default value is 0 for most options */\n    memset(opts, 0, sizeof(struct diff_options));\n    opts->flags = RUNFLG_ONCE;\n    strcpy(opts->output_dir, \".\");\n}\n\n/* program options from command line  */\nstatic struct diff_options options;\n\n/* special character sequences for displaying help */\n\n/* Bold start character sequence */\n#define _B \"\u001b[1m\"\n/* Bold end character sequence */\n#define B_ \"\u001b[m\"\n\n/* Underline start character sequence */\n#define _U \"\u001b[4m\"\n/* Underline end character sequence */\n#define U_ \"\u001b[0m\"\n\nstatic const char *help_string =\n    _B \"Usage:\" B_ \" %s [options]\\n\"\n    \"\\n\"\n    _B \"Options:\" B_ \"\\n\"\n    \"    \" _B \"-s\" B_ \" \" _U \"dir\" U_ \", \" _B \"--scan\" B_ \"=\" _U \"dir\" U_ \"\\n\"\n    \"        Only scan the specified subdir.\\n\"\n    \"    \" _B \"-d\" B_ \" \" _U \"attrset\" U_ \", \" _B \"--diff\" B_ \"=\" _U \"attrset\"\n    U_ \" :\\n\" \"        Display changes for the given set of attributes.\\n\"\n    \"        \" _U \"attrset\" U_\n    \" is a list of options in: 
path,posix,stripe,all,status,notimes,noatime.\\n\"\n    \"    \" _B \"-a\" B_ \" {fs|db}, \" _B \"--apply\" B_ \"[={fs|db}]\\n\" \"        \" _B\n    \"db\" B_\n    \" (default): apply changes to the database using the filesystem as the reference.\\n\"\n    \"        \" _B \"fs\" B_\n    \": revert changes in the filesystem using the database as the reference.\\n\"\n    \"    \" _B \"--dry-run\" B_ \"\\n\"\n    \"        If --apply=fs, display operations on filesystem without performing them.\\n\"\n#ifdef _HSM_LITE\n    \"    \" _B \"-b\" B_ \", \" _B \"--from-backend\" B_ \"\\n\"\n    \"        When applying changes to the filesystem (--apply=fs), recover objects from the backend storage\\n\"\n    \"        (otherwise, recover orphaned objects on OSTs).\\n\"\n#endif\n#ifdef LUSTRE_DUMP_FILES\n    \"    \" _B \"-o\" B_ \" \" _U \"dir\" U_ \", --output-dir\" B_ \"=\" _U \"dir\" U_ \"\\n\"\n    \"        For MDS disaster recovery, write needed information to files in \"\n    _U \"dir\" U_ \".\\n\"\n#endif\n    \"\\n\"\n    _B \"Config file options:\" B_ \"\\n\"\n    \"    \" _B \"-f\" B_ \" \" _U \"file\" U_ \", \" _B \"--config-file=\" B_ _U\n    \"configfile\" U_ \"\\n\" \"        Path to configuration file (or short name).\\n\"\n    \"\\n\" _B \"Miscellaneous options:\" B_ \"\\n\" \"    \" _B \"-l\" B_ \" \" _U \"level\" U_\n    \", \" _B \"--log-level=\" B_ _U \"loglevel\" U_ \"\\n\"\n    \"        Force the log verbosity level (overrides configuration value).\\n\"\n    \"        Allowed values: CRIT, MAJOR, EVENT, VERB, DEBUG, FULL.\\n\" _B \"    \"\n    _B \"-h\" B_ \", \" _B \"--help\" B_ \"\\n\"\n    \"        Display a short help about command line options.\\n\" \"    \" _B \"-V\"\n    B_ \", \" _B \"--version\" B_ \"\\n\" \"        Display version info\\n\";\n\nstatic inline void display_help(const char *bin_name)\n{\n    printf(help_string, bin_name);\n}\n\nstatic inline void display_version(const char *bin_name)\n{\n    printf(\"\\n\");\n    
printf(\"Product:         \" PACKAGE_NAME \"\\n\");\n    printf(\"Version:         \" PACKAGE_VERSION \"-\" RELEASE \"\\n\");\n    printf(\"Build:           \" COMPIL_DATE \"\\n\");\n    printf(\"\\n\");\n    printf(\"Compilation switches:\\n\");\n\n/* Access by Fid ? */\n#ifdef _HAVE_FID\n    printf(\"    Address entries by FID\\n\");\n#else\n    printf(\"    Address entries by path\\n\");\n#endif\n#ifdef HAVE_CHANGELOGS\n    printf(\"    MDT Changelogs supported\\n\");\n#else\n    printf(\"    MDT Changelogs disabled\\n\");\n#endif\n\n    printf(\"\\n\");\n#ifdef _LUSTRE\n#ifdef LUSTRE_VERSION\n    printf(\"Lustre Version: \" LUSTRE_VERSION \"\\n\");\n#else\n    printf(\"Lustre FS support\\n\");\n#endif\n#else\n    printf(\"No Lustre support\\n\");\n#endif\n\n#ifdef _MYSQL\n    printf(\"Database binding: MySQL\\n\");\n#elif defined(_SQLITE)\n    printf(\"Database binding: SQLite\\n\");\n#else\n#error \"No database was specified\"\n#endif\n    printf(\"\\n\");\n    printf(\"Report bugs to: <\" PACKAGE_BUGREPORT \">\\n\");\n    printf(\"\\n\");\n}\n\nstatic pthread_t stat_thread;\n\n/* database connexion for updating stats */\nstatic lmgr_t lmgr;\nstatic bool lmgr_init = false;\nstatic char start_time_str[256];\n\nstatic inline int ensure_db_access(void)\n{\n    if (!lmgr_init) {\n        if (ListMgr_InitAccess(&lmgr) != DB_SUCCESS)\n            return 0;\n        lmgr_init = true;\n    }\n    return 1;\n}\n\nstatic void dump_stats(lmgr_t *lmgr)\n{\n    char tmp_buff[256];\n    time_t now;\n    struct tm date;\n\n    now = time(NULL);\n    strftime(tmp_buff, 256, \"%Y/%m/%d %T\", localtime_r(&now, &date));\n\n    DisplayLog(LVL_MAJOR, \"STATS\",\n               \"==================== Dumping stats at %s =====================\",\n               tmp_buff);\n    DisplayLog(LVL_MAJOR, \"STATS\", \"Diff start time: %s\", start_time_str);\n\n    FSScan_DumpStats();\n    EntryProcessor_DumpCurrentStages();\n\n    /* Flush stats */\n    FlushLogs();\n}\n\nstatic void 
*stats_thr(void *arg)\n{\n    struct tm date;\n\n    strftime(start_time_str, 256, \"%Y/%m/%d %T\",\n             localtime_r(&start_time, &date));\n\n    if (!ensure_db_access())\n        return NULL;\n\n    DisplayLog(LVL_VERB, DIFF_TAG, \"Statistics thread started\");\n\n    while (1) {\n        WaitStatsInterval();\n        dump_stats(&lmgr);\n    }\n}\n\nstatic int terminate_sig = 0;\nstatic bool dump_sig = false;\nstatic pthread_t sig_thr;\n\n#define SIGHDL_TAG  \"SigHdlr\"\n\nstatic void terminate_handler(int sig)\n{\n    terminate_sig = sig;\n}\n\nstatic void usr_handler(int sig)\n{\n    dump_sig = true;\n}\n\nstatic void *signal_handler_thr(void *arg)\n{\n    struct sigaction act_sigterm;\n    struct sigaction act_sigusr;\n\n    /* create signal handlers */\n    memset(&act_sigterm, 0, sizeof(act_sigterm));\n    act_sigterm.sa_flags = 0;\n    act_sigterm.sa_handler = terminate_handler;\n    if (sigaction(SIGTERM, &act_sigterm, NULL) == -1\n        || sigaction(SIGINT, &act_sigterm, NULL) == -1) {\n        DisplayLog(LVL_CRIT, SIGHDL_TAG,\n                   \"Error while setting signal handlers for SIGTERM and SIGINT: %s\",\n                   strerror(errno));\n        if (options.diff_arg.db_tag != NULL && ensure_db_access()) {\n            fprintf(stderr, \"Cleaning diff table...\\n\");\n            ListMgr_DestroyTag(&lmgr, options.diff_arg.db_tag);\n        }\n        exit(1);\n    } else\n        DisplayLog(LVL_VERB, SIGHDL_TAG,\n                   \"Signals SIGTERM and SIGINT (daemon shutdown) are ready to be used\");\n\n    memset(&act_sigusr, 0, sizeof(act_sigusr));\n    act_sigusr.sa_flags = 0;\n    act_sigusr.sa_handler = usr_handler;\n    if (sigaction(SIGUSR1, &act_sigusr, NULL) == -1) {\n        DisplayLog(LVL_CRIT, SIGHDL_TAG,\n                   \"Error while setting signal handlers for SIGUSR1: %s\",\n                   strerror(errno));\n        if (options.diff_arg.db_tag != NULL && ensure_db_access()) {\n            fprintf(stderr, 
\"Cleaning diff table...\\n\");\n            ListMgr_DestroyTag(&lmgr, options.diff_arg.db_tag);\n\n            /* make sure written data is flushed */\n            if (options.diff_arg.lovea_file)\n                fflush(options.diff_arg.lovea_file);\n            if (options.diff_arg.fid_remap_file)\n                fflush(options.diff_arg.fid_remap_file);\n        }\n        exit(1);\n    } else\n        DisplayLog(LVL_VERB, SIGHDL_TAG,\n                   \"Signal SIGUSR1 (stats dump) is ready to be used\");\n\n    /* signal flag checking loop */\n\n    while (1) {\n        /* check for signal every second */\n        rh_sleep(1);\n\n        if (terminate_sig != 0) {\n            if (terminate_sig == SIGTERM)\n                DisplayLog(LVL_MAJOR, SIGHDL_TAG,\n                           \"SIGTERM received: performing clean daemon shutdown\");\n            else if (terminate_sig == SIGINT)\n                DisplayLog(LVL_MAJOR, SIGHDL_TAG,\n                           \"SIGINT received: performing clean daemon shutdown\");\n            FlushLogs();\n\n            /* stop FS scan (blocking) */\n            FSScan_Terminate();\n            FlushLogs();\n\n            /* drop pipeline waiting operations and terminate threads */\n            EntryProcessor_Terminate(false);\n            FlushLogs();\n\n            DisplayLog(LVL_MAJOR, SIGHDL_TAG, \"Exiting.\");\n            FlushLogs();\n\n            if (options.diff_arg.db_tag != NULL && ensure_db_access()) {\n                fprintf(stderr, \"Cleaning diff table...\\n\");\n                ListMgr_DestroyTag(&lmgr, options.diff_arg.db_tag);\n\n                /* make sure written data is flushed */\n                if (options.diff_arg.lovea_file)\n                    fflush(options.diff_arg.lovea_file);\n                if (options.diff_arg.fid_remap_file)\n                    fflush(options.diff_arg.fid_remap_file);\n            }\n\n            /* indicate the process terminated due to a signal */\n            
exit(128 + terminate_sig);\n        } else if (dump_sig) {\n            DisplayLog(LVL_MAJOR, SIGHDL_TAG,\n                       \"SIGUSR1 received: dumping stats\");\n\n            if (!ensure_db_access())\n                return NULL;\n            dump_stats(&lmgr);\n            dump_sig = false;\n        }\n    }\n}\n\n/**\n * Main daemon routine\n */\nint main(int argc, char **argv)\n{\n    int c, i, option_index = 0;\n    const char *bin;\n    int rc;\n    char err_msg[4096];\n    bool chgd = false;\n    char badcfg[RBH_PATH_MAX];\n    char tag_name[256] = \"\";\n\n    bin = rh_basename(argv[0]);\n\n    start_time = time(NULL);\n\n    zero_options(&options);\n\n    /* parse command line options */\n    while ((c =\n            getopt_long(argc, argv, SHORT_OPT_STRING, option_tab,\n                        &option_index)) != -1) {\n        switch (c) {\n        case 's':\n            options.partial_scan = 1;\n            rh_strncpy(options.partial_scan_path, optarg, RBH_PATH_MAX);\n            /* clean final slash */\n            if (FINAL_SLASH(options.partial_scan_path))\n                REMOVE_FINAL_SLASH(options.partial_scan_path);\n            break;\n\n        case 'd':\n            if (parse_diff_mask(optarg, &options.diff_arg.diff_mask, err_msg)) {\n                fprintf(stderr, \"Invalid argument for --diff: %s\\n\", err_msg);\n                exit(1);\n            }\n            break;\n\n        case 'a':\n            if (optarg) {\n                if (!strcasecmp(optarg, \"fs\"))\n                    options.diff_arg.apply = APPLY_FS;\n                else if (!strcasecmp(optarg, \"db\"))\n                    options.diff_arg.apply = APPLY_DB;\n                else {\n                    fprintf(stderr,\n                            \"Invalid argument for --apply: '%s' (fs or db expected)\\n\",\n                            optarg);\n                    exit(1);\n                }\n            } else\n                options.diff_arg.apply = 
APPLY_DB;\n            break;\n\n        case 'D':\n            options.flags |= RUNFLG_DRY_RUN;\n            break;\n\n        case 'f':\n            rh_strncpy(options.config_file, optarg, MAX_OPT_LEN);\n            break;\n#ifdef _HSM_LITE\n        case 'b':\n            options.diff_arg.recov_from_backend = 1;\n            break;\n#endif\n#ifdef _HAVE_FID    /* only for lustre 2.x */\n        case 'o':\n            rh_strncpy(options.output_dir, optarg, MAX_OPT_LEN);\n            break;\n#endif\n        case 'l':\n        {\n            int log_level = str2debuglevel(optarg);\n\n            if (log_level == -1) {\n                fprintf(stderr,\n                        \"Unsupported log level '%s'. CRIT, MAJOR, EVENT, VERB, DEBUG or FULL expected.\\n\",\n                        optarg);\n                exit(1);\n            }\n            force_debug_level(log_level);\n            break;\n        }\n        case 'h':\n            display_help(bin);\n            exit(0);\n            break;\n        case 'V':\n            display_version(bin);\n            exit(0);\n            break;\n        case ':':\n        case '?':\n        default:\n            fprintf(stderr, \"Run '%s --help' for more details.\\n\", bin);\n            exit(1);\n            break;\n        }\n    }\n\n    /* check there is no extra arguments */\n    if (optind != argc) {\n        fprintf(stderr, \"Error: unexpected argument on command line: %s\\n\",\n                argv[optind]);\n        exit(1);\n    }\n\n    /* initialize internal resources (glib, llapi, internal resources...) 
*/\n    rc = rbh_init_internals();\n    if (rc != 0)\n        exit(rc);\n\n    /* get default config file, if not specified */\n    if (SearchConfig(options.config_file, options.config_file, &chgd,\n                     badcfg, MAX_OPT_LEN) != 0) {\n        fprintf(stderr, \"No config file (or too many) found matching %s\\n\",\n                badcfg);\n        exit(2);\n    } else if (chgd) {\n        fprintf(stderr, \"Using config file '%s'.\\n\", options.config_file);\n    }\n\n    if (rbh_cfg_load(MODULE_MASK_FS_SCAN | MODULE_MASK_ENTRY_PROCESSOR,\n                     options.config_file, err_msg)) {\n        fprintf(stderr, \"Error reading configuration file '%s': %s\\n\",\n                options.config_file, err_msg);\n        exit(1);\n    }\n\n    if (!log_config.force_debug_level)\n        log_config.debug_level = LVL_CRIT;  /* least messages as possible */\n\n    /* Set logging to stderr */\n    strcpy(log_config.log_file, \"stderr\");\n    strcpy(log_config.report_file, \"stderr\");\n    strcpy(log_config.alert_file, \"stderr\");\n\n    /* Initialize logging */\n    rc = InitializeLogs(bin);\n    if (rc) {\n        fprintf(stderr, \"Error opening log files: rc=%d, errno=%d: %s\\n\",\n                rc, errno, strerror(errno));\n        exit(rc);\n    }\n\n    /* Initialize filesystem access */\n    rc = InitFS();\n    if (rc)\n        exit(rc);\n\n    /* Initialize status managers */\n    rc = smi_init_all(options.flags);\n    if (rc)\n        exit(rc);\n\n    /* Initialize list manager */\n    rc = ListMgr_Init(0);\n    if (rc) {\n        DisplayLog(LVL_CRIT, DIFF_TAG,\n                   \"Error initializing list manager: %s (%d)\", lmgr_err2str(rc),\n                   rc);\n        exit(rc);\n    } else\n        DisplayLog(LVL_VERB, DIFF_TAG, \"ListManager successfully initialized\");\n\n    if (CheckLastFS() != 0)\n        exit(1);\n\n    if (attr_mask_is_null(options.diff_arg.diff_mask)) {\n        /* parse \"all\" */\n        char tmpstr[] = 
\"all\";\n        rc = parse_diff_mask(tmpstr, &options.diff_arg.diff_mask, err_msg);\n        if (rc) {\n            DisplayLog(LVL_CRIT, DIFF_TAG,\n                       \"unexpected error parsing diff mask: %s\", err_msg);\n            exit(1);\n        }\n    }\n    options.diff_arg.diff_mask =\n        translate_all_status_mask(options.diff_arg.diff_mask);\n\n#ifdef LUSTRE_DUMP_FILES\n    if (options.diff_arg.apply == APPLY_FS\n        && !(options.flags & RUNFLG_DRY_RUN)) {\n        /* open the file to write LOV EA and FID remapping */\n        if (!EMPTY_STRING(options.output_dir)) {\n            char fname[RBH_PATH_MAX];\n            if (mkdir(options.output_dir, 0700) && (errno != EEXIST)) {\n                DisplayLog(LVL_CRIT, DIFF_TAG,\n                           \"Failed to create directory %s: %s\",\n                           options.output_dir, strerror(errno));\n                exit(1);\n            }\n            snprintf(fname, RBH_PATH_MAX - 1, \"%s/\" LOVEA_FNAME,\n                     options.output_dir);\n            options.diff_arg.lovea_file = fopen(fname, \"w\");\n            if (options.diff_arg.lovea_file == NULL) {\n                DisplayLog(LVL_CRIT, DIFF_TAG,\n                           \"Failed to open %s for writing: %s\", fname,\n                           strerror(errno));\n                exit(1);\n            }\n            snprintf(fname, RBH_PATH_MAX - 1, \"%s/\" FIDREMAP_FNAME,\n                     options.output_dir);\n            options.diff_arg.fid_remap_file = fopen(fname, \"w\");\n            if (options.diff_arg.fid_remap_file == NULL) {\n                DisplayLog(LVL_CRIT, DIFF_TAG,\n                           \"Failed to open %s for writing: %s\", fname,\n                           strerror(errno));\n                exit(1);\n            }\n        }\n    }\n#endif\n\n    /* if no DB apply action is specified, can't use md_update field for\n     * checking removed entries. So, create a special tag for that. 
*/\n    if ((options.diff_arg.apply != APPLY_DB)\n        || (options.flags & RUNFLG_DRY_RUN)) {\n        fprintf(stderr, \"Preparing diff table...\\n\");\n\n        /* create a connexion to the DB. this is safe to use the global lmgr var\n         * as statistics thread is not running */\n        if (!ensure_db_access())\n            exit(1);\n        /* create a tag to clear entries after the scan */\n\n        /* There could be several diff running in parallel,\n         * so set a suffix to avoid conflicts */\n        sprintf(tag_name, \"DIFF_%u\", (unsigned int)getpid());\n        options.diff_arg.db_tag = tag_name;\n\n        /* add filter for partial scan */\n        if (options.partial_scan) {\n            lmgr_filter_t filter;\n            filter_value_t val;\n            lmgr_simple_filter_init(&filter);\n\n            char tmp[RBH_PATH_MAX];\n            strcpy(tmp, options.partial_scan_path);\n            strcat(tmp, \"/*\");\n            val.value.val_str = tmp;\n            lmgr_simple_filter_add(&filter, ATTR_INDEX_fullpath, LIKE, val, 0);\n\n            rc = ListMgr_CreateTag(&lmgr, tag_name, &filter, false);\n            lmgr_simple_filter_free(&filter);\n        } else\n            rc = ListMgr_CreateTag(&lmgr, tag_name, NULL, false);\n\n        if (rc)\n            exit(rc);\n    }\n\n    /* Initialise Pipeline */\n    rc = EntryProcessor_Init(DIFF_PIPELINE, options.flags, &options.diff_arg);\n    if (rc) {\n        DisplayLog(LVL_CRIT, DIFF_TAG,\n                   \"Error %d initializing EntryProcessor pipeline\", rc);\n        goto clean_tag;\n    } else\n        DisplayLog(LVL_VERB, DIFF_TAG,\n                   \"EntryProcessor successfully initialized\");\n\n    fprintf(stderr, \"Starting scan\\n\");\n\n    /* print header to indicate the content of diff\n     * #<diff cmd>\n     * ---fs[=/subdir]\n     * +++db\n     */\n    for (i = 0; i < argc; i++)\n        printf(\"%s%s\", i == 0 ? 
\"# \" : \" \", argv[i]);\n    printf(\"\\n\");\n    if (options.diff_arg.apply == APPLY_FS) {\n        if (options.partial_scan)\n            printf(\"---fs=%s\\n\", options.partial_scan_path);\n        else\n            printf(\"---fs\\n\");\n        printf(\"+++db\\n\");\n    } else {\n        printf(\"---db\\n\");\n        if (options.partial_scan)\n            printf(\"+++fs=%s\\n\", options.partial_scan_path);\n        else\n            printf(\"+++fs\\n\");\n    }\n\n    /* Start FS scan */\n    if (options.partial_scan)\n        rc = FSScan_Start(options.flags, options.partial_scan_path);\n    else\n        rc = FSScan_Start(options.flags, NULL);\n\n    if (rc) {\n        DisplayLog(LVL_CRIT, DIFF_TAG, \"Error %d initializing FS Scan module\",\n                   rc);\n        goto clean_tag;\n    } else\n        DisplayLog(LVL_VERB, DIFF_TAG,\n                   \"FS Scan module successfully initialized\");\n\n    /* Flush logs now, to have a trace in the logs */\n    FlushLogs();\n\n    /* both pipeline and scan are now running, can now trap events and\n     * display stats */\n\n    /* create signal handling thread */\n    rc = pthread_create(&sig_thr, NULL, signal_handler_thr, NULL);\n    if (rc) {\n        DisplayLog(LVL_CRIT, DIFF_TAG,\n                   \"Error starting signal handler thread: %s\", strerror(errno));\n        goto clean_tag;\n    } else\n        DisplayLog(LVL_VERB, DIFF_TAG,\n                   \"Signal handler thread started successfully\");\n\n    pthread_create(&stat_thread, NULL, stats_thr, NULL);\n\n    /* wait for FS scan to end */\n    FSScan_Wait();\n    DisplayLog(LVL_MAJOR, DIFF_TAG, \"FS Scan finished\");\n\n    /* Pipeline must be flushed */\n    EntryProcessor_Terminate(true);\n\n#ifdef LUSTRE_DUMP_FILES\n    /* flush the lovea file */\n    if (options.diff_arg.lovea_file) {\n        fprintf(stderr, \" > LOV EA information written to %s/\" LOVEA_FNAME \"\\n\",\n                options.output_dir);\n        
fclose(options.diff_arg.lovea_file);\n    }\n    if (options.diff_arg.fid_remap_file) {\n        fprintf(stderr, \" > FID remapping written to %s/\" FIDREMAP_FNAME \"\\n\",\n                options.output_dir);\n        fclose(options.diff_arg.fid_remap_file);\n    }\n#endif\n\n    fprintf(stderr, \"End of scan\\n\");\n\n    DisplayLog(LVL_MAJOR, DIFF_TAG, \"All tasks done! Exiting.\");\n    rc = 0;\n\n clean_tag:\n    /* destroy the tag before exit */\n    if (options.diff_arg.db_tag != NULL && ensure_db_access()) {\n        fprintf(stderr, \"Cleaning diff table...\\n\");\n        ListMgr_DestroyTag(&lmgr, options.diff_arg.db_tag);\n    }\n\n    exit(rc);\n    return rc;  /* for compiler */\n}\n"
  },
  {
    "path": "src/robinhood/rbh_du.c",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2009, 2010 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n/**\n * Du clone based on robinhood DB.\n */\n\n#ifdef HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n\n#include \"list_mgr.h\"\n#include \"cmd_helpers.h\"\n#include \"rbh_cfg.h\"\n#include \"rbh_logs.h\"\n#include \"rbh_misc.h\"\n#include \"Memory.h\"\n#include \"xplatform_print.h\"\n#include \"rbh_basename.h\"\n\n#include <unistd.h>\n#include <getopt.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <errno.h>\n#include <pthread.h>\n\n#define DU_TAG \"du\"\n\nstatic struct option option_tab[] = {\n    {\"user\", required_argument, NULL, 'u'},\n    {\"group\", required_argument, NULL, 'g'},\n    {\"type\", required_argument, NULL, 't'},\n    {\"status\", required_argument, NULL, 'S'},\n\n    /* output options */\n    {\"sum\", no_argument, NULL, 's'},\n    {\"count\", no_argument, NULL, 'c'},\n    {\"bytes\", no_argument, NULL, 'b'},\n    {\"kilo\", no_argument, NULL, 'k'},\n    {\"mega\", no_argument, NULL, 'm'},\n    {\"human-readable\", no_argument, NULL, 'H'},\n    {\"details\", no_argument, NULL, 'd'},\n\n    /* config file options */\n    {\"config-file\", required_argument, NULL, 'f'},\n\n    /* log options */\n    {\"log-level\", required_argument, NULL, 'l'},\n\n    /* miscellaneous options */\n    {\"help\", no_argument, NULL, 'h'},\n    {\"version\", no_argument, NULL, 'V'},\n\n    {NULL, 0, NULL, 0}\n\n};\n\n#define SHORT_OPT_STRING    \"u:g:t:S:scbkmHdf:l:hV\"\n#define TYPE_HELP \"'f' (file), 'd' (dir), 'l' (symlink), 'b' (block), \"\\\n                  \"'c' (char), 'p' (named 
pipe/FIFO), 's' (socket)\"\n\n/* global variables */\n\nstatic lmgr_t lmgr;\n\ntypedef enum { disp_usage, disp_count, disp_size, disp_details } display_mode;\ntypedef enum { disp_byte, disp_kilo, disp_mega, disp_human } display_unit;\n\n/* program options */\nstruct du_opt {\n    const char *user;\n    const char *group;\n    const char *type;\n\n    /* status name and value for -status */\n    sm_instance_t *smi;\n    char *status_name;\n    char *status_value;\n\n    /* condition flags */\n    unsigned int match_user:1;\n    unsigned int match_group:1;\n    unsigned int match_type:1;\n    unsigned int match_status:1;\n\n    /* behavior flags */\n    display_mode disp_what;\n    display_unit disp_how;\n    unsigned int sum:1;\n\n} prog_options = {\n    .disp_what = disp_usage, .disp_how = disp_kilo\n};\n\n/** filter on entries to be summed */\nstatic lmgr_filter_t    entry_filter;\n/** same as entry_filter + condition on parent id */\nstatic lmgr_filter_t    parent_filter;\n\n/* filter for root entries */\nstatic bool_node_t      match_expr;\nstatic int              is_expr = 0; /**< is it set? 
*/\n\nstatic attr_mask_t disp_mask =\n    { .std = ATTR_MASK_type | ATTR_MASK_blocks | ATTR_MASK_size };\nstatic attr_mask_t query_mask = { 0 };\n\ntypedef struct stats_du_t {\n    const char *type;\n    uint64_t    count;\n    uint64_t    blocks;\n    uint64_t    size;\n} stats_du_t;\n\n#define TYPE_COUNT  (TYPE_SOCK+1)\nstatic const stats_du_t stats_zero[TYPE_COUNT] = {\n    {\"?\", 0, 0, 0},\n    {STR_TYPE_LINK, 0, 0, 0},\n    {STR_TYPE_DIR, 0, 0, 0},\n    {STR_TYPE_FILE, 0, 0, 0},\n    {STR_TYPE_CHR, 0, 0, 0},\n    {STR_TYPE_BLK, 0, 0, 0},\n    {STR_TYPE_FIFO, 0, 0, 0},\n    {STR_TYPE_SOCK, 0, 0, 0}\n};\n\nstatic void reset_stats(stats_du_t *stats)\n{\n    int i;\n\n    for (i = 0; i < TYPE_COUNT; i++)\n        stats[i] = stats_zero[i];\n}\n\nstatic char *sprint_size(char *buf, uint64_t sz)\n{\n    switch (prog_options.disp_how) {\n    case disp_byte:\n        sprintf(buf, \"%\" PRIu64, sz);\n        break;\n    case disp_kilo:\n        if (sz % KB)\n            sprintf(buf, \"%llu\", 1 + (sz / KB));\n        else\n            sprintf(buf, \"%llu\", sz / KB);\n        break;\n    case disp_mega:\n        if (sz % MB)\n            sprintf(buf, \"%llu\", 1 + (sz / MB));\n        else\n            sprintf(buf, \"%llu\", sz / MB);\n        break;\n    case disp_human:\n        if (sz < KB)\n            sprintf(buf, \"%\" PRIu64, sz);\n        else if (sz < MB)\n            sprintf(buf, \"%.1fK\", 1.0 * sz / KB);\n        else if (sz < GB)\n            sprintf(buf, \"%.1fM\", 1.0 * sz / MB);\n        else if (sz < TB)\n            sprintf(buf, \"%.1fG\", 1.0 * sz / GB);\n        else if (sz < PB)\n            sprintf(buf, \"%.1fT\", 1.0 * sz / TB);\n        else if (sz < EB)\n            sprintf(buf, \"%.1fP\", 1.0 * sz / PB);\n        else\n            sprintf(buf, \"%.1fE\", 1.0 * sz / EB);\n        break;\n    }\n    return buf;\n}\n\nstatic void print_stats(const char *name, stats_du_t *stats)\n{\n    int i;\n    char b1[1024];\n    char b2[1024];\n    uint64_t 
total = 0;\n\n    switch (prog_options.disp_what) {\n    case disp_details:\n        printf(\"%s\\n\", name);\n        for (i = 0; i < TYPE_COUNT; i++)\n            if (stats[i].count > 0)\n                printf(\"\\t%s count:%\" PRIu64 \", size:%s, spc_used:%s\\n\",\n                       stats[i].type, stats[i].count,\n                       sprint_size(b1, stats[i].size),\n                       sprint_size(b2, stats[i].blocks * DEV_BSIZE));\n        break;\n    case disp_usage:\n        for (i = 0; i < TYPE_COUNT; i++)\n            if (stats[i].count > 0)\n                total += stats[i].blocks * DEV_BSIZE;\n        printf(\"%s\\t%s\\n\", sprint_size(b1, total), name);\n        break;\n    case disp_size:\n        for (i = 0; i < TYPE_COUNT; i++)\n            if (stats[i].count > 0)\n                total += stats[i].size;\n        printf(\"%s\\t%s\\n\", sprint_size(b1, total), name);\n        break;\n    case disp_count:\n        for (i = 0; i < TYPE_COUNT; i++)\n            total += stats[i].count;\n        printf(\"%\" PRIu64 \"\\t%s\\n\", total, name);\n        break;\n    }\n}\n\n/* build filters depending on program options */\nstatic int mkfilters(void)\n{\n    /* create boolean expression for matching root entries */\n    if (prog_options.match_user) {\n        compare_value_t val;\n        compare_direction_t comp;\n\n        if (global_config.uid_gid_as_numbers) {\n            val.integer = atoi(prog_options.user);\n            comp = COMP_EQUAL;\n        } else {\n            strcpy(val.str, prog_options.user);\n            comp = COMP_LIKE;\n        }\n\n        if (!is_expr)\n            CreateBoolCond(&match_expr, comp, CRITERIA_OWNER, val, 0);\n        else\n            AppendBoolCond(&match_expr, comp, CRITERIA_OWNER, val, 0);\n\n        is_expr = 1;\n        query_mask.std |= ATTR_MASK_uid;\n    }\n\n    if (prog_options.match_group) {\n        compare_value_t val;\n        compare_direction_t comp;\n\n        if 
(global_config.uid_gid_as_numbers) {\n            val.integer = atoi(prog_options.group);\n            comp = COMP_EQUAL;\n        } else {\n            strcpy(val.str, prog_options.group);\n            comp = COMP_LIKE;\n        }\n\n        if (!is_expr)\n            CreateBoolCond(&match_expr, comp, CRITERIA_GROUP, val, 0);\n        else\n            AppendBoolCond(&match_expr, comp, CRITERIA_GROUP, val, 0);\n\n        is_expr = 1;\n        query_mask.std |= ATTR_MASK_gid;\n    }\n\n    if (prog_options.match_type) {\n        compare_value_t val;\n        val.type = db2type(prog_options.type);\n        if (!is_expr)\n            CreateBoolCond(&match_expr, COMP_EQUAL, CRITERIA_TYPE, val, 0);\n        else\n            AppendBoolCond(&match_expr, COMP_EQUAL, CRITERIA_TYPE, val, 0);\n        is_expr = 1;\n        query_mask.std |= ATTR_MASK_type;\n    }\n\n    if (prog_options.match_status) {\n        compare_value_t val;\n\n        strcpy(val.str, prog_options.status_value);\n        if (!is_expr)\n            CreateBoolCond(&match_expr, COMP_EQUAL, CRITERIA_STATUS, val, 0);\n        else\n            AppendBoolCond(&match_expr, COMP_EQUAL, CRITERIA_STATUS, val, 0);\n\n        is_expr = 1;\n        query_mask.status |= SMI_MASK(prog_options.smi->smi_index);\n    }\n\n    /* create DB filters */\n    lmgr_simple_filter_init(&entry_filter);\n    lmgr_simple_filter_init(&parent_filter);\n\n    if (is_expr) {\n        char expr[RBH_PATH_MAX];\n        /* for debug */\n        if (BoolExpr2str(&match_expr, expr, RBH_PATH_MAX) > 0)\n            DisplayLog(LVL_FULL, DU_TAG, \"Expression matching: %s\", expr);\n\n        /* append bool expr to entry filter */\n        /* Do not use 'OR' expression there */\n        convert_boolexpr_to_simple_filter(&match_expr, &entry_filter,\n                                          prog_options.smi, NULL, 0, BOOL_AND);\n        convert_boolexpr_to_simple_filter(&match_expr, &parent_filter,\n                                          
prog_options.smi, NULL, 0, BOOL_AND);\n    }\n\n    return 0;\n}\n\nstatic const char *help_string =\n    _B \"Usage:\" B_ \" %s [options] [path|fid]\\n\"\n    \"\\n\"\n    _B \"Filters:\" B_ \"\\n\"\n    \"    \" _B \"-u\" B_ \" \" _U \"user\" U_ \"\\n\"\n    \"    \" _B \"-g\" B_ \" \" _U \"group\" U_ \"\\n\"\n    \"    \" _B \"-t\" B_ \" \" _U \"type\" U_ \"\\n\"\n    \"       \" TYPE_HELP \"\\n\"\n    \"    \" _B \"-S\" B_ \" \" _U \"<status_name>\" U_ \":\" _U \"<status_value>\" U_ \"\\n\"\n    \"\\n\"\n    _B \"Output options:\" B_ \"\\n\"\n    \"    \" _B \"-s\" B_ \", \" _B \"--sum\" B_ \"\\n\"\n    \"       display total instead of stats per argument\\n\"\n    \"    \" _B \"-c\" B_ \", \" _B \"--count\" B_ \"\\n\"\n    \"       display entry count instead of disk usage\\n\"\n    \"    \" _B \"-b\" B_ \", \" _B \"--bytes\" B_ \"\\n\"\n    \"       display size instead of disk usage (display in bytes)\\n\"\n    \"    \" _B \"-k\" B_ \", \" _B \"--kilo\" B_ \"\\n\"\n    \"       display disk usage in blocks of 1K (default)\\n\"\n    \"    \" _B \"-m\" B_ \", \" _B \"--mega\" B_ \"\\n\"\n    \"       display disk usage in blocks of 1M\\n\"\n    \"    \" _B \"-H\" B_ \", \" _B \"--human-readable\" B_ \"\\n\"\n    \"       display in human readable format (e.g 512K 123.7M)\\n\"\n    \"    \" _B \"-d\" B_ \", \" _B \"--details\" B_ \"\\n\"\n    \"       show detailed stats: type, count, size, disk usage\\n\"\n    \"       (display in bytes by default)\\n\"\n    \"\\n\"\n    _B \"Program options:\" B_ \"\\n\"\n    \"    \" _B \"-f\" B_ \" \" _U \"config_file\" U_ \"\\n\"\n    \"    \" _B \"-l\" B_ \" \" _U \"log_level\" U_ \"\\n\"\n    \"    \" _B \"-h\" B_ \", \" _B \"--help\" B_ \"\\n\"\n    \"        Display a short help about command line options.\\n\"\n    \"    \" _B \"-V\" B_ \", \" _B \"--version\" B_ \"\\n\"\n    \"        Display version info\\n\";\n\nstatic inline void display_help(const char *bin_name)\n{\n    printf(help_string, 
bin_name);\n}\n\nstatic inline void display_version(const char *bin_name)\n{\n    printf(\"\\n\");\n    printf(\"Product:         \" PACKAGE_NAME \" 'du' command\\n\");\n    printf(\"Version:         \" PACKAGE_VERSION \"-\" RELEASE \"\\n\");\n    printf(\"Build:           \" COMPIL_DATE \"\\n\");\n    printf(\"\\n\");\n    printf(\"Compilation switches:\\n\");\n\n/* Access by Fid ? */\n#ifdef _HAVE_FID\n    printf(\"    Address entries by FID\\n\");\n#else\n    printf(\"    Address entries by path\\n\");\n#endif\n\n#ifdef HAVE_CHANGELOGS\n    printf(\"    MDT Changelogs supported\\n\");\n#else\n    printf(\"    MDT Changelogs disabled\\n\");\n#endif\n\n    printf(\"\\n\");\n#ifdef _LUSTRE\n#ifdef LUSTRE_VERSION\n    printf(\"Lustre Version: \" LUSTRE_VERSION \"\\n\");\n#else\n    printf(\"Lustre FS support\\n\");\n#endif\n#else\n    printf(\"No Lustre support\\n\");\n#endif\n\n#ifdef _MYSQL\n    printf(\"Database binding: MySQL\\n\");\n#elif defined(_SQLITE)\n    printf(\"Database binding: SQLite\\n\");\n#else\n#error \"No database was specified\"\n#endif\n    printf(\"\\n\");\n    printf(\"Report bugs to: <\" PACKAGE_BUGREPORT \">\\n\");\n    printf(\"\\n\");\n}\n\nstatic const char *opt2type(const char *type_opt)\n{\n    if (strlen(type_opt) != 1)\n        return NULL;\n\n    switch (type_opt[0]) {\n    case 'b':\n        return STR_TYPE_BLK;\n    case 'c':\n        return STR_TYPE_CHR;\n    case 'd':\n        return STR_TYPE_DIR;\n    case 'p':\n        return STR_TYPE_FIFO;\n    case 'f':\n        return STR_TYPE_FILE;\n    case 'l':\n        return STR_TYPE_LINK;\n    case 's':\n        return STR_TYPE_SOCK;\n    default:\n        return NULL;\n    }\n}\n\n/**\n *  Get id of root dir\n */\nstatic int retrieve_root_id(entry_id_t *root_id)\n{\n    int rc;\n    char value[1024];\n\n    /* try to get root id from DB */\n    rc = ListMgr_GetVar(&lmgr, ROOT_ID_VAR, value, sizeof(value));\n    if (rc == DB_SUCCESS)\n        if (sscanf(value, SFID, RFID(root_id)) == 
FID_SCAN_CNT)\n            return 0;\n\n    rc = Path2Id(global_config.fs_path, root_id);\n    if (rc)\n        DisplayLog(LVL_MAJOR, DU_TAG, \"Can't access filesystem's root %s: %s\",\n                   global_config.fs_path, strerror(-rc));\n    return rc;\n}\n\n#define REPCNT    4\nstatic report_field_descr_t dir_info[REPCNT] = {\n    {ATTR_INDEX_type, REPORT_GROUP_BY, SORT_NONE, false, 0, FV_NULL},\n    {0, REPORT_COUNT, SORT_NONE, false, 0, FV_NULL},\n    {ATTR_INDEX_blocks, REPORT_SUM, SORT_NONE, false, 0, FV_NULL},\n    {ATTR_INDEX_size, REPORT_SUM, SORT_NONE, false, 0, FV_NULL}\n};\n\n/* directory callback */\nstatic int dircb(wagon_t *id_list, attr_set_t *attr_list,\n                 unsigned int entry_count, void *arg)\n{\n    /* sum child entries stats for all directories */\n    int i, rc;\n    filter_value_t fv;\n    struct lmgr_report_t *it;\n    db_value_t result[REPCNT];\n    unsigned int result_count;\n    stats_du_t *stats = (stats_du_t *) arg;\n\n    /* filter on parent_id */\n\n    for (i = 0; i < entry_count; i++) {\n        fv.value.val_id = id_list[i].id;\n        rc = lmgr_simple_filter_add_or_replace(&parent_filter,\n                                               ATTR_INDEX_parent_id,\n                                               EQUAL, fv, 0);\n        if (rc)\n            return rc;\n\n        it = ListMgr_Report(&lmgr, dir_info, REPCNT, NULL, &parent_filter,\n                            NULL);\n        if (it == NULL)\n            return -1;\n\n        result_count = REPCNT;\n        while ((rc =\n                ListMgr_GetNextReportItem(it, result, &result_count,\n                                          NULL)) == DB_SUCCESS) {\n            unsigned int idx = db2type(result[0].value_u.val_str);\n            stats[idx].count += result[1].value_u.val_biguint;\n            stats[idx].blocks += result[2].value_u.val_biguint;\n            stats[idx].size += result[3].value_u.val_biguint;\n\n            result_count = REPCNT;\n        
}\n\n        ListMgr_CloseReport(it);\n    }\n\n    return 0;\n}\n\n/**\n * perform du command on the entire FS\n * \\param stats array to be filled in\n * \\param display_stats the function display the stats by itself\n */\nstatic int list_all(stats_du_t *stats, bool display_stats)\n{\n    attr_set_t root_attrs;\n    entry_id_t root_id;\n    int rc;\n    struct stat st;\n    struct lmgr_report_t *it;\n\n    db_value_t result[REPCNT];\n    unsigned int result_count;\n\n    ATTR_MASK_INIT(&root_attrs);\n\n    rc = retrieve_root_id(&root_id);\n    if (rc)\n        memset(&root_id, 0, sizeof(root_id));\n\n    /* root is not a part of the DB: sum it now if it matches */\n    ATTR_MASK_SET(&root_attrs, fullpath);\n    strcpy(ATTR(&root_attrs, fullpath), global_config.fs_path);\n\n    if (lstat(ATTR(&root_attrs, fullpath), &st) == 0) {\n        stat2rbh_attrs(&st, &root_attrs, true);\n        ListMgr_GenerateFields(&root_attrs,\n                               attr_mask_or(&disp_mask, &query_mask));\n    }\n\n    /* sum root if it matches */\n    if (!is_expr || (entry_matches(&root_id, &root_attrs,\n                                   &match_expr, NULL,\n                                   prog_options.smi) == POLICY_MATCH)) {\n        unsigned int idx = db2type(ATTR(&root_attrs, type));\n        stats[idx].count++;\n        stats[idx].blocks += ATTR(&root_attrs, blocks);\n        stats[idx].size += ATTR(&root_attrs, size);\n    }\n\n    it = ListMgr_Report(&lmgr, dir_info, REPCNT, NULL, &entry_filter, NULL);\n    if (it == NULL)\n        return -1;\n\n    result_count = REPCNT;\n    while ((rc =\n            ListMgr_GetNextReportItem(it, result, &result_count,\n                                      NULL)) == DB_SUCCESS) {\n        unsigned int idx = db2type(result[0].value_u.val_str);\n        stats[idx].count += result[1].value_u.val_biguint;\n        stats[idx].blocks += result[2].value_u.val_biguint;\n        stats[idx].size += result[3].value_u.val_biguint;\n\n        
result_count = REPCNT;\n    }\n\n    ListMgr_CloseReport(it);\n\n    if (display_stats)\n        print_stats(global_config.fs_path, stats);\n\n    return 0;\n}\n\n/**\n * List the content of the given id/path list\n */\nstatic int list_content(char **id_list, int id_count)\n{\n    wagon_t *ids;\n    int i, rc;\n    attr_set_t root_attrs;\n    entry_id_t root_id;\n    bool is_id;\n    stats_du_t stats[TYPE_COUNT];\n\n    if (prog_options.sum)\n        reset_stats(stats);\n\n    rc = retrieve_root_id(&root_id);\n    if (rc)\n        return rc;\n\n    ids = MemCalloc(id_count, sizeof(wagon_t));\n    if (!ids)\n        return -ENOMEM;\n\n    for (i = 0; i < id_count; i++) {\n        if (!prog_options.sum)\n            reset_stats(stats);\n\n        is_id = true;\n        /* is it a path or fid? */\n        if (sscanf(id_list[i], SFID, RFID(&ids[i].id)) != FID_SCAN_CNT) {\n            is_id = false;\n            /* take it as a path */\n            rc = Path2Id(id_list[i], &ids[i].id);\n            if (!rc)\n                ids[i].fullname = id_list[i];\n        } else {\n#if _HAVE_FID\n            /* Take it as an FID. 
*/\n            char path[RBH_PATH_MAX];\n            rc = Lustre_GetFullPath(&ids[i].id, path, sizeof(path));\n            if (!rc)\n                ids[i].fullname = strdup(path);\n#endif\n        }\n\n        if (rc) {\n            DisplayLog(LVL_MAJOR, DU_TAG, \"Invalid parameter: %s: %s\",\n                       id_list[i], strerror(-rc));\n            goto out;\n        }\n\n        if (entry_id_equal(&ids[i].id, &root_id)) {\n            /* the ID is FS root: use list_all instead */\n            DisplayLog(LVL_DEBUG, DU_TAG,\n                       \"Optimization: command argument is filesystem's root: performing bulk sum in DB\");\n            rc = list_all(stats, !prog_options.sum);\n            if (rc)\n                goto out;\n            continue;\n        }\n\n        /* get root attrs to print it (if it matches program options) */\n        root_attrs.attr_mask = attr_mask_or(&disp_mask, &query_mask);\n        rc = ListMgr_Get(&lmgr, &ids[i].id, &root_attrs);\n        if (rc == 0)\n            dircb(&ids[i], &root_attrs, 1, stats);\n        else {\n            DisplayLog(LVL_VERB, DU_TAG, \"Notice: no attrs in DB for %s\",\n                       id_list[i]);\n\n            if (!is_id) {\n                struct stat st;\n                ATTR_MASK_SET(&root_attrs, fullpath);\n                strcpy(ATTR(&root_attrs, fullpath), id_list[i]);\n\n                if (lstat(ATTR(&root_attrs, fullpath), &st) == 0) {\n                    stat2rbh_attrs(&st, &root_attrs, true);\n                    ListMgr_GenerateFields(&root_attrs,\n                                           attr_mask_or(&disp_mask,\n                                                        &query_mask));\n                }\n            } else if (entry_id_equal(&ids[i].id, &root_id)) {\n                /* this is root id */\n                struct stat st;\n                ATTR_MASK_SET(&root_attrs, fullpath);\n                strcpy(ATTR(&root_attrs, fullpath), global_config.fs_path);\n\n   
             if (lstat(ATTR(&root_attrs, fullpath), &st) == 0) {\n                    stat2rbh_attrs(&st, &root_attrs, true);\n                    ListMgr_GenerateFields(&root_attrs,\n                                           attr_mask_or(&disp_mask,\n                                                        &query_mask));\n                }\n            }\n\n            dircb(&ids[i], &root_attrs, 1, stats);\n        }\n\n        /* sum root if it matches */\n        if (!is_expr || (entry_matches(&ids[i].id, &root_attrs,\n                                       &match_expr, NULL,\n                                       prog_options.smi) == POLICY_MATCH)) {\n            unsigned int idx = db2type(ATTR(&root_attrs, type));\n            stats[idx].count++;\n            stats[idx].blocks += ATTR(&root_attrs, blocks);\n            stats[idx].size += ATTR(&root_attrs, size);\n        }\n\n        if (!prog_options.sum) {\n            /* if not group all, run and display stats now */\n            rc = rbh_scrub(&lmgr, &ids[i], 1, disp_mask, dircb, stats);\n\n            if (rc)\n                goto out;\n\n            print_stats(ids[i].fullname, stats);\n        }\n    }\n\n    if (prog_options.sum) {\n        rc = rbh_scrub(&lmgr, ids, id_count, disp_mask, dircb, stats);\n        if (rc)\n            goto out;\n        print_stats(\"total\", stats);\n    }\n\n out:\n    /* ids have been processed, free them */\n    MemFree(ids);\n    return rc;\n}\n\n#define MAX_OPT_LEN 1024\n\n/**\n * Main daemon routine\n */\nint main(int argc, char **argv)\n{\n    int c, option_index = 0;\n    const char *bin;\n    char config_file[MAX_OPT_LEN] = \"\";\n    int rc;\n    bool chgd = false;\n    char err_msg[4096];\n    char badcfg[RBH_PATH_MAX];\n\n    bin = rh_basename(argv[0]);\n\n    /* parse command line options */\n    while ((c = getopt_long(argc, argv, SHORT_OPT_STRING, option_tab,\n                            &option_index)) != -1) {\n        switch (c) {\n        case 's':\n 
           prog_options.sum = 1;\n            break;\n        case 'c':\n            prog_options.disp_what = disp_count;\n            break;\n        case 'b':\n            /* only change the default */\n            if (prog_options.disp_what == disp_usage)\n                prog_options.disp_what = disp_size;\n            /* only change the default */\n            if (prog_options.disp_how == disp_kilo)\n                prog_options.disp_how = disp_byte;\n            break;\n        case 'k':\n            prog_options.disp_how = disp_kilo;\n            break;\n        case 'm':\n            prog_options.disp_how = disp_mega;\n            break;\n        case 'd':\n            prog_options.disp_what = disp_details;\n            /* only change the default for display */\n            if (prog_options.disp_how == disp_kilo)\n                prog_options.disp_how = disp_byte;\n            break;\n        case 'H':\n            prog_options.disp_how = disp_human;\n            break;\n\n        case 'u':\n            prog_options.match_user = 1;\n            prog_options.user = optarg;\n            break;\n        case 'g':\n            prog_options.match_group = 1;\n            prog_options.group = optarg;\n            break;\n        case 't':\n            prog_options.match_type = 1;\n            prog_options.type = opt2type(optarg);\n            if (prog_options.type == NULL) {\n                fprintf(stderr,\n                        \"invalid type '%s': expected types: \" TYPE_HELP \".\\n\",\n                        optarg);\n                exit(1);\n            }\n            break;\n\n        case 'S':\n            rc = parse_status_arg(\"-status\", optarg, &prog_options.status_name,\n                                  &prog_options.status_value, true);\n            if (rc)\n                exit(rc);\n            prog_options.match_status = 1;\n            break;\n\n        case 'f':\n            rh_strncpy(config_file, optarg, MAX_OPT_LEN);\n            break;\n 
       case 'l':\n        {\n            int log_level = str2debuglevel(optarg);\n\n            if (log_level == -1) {\n                fprintf(stderr,\n                        \"Unsupported log level '%s'. CRIT, MAJOR, EVENT, VERB, DEBUG or FULL expected.\\n\",\n                        optarg);\n                exit(1);\n            }\n            force_debug_level(log_level);\n            break;\n        }\n        case 'h':\n            display_help(bin);\n            exit(0);\n            break;\n        case 'V':\n            display_version(bin);\n            exit(0);\n            break;\n        case ':':\n        case '?':\n        default:\n            display_help(bin);\n            exit(1);\n            break;\n        }\n    }\n\n    /* initialize internal resources (glib, llapi, internal resources...) */\n    rc = rbh_init_internals();\n    if (rc != 0)\n        exit(rc);\n\n    /* get default config file, if not specified */\n    if (SearchConfig(config_file, config_file, &chgd, badcfg,\n                     MAX_OPT_LEN) != 0) {\n        fprintf(stderr, \"No config file (or too many) found matching %s\\n\",\n                badcfg);\n        exit(2);\n    } else if (chgd) {\n        fprintf(stderr, \"Using config file '%s'.\\n\", config_file);\n    }\n\n    /* only read common config (listmgr, ...) 
(mask=0) */\n    if (rbh_cfg_load(0, config_file, err_msg)) {\n        fprintf(stderr, \"Error reading configuration file '%s': %s\\n\",\n                config_file, err_msg);\n        exit(1);\n    }\n\n    if (!log_config.force_debug_level)\n        log_config.debug_level = LVL_MAJOR; /* no event message */\n\n    /* Set logging to stderr */\n    strcpy(log_config.log_file, \"stderr\");\n    strcpy(log_config.report_file, \"stderr\");\n    strcpy(log_config.alert_file, \"stderr\");\n\n    /* Initialize logging */\n    rc = InitializeLogs(bin);\n    if (rc) {\n        fprintf(stderr, \"Error opening log files: rc=%d, errno=%d: %s\\n\",\n                rc, errno, strerror(errno));\n        exit(rc);\n    }\n\n    /* Initialize filesystem access */\n    rc = InitFS();\n    if (rc)\n        fprintf(stderr,\n                \"WARNING: cannot access filesystem %s (%s), du output may be wrong or incomplete.\\n\",\n                global_config.fs_path, strerror(abs(rc)));\n\n    /* Initialize list manager */\n    rc = ListMgr_Init(LIF_REPORT_ONLY);\n    if (rc) {\n        DisplayLog(LVL_CRIT, DU_TAG, \"Error initializing list manager: %s (%d)\",\n                   lmgr_err2str(rc), rc);\n        exit(rc);\n    } else\n        DisplayLog(LVL_DEBUG, DU_TAG, \"ListManager successfully initialized\");\n\n    if (CheckLastFS() != 0)\n        exit(1);\n\n    /* Create database access */\n    rc = ListMgr_InitAccess(&lmgr);\n    if (rc) {\n        DisplayLog(LVL_CRIT, DU_TAG, \"Error %d: cannot connect to database\",\n                   rc);\n        exit(rc);\n    }\n\n    if (prog_options.match_status) {\n        const char *strval;\n\n        rc = check_status_args(prog_options.status_name,\n                               prog_options.status_value, &strval,\n                               &prog_options.smi);\n        if (rc)\n            exit(rc);\n        prog_options.status_value = (char *)strval;\n    }\n\n    mkfilters();\n\n    if (argc == optind) {\n        
stats_du_t stats[TYPE_COUNT];\n        reset_stats(stats);\n\n        /* no path in argument: du the entire FS */\n        rc = list_all(stats, true); /* display the stats by itself */\n    } else\n        rc = list_content(argv + optind, argc - optind);\n\n    ListMgr_CloseAccess(&lmgr);\n\n    return rc;\n\n}\n"
  },
  {
    "path": "src/robinhood/rbh_find.c",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2009, 2010 CEA/DAM\n * Copyright 2013 Cray Inc. All Rights Reserved.\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n/**\n * Find clone based on robinhood DB\n */\n\n#ifdef HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n\n#include \"list_mgr.h\"\n#include \"cmd_helpers.h\"\n#include \"rbh_cfg.h\"\n#include \"rbh_logs.h\"\n#include \"rbh_misc.h\"\n#include \"Memory.h\"\n#include \"xplatform_print.h\"\n#include \"rbh_basename.h\"\n\n#include <unistd.h>\n#include <getopt.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <errno.h>\n#include <pthread.h>\n\n#include \"rbh_find.h\"\n\n#define LSSTATUS_OPT 260\n#define PRINTF_OPT 261\n#define LSCLASS_OPT 262\n#define ESCAPED_OPT 263\n#define INAME_OPT   264\n#define PRINT0_OPT  265\n#define NLINK_OPT   266\n#define PRINT_OPT   267\n\nstatic struct option option_tab[] = {\n    {\"user\", required_argument, NULL, 'u'},\n    {\"group\", required_argument, NULL, 'g'},\n    {\"nouser\", no_argument, NULL, 'U'},\n    {\"nogroup\", no_argument, NULL, 'G'},\n    {\"type\", required_argument, NULL, 't'},\n    {\"size\", required_argument, NULL, 's'},\n    {\"name\", required_argument, NULL, 'n'},\n    {\"iname\", required_argument, NULL, INAME_OPT},\n    {\"links\", required_argument, NULL, NLINK_OPT},\n    {\"mtime\", required_argument, NULL, 'M'},\n    {\"crtime\", required_argument, NULL, 'c'},\n    {\"mmin\", required_argument, NULL, 'm'},\n    {\"msec\", required_argument, NULL, 'z'},\n    {\"atime\", required_argument, NULL, 'A'},\n    {\"amin\", required_argument, NULL, 'a'},\n    {\"ctime\", required_argument, NULL, 'C'},\n   
 {\"class\", required_argument, NULL, 'F'},\n    {\"status\", required_argument, NULL, 'S'},\n#ifdef _LUSTRE\n    {\"ost\", required_argument, NULL, 'o'},\n    {\"pool\", required_argument, NULL, 'P'},\n    {\"projid\", required_argument, NULL, 'p'},\n    {\"lsost\", no_argument, NULL, 'O'},\n#endif\n    {\"lsclass\", no_argument, NULL, LSCLASS_OPT},\n    {\"lsstatus\", optional_argument, NULL, LSSTATUS_OPT},\n\n    {\"ls\", no_argument, NULL, 'l'},\n    {\"print\", no_argument, NULL, PRINT_OPT},\n    {\"printf\", required_argument, NULL, PRINTF_OPT},\n    {\"print0\", no_argument, NULL, PRINT0_OPT},\n    {\"escaped\", no_argument, NULL, ESCAPED_OPT},\n    {\"exec\", required_argument, NULL, 'E'},\n    /* TODO dry-run mode for exec ? */\n\n    /* query options */\n    {\"not\", no_argument, NULL, '!'},\n    {\"nobulk\", no_argument, NULL, 'b'},\n\n    /* config file options */\n    {\"config-file\", required_argument, NULL, 'f'},\n\n    /* log options */\n    {\"debug-level\", required_argument, NULL, 'd'},\n\n    /* miscellaneous options */\n    {\"help\", no_argument, NULL, 'h'},\n    {\"version\", no_argument, NULL, 'V'},\n\n    {NULL, 0, NULL, 0}\n\n};\n\n#define SHORT_OPT_STRING    \"lpOu:g:t:s:n:S:o:P:E:A:M:C:m:z:f:d:hV!bUGc:\"\n\n#define TYPE_HELP \"'f' (file), 'd' (dir), 'l' (symlink), 'b' (block), \"\\\n                  \"'c' (char), 'p' (named pipe/FIFO), 's' (socket)\"\n#define SIZE_HELP \"[-|+]<val>[K|M|G|T]\"\n#define TIME_HELP \"[-|+]<val>[s|m|h|d|y] (s: sec, m: min, h: hour, d:day, \"\\\n                  \"y:year. 
default unit is days)\"\n\n/* global variables */\n\nstatic lmgr_t lmgr;\n\n/* program options */\nstruct find_opt prog_options = {\n    .bulk = bulk_unspec,\n    .print = 1,\n};\n\nstatic const attr_mask_t LS_DISPLAY_MASK = {.std = ATTR_MASK_nlink\n        | ATTR_MASK_mode | ATTR_MASK_uid\n        | ATTR_MASK_gid | ATTR_MASK_size | ATTR_MASK_last_mod | ATTR_MASK_link\n};\n\n#ifdef _LUSTRE\nstatic const attr_mask_t LSOST_DISPLAY_MASK = {.std = ATTR_MASK_size\n        | ATTR_MASK_stripe_items\n};\n#endif\nstatic const attr_mask_t LSCLASS_DISPLAY_MASK = {.std = ATTR_MASK_size\n        | ATTR_MASK_fileclass\n};\nstatic const attr_mask_t LSSTATUS_DISPLAY_MASK = {.std = ATTR_MASK_size };\n\nattr_mask_t disp_mask = {.std = ATTR_MASK_type };\nstatic attr_mask_t query_mask = { 0 };\n\n//static lmgr_filter_t    dir_filter;\n\n/* for filtering entries from DB */\nstatic lmgr_filter_t entry_filter;\n\n/* post filter for all entries */\nstatic bool_node_t match_expr;\nstatic int is_expr = 0; /* is it set? */\n\n/* printf string, when prog_options.printf is set. 
*/\nconst char *printf_str;\nGArray *printf_chunks;\n\n/* build filters depending on program options */\nstatic int mkfilters(bool exclude_dirs)\n{\n    filter_value_t fv;\n    compare_direction_t comp;\n\n    /* create DB filters */\n    lmgr_simple_filter_init(&entry_filter);\n\n    /* Create boolean expression for matching.\n     * All expressions are then converted to a DB filter.\n     */\n    if (prog_options.match_user) {\n        compare_value_t val;\n\n        if (global_config.uid_gid_as_numbers) {\n            val.integer = atoi(prog_options.user);\n            if (prog_options.userneg)\n                comp = COMP_DIFF;\n            else\n                comp = COMP_EQUAL;\n        } else {\n            strcpy(val.str, prog_options.user);\n            if (prog_options.userneg)\n                comp = COMP_UNLIKE;\n            else\n                comp = COMP_LIKE;\n        }\n        if (!is_expr)\n            CreateBoolCond(&match_expr, comp, CRITERIA_OWNER, val, 0);\n        else\n            AppendBoolCond(&match_expr, comp, CRITERIA_OWNER, val, 0);\n        is_expr = 1;\n        query_mask.std |= ATTR_MASK_uid;\n    }\n\n    if (prog_options.match_group) {\n        compare_value_t val;\n\n        if (global_config.uid_gid_as_numbers) {\n            val.integer = atoi(prog_options.group);\n            if (prog_options.groupneg)\n                comp = COMP_DIFF;\n            else\n                comp = COMP_EQUAL;\n        } else {\n            strcpy(val.str, prog_options.group);\n            if (prog_options.groupneg)\n                comp = COMP_UNLIKE;\n            else\n                comp = COMP_LIKE;\n        }\n\n        if (!is_expr)\n            CreateBoolCond(&match_expr, comp, CRITERIA_GROUP, val, 0);\n        else\n            AppendBoolCond(&match_expr, comp, CRITERIA_GROUP, val, 0);\n        is_expr = 1;\n        query_mask.std |= ATTR_MASK_gid;\n    }\n\n    if (prog_options.match_name) {\n        compare_value_t val;\n        enum 
compare_flags flg = 0;\n\n        if (prog_options.iname)\n            flg = CMP_FLG_INSENSITIVE;\n\n        strcpy(val.str, prog_options.name);\n        if (prog_options.nameneg)\n            comp = COMP_UNLIKE;\n        else\n            comp = COMP_LIKE;\n\n        if (!is_expr)\n            CreateBoolCond(&match_expr, comp, CRITERIA_NAME, val, flg);\n        else\n            AppendBoolCond(&match_expr, comp, CRITERIA_NAME, val, flg);\n        is_expr = 1;\n        query_mask.std |= ATTR_MASK_name;\n    }\n\n    if (prog_options.match_size) {\n        compare_value_t val;\n        val.size = prog_options.sz_val;\n        if (!is_expr)\n            CreateBoolCond(&match_expr, prog_options.sz_compar, CRITERIA_SIZE,\n                           val, 0);\n        else\n            AppendBoolCond(&match_expr, prog_options.sz_compar, CRITERIA_SIZE,\n                           val, 0);\n        is_expr = 1;\n        query_mask.std |= ATTR_MASK_size;\n    }\n\n    if (prog_options.match_nlink) {\n        compare_value_t val;\n        val.integer = prog_options.nlink_val;\n\n        if (!is_expr)\n            CreateBoolCond(&match_expr, prog_options.nlink_compar,\n                           CRITERIA_NLINK, val, 0);\n        else\n            AppendBoolCond(&match_expr, prog_options.nlink_compar,\n                           CRITERIA_NLINK, val, 0);\n        is_expr = 1;\n        query_mask.std |= ATTR_MASK_nlink;\n    }\n\n    if (prog_options.match_crtime) {\n        compare_value_t val;\n        val.duration = prog_options.crt_val;\n        if (!is_expr)\n            CreateBoolCond(&match_expr, prog_options.crt_compar,\n                           CRITERIA_CREATION, val, 0);\n        else\n            AppendBoolCond(&match_expr, prog_options.crt_compar,\n                           CRITERIA_CREATION, val, 0);\n        is_expr = 1;\n        query_mask.std |= ATTR_MASK_creation_time;\n    }\n\n    if (prog_options.match_mtime) {\n        compare_value_t val;\n        
val.duration = prog_options.mod_val;\n        if (!is_expr)\n            CreateBoolCond(&match_expr, prog_options.mod_compar,\n                           CRITERIA_LAST_MOD, val, 0);\n        else\n            AppendBoolCond(&match_expr, prog_options.mod_compar,\n                           CRITERIA_LAST_MOD, val, 0);\n        is_expr = 1;\n        query_mask.std |= ATTR_MASK_last_mod;\n    }\n\n    if (prog_options.match_ctime) {\n        compare_value_t val;\n        val.duration = prog_options.chg_val;\n        if (!is_expr)\n            CreateBoolCond(&match_expr, prog_options.chg_compar,\n                           CRITERIA_LAST_MDCHANGE, val, 0);\n        else\n            AppendBoolCond(&match_expr, prog_options.chg_compar,\n                           CRITERIA_LAST_MDCHANGE, val, 0);\n        is_expr = 1;\n        query_mask.std |= ATTR_MASK_last_mdchange;\n    }\n\n    if (prog_options.match_atime) {\n        compare_value_t val;\n        val.duration = prog_options.acc_val;\n        if (!is_expr)\n            CreateBoolCond(&match_expr, prog_options.acc_compar,\n                           CRITERIA_LAST_ACCESS, val, 0);\n        else\n            AppendBoolCond(&match_expr, prog_options.acc_compar,\n                           CRITERIA_LAST_ACCESS, val, 0);\n        is_expr = 1;\n        query_mask.std |= ATTR_MASK_last_access;\n    }\n\n    if (prog_options.match_class) {\n        compare_value_t val;\n\n        strcpy(val.str, prog_options.class);\n        if (prog_options.classneg)\n            comp = COMP_UNLIKE;\n        else\n            comp = COMP_LIKE;\n\n        if (!is_expr)\n            CreateBoolCond(&match_expr, comp, CRITERIA_FILECLASS, val, 0);\n        else\n            AppendBoolCond(&match_expr, comp, CRITERIA_FILECLASS, val, 0);\n        is_expr = 1;\n        query_mask.std |= ATTR_MASK_fileclass;\n    }\n#ifdef _LUSTRE\n    if (prog_options.match_ost) {\n        /* this partially converted to DB filter, and will be fully used\n         * 
in post checking */\n        filter_value_t fv;\n\n        if (prog_options.ost_set.count == 1) {\n            fv.value.val_uint = prog_options.ost_set.values[0].val_uint;\n            lmgr_simple_filter_add(&entry_filter, ATTR_INDEX_stripe_items,\n                                   EQUAL, fv, 0);\n        } else {\n            fv.list = prog_options.ost_set;\n            lmgr_simple_filter_add(&entry_filter, ATTR_INDEX_stripe_items,\n                                   IN, fv, 0);\n        }\n        query_mask.std |= ATTR_MASK_stripe_items;\n\n        /* lmgr will also return fs root and traversal directories on\n         * scan, work around with no_dir as directories cannot be bound\n         * to ost */\n        prog_options.no_dir = 1;\n    }\n\n    if (prog_options.match_pool) {\n        compare_value_t val;\n\n        strcpy(val.str, prog_options.pool);\n        if (!is_expr)\n            CreateBoolCond(&match_expr, COMP_LIKE, CRITERIA_POOL, val, 0);\n        else\n            AppendBoolCond(&match_expr, COMP_LIKE, CRITERIA_POOL, val, 0);\n        is_expr = 1;\n        query_mask.std |= ATTR_MASK_stripe_info;\n    }\n\n    if (prog_options.match_projid) {\n        compare_value_t val;\n        val.integer = prog_options.projid;\n\n        if (prog_options.projidneg)\n            comp = COMP_DIFF;\n        else\n            comp = COMP_EQUAL;\n\n        if (!is_expr)\n            CreateBoolCond(&match_expr, comp, CRITERIA_PROJID, val, 0);\n        else\n            AppendBoolCond(&match_expr, comp, CRITERIA_PROJID, val, 0);\n        is_expr = 1;\n        query_mask.std |= ATTR_MASK_projid;\n    }\n#endif\n\n    if (prog_options.match_status) {\n        compare_value_t val;\n\n        strcpy(val.str, prog_options.filter_status_value);\n\n        if (prog_options.statusneg)\n            comp = COMP_DIFF;\n        else\n            comp = COMP_EQUAL;\n\n        if (!is_expr)\n            CreateBoolCond(&match_expr, comp, CRITERIA_STATUS, val, 0);\n        else\n  
          AppendBoolCond(&match_expr, comp, CRITERIA_STATUS, val, 0);\n\n        is_expr = 1;\n        query_mask.status |= SMI_MASK(prog_options.filter_smi->smi_index);\n    }\n\n    /* analyze type filter */\n    if (prog_options.match_type) {\n        if (!strcasecmp(prog_options.type, STR_TYPE_DIR)) {\n            /* only match dirs */\n            prog_options.dir_only = 1;\n            if (!exclude_dirs) {\n                fv.value.val_str = STR_TYPE_DIR;\n                lmgr_simple_filter_add(&entry_filter, ATTR_INDEX_type, EQUAL,\n                                       fv, 0);\n            }\n        } else {\n            /* smthg different from dir */\n            prog_options.no_dir = 1;\n            fv.value.val_str = prog_options.type;\n            lmgr_simple_filter_add(&entry_filter, ATTR_INDEX_type, EQUAL, fv,\n                                   0);\n        }\n    } else if (exclude_dirs) {\n        /* no specific type specified => exclude dirs if required */\n        /* filter non directories (directories are handled during recursive\n         * DB scan) */\n        fv.value.val_str = STR_TYPE_DIR;\n        lmgr_simple_filter_add(&entry_filter, ATTR_INDEX_type, NOTEQUAL, fv, 0);\n    }\n\n    if (is_expr) {\n        char expr[RBH_PATH_MAX];\n        /* for debug */\n        if (BoolExpr2str(&match_expr, expr, RBH_PATH_MAX) > 0)\n            DisplayLog(LVL_FULL, FIND_TAG, \"Expression matching: %s\", expr);\n\n        /* append bool expr to entry filter */\n        /* Do not use 'OR' expression there */\n        convert_boolexpr_to_simple_filter(&match_expr, &entry_filter,\n                                          prog_options.filter_smi, NULL, 0,\n                                          BOOL_AND);\n    }\n\n    return 0;\n}\n\nstatic const char *help_string =\n    _B \"Usage:\" B_ \" %s [options] [path|fid]...\\n\"\n    \"\\n\"\n    _B \"Filters:\" B_ \"\\n\"\n    \"    \" _B \"-user\" B_ \" \" _U \"user\" U_ \"\\n\"\n    \"    \" _B \"-group\" 
B_ \" \" _U \"group\" U_ \"\\n\"\n    \"    \" _B \"-nouser\" B_ \"\\n\"\n    \"    \" _B \"-nogroup\" B_ \"\\n\"\n    \"    \" _B \"-type\" B_ \" \" _U \"type\" U_ \"\\n\"\n    \"       \" TYPE_HELP \"\\n\"\n    \"    \" _B \"-size\" B_ \" \" _U \"size_crit\" U_ \"\\n\"\n    \"       \" SIZE_HELP \"\\n\"\n    \"    \" _B \"-name\" B_ \" \" _U \"filename\" U_ \"\\n\"\n    \"    \" _B \"-links\" B_ \" \" _U \"count\" U_ \"\\n\"\n    \"    \" _B \"-crtime\" B_ \" \" _U \"time_crit\" U_ \"\\n\"\n    \"       \" TIME_HELP \"\\n\"\n    \"    \" _B \"-ctime\" B_ \" \" _U \"time_crit\" U_ \"\\n\"\n    \"       \" TIME_HELP \"\\n\"\n    \"    \" _B \"-mtime\" B_ \" \" _U \"time_crit\" U_ \"\\n\"\n    \"       \" TIME_HELP \"\\n\"\n    \"    \" _B \"-mmin\" B_ \" \" _U \"minute_crit\" U_ \"\\n\"\n    \"        same as '-mtime \" _U \"N\" U_ \"m'\\n\"\n    \"    \" _B \"-msec\" B_ \" \" _U \"second_crit\" U_ \"\\n\"\n    \"        same as '-mtime \" _U \"N\" U_ \"s'\\n\"\n    \"    \" _B \"-atime\" B_ \" \" _U \"time_crit\" U_ \"\\n\"\n    \"       \" TIME_HELP \"\\n\"\n    \"    \" _B \"-amin\" B_ \" \" _U \"minute_crit\" U_ \"\\n\"\n    \"        same as '-atime \" _U \"N\" U_ \"m'\\n\"\n#ifdef _LUSTRE\n    \"    \" _B \"-ost\" B_ \" \" _U \"ost_index|ost_set\" U_ \"\\n\"\n    \"    \" _B \"-pool\" B_ \" \" _U \"ost_pool\" U_ \"\\n\"\n    \"    \" _B \"-projid\" B_ \" \" _U \"projid\" U_ \"\\n\"\n#endif\n    \"    \" _B \"-class\" B_ \" \" _U \"class\" U_ \"\\n\"\n    \"    \" _B \"-status\" B_ \" \" _U \"status_name\" U_ \":\" _U \"status_value\" U_ \"\\n\"\n    \"\\n\"\n    \"    \" _B \"-not\" B_ \", \" _B \"-!\" B_ \" \\t Negate next argument\\n\"\n    \"\\n\"\n    _B \"Output options:\" B_ \"\\n\" \"    \" _B \"-ls\" B_ \" \\t Display attributes\\n\"\n#ifdef _LUSTRE\n    \"    \" _B \"-lsost\" B_ \" \\t Display OST information\\n\"\n#endif\n    \"    \" _B \"-lsclass\" B_ \" \\t Display fileclass information\\n\"\n    \"    \" _B \"-lsstatus\" B_ \"[=\" _U \"policy\" 
U_\n    \"] \\t Display status information (optionally: only for the given \" _U\n    \"policy\" U_ \").\\n\" \"    \" _B \"-print\" B_\n    \" \\t Display the fullpath of matching entries (this is the default, unless -ls, -lsost or -exec are used).\\n\"\n    \"    \" _B \"-printf\" B_\n    \" \\t Format string to display the matching entries.\\n\\n\"\n    \"       The supported escapes and directives are a subset of those of `find`,\\n\"\n    \"       with some Robinhood additions prefixed with %R:\\n\" \"            \" _B\n    \"%%%%\" B_ \"\\t Escapes %\\n\" \"            \" _B \"%%A\" B_\n    \"\\t Robinhood’s \\\"last access time\\\", which is a compound of the file's atime and mtime, unless the global configuration \"\n    _B \"last_access_only_atime\" B_\n    \" is set, in which case it is exactly the atime of the file. An \" _B\n    \"strftime\" B_ \"(1) directive must be added. For example: \" _B \"%%Ap %%AT\" B_\n    \". This option can also take an strftime format option between brackets. 
For instance: \"\n    _B \"%%A{%%A, %%B %%dth, %%Y %%F}\" B_ \".\\n\" \"            \" _B \"%%b\" B_\n    \"\\t Number of blocks\\n\" \"            \" _B \"%%C\" B_\n    \"\\t Robinhood’s \\\"last MD change\\\" which is the ctime of the file.\\n\"\n    \"            \" _B \"%%d\" B_ \"\\t Depth\\n\" \"            \" _B \"%%f\" B_\n    \"\\t File name, without its path\\n\" \"            \" _B \"%%g\" B_\n    \"\\t Group name\\n\" \"            \" _B \"%%M\" B_\n    \"\\t File mode as a string, similar to the output of `ls`\\n\" \"            \"\n    _B \"%%m\" B_ \"\\t File mode in octal\\n\" \"            \" _B \"%%n\" B_\n    \"\\t Number of hard links\\n\" \"            \" _B \"%%p\" B_ \"\\t Full file name\\n\"\n    \"            \" _B \"%%s\" B_ \"\\t File size\\n\" \"            \" _B \"%%T\" B_\n    \"\\t Robinhood’s \\\"modification time\\\", which is the file's mtime.\\n\"\n    \"            \" _B \"%%u\" B_ \"\\t File owner\\n\" \"            \" _B \"%%Y\" B_\n    \"\\t Full file type (file, dir, fifo, ...)\\n\" \"            \" _B \"%%y\" B_\n    \"\\t File type as one letter (f, d, p, ...)\\n\" \"            \" _B \"%%RC\" B_\n    \"\\t Robinhood’s \\\"creation time\\\", which is the oldest ctime seen for that file. It is always lesser or equal to the current ctime of the file. When Lustre changelogs are used, \\\"creation time\\\" is really the creation time. An \"\n    _B \"strftime\" B_ \"(1) directive must be added. For example: \" _B \"%%RCc\" B_\n    \". This option can also take an strftime format option between curly brackets. For instance: \"\n    _B \"%%RC{%%A, %%B %%dth, %%Y %%F}\" B_ \".\\n\"\n    \"            \" _B \"%%Rc\" B_ \"\\t File class\\n\"\n    \"            \" _B \"%%Rf\" B_ \"\\t Lustre FID\\n\"\n    \"            \" _B \"%%Rm\" B_\n    \"\\t Status manager module attribute, with the name specified between curly bracket. The name is the status manager module name, followed by a dot, followed by the attribute name. 
For example: \"\n    _B \"%%Rm{lhsm.archive_id}\" B_ \".\\n\" \"            \" _B \"%%Ro\" B_\n    \"\\t Lustre OSTS\\n\"\n    \"            \" _B \"%%Rp\" B_ \"\\t Lustre parent FID\\n\"\n    \"            \" _B \"%%RP\" B_ \"\\t Lustre project id\\n\"\n    \"            \" _B \"\\\\\\\\\" B_ \"\\t Escapes \\\\\\n\" \"            \" _B \"\\\\n\" B_\n    \"\\t Newline\\n\" \"            \" _B \"\\\\t\" B_ \"\\t Tab\\n\"\n    \"            \" _B \"\\\\NNN\" B_ \" Byte with octal value NNN (1 to 3 digits)\\n\"\n    \"            \" _B \"\\\\xHH\" B_\n    \" Byte with hexadecimal value HH (1 to 2 digits)\\n\"\n    \"    \" _B \"-escaped\"  B_\n    \" \\t When -printf is used, escape unprintable characters.\\n\"\n    \"    \" _B \"-print0\" B_ \" \\t Print file name followed by a null character\\n\"\n    \"\\n\"\n    _B \"Actions:\" B_ \"\\n\" \"    \" _B \"-exec\" B_ \" \" _U \"\\\"cmd\\\"\" U_ \"\\n\"\n    \"       Execute the given command for each matching entry. Unlike classical 'find',\\n\"\n    \"       cmd must be a single (quoted) shell param, not necessarily terminated with ';'.\\n\"\n    \"       '{}' is replaced by the entry path. 
Example: -exec 'md5sum {}'\\n\"\n    \"\\n\" _B \"Behavior:\" B_ \"\\n\" \"    \" _B \"-nobulk\" B_ \"\\n\"\n    \"       When running rbh-find on the filesystem root, rbh-find automatically switches\\n\"\n    \"       to bulk DB request instead of browsing the namespace from the DB.\\n\"\n    \"       This speeds up the query, but this may result in an arbitrary output ordering,\\n\"\n    \"       and a single path may be displayed in case of multiple hardlinks.\\n\"\n    \"       Use -nobulk to disable this optimization.\\n\" \"\\n\" _B\n    \"Program options:\" B_ \"\\n\" \"    \" _B \"-f\" B_ \" \" _U \"config_file\" U_ \"\\n\"\n    \"    \" _B \"-d\" B_ \" \" _U \"log_level\" U_ \"\\n\"\n    \"       CRIT, MAJOR, EVENT, VERB, DEBUG, FULL\\n\" \"    \" _B \"-h\" B_ \", \" _B\n    \"--help\" B_ \"\\n\"\n    \"        Display a short help about command line options.\\n\" \"    \" _B \"-V\"\n    B_ \", \" _B \"--version\" B_ \"\\n\" \"        Display version info\\n\";\n\nstatic inline void display_help(const char *bin_name)\n{\n    printf(help_string, bin_name);\n}\n\nstatic inline void display_version(const char *bin_name)\n{\n    printf(\"\\n\");\n    printf(\"Product:         \" PACKAGE_NAME \" 'find' command\\n\");\n    printf(\"Version:         \" PACKAGE_VERSION \"-\" RELEASE \"\\n\");\n    printf(\"Build:           \" COMPIL_DATE \"\\n\");\n    printf(\"\\n\");\n    printf(\"Compilation switches:\\n\");\n\n/* Access by Fid ? 
*/\n#ifdef _HAVE_FID\n    printf(\"    Address entries by FID\\n\");\n#else\n    printf(\"    Address entries by path\\n\");\n#endif\n\n#ifdef HAVE_CHANGELOGS\n    printf(\"    MDT Changelogs supported\\n\");\n#else\n    printf(\"    MDT Changelogs disabled\\n\");\n#endif\n\n    printf(\"\\n\");\n#ifdef _LUSTRE\n#ifdef LUSTRE_VERSION\n    printf(\"Lustre Version: \" LUSTRE_VERSION \"\\n\");\n#else\n    printf(\"Lustre FS support\\n\");\n#endif\n#else\n    printf(\"No Lustre support\\n\");\n#endif\n\n#ifdef _MYSQL\n    printf(\"Database binding: MySQL\\n\");\n#elif defined(_SQLITE)\n    printf(\"Database binding: SQLite\\n\");\n#else\n#error \"No database was specified\"\n#endif\n    printf(\"\\n\");\n    printf(\"Report bugs to: <\" PACKAGE_BUGREPORT \">\\n\");\n    printf(\"\\n\");\n}\n\nconst char *type2char(const char *type)\n{\n    if (!strcasecmp(type, STR_TYPE_DIR))\n        return \"dir\";\n    else if (!strcasecmp(type, STR_TYPE_FILE))\n        return \"file\";\n    else if (!strcasecmp(type, STR_TYPE_LINK))\n        return \"link\";\n    else if (!strcasecmp(type, STR_TYPE_CHR))\n        return \"char\";\n    else if (!strcasecmp(type, STR_TYPE_BLK))\n        return \"blk\";\n    else if (!strcasecmp(type, STR_TYPE_FIFO))\n        return \"fifo\";\n    else if (!strcasecmp(type, STR_TYPE_SOCK))\n        return \"sock\";\n    return \"?\";\n}\n\n/* Return the type of the file like find does. 
*/\nconst char type2onechar(const char *type)\n{\n    if (!strcasecmp(type, STR_TYPE_DIR))\n        return 'd';\n    else if (!strcasecmp(type, STR_TYPE_FILE))\n        return 'f';\n    else if (!strcasecmp(type, STR_TYPE_LINK))\n        return 'l';\n    else if (!strcasecmp(type, STR_TYPE_CHR))\n        return 'c';\n    else if (!strcasecmp(type, STR_TYPE_BLK))\n        return 'b';\n    else if (!strcasecmp(type, STR_TYPE_FIFO))\n        return 'p';\n    else if (!strcasecmp(type, STR_TYPE_SOCK))\n        return 's';\n    return '?';\n}\n\nstatic const char *opt2type(const char *type_opt)\n{\n    if (strlen(type_opt) != 1)\n        return NULL;\n\n    switch (type_opt[0]) {\n    case 'b':\n        return STR_TYPE_BLK;\n    case 'c':\n        return STR_TYPE_CHR;\n    case 'd':\n        return STR_TYPE_DIR;\n    case 'p':\n        return STR_TYPE_FIFO;\n    case 'f':\n        return STR_TYPE_FILE;\n    case 'l':\n        return STR_TYPE_LINK;\n    case 's':\n        return STR_TYPE_SOCK;\n    default:\n        return NULL;\n    }\n}\n\nstatic compare_direction_t prefix2comp(char **curr, bool neg)\n{\n    char *str = *curr;\n\n    if (str[0] == '+') {\n        (*curr)++;\n        return neg ? COMP_LSTHAN_EQ : COMP_GRTHAN;\n    } else if (str[0] == '-') {\n        (*curr)++;\n        return neg ? COMP_GRTHAN_EQ : COMP_LSTHAN;\n    } else\n        return neg ? 
COMP_DIFF : COMP_EQUAL;\n}\n\n/* parse size filter and set prog_options struct */\nstatic int set_size_filter(char *str, bool neg)\n{\n    compare_direction_t comp;\n    char *curr = str;\n    uint64_t val;\n    char suffix[1024];\n    int n;\n\n    comp = prefix2comp(&curr, neg);\n\n    n = sscanf(curr, \"%\" PRIu64 \"%s\", &val, suffix);\n    if (n < 1 || n > 2) {\n        fprintf(stderr,\n                \"Invalid size '%s' : expected size format: \" SIZE_HELP \"\\n\",\n                str);\n        return -EINVAL;\n    }\n    if ((n == 1) || !strcmp(suffix, \"\")) {\n        prog_options.sz_compar = comp;\n        prog_options.sz_val = val;\n    } else {\n        switch (suffix[0]) {\n        case 'k':\n        case 'K':\n            val *= 1024LL;\n            break;\n        case 'm':\n        case 'M':\n            val *= 1024LL * 1024LL;\n            break;\n        case 'g':\n        case 'G':\n            val *= 1024LL * 1024LL * 1024LL;\n            break;\n        case 't':\n        case 'T':\n            val *= 1024LL * 1024LL * 1024LL * 1024LL;\n            break;\n\n        case 'p':\n        case 'P':\n            val *= 1024LL * 1024LL * 1024LL * 1024LL * 1024LL;\n            break;\n        default:\n            fprintf(stderr,\n                    \"Invalid suffix for size: '%s'. 
Expected size format: \"\n                    SIZE_HELP \"\\n\", str);\n            return -EINVAL;\n        }\n        prog_options.sz_compar = comp;\n        prog_options.sz_val = val;\n    }\n    return 0;\n}\n\ntypedef enum { atime, rh_crtime, mtime, rh_ctime } e_time;\n/* parse time filter and set prog_options struct */\nstatic int set_time_filter(char *str, unsigned int multiplier,\n                           bool allow_suffix, e_time what,\n                           bool neg)\n{\n    compare_direction_t comp;\n    char *curr = str;\n    uint64_t val;\n    char suffix[1024];\n    int n;\n\n    comp = prefix2comp(&curr, neg);\n\n    n = sscanf(curr, \"%\" PRIu64 \"%s\", &val, suffix);\n    /* allow_suffix => 1 or 2 is allowed\n       else => only 1 is allowed */\n    if (allow_suffix && (n < 1 || n > 2)) {\n        fprintf(stderr,\n                \"Invalid time '%s' : expected time format: \" TIME_HELP \"\\n\",\n                str);\n        return -EINVAL;\n    } else if (!allow_suffix && (n != 1)) {\n        fprintf(stderr, \"Invalid value '%s' : [+|-]<integer> expected\\n\", str);\n        return -EINVAL;\n    }\n\n    if ((n == 1) || !strcmp(suffix, \"\")) {\n        switch (what) {\n        case rh_crtime:\n            prog_options.crt_compar = comp;\n            if (multiplier != 0)\n                prog_options.crt_val = val * multiplier;\n            else    /* default multiplier is days */\n                prog_options.crt_val = val * 86400;\n            break;\n        case mtime:\n            prog_options.mod_compar = comp;\n            if (multiplier != 0)\n                prog_options.mod_val = val * multiplier;\n            else    /* default multiplier is days */\n                prog_options.mod_val = val * 86400;\n            break;\n        case rh_ctime:\n            prog_options.chg_compar = comp;\n            if (multiplier != 0)\n                prog_options.chg_val = val * multiplier;\n            else    /* default multiplier is days 
*/\n                prog_options.chg_val = val * 86400;\n            break;\n        case atime:\n            prog_options.acc_compar = comp;\n            if (multiplier != 0)\n                prog_options.acc_val = val * multiplier;\n            else    /* default multiplier is days */\n                prog_options.acc_val = val * 86400;\n            break;\n        }\n    } else {\n        switch (suffix[0]) {\n        case 's':\n            /* keep unchanged */\n            break;\n        case 'm':\n            val *= 60;\n            break;\n        case 'h':\n            val *= 3600;\n            break;\n        case 'd':\n            val *= 86400;\n            break;\n        case 'y':\n            val *= 31557600;    /* 365.25 * 86400 */\n            break;\n        default:\n            fprintf(stderr,\n                    \"Invalid suffix for time: '%s'. Expected time format: \"\n                    TIME_HELP \"\\n\", str);\n            return -EINVAL;\n        }\n\n        switch (what) {\n        case rh_crtime:\n            prog_options.crt_compar = comp;\n            prog_options.crt_val = val;\n            break;\n        case rh_ctime:\n            prog_options.chg_compar = comp;\n            prog_options.chg_val = val;\n            break;\n        case mtime:\n            prog_options.mod_compar = comp;\n            prog_options.mod_val = val;\n            break;\n        case atime:\n            prog_options.acc_compar = comp;\n            prog_options.acc_val = val;\n            break;\n        }\n    }\n    return 0;\n}\n\nstatic void print_entry(const wagon_t *id, const attr_set_t *attrs)\n{\n    char classbuf[1028] = \"\";\n    char statusbuf[1024] = \"\";\n    GString *osts = NULL;\n\n    /* HERE: post-filter attributes that are not part of the DB request */\n\n#ifdef _LUSTRE\n    /* prepare OST display buffer */\n    if (prog_options.lsost && ATTR_MASK_TEST(attrs, stripe_items)\n        && (ATTR(attrs, stripe_items).count > 0)) {\n        /* 
separate from the beginning of the line by 2 spaces */\n        osts = g_string_new(\"  \");\n        append_stripe_list(osts, &ATTR(attrs, stripe_items), true);\n    }\n#endif\n\n    /* prepare class display buffer */\n    if (prog_options.lsclass) {\n        /* leave a space before and after */\n        snprintf(classbuf, sizeof(classbuf), \" %-20s \",\n                 class_format(ATTR_MASK_TEST(attrs, fileclass) ?\n                              ATTR(attrs, fileclass) : NULL));\n    }\n\n    /* prepare status display buffer */\n    if (prog_options.lsstatus) {\n        /* if a status is specified: display it */\n        if (prog_options.smi) {\n            /* if matching a status != lsstatus: display both (filter first) */\n            if (prog_options.match_status) {\n                snprintf(statusbuf, sizeof(statusbuf), \" %s:%s,%s:%s \",\n                         prog_options.filter_smi->instance_name,\n                         status_format(ATTR_MASK_STATUS_TEST\n                                       (attrs,\n                                        prog_options.filter_smi->\n                                        smi_index) ? STATUS_ATTR(attrs,\n                                                                 prog_options.\n                                                                 filter_smi->\n                                                                 smi_index) :\n                                       NULL), prog_options.smi->instance_name,\n                         status_format(ATTR_MASK_STATUS_TEST\n                                       (attrs,\n                                        prog_options.smi->\n                                        smi_index) ? 
STATUS_ATTR(attrs,\n                                                                 prog_options.\n                                                                 smi->\n                                                                 smi_index) :\n                                       NULL));\n            } else  /* just the requested lsstatus, with no prefix */\n                snprintf(statusbuf, sizeof(statusbuf), \" %-15s \",\n                         status_format(ATTR_MASK_STATUS_TEST\n                                       (attrs,\n                                        prog_options.smi->\n                                        smi_index) ? STATUS_ATTR(attrs,\n                                                                 prog_options.\n                                                                 smi->\n                                                                 smi_index) :\n                                       NULL));\n        } else {\n            int i;\n            char *curr = statusbuf;\n            int remain = sizeof(statusbuf);\n\n            /* if no status is specified: display them all\n             * (no extra display for filter_status in this case) */\n            for (i = 0; i < sm_inst_count && remain > 0; i++) {\n                curr += snprintf(curr, remain, \"%s%s:%s\",\n                                 i == 0 ? 
\" \" : \",\",\n                                 get_sm_instance(i)->instance_name,\n                                 status_format(ATTR_MASK_STATUS_TEST(attrs, i) ?\n                                               STATUS_ATTR(attrs, i) : NULL));\n                remain = (ptrdiff_t) (sizeof(statusbuf) - (curr - statusbuf));\n            }\n            strncat(curr, \" \", remain);\n        }\n    } else if (prog_options.filter_smi) {\n        /* just the matched status, with no prefix */\n        snprintf(statusbuf, sizeof(statusbuf), \" %-15s \",\n                 status_format(ATTR_MASK_STATUS_TEST\n                               (attrs,\n                                prog_options.filter_smi->\n                                smi_index) ? STATUS_ATTR(attrs,\n                                                         prog_options.\n                                                         filter_smi->\n                                                         smi_index) : NULL));\n    }\n\n    if (prog_options.ls) {\n        const char *type;\n        char date_str[128];\n        char mode_str[128];\n        char uid_str[20];\n        char gid_str[20];\n        const char *uid;\n        const char *gid;\n\n        /* type2char */\n        if (!ATTR_MASK_TEST(attrs, type))\n            type = \"?\";\n        else\n            type = type2char(ATTR(attrs, type));\n\n        memset(mode_str, 0, sizeof(mode_str));\n        mode_string(ATTR(attrs, mode), mode_str);\n\n        if (global_config.uid_gid_as_numbers) {\n            sprintf(uid_str, \"%d\", ATTR(attrs, uid).num);\n            uid = uid_str;\n            sprintf(gid_str, \"%d\", ATTR(attrs, gid).num);\n            gid = gid_str;\n        } else {\n            uid = ATTR(attrs, uid).txt;\n            gid = ATTR(attrs, gid).txt;\n        }\n\n        if (!ATTR_MASK_TEST(attrs, last_mod))\n            strcpy(date_str, \"\");\n        else {\n            time_t tt;\n            struct tm stm;\n            tt = 
ATTR(attrs, last_mod);\n            strftime(date_str, 128, \"%Y/%m/%d %T\", localtime_r(&tt, &stm));\n        }\n\n        if (ATTR_MASK_TEST(attrs, type)\n            && !strcmp(ATTR(attrs, type), STR_TYPE_LINK)\n            && ATTR_MASK_TEST(attrs, link))\n            /* display: id, type, mode, nlink, (status,) owner, group, size,\n             *          mtime, path -> link */\n            printf(DFID \" %-4s %s %3u  %-10s %-10s %15\" PRIu64\n                   \" %20s %s%s%s -> %s\\n\", PFID(&id->id), type, mode_str,\n                   ATTR(attrs, nlink), uid, gid, ATTR(attrs, size), date_str,\n                   statusbuf, classbuf, id->fullname, ATTR(attrs, link));\n        else\n            /* display all: id, type, mode, nlink, (status,) owner, group,\n             *              size, mtime, path */\n            printf(DFID \" %-4s %s %3u  %-10s %-10s %15\" PRIu64\n                   \" %20s %s%s%s%s\\n\", PFID(&id->id), type, mode_str,\n                   ATTR(attrs, nlink), uid, gid, ATTR(attrs, size), date_str,\n                   statusbuf, classbuf, id->fullname, osts ? osts->str : \"\");\n    } else if (prog_options.lsost || prog_options.lsclass\n               || prog_options.lsstatus) {\n        /* lsost or lsclass without -ls */\n        const char *type;\n\n        /* type2char */\n        if (!ATTR_MASK_TEST(attrs, type))\n            type = \"?\";\n        else\n            type = type2char(ATTR(attrs, type));\n\n        /* display: id, type, size, path */\n        printf(DFID \" %-4s %15\" PRIu64 \" %s%s%s%s\\n\",\n               PFID(&id->id), type, ATTR(attrs, size), statusbuf, classbuf,\n               id->fullname, osts ? 
osts->str : \"\");\n\n    } else if (prog_options.print) {\n        /* just display name */\n        if (id->fullname)\n            printf(\"%s\\n\", id->fullname);\n        else\n            printf(DFID \"\\n\", PFID(&id->id));\n    } else if (prog_options.printf) {\n        printf_entry(printf_chunks, id, attrs);\n    }\n\n    if (prog_options.exec) {\n        const char *vars[] = {\n            \"\", id->fullname,\n            NULL, NULL\n        };\n        int rc;\n        char **cmd;\n\n        rc = subst_shell_params(prog_options.exec_cmd, \"exec option\",\n                                &id->id, attrs, NULL, vars, NULL, true, &cmd);\n        if (!rc) {\n            /* display both stdout and stderr */\n            execute_shell_command(cmd, cb_redirect_all, NULL);\n            g_strfreev(cmd);\n        }\n    }\n    if (osts)\n        g_string_free(osts, TRUE);\n}\n\n/* directory callback */\nstatic int dircb(wagon_t *id_list, attr_set_t *attr_list,\n                 unsigned int entry_count, void *dummy)\n{\n    /* retrieve child entries for all directories */\n    int i, rc;\n\n    for (i = 0; i < entry_count; i++) {\n        wagon_t *chids = NULL;\n        attr_set_t *chattrs = NULL;\n        unsigned int chcount = 0;\n        int j;\n\n        /* match condition on dirs parent */\n        if (!is_expr || (entry_matches(&id_list[i].id, &attr_list[i],\n                                       &match_expr, NULL,\n                                       prog_options.filter_smi)\n                         == POLICY_MATCH)) {\n            /* don't display dirs if no_dir is specified */\n            if (!(prog_options.no_dir && ATTR_MASK_TEST(&attr_list[i], type)\n                  && !strcasecmp(ATTR(&attr_list[i], type), STR_TYPE_DIR)))\n                print_entry(&id_list[i], &attr_list[i]);\n        }\n\n        if (!prog_options.dir_only) {\n            rc = ListMgr_GetChild(&lmgr, &entry_filter, id_list + i, 1,\n                                  
attr_mask_or(&disp_mask, &query_mask),\n                                  &chids, &chattrs, &chcount);\n            if (rc) {\n                DisplayLog(LVL_MAJOR, FIND_TAG,\n                           \"ListMgr_GetChild() failed with error %d\", rc);\n                return rc;\n            }\n\n            for (j = 0; j < chcount; j++) {\n                if (!is_expr || (entry_matches(&chids[j].id, &chattrs[j],\n                                               &match_expr, NULL,\n                                               prog_options.filter_smi)\n                                 == POLICY_MATCH))\n                    print_entry(&chids[j], &chattrs[j]);\n\n                ListMgr_FreeAttrs(&chattrs[j]);\n            }\n\n            free_wagon(chids, 0, chcount);\n            MemFree(chids);\n            MemFree(chattrs);\n        }\n    }\n    return 0;\n}\n\n/**\n *  Get id of root dir\n */\nstatic int retrieve_root_id(entry_id_t *root_id)\n{\n    int rc;\n    char value[1024];\n\n    /* try to get root id from DB */\n    rc = ListMgr_GetVar(&lmgr, ROOT_ID_VAR, value, sizeof(value));\n    if (rc == DB_SUCCESS)\n        if (sscanf(value, SFID, RFID(root_id)) == FID_SCAN_CNT)\n            return 0;\n\n    /* else, try to retrieve it from filesystem */\n    rc = Path2Id(global_config.fs_path, root_id);\n    if (rc)\n        DisplayLog(LVL_MAJOR, FIND_TAG, \"Can't access filesystem's root %s: %s\",\n                   global_config.fs_path, strerror(-rc));\n    return rc;\n}\n\n/** retrieve attributes for filesystem root.\n * Assumes fullpath attribute is set\n */\nstatic void set_root_attrs(attr_set_t *root_attrs)\n{\n    struct stat st;\n    const char *path;\n\n    if (!ATTR_MASK_TEST(root_attrs, fullpath))\n        return;\n\n    path = ATTR(root_attrs, fullpath);\n    if (lstat(path, &st) != 0)\n        return;\n\n    stat2rbh_attrs(&st, root_attrs, true);\n\n#ifdef _LUSTRE\n    int rc;\n\n\trc = lustre_project_get_id(path);\n\tif (rc > 0) 
{\n\t\tATTR(root_attrs, projid) = rc;\n\t\tATTR_MASK_SET(root_attrs, projid);\n\t}\n#endif\n\n    ListMgr_GenerateFields(root_attrs, attr_mask_or(&disp_mask, &query_mask));\n}\n\n/**\n * Bulk filtering in the DB.\n */\nstatic int list_bulk(void)\n{\n    attr_set_t root_attrs, attrs;\n    entry_id_t root_id, id;\n    int rc;\n    struct lmgr_iterator_t *it;\n\n    /* no transversal => no wagon\n     * so we need the path from the DB.\n     */\n    query_mask.std |= ATTR_MASK_fullpath;\n\n    ATTR_MASK_INIT(&root_attrs);\n\n    rc = retrieve_root_id(&root_id);\n    if (rc)\n        memset(&root_id, 0, sizeof(root_id));\n\n    /* root is not a part of the DB: print it now */\n    ATTR_MASK_SET(&root_attrs, fullpath);\n    strcpy(ATTR(&root_attrs, fullpath), global_config.fs_path);\n\n    set_root_attrs(&root_attrs);\n\n    /* root has no name... */\n    ATTR_MASK_SET(&root_attrs, name);\n    ATTR(&root_attrs, name)[0] = '\\0';\n\n    /* match condition on dirs parent */\n    if (!is_expr || (entry_matches(&root_id, &root_attrs,\n                                   &match_expr, NULL,\n                                   prog_options.filter_smi) == POLICY_MATCH)) {\n        /* don't display dirs if no_dir is specified */\n        if (!(prog_options.no_dir && ATTR_MASK_TEST(&root_attrs, type)\n              && !strcasecmp(ATTR(&root_attrs, type), STR_TYPE_DIR))) {\n            wagon_t w;\n            w.id = root_id;\n            w.fullname = ATTR(&root_attrs, fullpath);\n            print_entry(&w, &root_attrs);\n        }\n    }\n\n    /* list all, including dirs */\n    it = ListMgr_Iterator(&lmgr, &entry_filter, NULL, NULL);\n    if (!it) {\n        DisplayLog(LVL_MAJOR, FIND_TAG,\n                   \"ERROR: cannot retrieve entry list from database\");\n        return -1;\n    }\n\n    attrs.attr_mask = attr_mask_or(&disp_mask, &query_mask);\n    while ((rc = ListMgr_GetNext(it, &id, &attrs)) == DB_SUCCESS) {\n        if (!is_expr || (entry_matches(&id, &attrs, 
&match_expr, NULL,\n                                       prog_options.filter_smi) ==\n                         POLICY_MATCH)) {\n            /* don't display dirs if no_dir is specified */\n            if (!(prog_options.no_dir && ATTR_MASK_TEST(&attrs, type)\n                  && !strcasecmp(ATTR(&attrs, type), STR_TYPE_DIR))) {\n                wagon_t w;\n                w.id = id;\n                w.fullname = ATTR(&attrs, fullpath);\n                print_entry(&w, &attrs);\n            }\n            /* don't display non dirs is dir_only is specified */\n            else if (!(prog_options.dir_only && ATTR_MASK_TEST(&attrs, type)\n                       && strcasecmp(ATTR(&attrs, type), STR_TYPE_DIR))) {\n                wagon_t w;\n                w.id = id;\n                w.fullname = ATTR(&attrs, fullpath);\n                print_entry(&w, &attrs);\n            } else\n                /* return entry don't match? */\n                DisplayLog(LVL_DEBUG, FIND_TAG,\n                           \"Warning: returned DB entry doesn't match filter: %s\",\n                           ATTR(&attrs, fullpath));\n        }\n        ListMgr_FreeAttrs(&attrs);\n\n        /* prepare next call */\n        attrs.attr_mask = attr_mask_or(&disp_mask, &query_mask);\n    }\n    ListMgr_CloseIterator(it);\n\n    return 0;\n}\n\n/**\n * List contents of the given id/path list\n */\nstatic int list_contents(char **id_list, int id_count)\n{\n    wagon_t *ids;\n    int i, rc;\n    attr_set_t root_attrs;\n    entry_id_t root_id;\n    bool is_id;\n\n    rc = retrieve_root_id(&root_id);\n    if (rc)\n        return rc;\n\n    ids = MemCalloc(id_count, sizeof(wagon_t));\n    if (!ids)\n        return -ENOMEM;\n\n    for (i = 0; i < id_count; i++) {\n        is_id = true;\n        /* is it a path or fid? 
*/\n        if (sscanf(id_list[i], SFID, RFID(&ids[i].id)) != FID_SCAN_CNT) {\n            is_id = false;\n            /* take it as a path */\n            rc = Path2Id(id_list[i], &ids[i].id);\n            if (!rc) {\n                ids[i].fullname = id_list[i];\n                if (FINAL_SLASH(ids[i].fullname))\n                    REMOVE_FINAL_SLASH(ids[i].fullname);\n            }\n        } else {\n#if _HAVE_FID\n            /* Take it as an FID. */\n            char path[RBH_PATH_MAX];\n            rc = Lustre_GetFullPath(&ids[i].id, path, sizeof(path));\n            if (!rc)\n                ids[i].fullname = strdup(path);\n#endif\n        }\n\n        if (rc) {\n            DisplayLog(LVL_MAJOR, FIND_TAG, \"Invalid parameter: %s: %s\",\n                       id_list[i], strerror(-rc));\n            goto out;\n        }\n\n        if ((prog_options.bulk != force_nobulk) &&\n            (id_count == 1) && entry_id_equal(&ids[i].id, &root_id)) {\n            /* the ID is FS root: use list_bulk instead */\n            DisplayLog(LVL_DEBUG, FIND_TAG,\n                       \"Optimization: switching to bulk DB request mode\");\n            mkfilters(false);   /* keep dirs */\n            MemFree(ids);\n            return list_bulk();\n        }\n\n        /* get root attrs to print it (if it matches program options) */\n        root_attrs.attr_mask = attr_mask_or(&disp_mask, &query_mask);\n        rc = ListMgr_Get(&lmgr, &ids[i].id, &root_attrs);\n        if (rc == 0)\n            dircb(&ids[i], &root_attrs, 1, NULL);\n        else {\n            DisplayLog(LVL_VERB, FIND_TAG, \"Notice: no attrs in DB for %s\",\n                       id_list[i]);\n\n            if (!is_id) {\n                ATTR_MASK_SET(&root_attrs, fullpath);\n                strcpy(ATTR(&root_attrs, fullpath), id_list[i]);\n\n                /* guess root name */\n                ATTR_MASK_SET(&root_attrs, name);\n                rh_strncpy(ATTR(&root_attrs, name), 
rh_basename(id_list[i]),\n                           sizeof(ATTR(&root_attrs, name)));\n\n                set_root_attrs(&root_attrs);\n\n            } else if (entry_id_equal(&ids[i].id, &root_id)) {\n                /* this is root id */\n                ATTR_MASK_SET(&root_attrs, fullpath);\n                strcpy(ATTR(&root_attrs, fullpath), global_config.fs_path);\n\n                set_root_attrs(&root_attrs);\n\n                /* root has no name... */\n                ATTR_MASK_SET(&root_attrs, name);\n                ATTR(&root_attrs, name)[0] = '\\0';\n            }\n\n            dircb(&ids[i], &root_attrs, 1, NULL);\n        }\n\n        rc = rbh_scrub(&lmgr, &ids[i], 1, attr_mask_or(&disp_mask, &query_mask),\n                       dircb, NULL);\n    }\n\n out:\n    /* ids have been processed, free them */\n    MemFree(ids);\n    return rc;\n}\n\n#define toggle_option(_opt, _name)             \\\n            do {                               \\\n                if (prog_options. _opt)        \\\n                    fprintf(stderr, \"warning: -%s option already specified: \"\\\n                            \"will be overridden\\n\", _name); \\\n                prog_options. 
_opt = 1;         \\\n            } while (0)\n\n#define MAX_OPT_LEN 1024\n\n/**\n * Main daemon routine\n */\nint main(int argc, char **argv)\n{\n    int c, option_index = 0;\n    const char *bin;\n    char config_file[MAX_OPT_LEN] = \"\";\n    int rc;\n    char err_msg[4096];\n    bool chgd = false;\n    char badcfg[RBH_PATH_MAX];\n    bool neg = false;\n    GError *err_desc = NULL;\n\n    bin = rh_basename(argv[0]);\n\n    /* parse command line options */\n    while ((c = getopt_long_only(argc, argv, SHORT_OPT_STRING, option_tab,\n                                 &option_index)) != -1) {\n        switch (c) {\n        case '!':\n            neg = true;\n            break;\n\n        case 'u':\n            toggle_option(match_user, \"user\");\n            prog_options.user = optarg;\n            prog_options.userneg = neg;\n            neg = false;\n            break;\n\n        case 'g':\n            toggle_option(match_group, \"group\");\n            prog_options.group = optarg;\n            prog_options.groupneg = neg;\n            neg = false;\n            break;\n\n        case 'U':  /* match numerical (non resolved) users */\n            toggle_option(match_user, \"user\");\n            prog_options.user = \"[0-9]*\";\n            prog_options.userneg = neg;\n            neg = false;\n            break;\n\n        case 'G':  /* match numerical (non resolved) groups */\n            toggle_option(match_group, \"group\");\n            prog_options.group = \"[0-9]*\";\n            prog_options.groupneg = neg;\n            neg = false;\n            break;\n\n        case 'n':\n            toggle_option(match_name, \"name\");\n            prog_options.name = optarg;\n            prog_options.nameneg = neg;\n            neg = false;\n            break;\n\n        case INAME_OPT:\n            toggle_option(match_name, \"name\");\n            prog_options.name = optarg;\n            prog_options.nameneg = neg;\n            prog_options.iname = 1;\n            neg = 
false;\n            break;\n\n        case NLINK_OPT:\n            toggle_option(match_nlink, \"nlink\");\n            prog_options.nlink_compar = prefix2comp(&optarg, neg);\n            prog_options.nlink_val = str2int(optarg);\n            if (prog_options.nlink_val == (unsigned int)-1) {\n                fprintf(stderr,\n                        \"invalid links value '%s': integer expected\\n\",\n                        optarg);\n                exit(1);\n            }\n            neg = false;\n            break;\n\n        case 'F':\n            toggle_option(match_class, \"class\");\n            prog_options.class = optarg;\n            prog_options.classneg = neg;\n            neg = false;\n            break;\n\n#ifdef _LUSTRE\n        case 'o':\n            toggle_option(match_ost, \"ost\");\n            if (lmgr_range2list(optarg, DB_UINT, &prog_options.ost_set)) {\n                fprintf(stderr,\n                        \"invalid value '%s' for -ost: unsigned integer or set expected (e.g. 2 or 3,5-8,10-12)\\n\",\n                        optarg);\n                exit(1);\n            }\n            if (neg) {\n                fprintf(stderr, \"! 
() is not supported for ost criteria\\n\");\n                exit(1);\n            }\n            break;\n\n        case 'P':\n            toggle_option(match_pool, \"pool\");\n            prog_options.pool = optarg;\n            break;\n\n        case 'p':\n            toggle_option(match_projid, \"projid\");\n            if (optarg == NULL) {\n                fprintf(stderr, \"missing argument to option --projid\\n\");\n                exit(1);\n            }\n            prog_options.projid = atoi(optarg);\n            prog_options.projidneg = neg;\n            neg = false;\n            break;\n\n        case 'O':\n            prog_options.lsost = 1;\n            prog_options.print = 0;\n            disp_mask = attr_mask_or(&disp_mask, &LSOST_DISPLAY_MASK);\n            if (neg) {\n                fprintf(stderr, \"! (-not) unexpected before -lsost option\\n\");\n                exit(1);\n            }\n            break;\n#endif\n\n        case LSCLASS_OPT:\n            prog_options.lsclass = 1;\n            prog_options.print = 0;\n            disp_mask = attr_mask_or(&disp_mask, &LSCLASS_DISPLAY_MASK);\n            if (neg) {\n                fprintf(stderr, \"! (-not) unexpected before -lsclass option\\n\");\n                exit(1);\n            }\n            break;\n\n        case LSSTATUS_OPT:\n            prog_options.lsstatus = 1;\n            prog_options.print = 0;\n            prog_options.lsstatus_name = optarg;\n            disp_mask = attr_mask_or(&disp_mask, &LSSTATUS_DISPLAY_MASK);\n            if (neg) {\n                fprintf(stderr,\n                        \"! 
(-not) unexpected before -lsstatus option\\n\");\n                exit(1);\n            }\n            break;\n\n        case 't':\n            toggle_option(match_type, \"type\");\n            prog_options.type = opt2type(optarg);\n            if (prog_options.type == NULL) {\n                fprintf(stderr,\n                        \"invalid type '%s': expected types: \" TYPE_HELP \".\\n\",\n                        optarg);\n                exit(1);\n            }\n            if (neg) {\n                fprintf(stderr,\n                        \"! (-not) is not supported for type criteria\\n\");\n                exit(1);\n            }\n            break;\n\n        case 's':\n            toggle_option(match_size, \"size\");\n            if (set_size_filter(optarg, neg))\n                exit(1);\n            neg = false;\n            break;\n\n        case 'A':\n            toggle_option(match_atime, \"atime/amin\");\n            if (set_time_filter(optarg, 0, true, atime, neg))\n                exit(1);\n            neg = false;\n            break;\n\n        case 'a':\n            toggle_option(match_atime, \"atime/amin\");\n            if (set_time_filter(optarg, 60, true, atime, neg))\n                exit(1);\n            neg = false;\n            break;\n\n        case 'c':\n            toggle_option(match_crtime, \"crtime\");\n            if (set_time_filter(optarg, 0, true, rh_crtime, neg))\n                exit(1);\n            neg = false;\n            break;\n\n        case 'C':\n            toggle_option(match_ctime, \"ctime\");\n            if (set_time_filter(optarg, 0, true, rh_ctime, neg))\n                exit(1);\n            neg = false;\n            break;\n\n        case 'M':\n            toggle_option(match_mtime, \"mtime/mmin/msec\");\n            if (set_time_filter(optarg, 0, true, mtime, neg))\n                exit(1);\n            neg = false;\n            break;\n\n        case 'm':\n            toggle_option(match_mtime, 
\"mtime/mmin/msec\");\n            /* don't allow suffix (multiplier is 1min) */\n            if (set_time_filter(optarg, 60, false, mtime, neg))\n                exit(1);\n            neg = false;\n            break;\n\n        case 'z':\n            toggle_option(match_mtime, \"mtime/mmin/msec\");\n            /* don't allow suffix (multiplier is 1sec) */\n            if (set_time_filter(optarg, 1, false, mtime, neg))\n                exit(1);\n            neg = false;\n            break;\n\n        case 'S':\n            toggle_option(match_status, \"status\");\n            rc = parse_status_arg(\"-status\", optarg,\n                                  &prog_options.filter_status_name,\n                                  &prog_options.filter_status_value, true);\n            if (rc)\n                exit(rc);\n            prog_options.statusneg = neg;\n            neg = false;\n            break;\n\n        case 'l':\n            prog_options.ls = 1;\n            prog_options.print = 0;\n            disp_mask = attr_mask_or(&disp_mask, &LS_DISPLAY_MASK);\n            if (neg) {\n                fprintf(stderr, \"! (-not) unexpected before -ls option\\n\");\n                exit(1);\n            }\n            break;\n\n        case PRINT_OPT:\n            prog_options.print = 1;\n            disp_mask = attr_mask_or(&disp_mask, &LS_DISPLAY_MASK);\n            if (neg) {\n                fprintf(stderr, \"! (-not) unexpected before -ls option\\n\");\n                exit(1);\n            }\n            break;\n\n        case PRINTF_OPT:\n            prog_options.print = 0;\n            prog_options.printf = 1;\n            printf_str = optarg;\n            if (neg) {\n                fprintf(stderr, \"! 
(-not) unexpected before -printf option\\n\");\n                exit(1);\n            }\n            break;\n\n        case PRINT0_OPT:\n            prog_options.print = 0;\n            prog_options.printf = 1;\n            printf_str = \"%p\\\\0\";\n            if (neg) {\n                fprintf(stderr, \"! (-not) unexpected before -print0 option\\n\");\n                exit(1);\n            }\n            break;\n\n        case ESCAPED_OPT:\n            prog_options.escaped = 1;\n            break;\n\n        case 'E':\n            toggle_option(exec, \"exec\");\n            if (!g_shell_parse_argv(optarg, NULL, &prog_options.exec_cmd,\n                                    &err_desc)) {\n                fprintf(stderr, \"Could not parse command %s: %s\\n\",\n                        optarg, err_desc->message);\n                g_error_free(err_desc);\n                exit(1);\n            }\n            prog_options.print = 0;\n            break;\n\n        case 'f':\n            rh_strncpy(config_file, optarg, MAX_OPT_LEN);\n            if (neg) {\n                fprintf(stderr, \"! (-not) unexpected before -f option\\n\");\n                exit(1);\n            }\n            break;\n\n        case 'd':\n        {\n            int log_level = str2debuglevel(optarg);\n\n            if (log_level == -1) {\n                fprintf(stderr,\n                        \"Unsupported log level '%s'. CRIT, MAJOR, EVENT, VERB, DEBUG or FULL expected.\\n\",\n                        optarg);\n                exit(1);\n            }\n            if (neg) {\n                fprintf(stderr, \"! 
(-not) unexpected before -d option\\n\");\n                exit(1);\n            }\n            force_debug_level(log_level);\n            break;\n        }\n        case 'b':\n            prog_options.bulk = force_nobulk;\n            break;\n\n        case 'h':\n            display_help(bin);\n            exit(0);\n            break;\n\n        case 'V':\n            display_version(bin);\n            exit(0);\n            break;\n\n        case ':':\n        case '?':\n        default:\n            display_help(bin);\n            exit(1);\n            break;\n        }\n    }\n\n    rc = rbh_init_internals();\n    if (rc != 0)\n        exit(rc);\n\n    /* get default config file, if not specified */\n    if (SearchConfig(config_file, config_file, &chgd, badcfg,\n                     MAX_OPT_LEN) != 0) {\n        fprintf(stderr, \"No config file (or too many) found matching %s\\n\",\n                badcfg);\n        exit(2);\n    } else if (chgd) {\n        fprintf(stderr, \"Using config file '%s'.\\n\", config_file);\n    }\n\n    /* only read common config (listmgr, ...) 
(mask=0) */\n    if (rbh_cfg_load(0, config_file, err_msg)) {\n        fprintf(stderr, \"Error reading configuration file '%s': %s\\n\",\n                config_file, err_msg);\n        exit(1);\n    }\n\n    if (!log_config.force_debug_level)\n        log_config.debug_level = LVL_MAJOR; /* no event message */\n\n    /* Set logging to stderr */\n    strcpy(log_config.log_file, \"stderr\");\n    strcpy(log_config.report_file, \"stderr\");\n    strcpy(log_config.alert_file, \"stderr\");\n\n    /* Initialize logging */\n    rc = InitializeLogs(bin);\n    if (rc) {\n        fprintf(stderr, \"Error opening log files: rc=%d, errno=%d: %s\\n\",\n                rc, errno, strerror(errno));\n        exit(rc);\n    }\n\n    /* Initialize filesystem access */\n    rc = InitFS();\n    if (rc)\n        fprintf(stderr,\n                \"WARNING: cannot access filesystem %s (%s), find output may be incomplete.\\n\",\n                global_config.fs_path, strerror(abs(rc)));\n\n    /* Initialize list manager (report only) */\n    rc = ListMgr_Init(LIF_REPORT_ONLY);\n    if (rc) {\n        DisplayLog(LVL_CRIT, FIND_TAG,\n                   \"Error initializing list manager: %s (%d)\", lmgr_err2str(rc),\n                   rc);\n        exit(rc);\n    } else\n        DisplayLog(LVL_DEBUG, FIND_TAG, \"ListManager successfully initialized\");\n\n    if (CheckLastFS() != 0)\n        exit(1);\n\n    /* Create database access */\n    rc = ListMgr_InitAccess(&lmgr);\n    if (rc) {\n        DisplayLog(LVL_CRIT, FIND_TAG, \"Error %d: cannot connect to database\",\n                   rc);\n        exit(rc);\n    }\n\n    /* manage status args:\n     * lsstatus: check optional argument\n     *           set the display mask appropriately.\n     */\n    if (prog_options.lsstatus) {\n        if (prog_options.lsstatus_name) {\n            const char *dummy;\n            rc = check_status_args(prog_options.lsstatus_name, NULL, &dummy,\n                                   &prog_options.smi);\n   
         if (rc)\n                exit(rc);\n            disp_mask.status |= SMI_MASK(prog_options.smi->smi_index);\n        } else  /* display all status */\n            disp_mask.status |= all_status_mask();\n    }\n\n    if (prog_options.match_status) {\n        const char *strval;\n\n        rc = check_status_args(prog_options.filter_status_name,\n                               prog_options.filter_status_value, &strval,\n                               &prog_options.filter_smi);\n        if (rc)\n            exit(rc);\n        /* add it to display mask */\n        disp_mask.status |= SMI_MASK(prog_options.filter_smi->smi_index);\n        prog_options.filter_status_value = (char *)strval;\n    }\n\n    if (prog_options.printf) {\n        printf_chunks = prepare_printf_format(printf_str);\n        if (printf_chunks == NULL)\n            exit(EINVAL);\n    }\n\n    if (argc == optind) {\n        /* no argument: default is root\n         * => switch to bulk mode (unless nobulk is specified)\n         */\n        if (prog_options.bulk != force_nobulk) {\n            DisplayLog(LVL_DEBUG, FIND_TAG,\n                       \"Optimization: switching to bulk DB request mode\");\n            mkfilters(false);   /* keep dirs */\n            return list_bulk();\n        } else {\n            char *id = global_config.fs_path;\n            mkfilters(true);    /* exclude dirs */\n            /* no path specified, list all entries */\n            rc = list_contents(&id, 1);\n        }\n    } else {\n        mkfilters(true);    /* exclude dirs */\n        rc = list_contents(argv + optind, argc - optind);\n    }\n\n    ListMgr_CloseAccess(&lmgr);\n\n    return rc;\n}\n"
  },
  {
    "path": "src/robinhood/rbh_find.h",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n#ifndef _RBH_FIND_H\n#define _RBH_FIND_H\n\n#define FIND_TAG \"find\"\n\nextern attr_mask_t disp_mask;\n\n/* program options */\nstruct find_opt {\n    const char     *user;\n    const char     *group;\n    const char     *type;\n    // size cond: gt/eq/lt <val>\n    compare_direction_t sz_compar;\n    uint64_t        sz_val;\n    const char     *name;\n    const char     *class;\n    value_list_t    ost_set;\n    const char     *pool;\n    /* status manager for -lsstatus */\n    const char     *lsstatus_name;\n    int            projid;\n    sm_instance_t  *smi;\n\n    /* status name and value for -status */\n    sm_instance_t  *filter_smi;\n    char           *filter_status_name;\n    char           *filter_status_value;\n\n    // crtime cond: gt/eq/lt <time>\n    compare_direction_t crt_compar;\n    time_t              crt_val;\n\n    // ctime cond: gt/eq/lt <time>\n    compare_direction_t chg_compar;\n    time_t              chg_val;\n\n    // mtime cond: gt/eq/lt <time>\n    compare_direction_t mod_compar;\n    time_t              mod_val;\n\n    // atime cond: gt/eq/lt <time>\n    compare_direction_t acc_compar;\n    time_t              acc_val;\n\n    compare_direction_t nlink_compar;\n    uint32_t            nlink_val;\n\n    char              **exec_cmd;\n\n    /* query option */\n    enum {\n        bulk_unspec = 0,\n        force_bulk,\n        force_nobulk\n    } bulk;\n\n    /* output flags */\n    unsigned int ls:1;\n    unsigned int lsost:1;\n    unsigned int lsclass:1;\n    unsigned int lsstatus:1;\n    unsigned int print:1;\n   
 unsigned int printf:1;\n    unsigned int escaped:1;\n\n    /* condition flags */\n    unsigned int match_user:1;\n    unsigned int match_group:1;\n    unsigned int match_type:1;\n    unsigned int match_size:1;\n    unsigned int match_name:1;\n    unsigned int match_class:1;\n    unsigned int match_crtime:1;\n    unsigned int match_mtime:1;\n    unsigned int match_atime:1;\n    unsigned int match_ctime:1;\n    unsigned int match_nlink:1;\n#ifdef _LUSTRE\n    unsigned int match_ost:1;\n    unsigned int match_pool:1;\n    unsigned int match_projid:1;\n#endif\n    unsigned int match_status:1;\n    unsigned int statusneg:1;\n\n    /* -not flags */\n    unsigned int userneg:1;\n    unsigned int groupneg:1;\n    unsigned int nameneg:1;\n    unsigned int classneg:1;\n    unsigned int projidneg:1;\n\n    /* case insensitive name */\n    unsigned int iname:1;\n\n    /* behavior flags */\n    unsigned int no_dir:1;   /* if -t != dir => no dir to be displayed */\n    unsigned int dir_only:1; /* if -t dir => only display dir */\n\n    /* actions */\n    unsigned int exec:1;\n\n};\nextern struct find_opt prog_options;\n\nconst char *type2char(const char *type);\nconst char type2onechar(const char *type);\n\nGArray *prepare_printf_format(const char *format);\nvoid printf_entry(GArray *chunks, const wagon_t *id,\n                  const attr_set_t *attrs);\nvoid free_printf_formats(GArray *chunks);\n\n#endif\n"
  },
  {
    "path": "src/robinhood/rbh_find_printf.c",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright 2016 Cray Inc. All Rights Reserved.\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n#ifdef HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n\n#include <ctype.h>\n\n#include <glib.h>\n\n#include \"cmd_helpers.h\"\n#include \"rbh_logs.h\"\n#include \"rbh_misc.h\"\n#include \"status_manager.h\"\n\n#include \"rbh_find.h\"\n\n/**\n * Handle the printf option for rbh-find\n *\n * The format string is split into chunks with only one % in\n * each. That format in the chunk is then adapted to be fed to\n * printf() like functions. The bulk of the processing such as looking\n * for directives, finding their arguments, validating them, ... is\n * done only once.\n *\n * For instance a format like\n *   \"file is %p and its archive is %R{lhsm.archive_id} (neat!)\"\n * becomes the 2 following chunks:\n *   \"file is %s and its archive is \"\n *   \"%u (neat!)\"\n * Their type of argument is stored in one fchunk each.\n */\n\nstruct fchunk {\n    unsigned int directive;     /* main directive (e.g. \"%p\" -> 'p') */\n    unsigned int sub_directive; /* sub directive (e.g. \"%Rf\" -> 'f') */\n\n    GString *format;\n\n    /* A simple time format directive will be stored directly in\n     * 'format', while a whole strftime format string is stored in\n     * 'time_format'. This is an enhancement over find's printf\n     * option. */\n    GString *time_format;\n\n    /* For directives that refer to a status module attribute (for\n     * instance \"%R{lhsm.archive_id}\"). 
*/\n    const sm_instance_t *smi;\n    unsigned int attr_index; /**< absolute attr index */\n    unsigned int rel_sm_info_index; /**< relative index of sm_info attr */\n    const sm_info_def_t *def;\n};\n\n/* The SM status cannot be retrieved or read like the other SM\n * attributes. So make a special case for it. */\n#define SUB_DIRECTIVE_STATUS 0x7876\n\n/* Escape a file name to create a valid string. Valid filenames\n * characters are all except NULL and /. But not everything else is\n * printable. This function returns a statically allocated string\n * which will be overwritten by subsequent calls. */\nstatic const char *escape_name(const char *fullname)\n{\n    const unsigned char *src = (const unsigned char *)fullname;\n    static GString *dest;\n\n    if (dest == NULL)\n        dest = g_string_sized_new(100);\n    else\n        g_string_truncate(dest, 0);\n\n    while (*src) {\n        if (isprint(*src) && *src != '\\\\')\n            g_string_append_c(dest, *src);\n        else\n            g_string_append_printf(dest, \"\\\\%03o\", *src);\n\n        src++;\n    }\n\n    return dest->str;\n}\n\n/* Extract the field length from a printf specifier. e.g. \"%-20s\" will\n * set the field to \"-20\" and return the next position in the string. */\nstatic const char *extract_field_width(const char *str, GString *format)\n{\n    if (*str == '-') {\n        g_string_append_c(format, '-');\n        str++;\n    }\n\n    while (*str && *str >= '0' && *str <= '9') {\n        g_string_append_c(format, *str);\n        str++;\n    }\n\n    return str;\n}\n\n/* Extract a module attribute name. Sets the smi and attribute index\n * in the chunk. Return NULL when the module doesn't exist. Typical\n * input string: \"{lhsm.archive_id}\". If not NULL, the returned\n * pointer points to '}'. 
*/\nstatic const char *extract_mod_attr(const char *str, struct fchunk *chunk)\n{\n    const char *start = str;\n    const char *end;\n    char *name = NULL;\n    char *p;\n    int rc;\n\n    if (*start != '{')\n        goto err;\n\n    start++;\n    end = start;\n\n    /* Find the closing bracket */\n    while (*end && *end != '}')\n        end++;\n\n    if (*end != '}')\n        goto err;\n\n    name = strndup(start, end - start);\n    if (name == NULL)\n        goto err;\n\n    /* Find the dot so we can get the module name */\n    p = strchr(name, '.');\n    if (p == NULL)\n        goto err;\n\n    *p = 0;\n\n    chunk->smi = smi_by_name(name);\n    if (chunk->smi == NULL)\n        goto err;\n\n    rc = sm_attr_get(chunk->smi, NULL, p + 1, NULL,\n                     &chunk->def, &chunk->attr_index);\n    if (rc)\n        goto err;\n\n    free(name);\n\n    return end;\n\n err:\n    free(name);\n    return NULL;\n}\n\n/* Append a time format, for %RA, %RC and %RM. */\nstatic const char *append_time_format(const char *str, struct fchunk *chunk)\n{\n    str++;\n    if (*str == 0) {\n        DisplayLog(LVL_CRIT, FIND_TAG,\n                   \"Error: incomplete time format at end of format string\");\n        return NULL;\n    }\n\n    if (*str == '{') {\n        /* Format is in a substring. */\n        chunk->time_format = g_string_sized_new(50);\n        g_string_append_c(chunk->format, 's');\n\n        str++;\n\n        /* Copy until the closing bracket */\n        while (*str && *str != '}') {\n            g_string_append_c(chunk->time_format, *str);\n            str++;\n        }\n\n        if (*str != '}') {\n            DisplayLog(LVL_CRIT, FIND_TAG, \"Error: invalid string format\");\n            return NULL;\n        }\n    } else if (*str == 'E' || *str == 'O') {\n        /* Format starts with an strftime format modifier. Next\n         * character is the directive. 
*/\n        g_string_append_c(chunk->format, *str);\n        str++;\n        if (*str == 0) {\n            DisplayLog(LVL_CRIT, FIND_TAG,\n                       \"Error: incomplete time format at end of format string\");\n            return NULL;\n        }\n\n        g_string_append_c(chunk->format, *str);\n    } else {\n        /* Straight directive. */\n        g_string_append_c(chunk->format, *str);\n    }\n\n    return str;\n}\n\n/* Helper function to parse int escape codes (\\NNN octal or \\xHH hex)\n * return the number of bytes read in str, or -1 if need a go-again for 0 */\nstatic int parse_escaped_int(const char *str, struct fchunk *chunk, int base)\n{\n    long int print_val;\n    char value_string[4], *endptr;\n    int numread;\n\n    if (base == 8) {\n        numread = 3;\n    } else if (base == 16) {\n        numread = 2;\n    } else {\n        RBH_BUG(\"Error: invalid base\");\n    }\n\n    /* copy next few chars and try to read as int */\n    rh_strncpy(value_string, str, numread+1); /* +1 for final '\\0' */\n    print_val = strtol(value_string, &endptr, base);\n    if (endptr == value_string) {\n        /* invalid hex */\n        return 0;\n    }\n\n    /* printf will copy any byte we put in format string unchanged,\n     * except for '\\0' and '%'.\n     * '\\0' is handled as a directive since printf will stop on a 0-byte\n     * even with a length specifier */\n    if (print_val == 0) {\n        /* if chunk->directive is already set, request a new chunk and\n         * parse this again */\n        if (chunk->directive)\n            return -1;\n        g_string_append(chunk->format, \"%c\");\n        chunk->directive = 'z';\n    } else if ((char)print_val == '%') {\n        g_string_append(chunk->format, \"%%\");\n    } else {\n        /* printf-agnostic character */\n        g_string_append_c(chunk->format, (char)print_val);\n    }\n\n    return endptr - value_string;\n}\n\n/* Analyze a format string, and find the next chunk. 
Each argument is\n * transformed into its real printf type. For instance \"%s\" means the\n * size, and is stored as an large integer in the database, needs to\n * be displayed as an \"%zu\". */\nstatic const char *extract_chunk(const char *str, struct fchunk *chunk)\n{\n    int rc;\n    chunk->directive = 0;\n\n    while (*str) {\n        if (*str != '%') {\n            if (*str == '\\\\') {\n                str++;\n\n                switch (*str) {\n                case '\\\\':\n                    g_string_append_c(chunk->format, '\\\\');\n                    break;\n\n                case 'n':\n                    g_string_append_c(chunk->format, '\\n');\n                    break;\n\n                case 't':\n                    g_string_append_c(chunk->format, '\\t');\n                    break;\n\n                case 'x':\n                    rc = parse_escaped_int(str+1, chunk, 16);\n                    if (rc == 0) {\n                        DisplayLog(LVL_CRIT, FIND_TAG,\n                                   \"Error: invalid \\\\x not followed by hex in format string\");\n                        return NULL;\n                    }\n                    /* need a new chunk for \\0, return back to \\\\ */\n                    if (rc == -1)\n                        return str - 1;\n                    str += rc;\n                    break;\n\n                case 0:\n                    DisplayLog(LVL_CRIT, FIND_TAG,\n                               \"Error: lone \\\\ at end of format string\");\n                    return NULL;\n\n                default:\n                    /* check for octal value */\n                    if (*str >= '0' && *str <= '7') {\n                        rc = parse_escaped_int(str, chunk, 8);\n                        /* need a new chunk for \\0, return back to \\\\ */\n                        if (rc == -1)\n                            return str - 1;\n                        str += rc - 1; /* -1 because no leading character */\n    
                    break;\n                    }\n\n                    DisplayLog(LVL_CRIT, FIND_TAG,\n                               \"Error: unrecognized escape code \\\\%c\", *str);\n                    return NULL;\n                }\n            } else {\n                g_string_append_c(chunk->format, *str);\n            }\n\n            str++;\n            continue;\n        }\n\n        if (chunk->directive) {\n            /* Already have a directive. Stop here. */\n            return str;\n        }\n\n        str++;\n\n        if (*str == 0) {\n            DisplayLog(LVL_CRIT, FIND_TAG,\n                       \"Error: lone %% at end of format string\");\n            return NULL;\n        }\n\n        /* Ignore %% as it is a valid printf directive, which saves a\n         * chunk. */\n        if (*str == '%') {\n            g_string_append(chunk->format, \"%%\");\n            str++;\n            continue;\n        }\n\n        /* Found a new directive */\n        g_string_append_c(chunk->format, '%');\n        str = extract_field_width(str, chunk->format);\n        if (str == NULL) {\n            DisplayLog(LVL_CRIT, FIND_TAG,\n                       \"Error: invalid length field in format string at %s\",\n                       str);\n            return NULL;\n        }\n\n        chunk->directive = *str;\n\n        switch (*str) {\n        case 'A':\n            disp_mask.std |= ATTR_MASK_last_access;\n            str = append_time_format(str, chunk);\n            if (str == NULL)\n                return NULL;\n            break;\n\n        case 'b':\n            disp_mask.std |= ATTR_MASK_blocks;\n            g_string_append(chunk->format, \"zu\");\n            break;\n\n        case 'C':\n            disp_mask.std |= ATTR_MASK_last_mdchange;\n            str = append_time_format(str, chunk);\n            if (str == NULL)\n                return NULL;\n            break;\n\n        case 'd':\n            disp_mask.std |= ATTR_MASK_depth;\n           
 g_string_append_c(chunk->format, 'u');\n            break;\n\n        case 'f':\n            disp_mask.std |= ATTR_MASK_name;\n            g_string_append_c(chunk->format, 's');\n            break;\n\n        case 'g':\n            disp_mask.std |= ATTR_MASK_gid;\n            if (global_config.uid_gid_as_numbers)\n                g_string_append_c(chunk->format, 'd');\n            else\n                g_string_append_c(chunk->format, 's');\n            break;\n\n        case 'M':\n            disp_mask.std |= ATTR_MASK_mode;\n            g_string_append_c(chunk->format, 's');\n            break;\n\n        case 'm':\n            disp_mask.std |= ATTR_MASK_mode;\n            g_string_append_c(chunk->format, 'o');\n            break;\n\n        case 'n':\n            disp_mask.std |= ATTR_MASK_nlink;\n            g_string_append_c(chunk->format, 'u');\n            break;\n\n        case 'p':\n            g_string_append_c(chunk->format, 's');\n            break;\n\n        case 's':\n            disp_mask.std |= ATTR_MASK_size;\n            g_string_append(chunk->format, \"zu\");\n            break;\n\n        case 'T':\n            disp_mask.std |= ATTR_MASK_last_mod;\n            str = append_time_format(str, chunk);\n            if (str == NULL)\n                return NULL;\n            break;\n\n        case 'u':\n            disp_mask.std |= ATTR_MASK_uid;\n            if (global_config.uid_gid_as_numbers)\n                g_string_append_c(chunk->format, 'd');\n            else\n                g_string_append_c(chunk->format, 's');\n            break;\n\n        case 'Y':\n            disp_mask.std |= ATTR_MASK_type;\n            g_string_append_c(chunk->format, 's');\n            break;\n\n        case 'y':\n            disp_mask.std |= ATTR_MASK_type;\n            g_string_append_c(chunk->format, 'c');\n            break;\n\n        case 'R':\n            str++;\n            chunk->sub_directive = *str;\n\n            switch (*str) {\n            case 
'C':\n                disp_mask.std |= ATTR_MASK_creation_time;\n                str = append_time_format(str, chunk);\n                if (str == NULL)\n                    return NULL;\n                break;\n\n            case 'c':\n                disp_mask.std |= ATTR_MASK_fileclass;\n                g_string_append_c(chunk->format, 's');\n                break;\n\n            case 'f':\n                g_string_append_c(chunk->format, 's');\n                break;\n\n            case 'P':\n                disp_mask.std |= ATTR_MASK_projid;\n                g_string_append_c(chunk->format, 'u');\n                break;\n\n            case 'm':\n                /* Module name and attribute followed by\n                 * format. e.g. \"%Rm{lhsm.archive_id}\". */\n                str++;\n\n                str = extract_mod_attr(str, chunk);\n                if (str == NULL) {\n                    DisplayLog(LVL_CRIT, FIND_TAG,\n                               \"Error: cannot extract module attribute name, or invalid name\");\n                    return NULL;\n                }\n\n                attr_mask_set_index(&disp_mask, chunk->attr_index);\n\n                if (strcmp(chunk->def->user_name, \"status\") == 0) {\n                    /* status is a special case. Change the directive\n                     * for print_entry(). 
*/\n                    chunk->sub_directive = SUB_DIRECTIVE_STATUS;\n                    g_string_append_c(chunk->format, 's');\n\n                    break;\n                }\n\n                chunk->rel_sm_info_index = attr2sminfo_index(chunk->attr_index)\n                                           - chunk->smi->sm_info_offset;\n\n                /* The format for that attribute */\n                switch (chunk->def->db_type) {\n                case DB_UINT:\n                case DB_BOOL:\n                    g_string_append_c(chunk->format, 'u');\n                    break;\n\n                case DB_INT:\n                    g_string_append_c(chunk->format, 'i');\n                    break;\n\n                case DB_TEXT:\n                    g_string_append_c(chunk->format, 's');\n                    break;\n\n                default:\n                    DisplayLog(LVL_CRIT, FIND_TAG,\n                               \"Error: unsupported database format %d\",\n                               chunk->def->db_type);\n                    break;\n                }\n                break;\n\n            case 'o':\n                disp_mask.std |= ATTR_MASK_stripe_items;\n                g_string_append_c(chunk->format, 's');\n                break;\n\n            case 'p':\n                disp_mask.std |= ATTR_MASK_parent_id;\n                g_string_append_c(chunk->format, 's');\n                break;\n\n            case 0:\n                DisplayLog(LVL_CRIT, FIND_TAG,\n                           \"Error: lone %%R at end of format string\");\n                return NULL;\n\n            default:\n                DisplayLog(LVL_CRIT, FIND_TAG,\n                           \"Error: unrecognized format %%R%c\", *str);\n                return NULL;\n            }\n            break;\n\n        case 0:\n            DisplayLog(LVL_CRIT, FIND_TAG,\n                       \"Error: lone %% at end of format string\");\n            return NULL;\n\n        default:\n    
        DisplayLog(LVL_CRIT, FIND_TAG, \"Error: unrecognized format %%%c\",\n                       *str);\n            return NULL;\n        }\n\n        str++;\n    }\n\n    return str;\n}\n\nstatic void printf_date(const struct fchunk *chunk, time_t date)\n{\n    char str[1000];\n    struct tm *tmp;\n    size_t sret;\n\n    tmp = localtime(&date);\n    if (tmp == NULL) {\n        printf(\"(none)\");\n        return;\n    }\n\n    if (chunk->time_format)\n        sret = strftime(str, sizeof(str), chunk->time_format->str, tmp);\n    else\n        sret = strftime(str, sizeof(str), chunk->format->str, tmp);\n\n    if (sret >= sizeof(str) - 1) {\n        /* Overflow. 1000 bytes should be big enough for that to never\n         * happen in any locale. */\n        printf(\"(date output truncated)\");\n    } else if (sret == 0) {\n        /* According to the man page, a return of 0 is either an error\n         * or an empty string. In both cases, don't print anything. */\n    } else {\n        if (chunk->time_format)\n            printf(chunk->format->str, str);\n        else\n            printf(\"%s\", str);\n    }\n}\n\n/**\n * Output the desired information for one file.\n */\nvoid printf_entry(GArray *chunks, const wagon_t *id, const attr_set_t *attrs)\n{\n    int i;\n\n    for (i = 0; i < chunks->len; i++) {\n        struct fchunk *chunk = &g_array_index(chunks, struct fchunk, i);\n        const char *format = chunk->format->str;\n\n        switch (chunk->directive) {\n        case 0:\n#if __GNUC__ >= 7\n/*\n * \"format\" below is constructed safely, ignore the warning.\n * Old GCC versions do not like these statements\n */\n#pragma GCC diagnostic push\n#pragma GCC diagnostic ignored \"-Wformat-security\"\n#endif\n            printf(format);\n#if __GNUC__ >= 7\n#pragma GCC diagnostic pop\n#endif\n            break;\n\n        case 'A':\n            printf_date(chunk, ATTR(attrs, last_access));\n            break;\n\n        case 'b':\n            printf(format, 
ATTR(attrs, blocks));\n            break;\n\n        case 'C':\n            printf_date(chunk, ATTR(attrs, last_mdchange));\n            break;\n\n        case 'd':\n            printf(format, ATTR(attrs, depth));\n            break;\n\n        case 'f':\n            printf(format, ATTR(attrs, name));\n            break;\n\n        case 'g':\n            if (global_config.uid_gid_as_numbers)\n                printf(format, ATTR(attrs, gid).num);\n            else\n                printf(format, ATTR(attrs, gid).txt);\n            break;\n\n        case 'm':\n            printf(format, ATTR(attrs, mode));\n            break;\n\n        case 'M':\n            {\n                char mode_str[10];  /* mask + final '\\0' */\n\n                mode_str[9] = 0;\n                mode_string(ATTR(attrs, mode), mode_str);\n\n                printf(format, mode_str);\n            }\n            break;\n\n        case 'n':\n            printf(format, ATTR(attrs, nlink));\n            break;\n\n        case 'p':\n            if (prog_options.escaped)\n                printf(format, escape_name(id->fullname));\n            else\n                printf(format, id->fullname);\n            break;\n\n        case 's':\n            printf(format, ATTR(attrs, size));\n            break;\n\n        case 'T':\n            printf_date(chunk, ATTR(attrs, last_mod));\n            break;\n\n        case 'u':\n            if (global_config.uid_gid_as_numbers)\n                printf(format, ATTR(attrs, uid).num);\n            else\n                printf(format, ATTR(attrs, uid).txt);\n            break;\n\n        case 'Y':\n            {\n                const char *type;\n\n                if (!ATTR_MASK_TEST(attrs, type))\n                    type = \"?\";\n                else\n                    type = type2char(ATTR(attrs, type));\n\n                printf(format, type);\n            }\n            break;\n\n        case 'y':\n            {\n                char type;\n\n            
    if (!ATTR_MASK_TEST(attrs, type))\n                    type = '?';\n                else\n                    type = type2onechar(ATTR(attrs, type));\n\n                printf(format, type);\n            }\n            break;\n\n        case 'z':\n            printf(format, 0);\n            break;\n\n        case 'R':\n            /* Robinhood specifiers */\n            switch (chunk->sub_directive) {\n            case 'C':\n                printf_date(chunk, ATTR(attrs, creation_time));\n                break;\n\n            case 'c':\n                printf(format,\n                       class_format(ATTR_MASK_TEST(attrs, fileclass) ?\n                                    ATTR(attrs, fileclass) : NULL));\n                break;\n\n            case 'f':\n                {\n                    char fid_str[RBH_FID_LEN];\n\n                    sprintf(fid_str, DFID_NOBRACE, PFID(&id->id));\n                    printf(format, fid_str);\n                }\n                break;\n\n            case 'P':\n                printf(format, ATTR_MASK_TEST(attrs, projid)?ATTR(attrs, projid):0);\n                break;\n\n            case 'm':\n                if (ATTR_MASK_INFO_TEST(attrs, chunk->smi,\n                                        chunk->rel_sm_info_index)) {\n                    switch (chunk->def->db_type) {\n                    case DB_UINT:\n                        printf(format,\n                               *(unsigned int *)SMI_INFO(attrs, chunk->smi,\n                                                         chunk->\n                                                         rel_sm_info_index));\n                        break;\n\n                    case DB_INT:\n                        printf(format,\n                               *(int *)SMI_INFO(attrs, chunk->smi,\n                                                chunk->rel_sm_info_index));\n                        break;\n\n                    case DB_BOOL:\n                        printf(format,\n   
                            *(bool *)SMI_INFO(attrs, chunk->smi,\n                                                 chunk->rel_sm_info_index));\n                        break;\n\n                    case DB_TEXT:\n                        printf(format,\n                               SMI_INFO(attrs, chunk->smi,\n                                        chunk->rel_sm_info_index));\n                        break;\n\n                    default:\n                        break;\n                    }\n                } else {\n                    switch (chunk->def->db_type) {\n                    case DB_UINT:\n                    case DB_INT:\n                        printf(format, 0);\n                        break;\n\n                    case DB_TEXT:\n                        printf(format, \"[n/a]\");\n                        break;\n\n                    default:\n                        break;\n                    }\n                }\n                break;\n\n#ifdef _LUSTRE\n            case 'o':\n                if (ATTR_MASK_TEST(attrs, stripe_items) &&\n                    (ATTR(attrs, stripe_items).count > 0)) {\n                    GString *osts = g_string_new(\"\");\n\n                    append_stripe_list(osts, &ATTR(attrs, stripe_items), true);\n                    printf(format, osts->str);\n                    g_string_free(osts, TRUE);\n                }\n                break;\n\n            case 'p':\n                {\n                    char fid_str[RBH_FID_LEN];\n\n                    sprintf(fid_str, DFID_NOBRACE,\n                            PFID(&ATTR(attrs, parent_id)));\n                    printf(format, fid_str);\n\n                    break;\n                }\n#endif\n\n            case SUB_DIRECTIVE_STATUS:\n                {\n                    unsigned int smi_index = chunk->smi->smi_index;\n\n                    if (ATTR_MASK_STATUS_TEST(attrs, smi_index))\n                        printf(format, STATUS_ATTR(attrs, smi_index));\n   
                 else\n                        printf(format, \"[n/a]\");\n\n                    break;\n                }\n\n            }\n            break;\n        }\n    }\n}\n#pragma GCC diagnostic error \"-Wformat-security\"\n\n/**\n * Release the ressources allocated by prepare_printf_format.\n */\nvoid free_printf_formats(GArray *chunks)\n{\n    int i;\n\n    for (i = 0; i < chunks->len; i++) {\n        struct fchunk *chunk = &g_array_index(chunks, struct fchunk, i);\n\n        g_string_free(chunk->format, TRUE);\n        if (chunk->time_format)\n            g_string_free(chunk->time_format, TRUE);\n    }\n\n    g_array_unref(chunks);\n}\n\n/**\n * Split the format string and store the result in an array. Validate\n * each chunk at the same time.\n * Return the array on success, or NULL on error.\n *\n * This function is called only once per format string.\n */\nGArray *prepare_printf_format(const char *format)\n{\n    struct fchunk chunk;\n    GArray *chunks;\n\n    chunks = g_array_sized_new(FALSE, FALSE, sizeof(struct fchunk), 10);\n\n    while (*format) {\n        chunk.format = g_string_sized_new(50);\n\n        format = extract_chunk(format, &chunk);\n        g_array_append_val(chunks, chunk);\n\n        if (format == NULL)\n            goto free;\n    }\n\n    return chunks;\n\n free:\n    free_printf_formats(chunks);\n\n    return NULL;\n}\n"
  },
  {
    "path": "src/robinhood/rbh_import.c",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2009, 2010 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n/**\n * Command for restoring an entry that was accidentally removed from filesystem.\n */\n\n#ifdef HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n\n#include \"list_mgr.h\"\n#include \"rbh_cfg.h\"\n#include \"rbh_logs.h\"\n#include \"rbh_misc.h\"\n#include \"xplatform_print.h\"\n#include \"backend_ext.h\"\n#include \"rbh_basename.h\"\n\n#include <unistd.h>\n#include <getopt.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <errno.h>\n#include <pthread.h>\n#include <signal.h>\n\n#define LOGTAG \"Import\"\n\nstatic struct option option_tab[] = {\n    /* options for cancelling remove operation */\n    {\"list\", no_argument, NULL, 'L'},\n    {\"restore\", no_argument, NULL, 'R'},\n\n    /* config file options */\n    {\"config-file\", required_argument, NULL, 'f'},\n\n    /* log options */\n    {\"log-level\", required_argument, NULL, 'l'},\n\n    /* miscellaneous options */\n    {\"help\", no_argument, NULL, 'h'},\n    {\"version\", no_argument, NULL, 'V'},\n\n    {NULL, 0, NULL, 0}\n\n};\n\n#define SHORT_OPT_STRING    \"LRf:l:hV\"\n\n/* global variables */\n\nstatic lmgr_t lmgr;\nstatic int force_stop = 0;\n\n/* special character sequences for displaying help */\n\n/* Bold start character sequence */\n#define _B \"\u001b[1m\"\n/* Bold end character sequence */\n#define B_ \"\u001b[m\"\n\n/* Underline start character sequence */\n#define _U \"\u001b[4m\"\n/* Underline end character sequence */\n#define U_ \"\u001b[0m\"\n\nstatic const char *help_string =\n    _B \"Usage:\" B_ \" %s [options] 
<backend_path> <import_path>\\n\" \"\\n\"\n// TODO: to be implemented\n//    _B \"Import options:\" B_ \"\\n\"\n//    \"    \" _B \"-H\" B_ \", \" _B \"--hardlink\" B_ \"\\n\"\n//    \"        Create hardlinks instead of moving files.\\n\"\n//    \"\\n\"\n    _B \"Config file options:\" B_ \"\\n\"\n    \"    \" _B \"-f\" B_ \" \" _U \"file\" U_ \", \" _B \"--config-file=\" B_ _U \"file\" U_\n    \"\\n\" \"        Path to configuration file (or short name).\\n\" \"\\n\" _B\n    \"Miscellaneous options:\" B_ \"\\n\" \"    \" _B \"-l\" B_ \" \" _U \"level\" U_ \", \" _B\n    \"--log-level=\" B_ _U \"level\" U_ \"\\n\"\n    \"        Force the log verbosity level (overides configuration value).\\n\"\n    \"        Allowed values: CRIT, MAJOR, EVENT, VERB, DEBUG, FULL.\\n\" \"    \" _B\n    \"-h\" B_ \", \" _B \"--help\" B_ \"\\n\"\n    \"        Display a short help about command line options.\\n\" \"    \" _B \"-V\"\n    B_ \", \" _B \"--version\" B_ \"\\n\" \"        Display version info\\n\";\n\nstatic inline void display_help(const char *bin_name)\n{\n    printf(help_string, bin_name);\n}\n\nstatic inline void display_version(const char *bin_name)\n{\n    printf(\"\\n\");\n    printf(\"Product:         \" PACKAGE_NAME \" import tool\\n\");\n    printf(\"Version:         \" PACKAGE_VERSION \"-\" RELEASE \"\\n\");\n    printf(\"Build:           \" COMPIL_DATE \"\\n\");\n    printf(\"\\n\");\n    printf(\"Compilation switches:\\n\");\n\n/* purpose of this daemon */\n#ifdef _LUSTRE_HSM\n    printf(\"    Lustre-HSM Policy Engine\\n\");\n#elif defined(_TMP_FS_MGR)\n    printf(\"    Temporary filesystem manager\\n\");\n#elif defined(_HSM_LITE)\n    printf(\"    Backup filesystem to external storage\\n\");\n#else\n#error \"No purpose was specified\"\n#endif\n\n/* Access by Fid ? 
*/\n#ifdef _HAVE_FID\n    printf(\"    Address entries by FID\\n\");\n#else\n    printf(\"    Address entries by path\\n\");\n#endif\n\n#ifdef HAVE_CHANGELOGS\n    printf(\"    MDT Changelogs supported\\n\");\n#else\n    printf(\"    MDT Changelogs disabled\\n\");\n#endif\n\n    printf(\"\\n\");\n#ifdef _LUSTRE\n#ifdef LUSTRE_VERSION\n    printf(\"Lustre Version: \" LUSTRE_VERSION \"\\n\");\n#else\n    printf(\"Lustre FS support\\n\");\n#endif\n#else\n    printf(\"No Lustre support\\n\");\n#endif\n\n#ifdef _MYSQL\n    printf(\"Database binding: MySQL\\n\");\n#elif defined(_SQLITE)\n    printf(\"Database binding: SQLite\\n\");\n#else\n#error \"No database was specified\"\n#endif\n    printf(\"\\n\");\n    printf(\"Report bugs to: <\" PACKAGE_BUGREPORT \">\\n\");\n    printf(\"\\n\");\n}\n\nstatic int import_helper(const char *backend_path, char *tgt_path,  /* in/out */\n                         char *new_backend_path, struct stat *src_md)\n{\n    entry_id_t old_id, new_id;\n    recov_status_t st;\n    attr_set_t attrs, new_attrs, src_attrs;\n    int rc;\n\n    /* to check src path */\n    const char *name;\n    const char *first;\n    const char *second;\n    char dummy[RBH_PATH_MAX] = \"\";\n\n    name = rh_basename(backend_path);\n\n    /* clean import path if it already has fid in it */\n    if ((second = strrchr(name, '_')) && (second != name)\n        && (*(first = second - 1) == '_')\n        && (sscanf(second + 1, SFID \"%s\", RFID(&old_id), dummy) >= 3)) {\n        if (EMPTY_STRING(dummy) || !strcmp(dummy, \"z\")) {\n            DisplayLog(LVL_EVENT, LOGTAG, \"'%s' ends with a fid: \" DFID_NOBRACE,\n                       name, PFID(&old_id));\n\n            if (strlen(first) <= strlen(tgt_path)) {\n                /* otherwise, it can't terminate with a fid */\n                char *end_of_tgt = tgt_path + strlen(tgt_path) - strlen(first);\n                if (!strcmp(end_of_tgt, first))\n                    /* clean fid in target path */\n                  
  *end_of_tgt = '\\0';\n            }\n        } else {\n            DisplayLog(LVL_MAJOR, LOGTAG,\n                       \"'%s' has garbage ('%s') after fid (\" DFID_NOBRACE \")\",\n                       name, dummy, PFID(&old_id));\n            memset(&old_id, 0, sizeof(old_id));\n        }\n    } else\n        memset(&old_id, 0, sizeof(old_id));\n\n    printf(\"Importing '%s' as '%s'...\\n\", backend_path, tgt_path);\n\n    ATTR_MASK_INIT(&attrs);\n    ATTR_MASK_INIT(&src_attrs);\n    ATTR_MASK_INIT(&new_attrs);\n\n    ATTR_MASK_SET(&attrs, backendpath);\n    strcpy(ATTR(&attrs, backendpath), backend_path);\n\n    ATTR_MASK_SET(&attrs, fullpath);\n    strcpy(ATTR(&attrs, fullpath), tgt_path);\n\n    /* merge with source MD (but don't override) */\n    if (src_md) {\n        /* if the entry is a symlink, get its content */\n        if (S_ISLNK(src_md->st_mode)) {\n            const size_t bufflen = sizeof(ATTR(&attrs, link));\n            rc = readlink(backend_path, ATTR(&attrs, link), bufflen);\n            if (rc >= 0) {\n                if (rc >= bufflen)\n                    ATTR(&attrs, link)[bufflen - 1] = '\\0';\n                else\n                    ATTR(&attrs, link)[rc] = '\\0';\n\n                ATTR_MASK_SET(&attrs, link);\n            }\n        }\n\n        stat2rbh_attrs(src_md, &src_attrs, true);\n        ListMgr_MergeAttrSets(&attrs, &src_attrs, false);\n    }\n\n    /* create file in Lustre */\n    st = rbhext_recover(&old_id, &attrs, &new_id, &new_attrs, src_md);\n    if ((st == RS_FILE_OK) || (st == RS_FILE_DELTA) || (st == RS_FILE_EMPTY)\n        || (st == RS_NON_FILE)) {\n        printf(\"\\tSuccess\\n\");\n\n        /* don't insert readonly attrs */\n        new_attrs.attr_mask &= ~readonly_attr_set;\n\n        /* insert or update it in the db */\n        rc = ListMgr_Insert(&lmgr, &new_id, &new_attrs, true);\n        if (rc == 0)\n            printf(\"\\tEntry successfully updated in the dabatase\\n\");\n        else\n            
fprintf(stderr, \"ERROR %d inserting entry in the database\\n\", rc);\n        return rc;\n    } else {\n        fprintf(stderr, \"ERROR importing '%s' as '%s'\\n\", backend_path,\n                tgt_path);\n        return -1;\n    }\n}\n\nstatic int perform_import(const char *src_path, const char *tgt_path,\n                          uint64_t *import_count, uint64_t *err_count,\n                          struct stat *md_in)\n{\n    int rc;\n\n    char bk_path[RBH_PATH_MAX] = \"\";\n    char fs_path[RBH_PATH_MAX] = \"\";\n    char new_bk_path[RBH_PATH_MAX] = \"\";\n\n    DIR *dirp;\n    struct dirent direntry;\n    struct dirent *dircookie;\n    struct stat md, src_md, tgt_md;\n    int dir_init_err = 0;   /* errors before importing the directory */\n\n    printf(\"%s\\n\", src_path);\n    if (md_in)\n        src_md = *md_in;\n    else if (lstat(src_path, &src_md) != 0) {\n        rc = -errno;\n        DisplayLog(LVL_CRIT, LOGTAG, \"ERROR: lstat failed on %s: %s\",\n                   src_path, strerror(-rc));\n        return rc;\n    }\n\n    /* handle the case when src_path is a file or symlink */\n    if (!S_ISDIR(src_md.st_mode)) {\n        /* tmp copy of path to modify it */\n        rh_strncpy(fs_path, tgt_path, sizeof(fs_path));\n\n        /* is target an exitsting dir? 
(or link to a dir) */\n        if ((stat(tgt_path, &tgt_md) == 0)\n            && (S_ISDIR(tgt_md.st_mode))) {\n            /* tmp copy of path to modify it */\n            rh_strncpy(bk_path, src_path, sizeof(fs_path));\n            snprintf(fs_path, sizeof(fs_path), \"%s/%s\", tgt_path,\n                     rh_basename(bk_path));\n        }\n\n        if ((rc = import_helper(src_path, fs_path, new_bk_path, &src_md)))\n            (*err_count)++;\n        else\n            (*import_count)++;\n\n        return rc;\n    } else {\n        /* 2nd arg of import_helper is in/out */\n        rh_strncpy(fs_path, tgt_path, sizeof(fs_path));\n\n        /* import directory (create in the backend with the same rights\n         * and owner) */\n        if ((rc = import_helper(src_path, fs_path, new_bk_path, &src_md)))\n            (*err_count)++;\n        else\n            (*import_count)++;\n    }\n\n    /* scan bkpath */\n    if ((dirp = opendir(src_path)) == NULL) {\n        rc = -errno;\n        DisplayLog(LVL_CRIT, LOGTAG,\n                   \"opendir on %s failed: Error %d: %s\",\n                   src_path, -rc, strerror(-rc));\n        (*err_count)++;\n        return rc;\n    }\n    dir_init_err = *err_count;\n\n    while (1) {\n        rc = readdir_r(dirp, &direntry, &dircookie);\n\n        if (rc == 0 && dircookie == NULL)\n            /* end of directory */\n            break;\n        else if (force_stop) {\n            DisplayLog(LVL_EVENT, LOGTAG,\n                       \"Stop requested: cancelling import of %s\", src_path);\n            return 0;\n        } else if (rc != 0) {\n            DisplayLog(LVL_CRIT, LOGTAG, \"ERROR %d reading directory '%s': %s\",\n                       rc, src_path, strerror(rc));\n            (*err_count)++;\n            break;\n        }\n        /* ignore . and .. 
*/\n        else if (!strcmp(direntry.d_name, \".\")\n                 || !strcmp(direntry.d_name, \"..\"))\n            continue;\n\n        snprintf(bk_path, sizeof(bk_path), \"%s/%s\", src_path, direntry.d_name);\n        snprintf(fs_path, sizeof(fs_path), \"%s/%s\", tgt_path, direntry.d_name);\n\n        /* what kind of entry is it? */\n        if (lstat(bk_path, &md) != 0) {\n            DisplayLog(LVL_CRIT, LOGTAG, \"ERROR calling lstat(%s): %s\",\n                       bk_path, strerror(errno));\n            (*err_count)++;\n            continue;\n        }\n        if (S_ISDIR(md.st_mode)) {\n            /* recurse */\n            rc = perform_import(bk_path, fs_path, import_count, err_count, &md);\n            if (rc)\n                continue;\n        } else {\n            if (import_helper(bk_path, fs_path, new_bk_path, &md))\n                (*err_count)++;\n            else\n                (*import_count)++;\n        }\n    }\n    closedir(dirp);\n\n    /* no error when importing this directory => remove it from source dir */\n    if (dir_init_err == *err_count) {\n        if (rmdir(src_path)) {\n            DisplayLog(LVL_MAJOR, LOGTAG,\n                       \"Cannot remove source directory %s: %s\", src_path,\n                       strerror(errno));\n            (*err_count)++;\n        } else\n            printf(\"Removed empty source directory %s\\n\", src_path);\n    }\n\n    return 0;\n}\n\nstatic void terminate_handler(int sig)\n{\n    force_stop = 1;\n}\n\n#define MAX_OPT_LEN 1024\n\n/**\n * Main daemon routine\n */\nint main(int argc, char **argv)\n{\n    int c, option_index = 0;\n    const char *bin;\n\n    char config_file[MAX_OPT_LEN] = \"\";\n    uint64_t total = 0;\n    uint64_t err = 0;\n\n    int rc;\n    char err_msg[4096];\n    robinhood_config_t config;\n    bool chgd = false;\n    char badcfg[RBH_PATH_MAX];\n\n    struct sigaction act_sigterm;\n\n    bin = rh_basename(argv[0]); /* supports NULL argument */\n\n    /* parse 
command line options */\n    while ((c = getopt_long(argc, argv, SHORT_OPT_STRING, option_tab,\n                            &option_index)) != -1) {\n        switch (c) {\n        case 'f':\n            rh_strncpy(config_file, optarg, MAX_OPT_LEN);\n            break;\n        case 'l':\n        {\n            int log_level = str2debuglevel(optarg);\n\n            if (log_level == -1) {\n                fprintf(stderr,\n                        \"Unsupported log level '%s'. CRIT, MAJOR, EVENT, VERB, DEBUG or FULL expected.\\n\",\n                        optarg);\n                exit(1);\n            }\n            force_debug_level(log_level);\n            break;\n        }\n        case 'h':\n            display_help(bin);\n            exit(0);\n            break;\n        case 'V':\n            display_version(bin);\n            exit(0);\n            break;\n        case ':':\n        case '?':\n        default:\n            display_help(bin);\n            exit(1);\n            break;\n        }\n    }\n\n    /* 2 expected argument: src_path, tgt_path */\n    if (optind != argc - 2) {\n        fprintf(stderr, \"Error: missing arguments on command line.\\n\");\n        display_help(bin);\n        exit(1);\n    }\n\n    rc = rbh_init_internals();\n    if (rc != 0)\n        exit(rc);\n\n    /* get default config file, if not specified */\n    if (SearchConfig(config_file, config_file, &chgd, badcfg,\n                     MAX_OPT_LEN) != 0) {\n        fprintf(stderr, \"No config file (or too many) found matching %s\\n\",\n                badcfg);\n        exit(2);\n    } else if (chgd) {\n        fprintf(stderr, \"Using config file '%s'.\\n\", config_file);\n    }\n\n    /* only read ListMgr config */\n    if (ReadRobinhoodConfig(0, config_file, err_msg, &config, false)) {\n        fprintf(stderr, \"Error reading configuration file '%s': %s\\n\",\n                config_file, err_msg);\n        exit(1);\n    }\n\n    if (!config.log_config.force_debug_level)\n        
config.log_config.debug_level = log_level;\n\n    /* XXX HOOK: Set logging to stderr */\n    strcpy(config.log_config.log_file, \"stderr\");\n    strcpy(config.log_config.report_file, \"stderr\");\n    strcpy(config.log_config.alert_file, \"stderr\");\n\n    /* Initialize logging */\n    rc = InitializeLogs(bin, &config.log_config);\n    if (rc) {\n        fprintf(stderr, \"Error opening log files: rc=%d, errno=%d: %s\\n\",\n                rc, errno, strerror(errno));\n        exit(rc);\n    }\n\n    /* Initialize Filesystem access */\n    rc = InitFS();\n    if (rc)\n        exit(rc);\n\n    /* Initialize status managers (XXX all or just the one used for import?) */\n    rc = smi_init_all(options.flags);\n    if (rc)\n        exit(rc);\n\n    /* Initialize list manager */\n    rc = ListMgr_Init(0);\n    if (rc) {\n        DisplayLog(LVL_CRIT, LOGTAG, \"Error initializing list manager: %s (%d)\",\n                   lmgr_err2str(rc), rc);\n        exit(rc);\n    } else\n        DisplayLog(LVL_DEBUG, LOGTAG, \"ListManager successfully initialized\");\n\n    if (CheckLastFS() != 0)\n        exit(1);\n\n    /* Create database access */\n    rc = ListMgr_InitAccess(&lmgr);\n    if (rc) {\n        DisplayLog(LVL_CRIT, LOGTAG, \"Error %d: cannot connect to database\",\n                   rc);\n        exit(rc);\n    }\n#ifdef _HSM_LITE\n    rc = Backend_Start(&config.backend_config, 0);\n    if (rc) {\n        DisplayLog(LVL_CRIT, LOGTAG, \"Error initializing backend\");\n        exit(1);\n    }\n#endif\n\n    /* create signal handlers */\n    memset(&act_sigterm, 0, sizeof(act_sigterm));\n    act_sigterm.sa_flags = 0;\n    act_sigterm.sa_handler = terminate_handler;\n    if (sigaction(SIGTERM, &act_sigterm, NULL) == -1\n        || sigaction(SIGINT, &act_sigterm, NULL) == -1) {\n        DisplayLog(LVL_CRIT, LOGTAG,\n                   \"Error while setting signal handlers for SIGTERM and SIGINT: %s\",\n                   strerror(errno));\n        exit(1);\n    }\n\n    
rc = perform_import(argv[optind], argv[optind + 1], &total, &err, NULL);\n    if (rc)\n        fprintf(stderr, \"Import terminated with error %d\\n\", rc);\n    else if (force_stop)\n        fprintf(stderr, \"Import aborted by user\\n\");\n\n    printf(\"Import summary: %\" PRIu64 \" entries imported, %\" PRIu64 \" errors\\n\",\n           total, err);\n\n    ListMgr_CloseAccess(&lmgr);\n\n    return rc;\n\n}\n"
  },
  {
    "path": "src/robinhood/rbh_rebind.c",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2009, 2010 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n/**\n * Command for rebinding a backend entry to a new fid.\n */\n\n#ifdef HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n\n#include \"list_mgr.h\"\n#include \"rbh_cfg.h\"\n#include \"rbh_logs.h\"\n#include \"rbh_misc.h\"\n#include \"xplatform_print.h\"\n#include \"cmd_helpers.h\"\n#include \"rbh_basename.h\"\n\n#include <unistd.h>\n#include <getopt.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <errno.h>\n#include <pthread.h>\n#include <signal.h>\n\n#define LOGTAG \"Rebind\"\n\nstatic struct option option_tab[] = {\n    /* config file options */\n    {\"config-file\", required_argument, NULL, 'f'},\n\n    /* log options */\n    {\"log-level\", required_argument, NULL, 'l'},\n\n    /* status manager selector */\n    {\"statusmgr\", required_argument, NULL, 's'},\n    {\"status-mgr\", required_argument, NULL, 's'},\n\n    /* miscellaneous options */\n    {\"help\", no_argument, NULL, 'h'},\n    {\"version\", no_argument, NULL, 'V'},\n\n    {NULL, 0, NULL, 0}\n\n};\n\nstatic sm_instance_t *smi = NULL;\n\n#define SHORT_OPT_STRING    \"f:l:hVs:\"\n\n/* special character sequences for displaying help */\n\n/* Bold start character sequence */\n#define _B \"\u001b[1m\"\n/* Bold end character sequence */\n#define B_ \"\u001b[m\"\n\n/* Underline start character sequence */\n#define _U \"\u001b[4m\"\n/* Underline end character sequence */\n#define U_ \"\u001b[0m\"\n\n// rbh-rebind  <old_identifier> <new_identifier> <target_path> [new_fid]\n// Examples:\n// backup:     backend_path=/old   fid=0xxx:xx:xx      
/fs/foo\n// lhsm:       fid=xxx             fid=yyy             /fs/foo\n// lhsm:       uuid=abc            uuid=def            /fs/foo\n// other:      output=abc          output=def          /fs/foo 0xxx:xx:xx\n\nstatic const char *help_string =\n    _B \"Usage:\" B_ \" %s [options] <old_bk_id> <new_bk_id> <new_path> [new_fid]\\n\"\n    \"\\n\"\n    \"<old_bk_id>: old backend identifier specified as <attr>=<value>\\n\"\n    \"             e.g. fid=0x:x:x, uuid=xxxx, backend_path=/x/y\\n\"\n    \"<new_bk_id>: new backend identifier specified as <attr>=<value>\\n\"\n    \"             e.g. fid=0x:y:y, uuid=yyyy, backend_path=/x/z\\n\"\n    \"<new_path>: path in the filesystem where the new entry is (or will be) located.\\n\"\n    \"<new_fid>: by default, new_fid is taken as the current fid of new_path \\n\"\n    \"           but it might be different\\n\"\n    \"\\n\"\n    _B \"Module option:\" B_ \"\\n\"\n    \"    \" _B \"--status-mgr\" B_\" \" _U \"statusmgr\" U_\", \"\n           _B \"-s\" B_\" \" _U \"statusmgr\" U_\"\\n\"\n    \"\\n\"\n    _B \"Config file options:\" B_ \"\\n\"\n    \"    \" _B \"-f\" B_ \" \" _U \"file\" U_ \", \" _B \"--config-file=\" B_ _U \"file\" U_ \"\\n\"\n    \"        Path to configuration file (or short name).\\n\"\n    \"\\n\"\n    _B \"Miscellaneous options:\" B_ \"\\n\"\n    \"    \" _B \"-l\" B_ \" \" _U \"level\" U_ \", \" _B \"--log-level=\" B_ _U \"level\" U_ \"\\n\"\n    \"        Force the log verbosity level (overides configuration value).\\n\"\n    \"        Allowed values: CRIT, MAJOR, EVENT, VERB, DEBUG, FULL.\\n\"\n    \"    \" _B \"-h\" B_ \", \" _B \"--help\" B_ \"\\n\"\n    \"        Display a short help about command line options.\\n\"\n    \"    \" _B \"-V\" B_ \", \" _B \"--version\" B_ \"\\n\"\n    \"        Display version info\\n\";\n\nstatic inline void display_help(const char *bin_name)\n{\n    printf(help_string, bin_name);\n}\n\nstatic inline void display_version(const char *bin_name)\n{\n    
printf(\"\\n\");\n    printf(\"Product:         \" PACKAGE_NAME \" rebind tool\\n\");\n    printf(\"Version:         \" PACKAGE_VERSION \"-\" RELEASE \"\\n\");\n    printf(\"Build:           \" COMPIL_DATE \"\\n\");\n    printf(\"\\n\");\n    printf(\"Compilation switches:\\n\");\n\n/* Access by Fid ? */\n#ifdef _HAVE_FID\n    printf(\"    Address entries by FID\\n\");\n#else\n    printf(\"    Address entries by path\\n\");\n#endif\n\n#ifdef HAVE_CHANGELOGS\n    printf(\"    MDT Changelogs supported\\n\");\n#else\n    printf(\"    MDT Changelogs disabled\\n\");\n#endif\n\n    printf(\"\\n\");\n#ifdef _LUSTRE\n#ifdef LUSTRE_VERSION\n    printf(\"Lustre Version: \" LUSTRE_VERSION \"\\n\");\n#else\n    printf(\"Lustre FS support\\n\");\n#endif\n#else\n    printf(\"No Lustre support\\n\");\n#endif\n\n#ifdef _MYSQL\n    printf(\"Database binding: MySQL\\n\");\n#elif defined(_SQLITE)\n    printf(\"Database binding: SQLite\\n\");\n#else\n#error \"No database was specified\"\n#endif\n    printf(\"\\n\");\n    printf(\"Report bugs to: <\" PACKAGE_BUGREPORT \">\\n\");\n    printf(\"\\n\");\n}\n\nstatic int read_fid(const char *fid_str, entry_id_t *fid)\n{\n    int nb_read;\n\n    /* parse fid */\n    if (fid_str[0] == '[')\n        nb_read = sscanf(fid_str, \"[\" SFID \"]\", RFID(fid));\n    else\n        nb_read = sscanf(fid_str, SFID, RFID(fid));\n\n    if (nb_read != FID_SCAN_CNT) {\n        DisplayLog(LVL_CRIT, LOGTAG, \"Unexpected format for fid %s\",\n                   fid_str);\n        return -EINVAL;\n    }\n\n    return 0;\n}\n\nstatic const char *recov_status2str(recov_status_t st)\n{\n    switch (st) {\n    case RS_FILE_OK:\n        return \"OK\";\n    case RS_FILE_DELTA:\n        return \"previous version\";\n    case RS_FILE_EMPTY:\n        return \"OK (empty file)\";\n    case RS_NON_FILE:\n        return \"OK (non file)\";\n    case RS_NOBACKUP:\n        return \"no backup\";\n    case RS_ERROR:\n        return \"ERROR\";\n    default:\n        return 
\"ERROR: unexpected status\";\n    }\n}\n\nstatic int parse_bk_id(attr_set_t *attrs, const char *str, entry_id_t *id,\n                       bool *id_set)\n{\n    char attr[128];\n    char *val;\n    int rc;\n\n    val = strchr(str, '=');\n    if (!val) {\n        fprintf(stderr, \"Invalid attr format '%s': expected <attr>=<value>\\n\",\n                str);\n        return -EINVAL;\n    }\n    rh_strncpy(attr, str, MIN2(val - str + 1, sizeof(attr)));\n    val++;\n\n    if (!strcasecmp(attr, \"id\") || !strcasecmp(attr, \"fid\")) {\n        rc = read_fid(val, id);\n        if (rc == 0 && id_set)\n            *id_set = true;\n        return rc;\n    }\n\n    rc = set_attr_value_from_strings(attr, val, attrs, smi);\n\n    return rc;\n}\n\n\nstatic inline int rebind_helper(const char *old_bk_id,\n                                const char *new_bk_id,\n                                const char *new_path,\n                                const char *new_fid_str)\n{\n    int rc;\n    attr_set_t old_attrs = ATTR_SET_INIT;\n    attr_set_t new_attrs = ATTR_SET_INIT;\n    entry_id_t new_fid;\n    entry_id_t old_fid;\n    bool old_fid_set = false;\n    char *tmp;\n\n    /* full path required */\n    tmp = realpath(new_path, NULL);\n    if (tmp == NULL) {\n        rc = -errno;\n        DisplayLog(LVL_CRIT, LOGTAG, \"Error in realpath(%s): %s\",\n                   new_path, strerror(-rc));\n        return rc;\n    }\n    if (strlen(tmp) >= RBH_PATH_MAX) {\n        DisplayLog(LVL_CRIT, LOGTAG, \"Path length is too long!\");\n        return -ENAMETOOLONG;\n    }\n    /* safe because of previous check */\n    strcpy(ATTR(&new_attrs, fullpath), tmp);\n    ATTR_MASK_SET(&new_attrs, fullpath);\n    strcpy(ATTR(&old_attrs, fullpath), tmp);\n    ATTR_MASK_SET(&old_attrs, fullpath);\n    /* now we can free tmp path */\n    free(tmp);\n\n    if ((new_fid_str != NULL) && !EMPTY_STRING(new_fid_str))\n        rc = read_fid(new_fid_str, &new_fid);\n    else\n        /* get fid for the 
given file */\n        rc = Path2Id(new_path, &new_fid);\n\n    if (rc)\n        return rc;\n\n    printf(\"Rebinding '%s' (\" DFID \") from '%s' to '%s'...\\n\", new_path,\n           PFID(&new_fid), old_bk_id, new_bk_id);\n\n    /* parse old/new bk ids and set attr accordingly */\n    if (parse_bk_id(&old_attrs, old_bk_id, &old_fid, &old_fid_set))\n        return -EINVAL;\n    if (parse_bk_id(&new_attrs, new_bk_id, &new_fid, NULL))\n        return -EINVAL;\n\n    /* rebind is like undelete with 'already recovered = true' */\n    rc = smi->sm->undelete_func(smi, old_fid_set ? &old_fid : NULL,\n                                &old_attrs, &new_fid, &new_attrs,\n                                true);\n    fprintf(stderr, \"Rebind status for '%s': %s\\n\", ATTR(&new_attrs, fullpath),\n            recov_status2str(rc));\n    if (rc == RS_NOBACKUP || rc == RS_ERROR)\n        return -1;\n    return 0;\n}\n\n#define MAX_OPT_LEN 1024\n\n/**\n * Main daemon routine\n */\nint main(int argc, char **argv)\n{\n    int c, option_index = 0;\n    const char *bin;\n    char config_file[MAX_OPT_LEN] = \"\";\n\n    int rc;\n    char err_msg[4096];\n    bool chgd = false;\n    char badcfg[RBH_PATH_MAX];\n    char sm_name[SM_NAME_MAX + 1] = \"\";\n\n    bin = rh_basename(argv[0]);\n\n    /* parse command line options */\n    while ((c = getopt_long(argc, argv, SHORT_OPT_STRING, option_tab,\n                            &option_index)) != -1) {\n        switch (c) {\n        case 'f':\n            rh_strncpy(config_file, optarg, MAX_OPT_LEN);\n            break;\n        case 'l':\n        {\n            int log_level = str2debuglevel(optarg);\n\n            if (log_level == -1) {\n                fprintf(stderr,\n                        \"Unsupported log level '%s'. 
CRIT, MAJOR, EVENT, VERB, DEBUG or FULL expected.\\n\",\n                        optarg);\n                exit(1);\n            }\n            force_debug_level(log_level);\n            break;\n        }\n\n        case 's':\n            if (!EMPTY_STRING(sm_name))\n                fprintf(stderr,\n                        \"WARNING: only a single status manager is expected \"\n                        \"on command line. '%s' ignored.\\n\", optarg);\n            else\n                rh_strncpy(sm_name, optarg, sizeof(sm_name));\n            break;\n\n        case 'h':\n            display_help(bin);\n            exit(EXIT_SUCCESS);\n            break;\n        case 'V':\n            display_version(bin);\n            exit(EXIT_SUCCESS);\n            break;\n        case ':':\n        case '?':\n        default:\n            display_help(bin);\n            exit(1);\n            break;\n        }\n    }\n\n    /* 2 expected argument: old backend path, new path is FS */\n    if (optind > argc - 3) {\n        fprintf(stderr, \"Error: missing arguments on command line.\\n\");\n        display_help(bin);\n        exit(1);\n    } else if (optind < argc - 4) {\n        fprintf(stderr, \"Error: too many arguments on command line.\\n\");\n        display_help(bin);\n        exit(1);\n    }\n\n    rc = rbh_init_internals();\n    if (rc != 0)\n        exit(EXIT_FAILURE);\n\n    /* get default config file, if not specified */\n    if (SearchConfig(config_file, config_file, &chgd, badcfg,\n                     MAX_OPT_LEN) != 0) {\n        fprintf(stderr, \"No config file (or too many) found matching %s\\n\",\n                badcfg);\n        exit(2);\n    } else if (chgd) {\n        fprintf(stderr, \"Using config file '%s'.\\n\", config_file);\n    }\n\n    /* only read common config (listmgr, ...) 
(mask=0) */\n    if (rbh_cfg_load(0, config_file, err_msg)) {\n        fprintf(stderr, \"Error reading configuration file '%s': %s\\n\",\n                config_file, err_msg);\n        exit(1);\n    }\n\n    if (!log_config.force_debug_level)\n        log_config.debug_level = LVL_MAJOR; /* no event message */\n\n    /* Set logging to stderr */\n    strcpy(log_config.log_file, \"stderr\");\n    strcpy(log_config.report_file, \"stderr\");\n    strcpy(log_config.alert_file, \"stderr\");\n\n    /* Initialize logging */\n    rc = InitializeLogs(bin);\n    if (rc) {\n        fprintf(stderr, \"Error opening log files: rc=%d, errno=%d: %s\\n\",\n                rc, errno, strerror(errno));\n        exit(EXIT_FAILURE);\n    }\n\n    /* Initialize Filesystem access */\n    rc = InitFS();\n    if (rc)\n        exit(EXIT_FAILURE);\n\n    /* Initialize status managers (XXX all or just the one used for undelete?)\n     */\n    rc = smi_init_all(0);\n    if (rc)\n        exit(EXIT_FAILURE);\n\n    /* load the status manager */\n    if (!EMPTY_STRING(sm_name)) {\n        rc = load_smi(sm_name, &smi);\n        if (rc)\n            exit(EXIT_FAILURE);\n    } else {\n        /* if there is a single smi that allows undelete, use it */\n        rc = load_single_smi(&smi);\n        if (rc)\n            exit(EXIT_FAILURE);\n    }\n\n    if (optind == argc - 3)\n        rc = rebind_helper(argv[optind], argv[optind + 1], argv[optind + 2],\n                          NULL);\n    else if (optind == argc - 4)\n        rc = rebind_helper(argv[optind], argv[optind + 1], argv[optind + 2],\n                           argv[optind + 3]);\n\n    exit(rc ? EXIT_FAILURE: EXIT_SUCCESS);\n}\n"
  },
  {
    "path": "src/robinhood/rbh_recov.c",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2009, 2010 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n/**\n * Command for recovering filesystem content after a disaster (backup flavor)\n */\n\n#ifdef HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n\n#include \"list_mgr.h\"\n#include \"rbh_cfg.h\"\n#include \"rbh_logs.h\"\n#include \"rbh_misc.h\"\n#include \"xplatform_print.h\"\n#include \"rbh_basename.h\"\n\n#include <unistd.h>\n#include <getopt.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <errno.h>\n#include <pthread.h>\n#include <signal.h>\n\n#define RECOV_TAG \"Recov\"\n#define RECOV_TAG \"Recov\"\n\nstatic struct option option_tab[] = {\n    /* recovery options */\n    {\"start\", no_argument, NULL, 'S'},\n    {\"resume\", no_argument, NULL, 'r'},\n    {\"run\", no_argument, NULL, 'r'},\n    {\"complete\", no_argument, NULL, 'c'},\n    {\"status\", no_argument, NULL, 's'},\n    {\"reset\", no_argument, NULL, 'Z'},\n    {\"list\", required_argument, NULL, 'L'},\n\n    {\"ost\", required_argument, NULL, 'o'},\n    {\"since\", required_argument, NULL, 'b'},\n\n    {\"dir\", required_argument, NULL, 'D'},\n    {\"retry\", no_argument, NULL, 'e'},\n    {\"yes\", no_argument, NULL, 'y'},\n\n    /* config file options */\n    {\"config-file\", required_argument, NULL, 'f'},\n\n    /* log options */\n    {\"log-level\", required_argument, NULL, 'l'},\n//    {\"output-dir\", required_argument, NULL, 'o'},\n\n    /* miscellaneous options */\n    {\"help\", no_argument, NULL, 'h'},\n    {\"version\", no_argument, NULL, 'V'},\n\n    {NULL, 0, NULL, 0}\n\n};\n\n#define SHORT_OPT_STRING    
\"SrcZsD:eyf:l:o:b:hVL:\"\n\n/* global variables */\n\nstatic lmgr_t   lmgr;\nstatic bool     terminate = false;  /* abort signal received */\n\nstatic char    *path_filter = NULL;\nstatic char     path_buff[RBH_PATH_MAX];\nstatic value_list_t ost_list = { 0, NULL };\n\nstatic char ost_range_str[256] = \"\";\nstatic time_t since_time = 0;\n\n/* special character sequences for displaying help */\n\n/* Bold start character sequence */\n#define _B \"\u001b[1m\"\n/* Bold end character sequence */\n#define B_ \"\u001b[m\"\n\n/* Underline start character sequence */\n#define _U \"\u001b[4m\"\n/* Underline end character sequence */\n#define U_ \"\u001b[0m\"\n\nstatic const char *help_string =\n    _B \"Usage:\" B_ \" %s <action> [options]\\n\"\n    \"\\n\"\n    _B \"Disaster recovery actions:\" B_ \"\\n\"\n    \"    \" _B \"--start\" B_ \", \" _B \"-S\" B_ \"\\n\"\n    \"        Initialize a disaster recovery process.\\n\"\n    \"    \" _B \"--run\" B_ \", \" _B \"--resume\" B_ \", \" _B \"-r\" B_ \"\\n\"\n    \"        Run/resume the recovery process.\\n\"\n    \"    \" _B \"--complete\" B_ \", \" _B \"-c\" B_ \"\\n\"\n    \"        Terminate the recovery.\\n\"\n    \"    \" _B \"--status\" B_ \", \" _B \"-s\" B_ \"\\n\"\n    \"        Show current recovery progress.\\n\"\n    \"    \" _B \"--list\" B_ \" \" _U \"state\" U_ \", \" _B \"-L\" B_ \" \" _U \"state\" U_\n    \"\\n\"\n    \"        List entries for the given \" _U \"state\" U_ \": all, done, failed, or todo.\\n\"\n    \"    \" _B \"--reset\" B_ \", \" _B \"-Z\" B_\n    \"\\n\"\n    \"        Abort current recovery (/!\\\\ non-recovered entries are lost).\\n\"\n    \"\\n\"\n    _B \"Start options:\" B_ \"\\n\"\n    \"    \" _B \"--ost\" B_ \" \" _U \"ost_index\" U_ \"|\" _U \"ost_set\" U_ \"\\n\"\n    \"        Perform the recovery only for files striped on the given OST \\n\"\n    \"        or set of OSTs (e.g. 
3,5-8).\\n\" \"    \" _B \"--since\" B_ \" \" _U \"date_time\" U_ \"\\n\"\n    \"        Perform the recovery only for files updated after the given \" _U \"date_time\" U_ \".\\n\"\n    \"        The expected date/time format is yyyymmdd[HHMM[SS]].\\n\"\n//    \"    \" _B \"--with-data\" B_ \"\\n\"\n//    \"        Used with --ost: only recover files that really have data on the OST.\\n\"\n    _B \"Resume options:\" B_ \"\\n\"\n    \"    \" _B \"--dir\" B_ \"=\" _U \"path\" U_ \", \" _B \"-D\" B_ \" \" _U \"path\" U_ \"\\n\"\n    \"        Only recover files in the given directory.\\n\"\n    \"    \" _B \"--retry\" B_ \", \" _B \"-e\" B_ \"\\n\"\n    \"        Recover entries even if previous recovery failed on them.\\n\"\n    _B \"Reset options:\" B_ \"\\n\"\n    \"    \" _B \"--yes\" B_ \", \" _B \"-y\" B_ \"\\n\"\n    \"        Do not prompt for confirmation.\\n\"\n    \"\\n\"\n    _B \"Config file options:\" B_ \"\\n\"\n    \"    \" _B \"-f\" B_ \" \" _U \"file\" U_ \", \" _B \"--config-file=\" B_ _U \"file\" U_ \"\\n\"\n    \"        Path to configuration file (or short name).\\n\"\n    \"\\n\"\n//    _B \"Output options:\" B_ \"\\n\"\n//    \"    \" _B \"-o\" B_ \" \" _U \"dir\" U_ \", \" _B \"--output-dir=\" B_ _U \"dir\" U_ \"\\n\"\n//    \"        Directory where recovery reports will be written (default=current dir).\\n\"\n//    \"\\n\"\n    _B \"Miscellaneous options:\" B_ \"\\n\"\n    \"    \" _B \"-l\" B_ \" \" _U \"level\" U_ \", \" _B \"--log-level=\" B_ _U \"level\" U_\n    \"\\n\"\n    \"        Force the log verbosity level (overides configuration value).\\n\"\n    \"        Allowed values: CRIT, MAJOR, EVENT, VERB, DEBUG, FULL.\\n\" \"    \" _B\n    \"-h\" B_ \", \" _B \"--help\" B_ \"\\n\"\n    \"        Display a short help about command line options.\\n\"\n    \"    \" _B \"-V\" B_ \", \" _B \"--version\" B_ \"\\n\"\n    \"        Display version info\\n\";\n\nstatic inline void display_help(const char *bin_name)\n{\n    printf(help_string, 
bin_name);\n}\n\nstatic inline void display_version(const char *bin_name)\n{\n    printf(\"\\n\");\n    printf(\"Product:         \" PACKAGE_NAME \" disaster recovery tool\\n\");\n    printf(\"Version:         \" PACKAGE_VERSION \"-\" RELEASE \"\\n\");\n    printf(\"Build:           \" COMPIL_DATE \"\\n\");\n    printf(\"\\n\");\n    printf(\"Compilation switches:\\n\");\n\n/* Access by Fid ? */\n#ifdef _HAVE_FID\n    printf(\"    Address entries by FID\\n\");\n#else\n    printf(\"    Address entries by path\\n\");\n#endif\n\n#ifdef HAVE_CHANGELOGS\n    printf(\"    MDT Changelogs supported\\n\");\n#else\n    printf(\"    MDT Changelogs disabled\\n\");\n#endif\n\n    printf(\"\\n\");\n#ifdef _LUSTRE\n#ifdef LUSTRE_VERSION\n    printf(\"Lustre Version: \" LUSTRE_VERSION \"\\n\");\n#else\n    printf(\"Lustre FS support\\n\");\n#endif\n#else\n    printf(\"No Lustre support\\n\");\n#endif\n\n#ifdef _MYSQL\n    printf(\"Database binding: MySQL\\n\");\n#elif defined(_SQLITE)\n    printf(\"Database binding: SQLite\\n\");\n#else\n#error \"No database was specified\"\n#endif\n    printf(\"\\n\");\n    printf(\"Report bugs to: <\" PACKAGE_BUGREPORT \">\\n\");\n    printf(\"\\n\");\n}\n\nstatic void terminate_handler(int sig)\n{\n    if (sig == SIGTERM)\n        fprintf(stderr, \"SIGTERM received: performing clean shutdown\\n\");\n    else if (sig == SIGINT)\n        fprintf(stderr, \"SIGINT received: performing clean shutdown\\n\");\n\n    terminate = true;\n}\n\nstatic void print_recov_stats(bool forecast, const lmgr_recov_stat_t *p_stat)\n{\n    char buff[128];\n    unsigned long long diff;\n\n    FormatFileSize(buff, 128, p_stat->status_size[RS_FILE_OK]\n                   + p_stat->status_size[RS_FILE_EMPTY]);\n    if (forecast)\n        printf(\"   - full recovery: %Lu files (%s), %Lu non-files\\n\",\n               p_stat->status_count[RS_FILE_OK] +\n               p_stat->status_count[RS_FILE_EMPTY], buff,\n               p_stat->status_count[RS_NON_FILE]);\n    
else\n        printf(\"   - successfully recovered: %Lu files (%s), %Lu non-files\\n\",\n               p_stat->status_count[RS_FILE_OK] +\n               p_stat->status_count[RS_FILE_EMPTY], buff,\n               p_stat->status_count[RS_NON_FILE]);\n\n    FormatFileSize(buff, 128, p_stat->status_size[RS_FILE_DELTA]);\n    printf(\"   - old version:     %10Lu entries (%s)\\n\",\n           p_stat->status_count[RS_FILE_DELTA], buff);\n    FormatFileSize(buff, 128, p_stat->status_size[RS_NOBACKUP]);\n    printf(\"   - not recoverable: %10Lu entries (%s)\\n\",\n           p_stat->status_count[RS_NOBACKUP], buff);\n\n    diff =\n        p_stat->total - p_stat->status_count[RS_FILE_OK] -\n        p_stat->status_count[RS_FILE_DELTA]\n        - p_stat->status_count[RS_FILE_EMPTY] -\n        p_stat->status_count[RS_NOBACKUP]\n        - p_stat->status_count[RS_NON_FILE] - p_stat->status_count[RS_ERROR];\n\n    FormatFileSize(buff, 128, p_stat->status_size[RS_ERROR]);\n\n    if (forecast)\n        printf(\"   - other/errors:    %10Lu/%Lu (%s)\\n\", diff,\n               p_stat->status_count[RS_ERROR], buff);\n    else {\n        printf(\"   - errors:          %10Lu entries (%s)\\n\",\n               p_stat->status_count[RS_ERROR], buff);\n        printf(\"   - still to be recovered: %4Lu entries\\n\", diff);\n    }\n}\n\nstatic int recov_start(void)\n{\n    lmgr_recov_stat_t stats;\n    int rc;\n\n    /* is there a filter to be applied? */\n    if (ost_list.count > 0 || since_time != 0) {\n        lmgr_filter_t filter;\n        filter_value_t fv;\n\n        lmgr_simple_filter_init(&filter);\n\n        /* ost filter? 
*/\n        if (ost_list.count == 1) {\n            printf(\"only recovering files striped on OST#%u\\n\",\n                   ost_list.values[0].val_uint);\n            fv.value.val_uint = ost_list.values[0].val_uint;\n            lmgr_simple_filter_add(&filter, ATTR_INDEX_stripe_items, EQUAL, fv,\n                                   0);\n        } else if (ost_list.count > 1) {\n            printf(\"only recovering files striped on OSTs[%s]\\n\",\n                   ost_range_str);\n            fv.list = ost_list;\n            lmgr_simple_filter_add(&filter, ATTR_INDEX_stripe_items, IN, fv,\n                                   /* allow it to free ost_list->values: */\n                                   FILTER_FLAG_ALLOC_LIST);\n        }\n\n        /* update time filter */\n        if (since_time) {\n            char date[128];\n            struct tm t;\n            strftime(date, 128, \"%Y/%m/%d %T\", localtime_r(&since_time, &t));\n            printf(\"only recovering files updated after %s (timestamp: %lu)\\n\",\n                   date, since_time);\n            fv.value.val_uint = since_time;\n\n            lmgr_simple_filter_add(&filter, ATTR_INDEX_md_update, MORETHAN, fv,\n                                   0);\n        }\n\n        rc = ListMgr_RecovInit(&lmgr, &filter, &stats);\n    } else\n        rc = ListMgr_RecovInit(&lmgr, NULL, &stats);\n\n    if (rc == 0) {\n        printf(\"\\nRecovery successfully initialized.\\n\\n\");\n        printf(\"It should result in the following state:\\n\");\n        print_recov_stats(true, &stats);\n        return 0;\n    } else if (rc == DB_ALREADY_EXISTS) {\n        printf(\"\\nERROR: a recovery is already in progress, or a previous recovery\\n\"\n               \"was not completed properly (see --resume, --complete or --reset option).\\n\\n\");\n\n        unsigned long long total =\n            stats.status_count[RS_FILE_OK] + stats.status_count[RS_FILE_DELTA]\n            + stats.status_count[RS_NON_FILE] +\n        
    stats.status_count[RS_FILE_EMPTY]\n            + stats.status_count[RS_NOBACKUP] + stats.status_count[RS_ERROR];\n        printf(\"The progress of this recovery is %Lu/%Lu entries\\n\", total,\n               stats.total);\n        print_recov_stats(false, &stats);\n\n        return -EALREADY;\n    } else {    /* other error */\n\n        fprintf(stderr, \"ERROR initializing recovery: db error %d\\n\", rc);\n        return rc;\n    }\n}\n\nstatic int recov_reset(int force)\n{\n    int rc;\n\n    /* ask confirmation */\n    if (!force) {\n        lmgr_recov_stat_t stats;\n        char *buff = malloc(1024);\n        size_t sz = 1024;\n\n        rc = ListMgr_RecovStatus(&lmgr, &stats);\n        if (rc) {\n            if (rc == DB_NOT_EXISTS)\n                fprintf(stderr, \"ERROR: There is no pending recovery\\n\");\n            return rc;\n        }\n\n        printf(\"\\nWARNING: you are about to abort the current recovery.\\n\");\n        printf(\"All entries not yet recovered will be definitely lost!\\n\\n\");\n\n        printf(\"Current recovery status:\\n\");\n        print_recov_stats(false, &stats);\n        printf(\"\\n\");\n\n        do {\n            printf(\"Do you really want to proceed [y/n]: \");\n            if (getline(&buff, &sz, stdin) > 0) {\n                if (!strcasecmp(buff, \"y\\n\") || !strcasecmp(buff, \"yes\\n\"))\n                    break;\n                else {\n                    printf(\"Aborted\\n\");\n                    free(buff);\n                    return -ECANCELED;\n                }\n            }\n        } while (1);\n        free(buff);\n    }\n    return ListMgr_RecovReset(&lmgr);\n}\n\nstatic int recov_resume(int retry_errors)\n{\n    struct lmgr_iterator_t *it;\n    int rc, st;\n    entry_id_t id, new_id;\n    attr_set_t attrs, new_attrs;\n    char buff[128];\n\n    /* TODO iter opt */\n    it = ListMgr_RecovResume(&lmgr, path_filter, retry_errors, NULL);\n    if (it == NULL) {\n        fprintf(stderr,\n        
        \"ERROR: cannot get the list of entries to be recovered\\n\");\n        return -1;\n    }\n\n    attrs.attr_mask = RECOV_ATTR_MASK;\n\n    while (!terminate &&\n           ((rc =\n             ListMgr_RecovGetNext(it, &id, &attrs, NULL)) != DB_END_OF_LIST)) {\n        if (rc) {\n            fprintf(stderr, \"ERROR %d getting entry from recovery table\\n\", rc);\n            ListMgr_CloseIterator(it);\n            return rc;\n        }\n\n        FormatFileSize(buff, 128, ATTR(&attrs, size));\n\n        if (ATTR_MASK_TEST(&attrs, fullpath))\n            printf(\"Restoring %s (%s)...\", ATTR(&attrs, fullpath), buff);\n        else\n            printf(\"Restoring \" DFID \" (%s)...\", PFID(&id), buff);\n\n        /* TODO process entries asynchronously, in parallel, in separate\n         * threads */\n        st = rbhext_recover(&id, &attrs, &new_id, &new_attrs, NULL);\n\n        if ((st == RS_FILE_OK) || (st == RS_FILE_EMPTY) || (st == RS_NON_FILE)\n            || (st == RS_FILE_DELTA)) {\n            /* don't insert readonly attrs */\n            new_attrs.attr_mask &= ~readonly_attr_set;\n\n            /* insert the entry in the database, and update recovery status */\n            rc = ListMgr_Insert(&lmgr, &new_id, &new_attrs, true);\n            if (rc) {\n                fprintf(stderr, \"DB insert failure for '%s'\\n\",\n                        ATTR(&new_attrs, fullpath));\n                st = RS_ERROR;\n            }\n        }\n\n        /* old id must be used for impacting recovery table */\n        if (ListMgr_RecovSetState(&lmgr, &id, st))\n            st = RS_ERROR;\n\n        switch (st) {\n        case RS_FILE_OK:\n            printf(\" OK\\n\");\n            break;\n        case RS_FILE_DELTA:\n            printf(\" OK (old version)\\n\");\n            break;\n        case RS_NON_FILE:\n            printf(\" OK (non-file)\\n\");\n            break;\n        case RS_FILE_EMPTY:\n            printf(\" OK (empty file)\\n\");\n            break;\n  
      case RS_NOBACKUP:\n            printf(\" No backup available\\n\");\n            break;\n        case RS_ERROR:\n            printf(\" FAILED\\n\");\n            break;\n        default:\n            printf(\" ERROR st=%d, rc=%d\\n\", st, rc);\n            break;\n        }\n\n        /* reset mask */\n        attrs.attr_mask = RECOV_ATTR_MASK;\n    }\n\n    return 0;\n}\n\nstatic int recov_complete(void)\n{\n    int rc;\n    lmgr_recov_stat_t stats;\n\n    rc = ListMgr_RecovComplete(&lmgr, &stats);\n    if (rc == DB_NOT_ALLOWED) {\n        printf(\"\\nCannot complete recovery\\n\\n\");\n        printf(\"Current status:\\n\");\n        print_recov_stats(false, &stats);\n        return rc;\n    } else if (rc == DB_NOT_EXISTS) {\n        printf(\"\\nERROR: There is no pending recovery.\\n\");\n        return rc;\n    } else if (rc != DB_SUCCESS) {\n        printf(\"\\nERROR %d finalizing recovery\\n\", rc);\n        return rc;\n    } else {\n        printf(\"\\nRecovery successfully completed:\\n\");\n        print_recov_stats(false, &stats);\n        return 0;\n    }\n}\n\nstatic int recov_status(void)\n{\n    int rc;\n    lmgr_recov_stat_t stats;\n\n    rc = ListMgr_RecovStatus(&lmgr, &stats);\n    if (rc) {\n        if (rc == DB_NOT_EXISTS)\n            fprintf(stderr, \"ERROR: There is no pending recovery\\n\");\n        return rc;\n    }\n\n    printf(\"Current recovery status:\\n\");\n    print_recov_stats(false, &stats);\n    printf(\"\\n\");\n    return 0;\n}\n\nstatic int recov_list(recov_type_e state)\n{\n    struct lmgr_iterator_t *it;\n    int rc;\n    entry_id_t id;\n    attr_set_t attrs;\n    char buff[128];\n    recov_status_t st;\n    const char *status;\n\n    /* TODO iter opt */\n    it = ListMgr_RecovList(&lmgr, state);\n    if (it == NULL) {\n        fprintf(stderr, \"ERROR: cannot get the list of entries\\n\");\n        return -1;\n    }\n\n    attrs.attr_mask = RECOV_ATTR_MASK;\n    printf(\"%-8s %-15s %-40s %s\\n\", \"type\", \"state\", 
\"path\", \"size\");\n\n    while (!terminate &&\n           ((rc =\n             ListMgr_RecovGetNext(it, &id, &attrs, &st)) != DB_END_OF_LIST)) {\n        if (rc) {\n            fprintf(stderr, \"ERROR %d getting entry from recovery table\\n\", rc);\n            ListMgr_CloseIterator(it);\n            return rc;\n        }\n\n        FormatFileSize(buff, 128, ATTR(&attrs, size));\n        switch (st) {\n        case RS_FILE_OK:\n            status = \"done\";\n            break;\n        case RS_FILE_DELTA:\n            status = \"done_old_data\";\n            break;\n        case RS_NON_FILE:\n            status = \"done_non_file\";\n            break;\n        case RS_FILE_EMPTY:\n            status = \"done_empty\";\n            break;\n        case RS_NOBACKUP:\n            status = \"done_no_backup\";\n            break;\n        case RS_ERROR:\n            status = \"failed\";\n            break;\n        case -1:\n            status = \"todo\";\n            break;\n        default:\n            status = \"?\";\n        }\n\n        printf(\"%-8s %-15s %-40s %s\\n\", ATTR(&attrs, type), status,\n               ATTR(&attrs, fullpath), buff);\n\n        /* reset mask */\n        attrs.attr_mask = RECOV_ATTR_MASK;\n    }\n\n    return 0;\n}\n\n#define RETRY_ERRORS 0x00000001\n#define NO_CONFIRM   0x00000002\n\n#define MAX_OPT_LEN 1024\n\n/**\n * Main daemon routine\n */\nint main(int argc, char **argv)\n{\n    int            c, option_index = 0;\n    const char    *bin;\n\n    char           config_file[MAX_OPT_LEN] = \"\";\n\n    bool           do_start = false;\n    bool           do_reset = false;\n    bool           do_resume = false;\n    bool           do_complete = false;\n    bool           do_status = false;\n\n    int            list_state = -1;\n    int            local_flags = 0;\n\n    int            rc;\n    char           err_msg[4096];\n    robinhood_config_t config;\n    struct sigaction act_sigterm;\n    int             chgd = 0;\n    char    
        badcfg[RBH_PATH_MAX];\n\n    bin = rh_basename(argv[0]); /* supports NULL argument */\n\n    /* parse command line options */\n    while ((c =\n            getopt_long(argc, argv, SHORT_OPT_STRING, option_tab,\n                        &option_index)) != -1) {\n        switch (c) {\n        case 'S':\n            do_start = true;\n            break;\n        case 's':\n            do_status = true;\n            break;\n        case 'Z':\n            do_reset = true;\n            break;\n        case 'c':\n            do_complete = true;\n            break;\n        case 'r':\n            do_resume = true;\n            break;\n        case 'L':\n            if (!strcasecmp(optarg, \"all\"))\n                list_state = RT_ALL;\n            else if (!strcasecmp(optarg, \"done\"))\n                list_state = RT_DONE;\n            else if (!strcasecmp(optarg, \"failed\"))\n                list_state = RT_FAILED;\n            else if (!strcasecmp(optarg, \"todo\"))\n                list_state = RT_TODO;\n            else {\n                fprintf(stderr,\n                        \"Invalid parameter for option --list: all, done, failed or todo expected.\\n\");\n                exit(1);\n            }\n            break;\n        case 'e':\n            local_flags |= RETRY_ERRORS;\n            break;\n        case 'y':\n            local_flags |= NO_CONFIRM;\n            break;\n        case 'f':\n            rh_strncpy(config_file, optarg, MAX_OPT_LEN);\n            break;\n        case 'D':\n            if (!optarg) {\n                fprintf(stderr,\n                        \"Missing mandatory argument <path> for --dir\\n\");\n                exit(1);\n            } else {\n                rh_strncpy(path_buff, optarg, MAX_OPT_LEN);\n                path_filter = path_buff;\n            }\n            break;\n        case 'o':\n            if (!optarg) {\n                fprintf(stderr,\n                        \"Missing mandatory argument <ost_index> for 
--ost\\n\");\n                exit(1);\n            }\n            /* parse it as a set */\n            if (lmgr_range2list(optarg, DB_UINT, &ost_list)) {\n                fprintf(stderr,\n                        \"Invalid value '%s' for --ost option: integer or set expected (e.g. 2 or 3,5-8,10-12).\\n\",\n                        optarg);\n                exit(1);\n            }\n            /* copy arg to display it */\n            rh_strncpy(ost_range_str, optarg, sizeof(ost_range_str));\n            break;\n        case 'b':\n            if (!optarg) {\n                fprintf(stderr,\n                        \"Missing mandatory argument <date_time> for --since\\n\");\n                exit(1);\n            }\n            since_time = str2date(optarg);\n            if (since_time == (time_t)-1) {\n                fprintf(stderr,\n                        \"Invalid date format: yyyymmdd[HH[MM[SS]]] expected\\n\");\n                exit(1);\n            }\n            break;\n        case 'l':\n        {\n            int log_level = str2debuglevel(optarg);\n\n            if (log_level == -1) {\n                fprintf(stderr,\n                        \"Unsupported log level '%s'. 
CRIT, MAJOR, EVENT, VERB, DEBUG or FULL expected.\\n\",\n                        optarg);\n                exit(1);\n            }\n            force_debug_level(log_level);\n            break;\n        }\n        case 'h':\n            display_help(bin);\n            exit(0);\n            break;\n        case 'V':\n            display_version(bin);\n            exit(0);\n            break;\n        case ':':\n        case '?':\n        default:\n            display_help(bin);\n            exit(1);\n            break;\n        }\n    }\n\n    /* check there is no extra arguments */\n    if (optind != argc) {\n        fprintf(stderr, \"Error: unexpected argument on command line: %s\\n\",\n                argv[optind]);\n        exit(1);\n    }\n\n    rc = rbh_init_internals();\n    if (rc != 0)\n        exit(rc);\n\n    /* get default config file, if not specified */\n    if (SearchConfig(config_file, config_file, &chgd, badcfg,\n                     MAX_OPT_LEN) != 0) {\n        fprintf(stderr, \"No config file (or too many) found matching %s\\n\",\n                badcfg);\n        exit(2);\n    } else if (chgd) {\n        fprintf(stderr, \"Using config file '%s'.\\n\", config_file);\n    }\n\n    /* only read ListMgr config */\n\n    if (ReadRobinhoodConfig(0, config_file, err_msg, &config, false)) {\n        fprintf(stderr, \"Error reading configuration file '%s': %s\\n\",\n                config_file, err_msg);\n        exit(1);\n    }\n\n    /* XXX HOOK: Set logging to stderr */\n    strcpy(config.log_config.log_file, \"stderr\");\n    strcpy(config.log_config.report_file, \"stderr\");\n    strcpy(config.log_config.alert_file, \"stderr\");\n\n    /* Initialize logging */\n    rc = InitializeLogs(bin, &config.log_config);\n    if (rc) {\n        fprintf(stderr, \"Error opening log files: rc=%d, errno=%d: %s\\n\",\n                rc, errno, strerror(errno));\n        exit(rc);\n    }\n\n    /* Initialize filesystem access */\n    rc = InitFS();\n    if (rc)\n   
     exit(rc);\n\n    /* Initialize status managers (XXX all or just the one used for\n     * recovery?) */\n    rc = smi_init_all(options.flags);\n    if (rc)\n        exit(rc);\n\n    /* Initialize list manager */\n    rc = ListMgr_Init(0);\n    if (rc) {\n        DisplayLog(LVL_CRIT, RECOV_TAG,\n                   \"Error initializing list manager: %s (%d)\", lmgr_err2str(rc),\n                   rc);\n        exit(rc);\n    } else\n        DisplayLog(LVL_DEBUG, RECOV_TAG,\n                   \"ListManager successfully initialized\");\n\n    if (CheckLastFS() != 0)\n        exit(1);\n\n    /* Create database access */\n    rc = ListMgr_InitAccess(&lmgr);\n    if (rc) {\n        DisplayLog(LVL_CRIT, RECOV_TAG, \"Error %d: cannot connect to database\",\n                   rc);\n        exit(rc);\n    }\n#ifdef _HSM_LITE\n    rc = Backend_Start(&config.backend_config, 0);\n    if (rc) {\n        DisplayLog(LVL_CRIT, RECOV_TAG, \"Error initializing backend\");\n        exit(1);\n    }\n#endif\n\n    /* create signal handlers */\n    memset(&act_sigterm, 0, sizeof(act_sigterm));\n    act_sigterm.sa_flags = 0;\n    act_sigterm.sa_handler = terminate_handler;\n    if (sigaction(SIGTERM, &act_sigterm, NULL) == -1\n        || sigaction(SIGINT, &act_sigterm, NULL) == -1) {\n        DisplayLog(LVL_CRIT, RECOV_TAG,\n                   \"Error while setting signal handlers for SIGTERM and SIGINT: %s\",\n                   strerror(errno));\n        exit(1);\n    } else\n        DisplayLog(LVL_VERB, RECOV_TAG,\n                   \"Signals SIGTERM and SIGINT (abort command) are ready to be used\");\n\n    if (do_status)\n        rc = recov_status();\n    else if (list_state != -1)\n        rc = recov_list(list_state);\n    else if (do_start)\n        rc = recov_start();\n    else if (do_reset)\n        rc = recov_reset(local_flags & NO_CONFIRM);\n    else if (do_resume)\n        rc = recov_resume(local_flags & RETRY_ERRORS);\n    else if (do_complete)\n        rc = 
recov_complete();\n    else {\n        display_help(bin);\n        rc = 1;\n    }\n\n    ListMgr_CloseAccess(&lmgr);\n\n    return rc;\n}\n"
  },
  {
    "path": "src/robinhood/rbh_report.c",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2009, 2010 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n/**\n * Command for retrieving stats about filesystem.\n */\n\n#ifdef HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n\n#include \"list_mgr.h\"\n#include \"cmd_helpers.h\"\n#include \"rbh_cfg.h\"\n#include \"rbh_logs.h\"\n#include \"rbh_misc.h\"\n#include \"xplatform_print.h\"\n#include \"Memory.h\"\n#include \"entry_processor.h\"\n#include \"rbh_basename.h\"\n\n#include <unistd.h>\n#include <getopt.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <errno.h>\n#include <string.h>\n#include <pthread.h>\n#include <sys/types.h>\n#include <pwd.h>\n\n#define REPORT_TAG    \"Report\"\n\n#define DEFAULT_TOP_SIZE 20\n\n/* Array of options for getopt_long().\n * Each record consists of: {const char *name, int has_arg, int *flag, int val}\n */\n\n#define OPT_DUMP_USER   256\n#define OPT_DUMP_GROUP  257\n#define OPT_DUMP_OST    258\n#define OPT_DUMP_STATUS 259\n#define OPT_CLASS_INFO  260\n#define OPT_STATUS_INFO 261\n#define OPT_PROJECT_INFO 262\n\n#define SET_NEXT_MAINT    300\n#define CLEAR_NEXT_MAINT  301\n\n#define OPT_BY_COUNT      310\n#define OPT_BY_AVGSIZE    311\n#define OPT_COUNT_MIN     312\n\n#define OPT_SIZE_PROFILE  330\n#define OPT_BY_SZ_RATIO   331\n\n/* options flags */\n#define OPT_FLAG_CSV        0x0001\n#define OPT_FLAG_NOHEADER   0x0002\n#define OPT_FLAG_GROUP      0x0004\n\n#define OPT_FLAG_NO_ACCT 0x0010\n#define OPT_FLAG_SPLITUSERGROUP 0x0020\n#define OPT_FLAG_BY_COUNT       0x0040\n#define OPT_FLAG_BY_AVGSIZE     0x0080\n#define OPT_FLAG_REVERSE        0x0100\n#define OPT_FLAG_SPROF       
   0x0200\n#define OPT_FLAG_BY_SZRATIO     0x0400\n#define OPT_FLAG_SPLITUSERPROJ  0x1000\n\n#define CSV(_x) !!((_x)&OPT_FLAG_CSV)\n#define NOHEADER(_x) !!((_x)&OPT_FLAG_NOHEADER)\n#define ISGROUP(_x) !!((_x)&OPT_FLAG_GROUP)\n#define ISSPLITUSERGROUP(_x) !!((_x)&OPT_FLAG_SPLITUSERGROUP)\n#define ISSPLITUSERPROJ(_x) !!((_x)&OPT_FLAG_SPLITUSERPROJ)\n#define FORCE_NO_ACCT(_x) !!((_x)&OPT_FLAG_NO_ACCT)\n#define SORT_BY_COUNT(_x) !!((_x)&OPT_FLAG_BY_COUNT)\n#define SORT_BY_AVGSIZE(_x) !!((_x)&OPT_FLAG_BY_AVGSIZE)\n#define SORT_BY_SZRATIO(_x) !!((_x)&OPT_FLAG_BY_SZRATIO)\n#define REVERSE(_x) !!((_x)&OPT_FLAG_REVERSE)\n#define SPROF(_x) !!((_x)&OPT_FLAG_SPROF)\n\nstatic profile_field_descr_t size_profile = {\n    .attr_index = ATTR_INDEX_size,\n    .range_ratio_start = 0,\n    .range_ratio_len = 0,\n    .range_ratio_sort = SORT_NONE\n};\n\nstatic struct option option_tab[] = {\n\n    /* Stats selectors */\n    {\"activity\", no_argument, NULL, 'a'},\n\n    {\"fsinfo\", no_argument, NULL, 'i'},\n    {\"fs-info\", no_argument, NULL, 'i'},\n\n    {\"entry-info\", required_argument, NULL, 'e'},\n    {\"entryinfo\", required_argument, NULL, 'e'},\n\n    {\"userinfo\", optional_argument, NULL, 'u'},\n    {\"user-info\", optional_argument, NULL, 'u'},\n\n    {\"groupinfo\", optional_argument, NULL, 'g'},\n    {\"group-info\", optional_argument, NULL, 'g'},\n\n    {\"classinfo\", optional_argument, NULL, OPT_CLASS_INFO},\n    {\"class-info\", optional_argument, NULL, OPT_CLASS_INFO},\n\n    {\"statusinfo\", required_argument, NULL, OPT_STATUS_INFO},\n    {\"status-info\", required_argument, NULL, OPT_STATUS_INFO},\n\n    {\"projectinfo\", optional_argument, NULL, OPT_PROJECT_INFO},\n    {\"project-info\", optional_argument, NULL, OPT_PROJECT_INFO},\n\n    {\"topdirs\", optional_argument, NULL, 'd'},\n    {\"top-dirs\", optional_argument, NULL, 'd'},\n    {\"topsize\", optional_argument, NULL, 's'},\n    {\"top-size\", optional_argument, NULL, 's'},\n    {\"topusers\", 
optional_argument, NULL, 'U'},\n    {\"top-users\", optional_argument, NULL, 'U'},\n    {\"oldest-files\", optional_argument, NULL, 'o'},\n    {\"oldest-empty-dirs\", optional_argument, NULL, 'O'},\n\n    {\"deferred-rm\", no_argument, NULL, 'R'},\n    {\"dump\", no_argument, NULL, 'D'},\n    {\"dump-all\", no_argument, NULL, 'D'},   /* for backward compatibility */\n    {\"dump-user\", required_argument, NULL, OPT_DUMP_USER},\n    {\"dump-group\", required_argument, NULL, OPT_DUMP_GROUP},\n#ifdef _LUSTRE\n    {\"dump-ost\", required_argument, NULL, OPT_DUMP_OST},\n#endif\n    {\"dump-status\", required_argument, NULL, OPT_DUMP_STATUS},\n\n    {\"szprof\", no_argument, NULL, OPT_SIZE_PROFILE},    /* size profile */\n    {\"size-profile\", no_argument, NULL, OPT_SIZE_PROFILE},\n\n    /* additional options for topusers etc... */\n    {\"filter-path\", required_argument, NULL, 'P'},\n    {\"filter-class\", required_argument, NULL, 'C'},\n    {\"filter-project\", required_argument, NULL, 'p'},\n// filter status\n    {\"split-user-groups\", no_argument, NULL, 'S'},\n    {\"split-user-projects\", no_argument, NULL, 'J'},\n    {\"by-count\", no_argument, NULL, OPT_BY_COUNT},\n    {\"by-avgsize\", no_argument, NULL, OPT_BY_AVGSIZE},\n    {\"by-avg-size\", no_argument, NULL, OPT_BY_AVGSIZE},\n    {\"by-szratio\", required_argument, NULL, OPT_BY_SZ_RATIO},\n    {\"by-size-ratio\", required_argument, NULL, OPT_BY_SZ_RATIO},\n\n    {\"count-min\", required_argument, NULL, OPT_COUNT_MIN},\n    {\"reverse\", no_argument, NULL, 'r'},\n\n    {\"next-maintenance\", optional_argument, NULL, SET_NEXT_MAINT},\n    {\"cancel-maintenance\", no_argument, NULL, CLEAR_NEXT_MAINT},\n\n    /* config file options */\n    {\"config-file\", required_argument, NULL, 'f'},\n\n    /* output format option */\n    {\"csv\", no_argument, NULL, 'c'},\n    {\"no-header\", no_argument, NULL, 'q'},\n\n    /* verbosity level */\n    {\"log-level\", required_argument, NULL, 'l'},\n\n    /* miscellaneous 
options */\n    {\"help\", no_argument, NULL, 'h'},\n    {\"version\", no_argument, NULL, 'V'},\n    {\"force-no-acct\", no_argument, NULL, 'F'},\n\n    {NULL, 0, NULL, 0}\n\n};\n\n#define SHORT_OPT_STRING    \"aiDe:u:g:d:s:rU:P:C:Rf:cql:hVFSJo:O:p:\"\n\nstatic const char *cmd_help = _B \"Usage:\" B_ \" %s [options]\\n\";\n\nstatic const char *stats_help =\n    _B \"Available stats:\" B_ \"\\n\"\n    \"    \" _B \"--activity\" B_ \", \" _B \"-a\" B_ \"\\n\"\n    \"        Display stats about daemon activity.\\n\"\n    \"    \" _B \"--fs-info\" B_ \", \" _B \"-i\" B_ \"\\n\"\n    \"        Display statistics about filesystem contents.\\n\"\n    \"    \" _B \"--class-info\" B_ \"[=\" _U \"class_expr\" U_ \"]\\n\"\n    \"        Display Fileclasses summary. Use optional parameter \" _U \"class_expr\" U_ \"\\n\"\n    \"        for retrieving stats about matching fileclasses.\\n\"\n    \"    \" _B \"--status-info\" B_ \" \" _U \"status_name\" U_ \"[:\" _U \"status_value\" U_ \"]\\n\"\n    \"        Display status summary for the given policy or status name.\\n\"\n    \"        Optionally filter on \" _U \"status_value\" U_ \".\\n\"\n    \"    \" _B \"--entry-info\" B_ \" \" _U \"path\" U_ \"|\" _U \"id\" U_ \", \"\n           _B \"-e\" B_ \" \" _U \"path\" U_ \"|\" _U \"id\" U_ \"\\n\"\n    \"        Display all information about the given entry.\\n\"\n    \"    \" _B \"--user-info\" B_ \"[=\" _U \"username\" U_ \"], \" _B \"-u\" B_ \" \" _U \"username\" U_ \"\\n\"\n    \"        Display user statistics. Use optional parameter \" _U \"username\" U_\n            \" for retrieving stats about a single user.\\n\"\n    \"    \" _B \"--group-info\" B_ \"[=\" _U \"groupname\" U_ \"], \" _B \"-g\" B_ \" \" _U \"groupname\" U_ \"\\n\"\n    \"        Display group statistics. 
Use optional parameter \" _U\n    \"groupname\" U_ \" for retrieving stats about a single group.\\n\"\n    \"    \" _B \"--project-info\" B_ \"[=\" _U \"projid\" U_ \"]\\n\"\n    \"        Display project statistics. Use optional parameter \" _U \"projid\" U_\n            \" for retrieving stats about a single project.\\n\"\n    \"    \" _B \"--top-dirs\" B_ \"[=\" _U \"cnt\" U_ \"], \" _B \"-d\" B_ \" \" _U \"cnt\" U_ \"\\n\"\n    \"        Display largest directories. Optional argument indicates the number of directories to be returned (default: 20).\\n\"\n    \"    \" _B \"--top-size\" B_ \"[=\" _U \"cnt\" U_ \"], \" _B \"-s\" B_ \" \" _U \"cnt\" U_ \"\\n\"\n    \"        Display largest files. Optional argument indicates the number of files to be returned (default: 20).\\n\"\n    \"    \" _B \"--top-users\" B_ \"[=\" _U \"cnt\" U_ \"], \" _B \"-U\" B_ \" \" _U \"cnt\" U_ \"\\n\"\n    \"        Display top disk space consumers. Optional argument indicates the number of users to be returned (default: 20).\\n\"\n    \"    \" _B \"--oldest-files\" B_ \"[=\" _U \"cnt\" U_ \"], \" _B \"-o\" B_ \" \" _U \"cnt\" U_ \"\\n\"\n    \"        Display oldest files in the filesystem (ordered by access time).\\n\"\n    \"        Optional argument indicates the number of entries to be displayed (default: 20).\\n\"\n    \"        Tip: use '--reverse' option to display newest files.\\n\"\n    \"    \" _B \"--oldest-empty-dirs\" B_ \"[=\" _U \"cnt\" U_ \"], \" _B \"-O\" B_ \" \" _U \"cnt\" U_ \"\\n\"\n    \"        Display oldest empty directories in the filesystem (ordered by modification time).\\n\"\n    \"        Optional argument indicates the number of dirs to be returned (default: 20).\\n\"\n    \"    \" _B \"--deferred-rm\" B_ \", \" _B \"-R\" B_ \"\\n\"\n    \"        Display files to be removed from HSM.\\n\"\n    \"    \" _B \"--dump\" B_ \", \" _B \"-D\" B_ \"\\n\"\n    \"        Dump all filesystem entries.\\n\"\n    \"    \" _B \"--dump-user\" B_ \" \" _U \"username\" 
U_ \"\\n\"\n    \"        Dump all entries for the given user.\\n\"\n    \"    \" _B \"--dump-group\" B_ \" \" _U \"groupname\" U_ \"\\n\"\n    \"        Dump all entries for the given group.\\n\"\n#ifdef _LUSTRE\n    \"    \" _B \"--dump-ost\" B_ \" \" _U \"ost_index\" U_ \"|\" _U \"ost_set\" U_ \"\\n\"\n    \"        Dump all entries on the given OST or set of OSTs (e.g. 3,5-8).\\n\"\n#endif\n    \"    \" _B \"--dump-status\" B_ \" \" _U \"status_name\" U_ \":\" _U \"status_value\" U_ \"\\n\"\n    \"        Dump all entries with the given status (e.g. lhsm_status:released).\\n\";\n\nstatic const char *maintenance_help =\n    _B \"Maintenance scheduling:\" B_ \"\\n\"\n    \"    \" _B \"--next-maintenance[=\" B_ _U \"date_time\" U_ \"]\\n\"\n    \"        Set/display time of the next maintenance.\\n\"\n    \"        Expected \" _U \"date_time\" U_ \" format is yyyymmddHHMM[SS].\\n\"\n    \"    \" _B \"--cancel-maintenance\" B_ \"\\n\"\n    \"        Cancel the next scheduled maintenance.\\n\";\n\nstatic const char *filter_help =\n    _B \"Filter options:\" B_ \"\\n\"\n    \"    The following filters can be specified for reports:\\n\"\n    \"    \" _B \"-P\" B_ \" \" _U \"path\" U_ \", \" _B \"--filter-path\" B_ \" \" _U \"path\" U_ \"\\n\"\n    \"        Display the report only for objects in the given path.\\n\"\n    \"    \" _B \"-C\" B_ \" \" _U \"class_expr\" U_ \", \"\n           _B \"--filter-class\" B_ \" \" _U \"class_expr\" U_ \"\\n\"\n    \"        Only report entries in the matching fileclasses.\\n\"\n    \"    \" _B \"-p\" B_ \" \" _U \"projid\" U_ \", \"\n           _B \"--filter-project\" B_ \" \" _U \"projid\" U_ \"\\n\"\n    \"        Only report entries with the given project_id.\\n\"\n    \"    \" _B \"--count-min\" B_ \" \" _U \"cnt\" U_ \"\\n\"\n    \"        Display only topuser/userinfo with at least \" _U \"cnt\" U_ \" entries\\n\";\n\nstatic const char *acct_help =\n    _B \"Accounting report options:\" B_ \"\\n\"\n    \"    \" _B 
\"--size-profile\" B_ \", \" _B \"--szprof\" B_ \"\\n\"\n    \"        Display size profile statistics\\n\"\n    \"    \" _B \"--by-count\" B_ \"\\n\"\n    \"        Sort by count\\n\"\n    \"    \" _B \"--by-avgsize\" B_ \"\\n\"\n    \"        Sort by average file size\\n\"\n    \"    \" _B \"--by-size-ratio\" B_ \" \" _U \"range\" U_ \", \" _B \"--by-szratio\" B_ \" \" _U \"range\" U_ \"\\n\"\n    \"        Sort on the ratio of files in the given size-range\\n\"\n    \"        \" _U \"range\" U_ \": <val><sep><val>- or <val><sep><val-1> or <val><sep>inf\\n\"\n    \"           <val>: 0, 1, 32, 1K 32K, 1M, 32M, 1G, 32G, 1T\\n\"\n    \"           <sep>: ~ or ..\\n\"\n    \"           e.g: 1G..inf, 1..1K-, 0..31M\\n\"\n    /* expected format:\n       0\n       <start_val><sep>[<end_val>]\n       <start_val>: 0, 1, 32, 1K, 32K, 1M, 32M, 1G, 32G, 1T\n       <sep>: ~ or ..\n       <end_val>: <start_val>- (e.g. \"1K-\") or <start_val - 1> (e.g. 31K)\n       if no end_val is specified, the range has no upper limit\n\n       examples:\n       1G- => 1GB to infinite\n       1K..1G- => 1K to 1GB-1\n       1K..1023M => 1K to 1GB-1\n     */\n    \"    \" _B \"--reverse\" B_ \"\\n\"\n    \"        Reverse sort order\\n\"\n    \"    \" _B \"-S\" B_ \", \" _B \"--split-user-groups\" B_ \"\\n\"\n    \"        Display the report by user AND group\\n\"\n    \"    \" _B \"-J\" B_ \", \" _B \"--split-user-projects\" B_ \"\\n\"\n    \"        Display the report by user AND projid\\n\"\n    \"    \" _B \"-F\" B_ \", \" _B \"--force-no-acct\" B_ \"\\n\"\n    \"        Generate the report without using accounting table (slower)\\n\";\n\nstatic const char *cfg_help =\n    _B \"Config file options:\" B_ \"\\n\"\n    \"    \" _B \"-f\" B_ \" \" _U \"cfg_file\" U_ \", \" _B \"--config-file=\" B_ _U \"cfg_file\" U_ \"\\n\"\n    \"        Path to configuration file (or short name).\\n\";\n\nstatic const char *output_help =\n    _B \"Output format options:\" B_ \"\\n\"\n    \"    \" _B \"-c\" B_ 
\" , \" _B \"--csv\" B_ \"\\n\"\n    \"        Output stats in a csv-like format for parsing\\n\"\n    \"    \" _B \"-q\" B_ \" , \" _B \"--no-header\" B_ \"\\n\"\n    \"        Don't display column headers/footers\\n\";\n\nstatic const char *misc_help =\n    _B \"Miscellaneous options:\" B_ \"\\n\"\n    \"    \" _B \"-l\" B_ \" \" _U \"loglevel\" U_ \", \" _B \"--log-level=\" B_ _U \"loglevel\" U_ \"\\n\"\n    \"        Force the log verbosity level (overides configuration value).\\n\"\n    \"        Allowed values: CRIT, MAJOR, EVENT, VERB, DEBUG, FULL.\\n\"\n    \"    \" _B \"-h\" B_ \", \" _B \"--help\" B_ \"\\n\"\n    \"        Display a short help about command line options.\\n\"\n    \"    \" _B \"-V\" B_ \", \" _B \"--version\" B_ \"\\n\"\n    \"        Display version info\\n\";\n\nstatic inline void display_help(const char *bin_name)\n{\n    printf(cmd_help, bin_name);\n    printf(\"\\n\");\n    printf(\"%s\\n\", stats_help);\n    printf(\"%s\\n\", maintenance_help);\n    printf(\"%s\\n\", filter_help);\n    printf(\"%s\\n\", acct_help);\n    printf(\"%s\\n\", cfg_help);\n    printf(\"%s\\n\", output_help);\n    printf(\"%s\", misc_help);\n}\n\nstatic inline void display_version(const char *bin_name)\n{\n    printf(\"\\n\");\n    printf(\"Product:         \" PACKAGE_NAME \" reporting tool\\n\");\n    printf(\"Version:         \" PACKAGE_VERSION \"-\" RELEASE \"\\n\");\n    printf(\"Build:           \" COMPIL_DATE \"\\n\");\n    printf(\"\\n\");\n    printf(\"Compilation switches:\\n\");\n\n/* Access by Fid ? 
*/\n#ifdef _HAVE_FID\n    printf(\"    Address entries by FID\\n\");\n#else\n    printf(\"    Address entries by path\\n\");\n#endif\n\n#ifdef HAVE_CHANGELOGS\n    printf(\"    MDT Changelogs supported\\n\");\n#else\n    printf(\"    MDT Changelogs disabled\\n\");\n#endif\n\n    printf(\"\\n\");\n#ifdef _LUSTRE\n#ifdef LUSTRE_VERSION\n    printf(\"Lustre Version: \" LUSTRE_VERSION \"\\n\");\n#else\n    printf(\"Lustre FS support\\n\");\n#endif\n#else\n    printf(\"No Lustre support\\n\");\n#endif\n\n#ifdef _MYSQL\n    printf(\"Database binding: MySQL\\n\");\n#elif defined(_SQLITE)\n    printf(\"Database binding: SQLite\\n\");\n#else\n#error \"No database was specified\"\n#endif\n    printf(\"\\n\");\n    printf(\"Report bugs to: <\" PACKAGE_BUGREPORT \">\\n\");\n    printf(\"\\n\");\n}\n\nstatic lmgr_t lmgr;\n\n/* global filter variables */\nchar path_filter[RBH_PATH_MAX] = \"\";\nchar class_filter[1024] = \"\";\nint  projid = -1;\nunsigned int count_min = 0;\n\n/**\n * @param exact exact range value expected\n * @return index of the range it matches\n * @retval -1 on error\n */\nstatic int szrange_val2index(uint64_t val, bool exact)\n{\n    int i;\n    if (exact) {    /* search exact value */\n        for (i = 0; i < SZ_PROFIL_COUNT; i++)\n            if (val == SZ_MIN_BY_INDEX(i))\n                return i;\n    } else {    /* search val-1:  eg 1023M for 1G-, 31M for 32M- */\n\n        i = 0;\n        while (val > SZ_MIN_BY_INDEX(i)) {\n            if (i < SZ_PROFIL_COUNT - 1) {\n                if (val < SZ_MIN_BY_INDEX(i + 1)) {\n                    return i;\n                }\n            } else {\n                /* matches the last */\n                return SZ_PROFIL_COUNT - 1;\n            }\n            i++;\n        }\n    }\n    /* not found */\n    return -1;\n}\n\n#define EXPECTED_SZ_RANGES \"0, 1, 32, 1K, 32K, 1M, 32M, 1G, 32G, 1T\"\nstatic int parse_size_range(const char *str, profile_field_descr_t *p_profile)\n{\n    char argcp[1024];\n    char 
*beg = NULL;\n    char *end = NULL;\n    char *sep = NULL;\n    uint64_t sz1;\n    uint64_t sz2;\n\n    /* expected format:\n       0\n       <start_val><sep>[<end_val>]\n       <start_val>: 0, 1, 32, 1K, 32K, 1M, 32M, 1G, 32G, 1T\n       <sep>: ~ or ..\n       <end_val>: <start_val>- (e.g. \"1K-\") or <start_val - 1> (e.g. 31K)\n       if no end_val is specified, the range has no upper limit\n\n       examples:\n       1G- => 1GB to infinite\n       1K..1G- => 1K to 1GB-1\n       1K..1023M => 1K to 1GB-1\n     */\n    strcpy(argcp, str);\n    /* is there a separator? */\n    if ((sep = strchr(argcp, '~'))) {\n        *sep = '\\0';\n        beg = argcp;\n        end = sep + 1;\n    } else if ((sep = strstr(argcp, \"..\"))) {\n        *sep = '\\0';\n        beg = argcp;\n        end = sep + 2;\n    } else {    /* single value? */\n\n        beg = argcp;\n        end = NULL;\n    }\n\n    /* parse first value */\n    sz1 = str2size(beg);\n    if (sz1 == (uint64_t)-1LL) {\n        fprintf(stderr, \"Invalid argument: '%s' is not a valid size format\\n\",\n                beg);\n        return -EINVAL;\n    }\n    if (end == NULL || !strcmp(end, \"0\")) {\n        /* size value range: only 0 allowed */\n        if (sz1 != 0LL) {\n            fprintf(stderr,\n                    \"Only 0 is allowed for single value range (%s not allowed)\\n\",\n                    beg);\n            return -EINVAL;\n        }\n        p_profile->range_ratio_start = 0;\n        p_profile->range_ratio_len = 1;\n        /* sort order is determined later */\n        return 0;\n    }\n\n    p_profile->range_ratio_start = szrange_val2index(sz1, true);\n    if (p_profile->range_ratio_start == (unsigned int)-1) {\n        fprintf(stderr,\n                \"Invalid argument: %s is not a valid range start. Allowed values: \"\n                EXPECTED_SZ_RANGES \"\\n\", beg);\n        return -EINVAL;\n    }\n\n    /* to the infinite ? 
*/\n    if (end[0] == '\\0' || !strcasecmp(end, \"inf\")) {\n        if (p_profile->range_ratio_start >= SZ_PROFIL_COUNT) {\n            fprintf(stderr, \"Error: range end < range start\\n\");\n            return -EINVAL;\n        }\n        p_profile->range_ratio_len =\n            SZ_PROFIL_COUNT - p_profile->range_ratio_start;\n        return 0;\n    }\n\n    /* is second value ends with a '-' ? */\n    if (end[strlen(end) - 1] == '-') {\n        int end_idx;\n        /* exact match */\n        end[strlen(end) - 1] = '\\0';\n        sz2 = str2size(end);\n        if (sz2 == (uint64_t)-1LL) {\n            fprintf(stderr,\n                    \"Invalid argument: '%s' is not a valid size format\\n\", end);\n            return -EINVAL;\n        }\n        end_idx = szrange_val2index(sz2, true); /* actually the upper index */\n        if (end_idx <= 0) {\n            fprintf(stderr,\n                    \"Invalid argument: %s is not a valid range end. Allowed values: \"\n                    EXPECTED_SZ_RANGES \"\\n\", end);\n            return -EINVAL;\n        }\n        if (p_profile->range_ratio_start >= end_idx) {\n            fprintf(stderr, \"Error: range end < range start\\n\");\n            return -EINVAL;\n        }\n        p_profile->range_ratio_len = end_idx - p_profile->range_ratio_start;\n        return 0;\n    } else {\n        int end_idx;\n        sz2 = str2size(end);\n        if (sz2 == (uint64_t)-1LL) {\n            fprintf(stderr,\n                    \"Invalid argument: '%s' is not a valid size format\\n\", end);\n            return -EINVAL;\n        }\n        end_idx = szrange_val2index(sz2, false);\n        if (end_idx < 0) {\n            fprintf(stderr,\n                    \"Invalid argument: %s is not a valid range end: terminate it with '-'\\n\",\n                    end);\n            return -EINVAL;\n        }\n        if (p_profile->range_ratio_start > end_idx) {\n            fprintf(stderr, \"Error: range end < range start\\n\");\n      
      return -EINVAL;\n        }\n        p_profile->range_ratio_len = end_idx - p_profile->range_ratio_start + 1;\n        return 0;\n    }\n\n    return -1;\n}\n\n/**\n *  Read variable from DB and allocate value.\n **/\nstatic int getvar_helper(lmgr_t *p_mgr, const char *varname, char *value,\n                         int size)\n{\n    int rc;\n\n    rc = ListMgr_GetVar(&lmgr, varname, value, size);\n    if (rc == DB_SUCCESS)\n        return 0;\n    else if (rc == DB_NOT_EXISTS) {\n        strcpy(value, \"unknown\");\n        DisplayLog(LVL_VERB, REPORT_TAG, \"WARNING variable %s not in database\",\n                   varname);\n        return rc;\n    } else {\n        strcpy(value, \"error\");\n        DisplayLog(LVL_CRIT, REPORT_TAG,\n                   \"ERROR %d retrieving variable %s from database\", rc,\n                   varname);\n        return rc;\n    }\n}\n\nstatic void display_policy_stats(const char *name, int flags)\n{\n    char var_name[POLICY_NAME_LEN + 128];\n    char buff[1024];\n    char date[128];\n    char date2[128];\n    time_t ts1, ts2;\n    struct tm t;\n\n    if (!CSV(flags))\n        printf(\"\\nPolicy '%s':\\n\", name);\n\n    /* stats about current policy run (in any) */\n    snprintf(var_name, sizeof(var_name), \"%s\" CURR_POLICY_START_SUFFIX, name);\n    if (getvar_helper(&lmgr, var_name, buff, sizeof(buff)) != 0) {\n        if (!CSV(flags))\n            printf(\"    No current run\\n\");\n        else\n            printf(\"%s, not running\\n\", name);\n    } else if ((ts1 = str2int(buff)) > 0) {\n        snprintf(var_name, sizeof(var_name), \"%s\" CURR_POLICY_TRIGGER_SUFFIX,\n                 name);\n        getvar_helper(&lmgr, var_name, buff, sizeof(buff));\n        strftime(date, sizeof(date), \"%Y/%m/%d %T\", localtime_r(&ts1, &t));\n\n        if (!CSV(flags)) {\n            printf(\"    Current run started on %s: %s\\n\", date, buff);\n        } else {\n            printf(\"%s, running\\n\", name);\n            
printf(\"%s_current_run_start, %s\\n\", name, date);\n            printf(\"%s_current_run_trigger, %s\\n\", name, buff);\n        }\n    }\n\n    /* stats about previous policy run (in any) */\n    snprintf(var_name, sizeof(var_name), \"%s\" LAST_POLICY_START_SUFFIX, name);\n    if (getvar_helper(&lmgr, var_name, buff, sizeof(buff)) == 0\n        && ((ts1 = str2int(buff)) > 0)) {\n        time_t dur;\n        char buff2[1024];\n\n        strftime(date, sizeof(date), \"%Y/%m/%d %T\", localtime_r(&ts1, &t));\n\n        snprintf(var_name, sizeof(var_name), \"%s\" LAST_POLICY_TRIGGER_SUFFIX,\n                 name);\n        getvar_helper(&lmgr, var_name, buff, sizeof(buff));\n\n        snprintf(var_name, sizeof(var_name), \"%s\" LAST_POLICY_END_SUFFIX, name);\n        if (getvar_helper(&lmgr, var_name, buff2, sizeof(buff2)) == 0\n            && ((ts2 = str2int(buff2)) > 0)) {\n            strftime(date2, sizeof(date2), \"%Y/%m/%d %T\",\n                     localtime_r(&ts2, &t));\n            dur = ts2 - ts1;\n        } else {\n            strncpy(date2, \"unknown\", sizeof(date2));\n            dur = -1;\n        }\n\n        snprintf(var_name, sizeof(var_name), \"%s\" LAST_POLICY_STATUS_SUFFIX,\n                 name);\n        getvar_helper(&lmgr, var_name, buff2, sizeof(buff2));\n\n        if (!CSV(flags)) {\n            printf(\"    Last complete run: %s\\n\", buff);\n            printf(\"        - Started on %s\\n\", date);\n            if (dur != -1) {\n                FormatDuration(buff, sizeof(buff), dur);\n                printf(\"        - Finished on %s (duration: %s)\\n\", date2,\n                       buff);\n                printf(\"        - Summary: %s\\n\", buff2);\n            }\n        } else {\n            printf(\"%s_last_run_start, %s\\n\", name, date);\n            printf(\"%s_last_run_end, %s\\n\", name, date2);\n            printf(\"%s_last_run_trigger, %s\\n\", name, buff);\n            printf(\"%s_last_run_summary, %s\\n\", name, 
buff2);\n        }\n    }\n}\n\n#ifdef HAVE_CHANGELOGS\nstatic int64_t read_int64_helper(const char *str)\n{\n    int64_t tmp = str2bigint(str);\n\n    if (tmp == -1LL) {\n        DisplayLog(LVL_CRIT, REPORT_TAG,\n                   \"Warning: invalid int64 value from DB: '%s'\", str);\n        return 0;\n    }\n    return tmp;\n}\n\nstatic void str2timeval(struct timeval *tv, char *str)\n{\n    char *save = NULL;\n\n    tv->tv_sec = tv->tv_usec = 0;\n\n    str = strtok_r(str, \".\", &save);\n    if (!str)\n        return;\n    tv->tv_sec = read_int64_helper(str);\n\n    str = strtok_r(NULL, \".\", &save);\n    if (!str)\n        return;\n    tv->tv_usec = read_int64_helper(str);\n}\n\nstatic void str2rec_info(char *str, uint64_t *rec_id, struct timeval *rec_time,\n                          struct timeval *step_time)\n{\n    char *save = NULL;\n    *rec_id = 0;\n    rec_time->tv_sec = rec_time->tv_usec = 0;\n    step_time->tv_sec = step_time->tv_usec = 0;\n\n    /* format is rec_id:rec_time:step_time */\n    str = strtok_r(str, \":\", &save);\n    if (!str)\n        return;\n    *rec_id = read_int64_helper(str);\n\n    str = strtok_r(NULL, \":\", &save);\n    if (!str)\n        return;\n    str2timeval(rec_time, str);\n\n    str = strtok_r(NULL, \":\", &save);\n    if (!str)\n        return;\n    str2timeval(step_time, str);\n}\n\n/** Display stats for a given changelog reader and processing step */\nstatic int display_rec_stats(lmgr_t *lmgr, const char *name, const char *prefix,\n                             int index, int flags)\n{\n    char *varname = NULL;\n    char value[MAX_VAR_LEN];\n    int rc;\n    uint64_t rec_id = 0;\n    struct timeval tv_rec = {0};\n    struct timeval tv_step = {0};\n\n    if (asprintf(&varname, \"%s_MDT%04X\", prefix, index) == -1\n                 || varname == NULL)\n        return -ENOMEM;\n\n    rc = ListMgr_GetVar(lmgr, varname, value, sizeof(value));\n    free(varname);\n    if (rc == DB_NOT_EXISTS)\n        return -ENOENT;\n    
else if (rc != DB_SUCCESS)\n        return -EIO;\n\n    str2rec_info(value, &rec_id, &tv_rec, &tv_step);\n\n    if (CSV(flags)) {\n        printf(\"MDT%04X, %s, rec_id=%\"PRIu64\", rec_time=%lu.%06lu, \"\n               \"step_time=%lu.%06lu\\n\", index, name, rec_id, tv_rec.tv_sec,\n               tv_rec.tv_usec, tv_step.tv_sec, tv_step.tv_usec);\n    } else {\n        struct tm t;\n        char t1[128];\n        char t2[128];\n\n        if (!NOHEADER(flags))\n            printf(\"Changelog stats for MDT%04X:\\n\", index);\n\n        strftime(t1, sizeof(t1), \"%Y/%m/%d %T\",\n                 localtime_r(&tv_rec.tv_sec, &t));\n        strftime(t2, sizeof(t2), \"%Y/%m/%d %T\",\n                 localtime_r(&tv_step.tv_sec, &t));\n\n        printf(\"    %s record: rec_id=%\"PRIu64\", rec_time=%s.%06lu, \"\n               \"step_time=%s.%06lu\\n\", name, rec_id, t1, tv_rec.tv_usec, t2,\n               tv_step.tv_usec);\n    }\n\n    return 0;\n}\n\n/** Display stats per changelog type for given MDT */\nstatic void display_cl_type_stats(lmgr_t *lmgr, int index, int flags)\n{\n    char *varname = NULL;\n    char value[1024];\n    time_t interval;\n    int i;\n\n    /* changelog stats */\n    if (asprintf(&varname, \"%s_MDT%04X\", CL_DIFF_INTERVAL, index) == -1\n        || varname == NULL)\n        return;\n\n    if (!NOHEADER(flags)) {\n        if (!CSV(flags)) {\n            printf(\"    Changelog stats per type (MDT%04X):\\n\", index);\n            printf(\"        %5s  %15s \\t(%s)\\t(%s)\\n\", \"type\", \"total\", \"diff\",\n                   \"rate\");\n        } else\n            printf(\"%7s, %11s, %12s, %8s, %s\\n\",\n                   \"mdt\", \"record_type\", \"total\", \"diff\", \"rate (ops/sec)\");\n    }\n\n    /* get diff interval */\n    if (ListMgr_GetVar(lmgr, varname, value, sizeof(value)) != DB_SUCCESS)\n        interval = 0;\n    else\n        interval = str2int(value);\n    free(varname);\n\n    for (i = 0; i < CL_LAST; i++) {\n        char 
*varname2;\n        char diff_str[256];\n        unsigned long long diff;\n        double rate;\n        int rc;\n\n        if (asprintf(&varname, \"%s_MDT%04X_%s\", CL_COUNT_PREFIX, index,\n                     changelog_type2str(i)) == -1 || varname == NULL)\n            continue;\n\n        if (asprintf(&varname2, \"%s_MDT%04X_%s\", CL_DIFF_PREFIX, index,\n                     changelog_type2str(i)) == -1 || varname2 == NULL) {\n            free(varname);\n            continue;\n        }\n\n        rc = ListMgr_GetVar(lmgr, varname, value, sizeof(value));\n        if (rc == DB_NOT_EXISTS)\n            strcpy(value, \"0\");\n        else if (rc != 0)\n            strcpy(value, \"db_error\");\n\n        if ((interval > 0)\n            && (ListMgr_GetVar(lmgr, varname2, diff_str, sizeof(value)) ==\n                DB_SUCCESS)) {\n            diff = str2bigint(diff_str);\n            rate = (0.0 + diff) / (0.0 + interval);\n        } else {\n            diff = 0;\n            rate = 0.0;\n        }\n\n        if (CSV(flags))\n            printf(\"MDT%04X, %11s, %12s, %8llu, %8.2f\\n\", index,\n                   changelog_type2str(i), value, diff, rate);\n        else if (diff != 0)\n            printf(\"        %5s: %15s \\t(+%llu)\\t(%.2f/sec)\\n\",\n                   changelog_type2str(i), value, diff, rate);\n        else if (read_int64_helper(value) > 0)\n            printf(\"        %5s: %15s\\n\", changelog_type2str(i), value);\n\n    }\n\n    if (!CSV(flags))\n        printf(\"\\n\");\n}\n\nstatic int display_changelog_stats(lmgr_t *lmgr, int index, int flags)\n{\n    int rc;\n\n    rc = display_rec_stats(lmgr, \"last_read\", CL_LAST_READ_REC, index, flags);\n    if (rc)\n        return rc;\n    rc = display_rec_stats(lmgr, \"last_pushed\", CL_LAST_PUSHED_REC, index,\n                           flags | OPT_FLAG_NOHEADER);\n    if (rc)\n        return rc;\n    rc = display_rec_stats(lmgr, \"last_committed\", CL_LAST_COMMITTED_REC, index,\n                   
        flags | OPT_FLAG_NOHEADER);\n    if (rc)\n        return rc;\n    rc = display_rec_stats(lmgr, \"last_cleared\", CL_LAST_CLEARED_REC, index,\n                           flags | OPT_FLAG_NOHEADER);\n\n    display_cl_type_stats(lmgr, index, flags);\n\n    return rc;\n}\n#endif /* HAVE_CHANGELOGS */\n\nstatic void report_activity(int flags)\n{\n    char value[1024];\n    time_t timestamp;\n    time_t timestamp2;\n    char date[128];\n    struct tm t;\n    int rc;\n    char scan_status[128];\n    int nb_threads;\n    int i;\n\n    if (!CSV(flags))\n        printf(\"\\nFilesystem scan activity:\\n\\n\");\n\n    /* Previous FS scan */\n\n    if (getvar_helper(&lmgr, PREV_SCAN_START_TIME, value, sizeof(value)) == 0) {\n        timestamp = str2int(value);\n        if (timestamp >= 0) {\n            strftime(date, 128, \"%Y/%m/%d %T\", localtime_r(&timestamp, &t));\n            if (!CSV(flags)) {\n                printf(\"    Previous filesystem scan:\\n\");\n                printf(\"            start:           %s\\n\", date);\n            } else\n                printf(\"previous_scan_start, %s\\n\", date);\n\n            if (getvar_helper(&lmgr, PREV_SCAN_END_TIME, value, sizeof(value))\n                == 0) {\n                timestamp2 = str2int(value);\n                if (timestamp2 >= timestamp) {\n                    int dur = (int)difftime(timestamp2, timestamp);\n                    if (!CSV(flags)) {\n                        FormatDuration(value, 1024, dur);\n                        printf(\"            duration:        %s\\n\\n\", value);\n                    } else\n                        printf(\"previous_scan_duration, %i sec\\n\", dur);\n                }\n            }\n        }\n    }\n\n    /* Last FS scan */\n\n    // status\n    rc = getvar_helper(&lmgr, LAST_SCAN_STATUS, scan_status,\n                       sizeof(scan_status));\n\n    if (rc == 0) {\n        if (!CSV(flags)) {\n            printf(\"    Last filesystem scan:\\n\");\n        
    printf(\"            status:          %s\\n\", scan_status);\n        } else\n            printf(\"last_scan_status, %s\\n\", scan_status);\n    } else if (rc == DB_NOT_EXISTS) {\n        if (CSV(flags))\n            printf(\"last_scan_status, no scan done\\n\");\n        else\n            printf(\"    Filesystem has never been scanned\\n\");\n    }\n    // start\n    if (getvar_helper(&lmgr, LAST_SCAN_START_TIME, value, sizeof(value)) == 0)\n        timestamp = str2int(value);\n    else\n        timestamp = -1;\n\n    if (timestamp > 0) {\n        strftime(date, 128, \"%Y/%m/%d %T\", localtime_r(&timestamp, &t));\n        if (CSV(flags))\n            printf(\"last_scan_start, %s\\n\", date);\n        else {\n            int ago = difftime(time(NULL), timestamp);\n            if (!strcmp(scan_status, SCAN_STATUS_RUNNING)) {\n                FormatDuration(value, 1024, ago);\n                printf(\"            start:           %s (%s ago)\\n\", date,\n                       value);\n            } else\n                printf(\"            start:           %s\\n\", date);\n        }\n    }\n    // last action\n    if (!strcmp(scan_status, SCAN_STATUS_RUNNING) &&\n        getvar_helper(&lmgr, LAST_SCAN_LAST_ACTION_TIME, value,\n                      sizeof(value)) == 0) {\n        timestamp2 = str2int(value);\n        if (timestamp2 > 0) {\n            strftime(date, 128, \"%Y/%m/%d %T\", localtime_r(&timestamp2, &t));\n            if (CSV(flags))\n                printf(\"last_action_time, %s\\n\", date);\n            else {\n                int ago = difftime(time(NULL), timestamp2);\n                if (!strcmp(scan_status, SCAN_STATUS_RUNNING)) {\n                    FormatDuration(value, 1024, ago);\n                    printf(\"            last action:     %s (%s ago)\\n\", date,\n                           value);\n                } else\n                    printf(\"            last action:     %s\\n\", date);\n            }\n        }\n    }\n    // 
end\n    if (getvar_helper(&lmgr, LAST_SCAN_END_TIME, value, sizeof(value)) == 0) {\n        timestamp2 = str2int(value);\n        if (timestamp2 >= timestamp) {\n            strftime(date, 128, \"%Y/%m/%d %T\", localtime_r(&timestamp2, &t));\n            if (CSV(flags))\n                printf(\"last_scan_end, %s\\n\", date);\n            else\n                printf(\"            end:             %s\\n\", date);\n\n            // duration\n            if (timestamp > 0) {\n                int dur = (int)difftime(timestamp2, timestamp);\n                if (CSV(flags))\n                    printf(\"last_scan_duration, %i sec\\n\", dur);\n                else {\n                    FormatDuration(value, 1024, dur);\n                    printf(\"            duration:        %s\\n\", value);\n                }\n            }\n        }\n    }\n\n    rc = getvar_helper(&lmgr, LAST_SCAN_ENTRIES_SCANNED, value, sizeof(value));\n    if (rc == 0) {\n        // entries scanned\n        if (!CSV(flags)) {\n            printf(\"\\n\");\n            printf(\"         Statistics:\\n\");\n        }\n        if (CSV(flags))\n            printf(\"entries_scanned, %s\\n\", value);\n        else\n            printf(\"            entries scanned: %s\\n\", value);\n\n        // errors\n        getvar_helper(&lmgr, LAST_SCAN_ERRORS, value, sizeof(value));\n        if (CSV(flags))\n            printf(\"scan_errors, %s\\n\", value);\n        else if (strcmp(value, \"0\"))    /* don't display 0 */\n            printf(\"            errors:          %s\\n\", value);\n\n        // timeouts\n        getvar_helper(&lmgr, LAST_SCAN_TIMEOUTS, value, sizeof(value));\n        if (CSV(flags))\n            printf(\"scan_timeouts, %s\\n\", value);\n        else if (strcmp(value, \"0\"))    /* don't display 0 */\n            printf(\"            timeouts:        %s\\n\", value);\n\n        // nb threads\n        getvar_helper(&lmgr, LAST_SCAN_NB_THREADS, value, sizeof(value));\n        nb_threads = 
atoi(value);\n        if (CSV(flags))\n            printf(\"scan_nb_threads, %i\\n\", nb_threads);\n        else\n            printf(\"            # threads:       %i\\n\", nb_threads);\n\n        // average speed\n        getvar_helper(&lmgr, LAST_SCAN_AVGMSPE, value, sizeof(value));\n        double speed = 0.0;\n        double avgmspe = atof(value);\n        if (avgmspe > 0)\n            speed = (1000.0 / avgmspe) * nb_threads;\n        if (CSV(flags))\n            printf(\"scan_average_speed, %.2f entries/sec\\n\", speed);\n        else\n            printf(\"            average speed:   %.2f entries/sec\\n\", speed);\n\n        // current speed\n        if (!strcmp(scan_status, SCAN_STATUS_RUNNING)) {\n            getvar_helper(&lmgr, LAST_SCAN_CURMSPE, value, sizeof(value));\n            double speed = 0.0;\n            double curmspe = atof(value);\n            if (curmspe > 0.0)\n                speed = (1000.0 / curmspe) * nb_threads;\n            if (CSV(flags))\n                printf(\"scan_current_speed, %.2f\\n\", speed);\n            else\n                printf(\"        >>> current speed:   %.2f entries/sec\\n\",\n                       speed);\n        }\n    }\n\n    if (!CSV(flags))\n        printf(\"\\n\");\n\n#ifdef HAVE_CHANGELOGS\n    /* read and display stats for MDT indexes from 0 and stop after N indexes\n     * are not found. 
This allows a certain number of gaps in MDT indexes.\n     */\n    int allowed_gaps = 16;\n\n    for (i = 0; allowed_gaps > 0; i++) {\n        rc = display_changelog_stats(&lmgr, i, flags);\n        if (rc == -ENOENT)\n            allowed_gaps--;\n        else if (rc != 0)\n            break;\n    }\n\n    if (!CSV(flags))\n        printf(\"\\n\");\n#endif\n\n    /* max usage */\n    rc = ListMgr_GetVar(&lmgr, USAGE_MAX_VAR, value, sizeof(value));\n    if (rc == DB_SUCCESS) {\n        if (CSV(flags))\n            printf(\"usage_max, %s\\n\", value);\n        else\n            printf(\"Storage unit usage max:   %s%%\\n\", value);\n    } else if (rc == DB_NOT_EXISTS) {\n        if (CSV(flags))\n            printf(\"usage_max, not checked\\n\");\n        else\n            printf(\"Storage usage has never been checked\\n\");\n    } else {\n        DisplayLog(LVL_CRIT, REPORT_TAG,\n                   \"ERROR retrieving variable \" USAGE_MAX_VAR \" from database\");\n    }\n\n    /* display stats for all policies defined in config file */\n    for (i = 0; i < policies.policy_count; i++) {\n        /* retrieve stats for policy */\n        display_policy_stats(policies.policy_list[i].name, flags);\n    }\n}\n\ntypedef enum { DUMP_ALL, DUMP_USR, DUMP_GROUP, DUMP_OST,\n        DUMP_STATUS } type_dump;\n\n/**\n * Build filter on path.\n * Initialize the filter if it is not already initialized.\n */\nstatic int append_path_filter(lmgr_filter_t *filter, bool *initialized)\n{\n    filter_value_t fv;\n    char path_regexp[RBH_PATH_MAX+10] = \"\";\n    char tmp[RBH_PATH_MAX] = \"\";\n    size_t len;\n    int rc;\n\n    if ((initialized != NULL) && !(*initialized)) {\n        lmgr_simple_filter_init(filter);\n        *initialized = true;\n    }\n\n    len = strlen(path_filter);\n    /* remove last slash */\n    if (path_filter[len - 1] == '/') {\n        path_filter[len - 1] = '\\0';\n        len--;\n    }\n\n    /* If the filter is root, skip filter creation. 
*/\n    if (strcmp(path_filter, global_config.fs_path) == 0)\n        return 0;\n\n    /* If the length of the specified path is less than\n     * the length of the FS root, it has no chance to be\n     * in it.\n     * However we can be permissive if the path is an upper\n     * level directory e.g. list entries of \"/mnt/lustre\"\n     * if the option is -P \"/mnt\".\n     */\n    if (len < strlen(global_config.fs_path)) {\n        /* be permissive if the path is an upper level directory */\n        if (strncmp(path_filter, global_config.fs_path, len) == 0\n            && global_config.fs_path[len] == '/') {\n            fprintf(stderr, \"WARNING: '%s' is a top level directory of '%s'. \"\n                    \"Filter ignored.\\n\", path_filter, global_config.fs_path);\n            return 0;\n        } else {\n            fprintf(stderr, \"ERROR: '%s' is not under filesystem root '%s'.\\n\",\n                    path_filter, global_config.fs_path);\n            return -EINVAL;\n        }\n    }\n\n    /* Special characters in a POSIX extended regex: .[{}()\\*+?|^$\n     *\n     * Escape POSIX ERE special characters that have no meaning in a\n     * globbing pattern. */\n    /* Don't escape characters in the FS root as list manager does\n     * an exact check on it using strcmp, which is not regexp aware. 
*/\n    rc = str_escape_charset(tmp, sizeof(tmp),\n                            path_filter + strlen(global_config.fs_path),\n                            \".^$+(){}\\\\|\");\n    if (rc < 0) {\n        DisplayLog(LVL_CRIT, REPORT_TAG,\n                   \"Error %d: '%s' is too big to be properly escaped\",\n                   rc, path_filter);\n        return rc;\n    }\n\n    /* Translate those that have a different meaning */\n    str_subst(tmp, \"*\", \".*\");\n    str_subst(tmp, \"?\", \".\");\n\n    /* match 'path$' OR 'path/.*' */\n    rc = snprintf(path_regexp, sizeof(path_regexp), \"%s%s($|/.*)\",\n                  global_config.fs_path, tmp);\n    fv.value.val_str = path_regexp;\n    if (rc > sizeof(path_regexp)) {\n        DisplayLog(LVL_CRIT, REPORT_TAG,\n                   \"Error: '%s%s($|/.*)' is toolong to fit into regexp\",\n                   global_config.fs_path, tmp);\n        return -ENAMETOOLONG;\n    }\n\n    return lmgr_simple_filter_add(filter, ATTR_INDEX_fullpath, RLIKE, fv, 0);\n}\n\n/**\n * Build filter on fileclass.\n * Initialize the filter if it is not already initialized.\n */\nstatic int append_class_filter(lmgr_filter_t *filter, bool *initialized)\n{\n    filter_value_t fv;\n\n    if ((initialized != NULL) && !(*initialized)) {\n        lmgr_simple_filter_init(filter);\n        *initialized = true;\n    }\n\n    fv.value.val_str = class_filter;\n\n    /* list manager as a specific fileclass management,\n     * as fileclass attr may be a list of fileclasses */\n    return lmgr_simple_filter_add(filter, ATTR_INDEX_fileclass, LIKE, fv, 0);\n}\n\n/**\n * add filter on project id\n */\nstatic int append_project_filter(lmgr_filter_t *filter, bool *initialized)\n{\n    filter_value_t fv;\n\n    if ((initialized != NULL) && !(*initialized)) {\n        lmgr_simple_filter_init(filter);\n        *initialized = true;\n    }\n\n    fv.value.val_uint = projid;\n\n    return lmgr_simple_filter_add(filter, ATTR_INDEX_projid, EQUAL, fv, 
0);\n}\n\n\n/*\n * Append global filters on path, class...\n * \\param do_display [in] display filters?\n * \\param initialized [in/out] indicate if the filter is initialized.\n */\nstatic int mk_global_filters(lmgr_filter_t *filter, bool do_display,\n                             bool *initialized)\n{\n    int rc;\n\n    /* is a filter on path specified? */\n    if (!EMPTY_STRING(path_filter)) {\n        if (do_display)\n            printf(\"filter path: %s\\n\", path_filter);\n\n        rc = append_path_filter(filter, initialized);\n        if (rc)\n            return rc;\n    }\n\n    if (!EMPTY_STRING(class_filter)) {\n        if (do_display)\n            printf(\"filter class: %s\\n\", class_format(class_filter));\n\n        rc = append_class_filter(filter, initialized);\n        if (rc)\n            return rc;\n    }\n\n   if (projid != -1) {\n        if (do_display)\n            printf(\"filter projid: %u\\n\", projid);\n\n        rc = append_project_filter(filter, initialized);\n        if (rc)\n            return rc;\n    }\n\n    return 0;\n}\n\n/**\n * Manage fid2path resolution\n */\nstatic int TryId2path(lmgr_t *p_mgr, const entry_id_t *p_id, char *path)\n{\n    static int is_init = 0;\n    static int is_resolvable = 0;\n    int rc;\n    char value[1024];\n\n    if (!is_init) {\n        is_init = 1;\n        /* try to get fspath from DB */\n        rc = ListMgr_GetVar(&lmgr, FS_PATH_VAR, value, sizeof(value));\n        if (rc)\n            return -1;\n\n        if (InitFS() == 0)\n            is_resolvable = 1;\n        else\n            return -1;\n    }\n    if (!is_resolvable)\n        return -1;\n\n#ifdef _HAVE_FID\n    /* filesystem is mounted and fsname can be get: solve the fid */\n    rc = Lustre_GetFullPath(p_id, path, RBH_PATH_MAX);\n    return rc;\n#else\n    entry_id_t root_id;\n    if (Path2Id(global_config.fs_path, &root_id) == 0) {\n        if (entry_id_equal(p_id, &root_id)) {\n            strcpy(path, global_config.fs_path);\n           
 return 0;\n        }\n    }\n    return -1;\n#endif\n}\n\nstatic const char *ResolvName(const entry_id_t *p_id, attr_set_t *attrs,\n                              char *buff)\n{\n    if (ATTR_MASK_TEST(attrs, fullpath)) {\n        return ATTR(attrs, fullpath);\n    }\n    /* try to get dir path from fid if it's mounted */\n    else if (TryId2path(&lmgr, p_id, ATTR(attrs, fullpath)) == 0) {\n        struct stat st;\n        ATTR_MASK_SET(attrs, fullpath);\n\n        /* we're lucky, try lstat now! */\n        if (lstat(ATTR(attrs, fullpath), &st) == 0)\n            stat2rbh_attrs(&st, attrs, true);\n        return ATTR(attrs, fullpath);\n    }\n    /* if parent id and name are set: try to resolve parent */\n    else if (ATTR_MASK_TEST(attrs, parent_id) && ATTR_MASK_TEST(attrs, name)) {\n        char tmpstr[RBH_PATH_MAX];\n        if (TryId2path(&lmgr, &ATTR(attrs, parent_id), tmpstr) == 0) {\n            int rc;\n            rc = snprintf(ATTR(attrs, fullpath), RBH_PATH_MAX, \"%s/%s\",\n                          tmpstr, ATTR(attrs, name));\n            if (rc >= RBH_PATH_MAX) {\n                DisplayLog(LVL_EVENT, REPORT_TAG,\n                           \"ERROR: Entry '%s/%s' truncated\",\n                           tmpstr, ATTR(attrs, name));\n            }\n            return ATTR(attrs, fullpath);\n        } else {    /* print <parent_id>/name */\n\n            sprintf(buff, DFID \"/%s\", PFID(&ATTR(attrs, parent_id)),\n                    ATTR(attrs, name));\n            return buff;\n        }\n    } else {\n        /* last case: display the raw ID */\n        sprintf(buff, DFID, PFID(p_id));\n        return buff;\n    }\n}\n\nstatic void dump_entries(type_dump type, int int_arg, char *str_arg,\n                         value_list_t *ost_list, int flags)\n{\n    /* get basic information */\n    attr_mask_t mask_sav;\n    int rc;\n    lmgr_filter_t filter;\n    filter_value_t fv;\n    struct lmgr_iterator_t *it;\n    attr_set_t attrs;\n    entry_id_t id;\n    
int custom_len = 0;\n\n    unsigned long long total_size, total_count;\n    total_size = total_count = 0;\n\n    /* list of attributes to be use for all dumps\n     * except ost dump and status dump */\n    static unsigned int list_std[] = {\n        ATTR_INDEX_type,\n        ATTR_INDEX_size,\n        ATTR_INDEX_uid,\n        ATTR_INDEX_gid,\n        ATTR_INDEX_fileclass\n    };\n\n    static unsigned int list_status[] = {\n        ATTR_INDEX_type,\n        0,  /* to be set in the code */\n        ATTR_INDEX_size,\n        ATTR_INDEX_uid,\n        ATTR_INDEX_gid,\n        ATTR_INDEX_fileclass,\n        ATTR_INDEX_fullpath,\n    };\n\n    /* list of attributes to be used for OST dumps */\n    static unsigned int list_stripe[] = {\n        ATTR_INDEX_type,\n        ATTR_INDEX_size,\n        ATTR_INDEX_fullpath,\n        ATTR_INDEX_stripe_info,\n        ATTR_INDEX_stripe_items\n    };\n    unsigned int *list = NULL;\n    bool list_allocated = false;\n    int list_cnt = 0;\n\n    if (type == DUMP_OST) {\n        list = list_stripe;\n        list_cnt = sizeof(list_stripe) / sizeof(int);\n    } else if (type == DUMP_STATUS) {\n        list = list_status;\n        list_cnt = sizeof(list_status) / sizeof(int);\n        list[1] = ATTR_INDEX_FLG_STATUS | int_arg;  /* status index */\n    } else {    /* std dump: display all status */\n\n        int i;\n\n        list_cnt = sizeof(list_std) / sizeof(int);\n        /* add all policy status (except for removed entries)\n         * + 1 for fullpath (always last) */\n        list = calloc(list_cnt + sm_inst_count + 1, sizeof(int));\n        if (list == NULL)\n            exit(ENOMEM);\n\n        list_allocated = true;\n\n        memcpy(list, list_std, sizeof(list_std));\n        for (i = 0; i < sm_inst_count; i++) {\n            list[list_cnt] = ATTR_INDEX_FLG_STATUS | i;\n            list_cnt++;\n        }\n        /* add fullpath */\n        list[list_cnt] = ATTR_INDEX_fullpath;\n        list_cnt++; /* +1 for fullpath */\n    
}\n\n    lmgr_simple_filter_init(&filter);\n\n    /* append global filters */\n    if (mk_global_filters(&filter, !NOHEADER(flags), NULL) != 0) {\n        DisplayLog(LVL_CRIT, REPORT_TAG, \"ERROR: Failed to build filter\");\n        return;\n    }\n\n    /* what do we dump? */\n    switch (type) {\n    case DUMP_ALL:\n        /* no filter */\n        break;\n    case DUMP_USR:\n        if (set_uid_val(str_arg, &fv.value))\n            return;\n        lmgr_simple_filter_add(&filter, ATTR_INDEX_uid,\n                               WILDCARDS_IN(str_arg) ? LIKE : EQUAL, fv, 0);\n        break;\n    case DUMP_GROUP:\n        if (set_gid_val(str_arg, &fv.value))\n            return;\n        lmgr_simple_filter_add(&filter, ATTR_INDEX_gid,\n                               WILDCARDS_IN(str_arg) ? LIKE : EQUAL, fv, 0);\n        break;\n    case DUMP_OST:\n        if (ost_list->count == 1) {\n            fv.value.val_uint = ost_list->values[0].val_uint;\n            lmgr_simple_filter_add(&filter, ATTR_INDEX_stripe_items, EQUAL, fv,\n                                   0);\n        } else {\n            fv.list = *ost_list;\n            lmgr_simple_filter_add(&filter, ATTR_INDEX_stripe_items, IN, fv, 0);\n        }\n        break;\n\n    case DUMP_STATUS:\n        /* int arg: smi index */\n        /* str arg: status value */\n\n        fv.value.val_str = str_arg;\n        lmgr_simple_filter_add(&filter, int_arg | ATTR_INDEX_FLG_STATUS, EQUAL,\n                               fv,\n                               EMPTY_STRING(str_arg) ? 
FILTER_FLAG_ALLOW_NULL :\n                               0);\n        break;\n\n    default:\n        DisplayLog(LVL_CRIT, REPORT_TAG, \"ERROR: unexpected dump command\");\n        return;\n    }\n\n    /* attributes to be retrieved */\n    ATTR_MASK_INIT(&attrs);\n    mask_sav = attrs.attr_mask = list2mask(list, list_cnt);\n\n    it = ListMgr_Iterator(&lmgr, &filter, NULL, NULL);\n\n    lmgr_simple_filter_free(&filter);\n\n    if (it == NULL) {\n        DisplayLog(LVL_CRIT, REPORT_TAG,\n                   \"ERROR: Could not dump entries from database.\");\n        return;\n    }\n\n    if (!(NOHEADER(flags))) {\n        if (type != DUMP_OST)\n            print_attr_list(0, list, list_cnt, NULL, CSV(flags));\n        else {\n            char tmp[128];\n            if (ost_list->count == 1)\n                sprintf(tmp, \"data_on_ost%u\", ost_list->values[0].val_uint);\n            else\n                sprintf(tmp, \"data_on_ost[%s]\", str_arg);\n\n            custom_len = strlen(tmp);\n            /* if dump_ost is specified: add specific field\n             * to indicate if file really has data on the given OST\n             */\n            print_attr_list_custom(0, list, list_cnt, NULL, CSV(flags), tmp,\n                                   custom_len);\n        }\n    }\n\n    while ((rc = ListMgr_GetNext(it, &id, &attrs)) == DB_SUCCESS) {\n        total_count++;\n        total_size += ATTR(&attrs, size);\n\n        if (type != DUMP_OST)\n            print_attr_values(0, list, list_cnt, &attrs, &id, CSV(flags), NULL);\n#ifdef _LUSTRE\n        else {\n            const char *has_data;\n\n            if (!ATTR_MASK_TEST(&attrs, size)\n                || !ATTR_MASK_TEST(&attrs, stripe_info)\n                || !ATTR_MASK_TEST(&attrs, stripe_items))\n                has_data = \"?\";\n            else {\n                int i;\n                has_data = \"no\";\n                for (i = 0; i < ost_list->count; i++) {\n                    if (DataOnOST\n              
          (ATTR(&attrs, size), ost_list->values[i].val_uint,\n                         &ATTR(&attrs, stripe_info), &ATTR(&attrs,\n                                                           stripe_items))) {\n                        has_data = \"yes\";\n                        break;\n                    }\n                }\n            }\n\n            /* if dump_ost is specified: add specific field\n             * to indicate if file really has data on the given OST.\n             */\n            print_attr_values_custom(0, list, list_cnt, &attrs, &id,\n                                     CSV(flags), NULL, has_data, custom_len);\n        }\n#endif\n\n        ListMgr_FreeAttrs(&attrs);\n\n        /* prepare next call */\n        attrs.attr_mask = mask_sav;\n    }\n\n    ListMgr_CloseIterator(it);\n\n    if (list_allocated)\n        free(list);\n\n    /* display summary */\n    if (!NOHEADER(flags)) {\n        char strsz[128];\n        FormatFileSize(strsz, 128, total_size);\n        printf(\"\\nTotal: %llu entries, %llu bytes (%s)\\n\",\n               total_count, total_size, strsz);\n    }\n}\n\nstatic void report_fs_info(int flags)\n{\n    unsigned int result_count;\n    struct lmgr_report_t *it;\n    int rc;\n    lmgr_filter_t filter;\n    bool is_filter = false;\n#define FSINFOCOUNT 7\n    db_value_t result[FSINFOCOUNT];\n    unsigned long long total_size, total_count, total_used;\n    /* To be retrieved:\n     * - type\n     * - number of items for this type\n     * - MIN/MAX/SUM size\n     * - MIN/MAX/SUM dircount\n     */\n    report_field_descr_t fs_info[FSINFOCOUNT] = {\n        {ATTR_INDEX_type, REPORT_GROUP_BY, SORT_ASC, false, 0, FV_NULL},\n        {ATTR_INDEX_COUNT, REPORT_COUNT, SORT_NONE, false, 0, FV_NULL},\n        {ATTR_INDEX_size, REPORT_SUM, SORT_NONE, false, 0, FV_NULL},\n        {ATTR_INDEX_blocks, REPORT_SUM, SORT_NONE, false, 0, FV_NULL},\n        {ATTR_INDEX_size, REPORT_MIN, SORT_NONE, false, 0, FV_NULL},\n        {ATTR_INDEX_size, 
REPORT_MAX, SORT_NONE, false, 0, FV_NULL},\n        {ATTR_INDEX_size, REPORT_AVG, SORT_NONE, false, 0, FV_NULL},\n    };\n    lmgr_iter_opt_t opt;\n    profile_u prof;\n    bool display_header = !NOHEADER(flags);\n\n    total_size = total_count = total_used = 0;\n\n    if (REVERSE(flags))\n        fs_info[0].sort_flag = SORT_DESC;\n\n    if (count_min) {\n        fs_info[1].filter = true;\n        fs_info[1].filter_compar = MORETHAN;\n        fs_info[1].filter_value.value.val_biguint = count_min;\n    }\n\n    /* no limit */\n    opt.list_count_max = 0;\n    /* skip missing entries */\n    opt.allow_no_attr = 0;\n    opt.force_no_acct = FORCE_NO_ACCT(flags);\n\n    /* append global filters */\n    if (mk_global_filters(&filter, !NOHEADER(flags), &is_filter) != 0) {\n        DisplayLog(LVL_CRIT, REPORT_TAG, \"ERROR: Failed to build filter\");\n        return;\n    }\n\n    if (is_filter)\n        it = ListMgr_Report(&lmgr, fs_info, FSINFOCOUNT,\n                            SPROF(flags) ? &size_profile : NULL, &filter, &opt);\n    else\n        it = ListMgr_Report(&lmgr, fs_info, FSINFOCOUNT,\n                            SPROF(flags) ? &size_profile : NULL, NULL, &opt);\n\n    if (it == NULL) {\n        DisplayLog(LVL_CRIT, REPORT_TAG,\n                   \"ERROR: Could not retrieve filesystem stats from database.\");\n        goto free_filter;\n    }\n\n    result_count = FSINFOCOUNT;\n\n    while ((rc = ListMgr_GetNextReportItem(it, result, &result_count,\n                                           SPROF(flags) ? &prof : NULL))\n           == DB_SUCCESS) {\n        if (result[1].value_u.val_biguint == 0) /* count=0 (don't display) */\n            display_report(fs_info, FSINFOCOUNT, NULL, result_count,\n                           SPROF(flags) ? &size_profile : NULL,\n                           SPROF(flags) ? 
&prof : NULL, CSV(flags),\n                           display_header, 0);\n        else\n            display_report(fs_info, FSINFOCOUNT, result, result_count,\n                           SPROF(flags) ? &size_profile : NULL,\n                           SPROF(flags) ? &prof : NULL, CSV(flags),\n                           display_header, 0);\n        display_header = false; /* just display it once */\n\n        total_count += result[1].value_u.val_biguint;\n        total_size += result[2].value_u.val_biguint;\n        total_used += result[3].value_u.val_biguint * DEV_BSIZE;\n\n        /* prepare next call */\n        result_count = FSINFOCOUNT;\n    }\n\n    ListMgr_CloseReport(it);\n\n    /* display summary */\n    if (!NOHEADER(flags)) {\n        char strsz[128];\n        char strus[128];\n\n        FormatFileSize(strsz, 128, total_size);\n        FormatFileSize(strus, 128, total_used);\n\n        printf(\"\\nTotal: %llu entries, volume: %llu bytes (%s), \"\n               \"space used: %llu bytes (%s)\\n\",\n               total_count, total_size, strsz, total_used, strus);\n    }\n free_filter:\n    if (is_filter)\n        lmgr_simple_filter_free(&filter);\n}\n\nstatic int report_entry(const char *entry, int flags)\n{\n    int rc;\n    entry_id_t id;\n    attr_set_t attrs;\n\n    /* try it as a fid */\n    if (sscanf(entry, SFID, RFID(&id)) != FID_SCAN_CNT) {\n        if ((rc = InitFS()) != 0)\n            fprintf(stderr,\n                    \"Warning: cannot access the filesystem to get entry id: %s\\n\",\n                    strerror(-rc));\n        /* try to continue anyway */\n\n        /* try it as a path */\n        if ((rc = Path2Id(entry, &id)) != 0) {\n            fprintf(stderr, \"Couldn't get id for %s: %s\\n,\", entry,\n                    strerror(-rc));\n            return rc;\n        }\n    }\n\n    /* try to get all attrs */\n    attrs.attr_mask.std = ~0;\n    attrs.attr_mask.status = ~0;\n    attrs.attr_mask.sm_info = ~0LL;\n\n    if 
(CSV(flags))\n        printf(\"id, \" DFID \"\\n\", PFID(&id));\n    else\n        printf(\"%-15s: \\t\" DFID \"\\n\", \"id\", PFID(&id));\n\n    if (ListMgr_Get(&lmgr, &id, &attrs) == DB_SUCCESS) {\n        int i, cookie;\n        char str[RBH_PATH_MAX];\n\n        cookie = -1;\n        while ((i = attr_index_iter(0, &cookie)) != -1) {\n            if (attr_mask_test_index(&attrs.attr_mask, i)) {\n                if (attrindex2len(i, CSV(flags)) != 1) {    /* for '?' */\n                    if (!CSV(flags))\n                        printf(\"%-15s: \\t%s\\n\", attrindex2name(i),\n                               attr2str(&attrs, &id, i, CSV(flags), NULL,\n                                        str, sizeof(str)));\n                    else\n                        printf(\"%s, %s\\n\", attrindex2name(i),\n                               attr2str(&attrs, &id, i, CSV(flags), NULL,\n                                        str, sizeof(str)));\n                }\n            }\n        }\n\n        ListMgr_FreeAttrs(&attrs);\n        return 0;\n    } else\n        return -1;\n}\n\nstatic inline void set_report_rec_nofilter(report_field_descr_t *ent,\n                                           int attr_index,\n                                           report_type_t report_type,\n                                           sort_order_t sort_flag)\n{\n    ent->attr_index = attr_index;\n    ent->report_type = report_type;\n    ent->sort_flag = sort_flag;\n    ent->filter = false;\n    ent->filter_compar = 0;\n}\n\nstatic void report_usergroup_info(char *name, int flags)\n{\n    unsigned int result_count;\n    struct lmgr_report_t *it;\n    lmgr_filter_t filter;\n    filter_value_t fv;\n    int rc;\n    unsigned int field_count = 0;\n    unsigned int head = 0;\n    bool is_filter = false;\n    bool display_header = !NOHEADER(flags);\n    unsigned long long total_size, total_used, total_count;\n    lmgr_iter_opt_t opt;\n#define USERINFOCOUNT_MAX 10\n    db_value_t 
result[USERINFOCOUNT_MAX];\n    profile_u prof;\n\n    total_size = total_used = total_count = 0;\n\n    /* To be retrieved for each user:\n     * - username\n     * - number of items of each type\n     * - SUM(blocks)\n     * - MIN/MAX/AVG size\n     */\n    report_field_descr_t user_info[USERINFOCOUNT_MAX];\n\n    head = 0;\n    /* user first, except if reporting groups */\n    if (!ISGROUP(flags)) {\n        set_report_rec_nofilter(&user_info[field_count], ATTR_INDEX_uid,\n                                REPORT_GROUP_BY,\n                                REVERSE(flags) ? SORT_DESC : SORT_ASC);\n        field_count++;\n        head++;\n    }\n    /* then add group if this is a group request or if the split is requested */\n    if (ISGROUP(flags) || ISSPLITUSERGROUP(flags)) {\n        set_report_rec_nofilter(&user_info[field_count], ATTR_INDEX_gid,\n                                REPORT_GROUP_BY,\n                                REVERSE(flags) ? SORT_DESC : SORT_ASC);\n        field_count++;\n        head++;\n\n        /* group report + split, add user after group */\n        if (ISGROUP(flags) && ISSPLITUSERGROUP(flags)) {\n            set_report_rec_nofilter(&user_info[field_count], ATTR_INDEX_uid,\n                                    REPORT_GROUP_BY,\n                                    REVERSE(flags) ? SORT_DESC : SORT_ASC);\n            field_count++;\n            head++;\n        }\n    }\n    /* split by project if requested */\n    if (ISSPLITUSERPROJ(flags)) {\n        set_report_rec_nofilter(&user_info[field_count], ATTR_INDEX_projid,\n                                REPORT_GROUP_BY,\n                                REVERSE(flags) ? SORT_DESC : SORT_ASC);\n        field_count++;\n        head++;\n    }\n\n    set_report_rec_nofilter(&user_info[field_count], ATTR_INDEX_type,\n                            REPORT_GROUP_BY,\n                            REVERSE(flags) ? 
SORT_DESC : SORT_ASC);\n    field_count++;\n    head++;\n\n    set_report_rec_nofilter(&user_info[field_count], ATTR_INDEX_COUNT,\n                            REPORT_COUNT, SORT_NONE);\n    if (count_min) {\n        user_info[field_count].filter = true;\n        user_info[field_count].filter_compar = MORETHAN;\n        user_info[field_count].filter_value.value.val_biguint = count_min;\n    }\n    field_count++;\n\n    /* for 'release'-capable systems, count sum(size) instead of sum(blocks)\n     * that might be zero */\n    set_report_rec_nofilter(&user_info[field_count], ATTR_INDEX_size,\n                            REPORT_SUM, SORT_NONE);\n    field_count++;\n    set_report_rec_nofilter(&user_info[field_count], ATTR_INDEX_blocks,\n                            REPORT_SUM, SORT_NONE);\n    field_count++;\n\n    set_report_rec_nofilter(&user_info[field_count], ATTR_INDEX_size,\n                            REPORT_MIN, SORT_NONE);\n    field_count++;\n    set_report_rec_nofilter(&user_info[field_count], ATTR_INDEX_size,\n                            REPORT_MAX, SORT_NONE);\n    field_count++;\n    set_report_rec_nofilter(&user_info[field_count], ATTR_INDEX_size,\n                            REPORT_AVG, SORT_NONE);\n    field_count++;\n\n    opt.force_no_acct = FORCE_NO_ACCT(flags);\n\n    /* no limit */\n    opt.list_count_max = 0;\n    /* skip missing entries */\n    opt.allow_no_attr = false;\n\n    if (name) {\n        lmgr_simple_filter_init(&filter);\n        is_filter = true;\n\n        if (ISGROUP(flags)) {\n            if (set_gid_val(name, &fv.value))\n                return;\n        } else {\n            if (set_uid_val(name, &fv.value))\n                return;\n        }\n\n        if (WILDCARDS_IN(name))\n            lmgr_simple_filter_add(&filter,\n                                   (ISGROUP(flags) ? 
ATTR_INDEX_gid :\n                                    ATTR_INDEX_uid), LIKE, fv, 0);\n        else\n            lmgr_simple_filter_add(&filter,\n                                   (ISGROUP(flags) ? ATTR_INDEX_gid :\n                                    ATTR_INDEX_uid), EQUAL, fv, 0);\n    }\n\n    /* append global filters */\n    if (mk_global_filters(&filter, !NOHEADER(flags), &is_filter) != 0) {\n        DisplayLog(LVL_CRIT, REPORT_TAG, \"ERROR: Failed to build filter\");\n        return;\n    }\n\n    it = ListMgr_Report(&lmgr, user_info, field_count,\n                        SPROF(flags) ? &size_profile : NULL,\n                        is_filter ? &filter : NULL, &opt);\n\n    if (is_filter)\n        lmgr_simple_filter_free(&filter);\n\n    if (it == NULL) {\n        DisplayLog(LVL_CRIT, REPORT_TAG,\n                   \"ERROR: Could not retrieve user stats from database.\");\n        return;\n    }\n\n    result_count = field_count;\n\n    while ((rc = ListMgr_GetNextReportItem(it, result, &result_count,\n                                           SPROF(flags) ? &prof : NULL)) ==\n           DB_SUCCESS) {\n        result_count = field_count;\n        display_report(user_info, result_count, result, result_count,\n                       SPROF(flags) ? &size_profile : NULL,\n                       SPROF(flags) ? 
&prof : NULL, CSV(flags), display_header,\n                       0);\n        display_header = false; /* just display it once */\n\n        total_count += result[head].value_u.val_biguint;\n        /* this is a sum(size) => keep it as is */\n        total_size += result[head + 1].value_u.val_biguint;\n        /* this is a block count => multiply by 512 to get the space in bytes */\n        total_used += (result[head + 2].value_u.val_biguint * DEV_BSIZE);\n    }\n\n    ListMgr_CloseReport(it);\n\n    /* display summary */\n    if (!NOHEADER(flags)) {\n        char strsz[128];\n        char strus[128];\n\n        FormatFileSize(strsz, 128, total_size);\n        FormatFileSize(strus, 128, total_used);\n\n        printf(\"\\nTotal: %llu entries, volume: %llu bytes (%s), \"\n               \"space used: %llu bytes (%s)\\n\",\n               total_count, total_size, strsz, total_used, strus);\n    }\n}\n\nstatic void report_topdirs(unsigned int count, int flags)\n{\n    /* To be retrieved for dirs:\n     * fullpath, owner, dircount, last_mod\n     * => sorted by dircount DESC\n     */\n    int rc, index;\n    attr_mask_t mask_sav;\n    lmgr_sort_type_t sorttype;\n    lmgr_filter_t filter;\n    filter_value_t fv;\n    lmgr_iter_opt_t opt;\n    struct lmgr_iterator_t *it;\n    attr_set_t attrs;\n    entry_id_t id;\n\n    unsigned int list[] = { ATTR_INDEX_fullpath,\n        ATTR_INDEX_dircount,\n        ATTR_INDEX_avgsize,\n        ATTR_INDEX_uid,\n        ATTR_INDEX_gid,\n        ATTR_INDEX_last_mod\n    };\n    int list_cnt = sizeof(list) / sizeof(*list);\n\n    /* select only directories */\n    lmgr_simple_filter_init(&filter);\n\n    /* This filter is implicit when sorting dirs by count */\n//    fv.value.val_str = STR_TYPE_DIR;\n//    lmgr_simple_filter_add( &filter, ATTR_INDEX_type, EQUAL, fv, 0 );\n\n    if (count_min) {\n        /* @TODO Not supported by ListMgr yet */\n        fv.value.val_biguint = count_min;\n        lmgr_simple_filter_add(&filter, 
ATTR_INDEX_dircount, MORETHAN, fv, 0);\n    }\n\n    /* append global filters */\n    if (mk_global_filters(&filter, !NOHEADER(flags), NULL) != 0) {\n        DisplayLog(LVL_CRIT, REPORT_TAG, \"ERROR: Failed to build filter\");\n        return;\n    }\n\n    if (SORT_BY_AVGSIZE(flags))\n        sorttype.attr_index = ATTR_INDEX_avgsize;\n    else if (!SORT_BY_SZRATIO(flags))\n        /* sort by count (default) */\n        /* default: order by dircount */\n        sorttype.attr_index = ATTR_INDEX_dircount;\n    else {\n        /* SORT_BY_SZRATIO? */\n        DisplayLog(LVL_MAJOR, REPORT_TAG,\n                   \"WARNING: sorting directories by size-ratio is not supported\");\n        sorttype.attr_index = ATTR_INDEX_dircount;  /* keep the default */\n    }\n\n    sorttype.order = REVERSE(flags) ? SORT_ASC : SORT_DESC;\n\n    /* select only the top dirs */\n    opt.list_count_max = count;\n    opt.force_no_acct = 0;\n    /* allow missing entries */\n    opt.allow_no_attr = 1;\n\n    ATTR_MASK_INIT(&attrs);\n    mask_sav = attrs.attr_mask = list2mask(list, list_cnt);\n\n    it = ListMgr_Iterator(&lmgr, &filter, &sorttype, &opt);\n\n    lmgr_simple_filter_free(&filter);\n\n    if (it == NULL) {\n        DisplayLog(LVL_CRIT, REPORT_TAG,\n                   \"ERROR: Could not retrieve top directories from database.\");\n        return;\n    }\n\n    if (!(NOHEADER(flags)))\n        print_attr_list(1, list, list_cnt, NULL, CSV(flags));\n\n    index = 0;\n    while ((rc = ListMgr_GetNext(it, &id, &attrs)) == DB_SUCCESS) {\n        index++;\n        /* resolv id for dir requests */\n        print_attr_values(index, list, list_cnt, &attrs, &id,\n                          CSV(flags), ResolvName);\n\n        ListMgr_FreeAttrs(&attrs);\n\n        /* prepare next call */\n        attrs.attr_mask = mask_sav;\n    }\n    ListMgr_CloseIterator(it);\n}\n\nstatic void report_topsize(unsigned int count, int flags)\n{\n    /* To be retrieved for files\n     * fullpath, owner, size, 
stripe_info, last_access, last_mod\n     * => sorted by size DESC\n     */\n    int rc, index;\n    attr_mask_t mask_sav;\n    lmgr_sort_type_t sorttype;\n    lmgr_filter_t filter;\n    filter_value_t fv;\n    lmgr_iter_opt_t opt;\n    struct lmgr_iterator_t *it;\n    attr_set_t attrs;\n    entry_id_t id;\n\n    unsigned int list[] = { ATTR_INDEX_fullpath,\n        ATTR_INDEX_size,\n        ATTR_INDEX_uid,\n        ATTR_INDEX_gid,\n        ATTR_INDEX_last_access,\n        ATTR_INDEX_last_mod,\n        ATTR_INDEX_fileclass,\n        ATTR_INDEX_stripe_info,\n        ATTR_INDEX_stripe_items\n    };\n    int list_cnt = sizeof(list) / sizeof(*list);\n\n    /* select only files */\n    fv.value.val_str = STR_TYPE_FILE;\n    lmgr_simple_filter_init(&filter);\n    lmgr_simple_filter_add(&filter, ATTR_INDEX_type, EQUAL, fv, 0);\n\n    /* append global filters */\n    if (mk_global_filters(&filter, !NOHEADER(flags), NULL) != 0) {\n        DisplayLog(LVL_CRIT, REPORT_TAG, \"ERROR: Failed to build filter\");\n        return;\n    }\n\n    /* order by size desc */\n    sorttype.attr_index = ATTR_INDEX_size;\n    sorttype.order = REVERSE(flags) ? 
SORT_ASC : SORT_DESC;\n\n    /* select only the top size */\n    opt.list_count_max = count;\n    opt.force_no_acct = 0;\n    /* skip missing entries */\n    opt.allow_no_attr = 0;\n\n    ATTR_MASK_INIT(&attrs);\n    mask_sav = attrs.attr_mask = list2mask(list, list_cnt);\n\n    it = ListMgr_Iterator(&lmgr, &filter, &sorttype, &opt);\n\n    lmgr_simple_filter_free(&filter);\n\n    if (it == NULL) {\n        DisplayLog(LVL_CRIT, REPORT_TAG,\n                   \"ERROR: Could not retrieve top file size from database.\");\n        return;\n    }\n\n    if (!(NOHEADER(flags)))\n        print_attr_list(1, list, list_cnt, NULL, CSV(flags));\n\n    index = 0;\n    while ((rc = ListMgr_GetNext(it, &id, &attrs)) == DB_SUCCESS) {\n        index++;\n        print_attr_values(index, list, list_cnt, &attrs, &id, CSV(flags), NULL);\n\n        ListMgr_FreeAttrs(&attrs);\n        /* prepare next call */\n        attrs.attr_mask = mask_sav;\n    }\n\n    ListMgr_CloseIterator(it);\n}\n\nstatic void report_oldest(obj_type_t type, unsigned int count, int flags)\n{\n    int rc, index;\n    attr_mask_t mask_sav;\n    lmgr_sort_type_t sorttype;\n    lmgr_filter_t filter;\n    filter_value_t fv;\n    lmgr_iter_opt_t opt;\n    struct lmgr_iterator_t *it;\n    attr_set_t attrs;\n    entry_id_t id;\n\n    unsigned int list_files[] = {\n        ATTR_INDEX_fullpath,\n        ATTR_INDEX_uid,\n        ATTR_INDEX_gid,\n        ATTR_INDEX_last_access,\n        ATTR_INDEX_last_mod,\n        ATTR_INDEX_size,\n        ATTR_INDEX_blocks,\n        ATTR_INDEX_stripe_info,\n        ATTR_INDEX_stripe_items\n    };\n\n    unsigned int list_dirs[] = {\n        ATTR_INDEX_fullpath,\n        ATTR_INDEX_uid,\n        ATTR_INDEX_gid,\n        ATTR_INDEX_last_mod\n    };\n\n    unsigned int *list = NULL;\n    int list_cnt;\n\n    lmgr_simple_filter_init(&filter);\n\n    if (type == TYPE_DIR) {\n        list = list_dirs;\n        list_cnt = sizeof(list_dirs) / sizeof(int);\n\n        /* only consider empty 
directories */\n        fv.value.val_uint = 0;\n        lmgr_simple_filter_add(&filter, ATTR_INDEX_dircount, EQUAL, fv, 0);\n\n        fv.value.val_str = STR_TYPE_DIR;\n        sorttype.attr_index = ATTR_INDEX_last_mod;\n    } else {\n        list = list_files;\n        list_cnt = sizeof(list_files) / sizeof(int);\n        fv.value.val_str = STR_TYPE_FILE;\n        sorttype.attr_index = ATTR_INDEX_last_access;\n    }\n\n    /* select only the requested type */\n    lmgr_simple_filter_add(&filter, ATTR_INDEX_type, EQUAL, fv, 0);\n\n    /* append global filters */\n    if (mk_global_filters(&filter, !NOHEADER(flags), NULL) != 0) {\n        DisplayLog(LVL_CRIT, REPORT_TAG, \"ERROR: Failed to build filter\");\n        return;\n    }\n\n#ifndef _HAVE_FID\n#ifdef ATTR_INDEX_invalid\n    /* select only non invalid */\n    fv.value.val_bool = true;\n    lmgr_simple_filter_add(&filter, ATTR_INDEX_invalid, NOTEQUAL, fv, 0);\n#endif\n#endif\n\n    sorttype.order = REVERSE(flags) ? SORT_DESC : SORT_ASC;\n\n    /* select only the top count */\n    opt.list_count_max = count;\n    opt.force_no_acct = 0;\n    /* skip missing entries */\n    opt.allow_no_attr = 0;\n\n    ATTR_MASK_INIT(&attrs);\n    mask_sav = attrs.attr_mask = list2mask(list, list_cnt);\n\n    it = ListMgr_Iterator(&lmgr, &filter, &sorttype, &opt);\n\n    lmgr_simple_filter_free(&filter);\n\n    if (it == NULL) {\n        DisplayLog(LVL_CRIT, REPORT_TAG,\n                   \"ERROR: Could not retrieve top purge list from database.\");\n        return;\n    }\n\n    if (!(NOHEADER(flags)))\n        print_attr_list(1, list, list_cnt, NULL, CSV(flags));\n\n    index = 0;\n    while ((rc = ListMgr_GetNext(it, &id, &attrs)) == DB_SUCCESS) {\n        index++;\n\n        print_attr_values(index, list, list_cnt, &attrs, &id, CSV(flags), NULL);\n        ListMgr_FreeAttrs(&attrs);\n\n        /* prepare next call */\n        attrs.attr_mask = mask_sav;\n    }\n\n    ListMgr_CloseIterator(it);\n}\n\nstatic void 
report_topuser(unsigned int count, int flags)\n{\n    unsigned int result_count;\n    struct lmgr_report_t *it;\n    lmgr_iter_opt_t opt;\n    int rc;\n    unsigned int rank = 1;\n    lmgr_filter_t filter;\n    filter_value_t fv;\n    bool is_filter_init = false;\n    profile_u prof;\n\n#define TOPUSERCOUNT 7\n\n    db_value_t result[TOPUSERCOUNT];\n\n    /* To be retrieved for each user:\n     * - username\n     * - SUM(blocks)\n     * - NB entries\n     * - MIN/MAX/AVG size\n     */\n    report_field_descr_t user_info[TOPUSERCOUNT] = {\n        {ATTR_INDEX_uid, REPORT_GROUP_BY, SORT_NONE, false, 0, FV_NULL},\n        /* display the total size in HSM (not only the disk level) */\n        {ATTR_INDEX_size, REPORT_SUM, SORT_DESC, false, 0, FV_NULL},\n        {ATTR_INDEX_blocks, REPORT_SUM, SORT_NONE, false, 0, FV_NULL},\n        {ATTR_INDEX_COUNT, REPORT_COUNT, SORT_NONE, false, 0, FV_NULL},\n        {ATTR_INDEX_size, REPORT_MIN, SORT_NONE, false, 0, FV_NULL},\n        {ATTR_INDEX_size, REPORT_MAX, SORT_NONE, false, 0, FV_NULL},\n        {ATTR_INDEX_size, REPORT_AVG, SORT_NONE, false, 0, FV_NULL},\n    };\n\n    if (REVERSE(flags))\n        user_info[1].sort_flag = SORT_ASC;\n\n    if (SORT_BY_COUNT(flags)) {\n        /* replace sort on blocks by sort on count */\n        user_info[1].sort_flag = SORT_NONE;\n        user_info[3].sort_flag = REVERSE(flags) ? SORT_ASC : SORT_DESC;\n    } else if (SORT_BY_AVGSIZE(flags)) {\n        /* sort (big files first) */\n        user_info[1].sort_flag = SORT_NONE;\n        user_info[6].sort_flag = REVERSE(flags) ? SORT_ASC : SORT_DESC;\n    }\n\n    if (!SORT_BY_COUNT(flags)) {\n        /* Because we consider that only relevant files should be displayed\n         * for size-based reports (i.e. 
not links or directories)\n         * we apply a filter to only select files.\n         * Note that this behaviour can be confusing, regarding how we want to\n         * sort, the filecount will be different.*/\n        lmgr_simple_filter_init(&filter);\n\n        fv.value.val_str = STR_TYPE_FILE;\n        lmgr_simple_filter_add(&filter, ATTR_INDEX_type, EQUAL, fv, 0);\n        is_filter_init = true;\n    }\n\n    if (mk_global_filters(&filter, !NOHEADER(flags), &is_filter_init) != 0) {\n        DisplayLog(LVL_CRIT, REPORT_TAG, \"ERROR: Failed to build filter\");\n        return;\n    }\n\n    if (count_min) {\n        user_info[3].filter = true;\n        user_info[3].filter_compar = MORETHAN;\n        user_info[3].filter_value.value.val_biguint = count_min;\n    }\n\n    /* select only the top users */\n    opt.list_count_max = count;\n    /* skip missing entries */\n    opt.allow_no_attr = 0;\n    opt.force_no_acct = FORCE_NO_ACCT(flags);\n\n    it = ListMgr_Report(&lmgr, user_info, TOPUSERCOUNT,\n                        SPROF(flags) ? &size_profile : NULL,\n                        is_filter_init ? &filter : NULL,\n                        &opt);\n    if (it == NULL) {\n        DisplayLog(LVL_CRIT, REPORT_TAG,\n                   \"ERROR: Could not retrieve top space consumers from database.\");\n        return;\n    }\n\n    result_count = TOPUSERCOUNT;\n    while ((rc = ListMgr_GetNextReportItem(it, result, &result_count,\n                                           SPROF(flags) ? &prof : NULL)) ==\n           DB_SUCCESS) {\n        display_report(user_info, result_count, result, result_count,\n                       SPROF(flags) ? &size_profile : NULL,\n                       SPROF(flags) ? 
&prof : NULL,\n                       CSV(flags), (rank == 1) && !NOHEADER(flags), rank);\n\n        rank++;\n\n        /* prepare next call */\n        result_count = TOPUSERCOUNT;\n\n    }\n\n    ListMgr_CloseReport(it);\n}\n\nstatic void report_deferred_rm(int flags)\n{\n    int rc;\n    struct lmgr_rm_list_t *rmlist;\n    entry_id_t id;\n    attr_set_t attrs = ATTR_SET_INIT;\n\n    unsigned long long total_count = 0;\n    unsigned long long total_size = 0;\n\n    lmgr_filter_t filter;\n    bool is_filter = false;\n\n    lmgr_sort_type_t sort;\n\n    static unsigned int list[] = {\n        ATTR_INDEX_rm_time,\n        ATTR_INDEX_ID,  /* id */\n        ATTR_INDEX_type,\n        ATTR_INDEX_uid,\n        ATTR_INDEX_gid,\n        ATTR_INDEX_size,\n        ATTR_INDEX_last_mod,\n        ATTR_INDEX_fullpath\n    };\n    int list_cnt = sizeof(list) / sizeof(*list);\n\n    lmgr_simple_filter_init(&filter);\n\n    /* append global filters */\n    if (mk_global_filters(&filter, !NOHEADER(flags), &is_filter) != 0) {\n        DisplayLog(LVL_CRIT, REPORT_TAG, \"ERROR: Failed to build filter\");\n        return;\n    }\n\n    /* order by rmtime asc */\n    sort.attr_index = ATTR_INDEX_rm_time;\n    sort.order = REVERSE(flags) ? SORT_DESC : SORT_ASC;\n\n    rmlist = ListMgr_RmList(&lmgr, is_filter ? 
&filter : NULL, &sort);\n\n    lmgr_simple_filter_free(&filter);\n\n    if (rmlist == NULL) {\n        DisplayLog(LVL_CRIT, REPORT_TAG,\n                   \"ERROR: Could not retrieve removed entries from database.\");\n        return;\n    }\n\n    if (!NOHEADER(flags))\n        print_attr_list(0, list, list_cnt, NULL, CSV(flags));\n\n    while ((rc = ListMgr_GetNextRmEntry(rmlist, &id, &attrs)) == DB_SUCCESS) {\n        total_count++;\n        if (ATTR_MASK_TEST(&attrs, size))\n            total_size += ATTR(&attrs, size);\n\n        print_attr_values(0, list, list_cnt, &attrs, &id, CSV(flags),\n                          ResolvName);\n\n        /* prepare next call */\n        ListMgr_FreeAttrs(&attrs);\n        memset(&attrs, 0, sizeof(attrs));\n    }\n\n    ListMgr_CloseRmList(rmlist);\n\n    /* display summary */\n    if (!NOHEADER(flags)) {\n        char strsz[128];\n\n        FormatFileSize(strsz, sizeof(strsz), total_size);\n\n        printf(\"\\nTotal: %llu entries, %llu bytes (%s)\\n\",\n               total_count, total_size, strsz);\n    }\n}\n\nstatic void report_groupby(int attr_index, int flags)\n{\n#define GROUPBY_FIELDS 7\n    db_value_t result[GROUPBY_FIELDS];\n\n    struct lmgr_report_t *it;\n    lmgr_filter_t filter;\n    int rc;\n    bool header;\n    unsigned int result_count;\n    profile_u prof;\n    bool is_filter = false;\n\n    unsigned long long total_size, total_count, total_used;\n    total_size = total_count = total_used = 0;\n\n    /* To be retrieved for each group:\n     * - attribute value\n     * - NB entries\n     * - SUM(blocks)\n     * - MIN/MAX/AVG file size\n     */\n    report_field_descr_t info[GROUPBY_FIELDS] = {\n        {attr_index, REPORT_GROUP_BY, SORT_ASC, false, 0, FV_NULL},\n        {ATTR_INDEX_COUNT, REPORT_COUNT, SORT_NONE, false, 0, FV_NULL},\n        {ATTR_INDEX_size, REPORT_SUM, SORT_NONE, false, 0, FV_NULL},\n        {ATTR_INDEX_blocks, REPORT_SUM, SORT_NONE, false, 0, FV_NULL},\n        {ATTR_INDEX_size, 
REPORT_MIN, SORT_NONE, false, 0, FV_NULL},\n        {ATTR_INDEX_size, REPORT_MAX, SORT_NONE, false, 0, FV_NULL},\n        {ATTR_INDEX_size, REPORT_AVG, SORT_NONE, false, 0, FV_NULL},\n    };\n\n    lmgr_simple_filter_init(&filter);\n    if (mk_global_filters(&filter, !NOHEADER(flags), &is_filter) != 0) {\n        DisplayLog(LVL_CRIT, REPORT_TAG, \"ERROR: Failed to build filter\");\n        return;\n    }\n\n    result_count = GROUPBY_FIELDS;\n    it = ListMgr_Report(&lmgr, info, GROUPBY_FIELDS,\n                        SPROF(flags) ? &size_profile : NULL,\n                        is_filter ? &filter : NULL, NULL);\n\n    if (it == NULL) {\n        DisplayLog(LVL_CRIT, REPORT_TAG,\n                   \"ERROR: Could not retrieve class information from database.\");\n        return;\n    }\n\n    header = !NOHEADER(flags);\n\n    result_count = GROUPBY_FIELDS;\n    while ((rc =\n            ListMgr_GetNextReportItem(it, result, &result_count,\n                                      SPROF(flags) ? &prof : NULL))\n           == DB_SUCCESS) {\n        display_report(info, result_count, result, result_count,\n                       SPROF(flags) ? &size_profile : NULL,\n                       SPROF(flags) ? 
&prof : NULL, CSV(flags), header, 0);\n        header = false; /* display header once */\n\n        total_count += result[1].value_u.val_biguint;\n        total_size += result[2].value_u.val_biguint;\n        total_used += result[3].value_u.val_biguint * DEV_BSIZE;\n        result_count = GROUPBY_FIELDS;\n    }\n\n    ListMgr_CloseReport(it);\n    lmgr_simple_filter_free(&filter);\n\n    /* display summary */\n    if (!NOHEADER(flags)) {\n        char strsz[128];\n        char strus[128];\n\n        FormatFileSize(strsz, 128, total_size);\n        FormatFileSize(strus, 128, total_used);\n\n        printf(\"\\nTotal: %llu entries, volume: %llu bytes (%s), \"\n               \"space used: %llu bytes (%s)\\n\",\n               total_count, total_size, strsz, total_used, strus);\n    }\n}\n\nstatic void report_status_info(int smi_index, const char *val, int flags)\n{\n#define STATUSINFO_FIELDS 8\n    db_value_t result[STATUSINFO_FIELDS];\n\n    struct lmgr_report_t *it;\n    lmgr_filter_t filter;\n    lmgr_iter_opt_t opt;\n    int rc;\n    bool header;\n    unsigned int result_count;\n    profile_u prof;\n    bool is_filter = false;\n\n    unsigned long long total_size, total_count, total_used;\n    total_size = total_count = total_used = 0;\n\n    /* To be retrieved for each group:\n     * - status names and status\n     * - NB entries\n     * - SUM(blocks)\n     * - MIN/MAX/AVG file size\n     */\n    report_field_descr_t status_info[STATUSINFO_FIELDS] = {\n        {ATTR_INDEX_FLG_STATUS | smi_index, REPORT_GROUP_BY, SORT_ASC,\n         false, 0, FV_NULL},\n        {ATTR_INDEX_type, REPORT_GROUP_BY, SORT_ASC, false, 0, FV_NULL},\n        {ATTR_INDEX_COUNT, REPORT_COUNT, SORT_NONE, false, 0, FV_NULL},\n        {ATTR_INDEX_size, REPORT_SUM, SORT_NONE, false, 0, FV_NULL},\n        {ATTR_INDEX_blocks, REPORT_SUM, SORT_NONE, false, 0, FV_NULL},\n        {ATTR_INDEX_size, REPORT_MIN, SORT_NONE, false, 0, FV_NULL},\n        {ATTR_INDEX_size, REPORT_MAX, SORT_NONE, false, 0, 
FV_NULL},\n        {ATTR_INDEX_size, REPORT_AVG, SORT_NONE, false, 0, FV_NULL},\n    };\n\n    if (count_min) {\n        status_info[2].filter = true;\n        status_info[2].filter_compar = MORETHAN;\n        status_info[2].filter_value.value.val_biguint = count_min;\n    }\n\n    lmgr_simple_filter_init(&filter);\n\n    if (val != NULL) {\n        filter_value_t fv;\n\n        fv.value.val_str = val;\n        lmgr_simple_filter_add(&filter, ATTR_INDEX_FLG_STATUS | smi_index,\n                               EQUAL, fv,\n                               EMPTY_STRING(val) ? FILTER_FLAG_ALLOW_NULL : 0);\n        is_filter = true;\n    }\n\n    if (mk_global_filters(&filter, !NOHEADER(flags), &is_filter) != 0) {\n        DisplayLog(LVL_CRIT, REPORT_TAG, \"ERROR: Failed to build filter\");\n        return;\n    }\n    result_count = STATUSINFO_FIELDS;\n\n    opt.force_no_acct = FORCE_NO_ACCT(flags);\n    /* no limit */\n    opt.list_count_max = 0;\n    /* skip missing entries */\n    opt.allow_no_attr = false;\n\n    /* @TODO add filter on status, if a value is specified */\n\n    it = ListMgr_Report(&lmgr, status_info, STATUSINFO_FIELDS,\n                        SPROF(flags) ? &size_profile : NULL,\n                        is_filter ? &filter : NULL, &opt);\n    if (it == NULL) {\n        DisplayLog(LVL_CRIT, REPORT_TAG,\n                   \"ERROR: Could not retrieve status information from database.\");\n        return;\n    }\n\n    /* a single status column (release), can print as is */\n    header = !NOHEADER(flags);\n\n    result_count = STATUSINFO_FIELDS;\n    while ((rc =\n            ListMgr_GetNextReportItem(it, result, &result_count,\n                                      SPROF(flags) ? &prof : NULL))\n           == DB_SUCCESS) {\n        display_report(status_info, result_count, result, result_count,\n                       SPROF(flags) ? &size_profile : NULL,\n                       SPROF(flags) ? 
&prof : NULL, CSV(flags), header, 0);\n        header = false; /* display header once */\n\n        total_count += result[2].value_u.val_biguint;\n        total_size += result[3].value_u.val_biguint;\n        total_used += result[4].value_u.val_biguint * DEV_BSIZE;\n        result_count = STATUSINFO_FIELDS;\n    }\n\n    ListMgr_CloseReport(it);\n    lmgr_simple_filter_free(&filter);\n\n    /* display summary */\n    if (!NOHEADER(flags)) {\n        char strsz[128];\n        char strus[128];\n\n        FormatFileSize(strsz, 128, total_size);\n        FormatFileSize(strus, 128, total_used);\n\n        printf(\"\\nTotal: %llu entries, volume: %llu bytes (%s), \"\n               \"space used: %llu bytes (%s)\\n\",\n               total_count, total_size, strsz, total_used, strus);\n    }\n}\n\nstatic void maintenance_get(int flags)\n{\n    char value[1024];\n    time_t timestamp;\n    char date[128];\n    struct tm t;\n    int rc;\n\n    rc = ListMgr_GetVar(&lmgr, NEXT_MAINT_VAR, value, sizeof(value));\n    if (rc == DB_SUCCESS) {\n        timestamp = atoi(value);\n        strftime(date, 128, \"%Y/%m/%d %T\", localtime_r(&timestamp, &t));\n        if (time(NULL) >= timestamp) {\n            if (CSV(flags))\n                printf(\"next_maintenance, %s (in the past: no effect)\\n\", date);\n            else\n                printf(\"Next maintenance: %s (in the past: no effect)\\n\", date);\n        } else {\n            if (CSV(flags))\n                printf(\"next_maintenance, %s\\n\", date);\n            else\n                printf(\"Next maintenance: %s\\n\", date);\n        }\n    } else if (rc == DB_NOT_EXISTS) {\n        if (CSV(flags))\n            printf(\"next_maintenance, none\\n\");\n        else\n            printf(\"No maintenance is planned\\n\");\n    } else {\n        DisplayLog(LVL_CRIT, REPORT_TAG,\n                   \"ERROR retrieving variable \" NEXT_MAINT_VAR\n                   \" from database\");\n    }\n}\n\nstatic void maintenance_set(int 
flags, time_t when)\n{\n    char value[1024];\n    int rc;\n\n    if (when == 0) {\n        rc = ListMgr_SetVar(&lmgr, NEXT_MAINT_VAR, NULL);\n\n        if (rc)\n            DisplayLog(LVL_CRIT, REPORT_TAG,\n                       \"ERROR deleting variable \" NEXT_MAINT_VAR\n                       \" in database\");\n        else\n            DisplayLog(LVL_EVENT, REPORT_TAG,\n                       \"Next maintenance time has been cleared successfully\");\n\n        return;\n    }\n\n    sprintf(value, \"%u\", (unsigned int)when);\n\n    rc = ListMgr_SetVar(&lmgr, NEXT_MAINT_VAR, value);\n    if (rc == DB_SUCCESS) {\n        DisplayLog(LVL_EVENT, REPORT_TAG,\n                   \"Next maintenance time has been set successfully\");\n    } else {\n        DisplayLog(LVL_CRIT, REPORT_TAG,\n                   \"ERROR setting variable \" NEXT_MAINT_VAR \" in database\");\n    }\n}\n\n#define MAX_OPT_LEN 1024\n\n/**\n * Main daemon routine\n */\nint main(int argc, char **argv)\n{\n    int c, option_index = 0;\n    const char *bin;\n\n    char config_file[MAX_OPT_LEN] = \"\";\n\n    bool activity = false;\n    bool fs_info = false;\n\n    bool entry_info = false;\n    char entry_path[RBH_PATH_MAX] = \"\";\n\n    bool user_info = false;\n    char user_name[256] = \"\";\n\n    bool group_info = false;\n    char group_name[256] = \"\";\n\n    bool project_info = false; /* use projid */\n\n    bool class_info = false; /* use class filter */\n\n    int topdirs = 0;\n    int topsize = 0;\n    int old_files = 0;\n    int old_dirs = 0;\n    int topuser = 0;\n    int deferred_rm = 0;\n\n    bool dump_all = false;\n    bool dump_user = false;\n    char dump_user_name[256];\n    bool dump_group = false;\n    char dump_group_name[256];\n#ifdef _LUSTRE\n    bool dump_ost = false;\n    value_list_t dump_ost_set = { 0, NULL };\n    char ost_set_str[256] = \"\";\n#endif\n    char *status_name = NULL;\n    char *status_value = NULL;\n\n    char *status_info_name = NULL;\n    char 
*status_info_value = NULL;\n\n    time_t next_maint = 0;\n    bool get_next_maint = false;\n    bool cancel_next_maint = false;\n\n    int flags = 0;\n    int rc;\n    char err_msg[4096];\n    bool chgd = false;\n    char badcfg[RBH_PATH_MAX];\n\n    bin = rh_basename(argv[0]); /* supports NULL argument */\n\n    /* parse command line options */\n    while ((c =\n            getopt_long(argc, argv, SHORT_OPT_STRING, option_tab,\n                        &option_index)) != -1) {\n        switch (c) {\n        case 'a':\n            activity = true;\n            break;\n\n        case 'P':\n            if (!optarg) {\n                fprintf(stderr,\n                        \"Missing mandatory argument <path> for --filter-path\\n\");\n                exit(1);\n            }\n            rh_strncpy(path_filter, optarg, RBH_PATH_MAX);\n            break;\n\n        case 'C':\n            if (!optarg) {\n                fprintf(stderr,\n                        \"Missing mandatory argument <class> for --filter-class\\n\");\n                exit(1);\n            }\n            if (class_info && !EMPTY_STRING(class_filter))\n                fprintf(stderr,\n                        \"WARNING: --filter-class conflicts with --class-info parameter. 
ignored.\\n\");\n            else\n                rh_strncpy(class_filter, optarg, 1024);\n            break;\n\n        case 'p':\n            if (!optarg) {\n                fprintf(stderr,\n                        \"Missing mandatory argument <class> for --filter-project\\n\");\n                exit(1);\n            }\n            projid = atoi(optarg);\n            break;\n\n        case OPT_CLASS_INFO:\n            if (class_info)\n                fprintf(stderr,\n                        \"WARNING: --class-info parameter already specified on command line.\\n\");\n\n            class_info = true;\n            if (optarg) {\n                if (!EMPTY_STRING(class_filter))\n                    fprintf(stderr,\n                            \"WARNING: --class-info conflicts with --filter-class parameter. overriding filter.\\n\");\n                rh_strncpy(class_filter, optarg, 1024);\n            }\n            break;\n\n        case OPT_PROJECT_INFO:\n            if (project_info)\n                fprintf(stderr,\n                        \"WARNING: --project-info parameter already specified on command line.\\n\");\n\n            project_info = true;\n            if (optarg) {\n                if (projid != 0)\n                    fprintf(stderr,\n                            \"WARNING: --project-info conflicts with --filter-project parameter. 
overriding filter.\\n\");\n                projid = atoi(optarg);\n            }\n            break;\n\n        case OPT_STATUS_INFO:\n            if (status_info_name)\n                fprintf(stderr,\n                        \"WARNING: --status-info parameter already specified on command line.\\n\");\n            rc = parse_status_arg(\"--status-info\", optarg, &status_info_name,\n                                  &status_info_value, false);\n            if (rc)\n                exit(rc);\n            break;\n\n        case 'i':\n            fs_info = true;\n            break;\n\n        case 'e':\n            entry_info = true;\n            rh_strncpy(entry_path, optarg, RBH_PATH_MAX);\n            break;\n\n        case 'u':\n            user_info = true;\n            if (optarg)\n                rh_strncpy(user_name, optarg, 256);\n            break;\n\n        case 'g':\n            group_info = true;\n            if (optarg)\n                rh_strncpy(group_name, optarg, 256);\n            break;\n\n        case 'D':\n            dump_all = true;\n            break;\n\n        case OPT_DUMP_USER:\n            dump_user = true;\n            if (!optarg) {\n                fprintf(stderr,\n                        \"Missing mandatory argument <username> for --dump-user\\n\");\n                exit(1);\n            }\n            rh_strncpy(dump_user_name, optarg, 256);\n            break;\n\n        case OPT_DUMP_GROUP:\n            dump_group = true;\n            if (!optarg) {\n                fprintf(stderr,\n                        \"Missing mandatory argument <groupname> for --dump-group\\n\");\n                exit(1);\n            }\n            rh_strncpy(dump_group_name, optarg, 256);\n            break;\n\n#ifdef _LUSTRE\n        case OPT_DUMP_OST:\n            dump_ost = true;\n            if (!optarg) {\n                fprintf(stderr,\n                        \"Missing mandatory argument <ost_index|ost_set> for --dump-ost\\n\");\n                
exit(1);\n            }\n            /* parse it as a set */\n            if (lmgr_range2list(optarg, DB_UINT, &dump_ost_set)) {\n                fprintf(stderr,\n                        \"Invalid value '%s' for --dump-ost option: integer or set expected (e.g. 2 or 3,5-8,10-12).\\n\",\n                        optarg);\n                exit(1);\n            }\n            /* copy arg to display it */\n            rh_strncpy(ost_set_str, optarg, sizeof(ost_set_str));\n            break;\n#endif\n\n        case OPT_DUMP_STATUS:\n            rc = parse_status_arg(\"--dump-status\", optarg, &status_name,\n                                  &status_value, true);\n            if (rc)\n                exit(rc);\n            break;\n\n        case 'd':\n            if (optarg) {\n                topdirs = str2int(optarg);\n                if (topdirs == -1) {\n                    fprintf(stderr,\n                            \"Invalid parameter '%s' for --topdirs option: positive integer expected\\n\",\n                            optarg);\n                    exit(1);\n                }\n            } else\n                topdirs = DEFAULT_TOP_SIZE;\n            break;\n\n        case 's':\n            if (optarg) {\n                topsize = str2int(optarg);\n                if (topsize == -1) {\n                    fprintf(stderr,\n                            \"Invalid parameter '%s' for --topsize option: positive integer expected\\n\",\n                            optarg);\n                    exit(1);\n                }\n            } else\n                topsize = DEFAULT_TOP_SIZE;\n            break;\n\n        case 'o':\n            if (optarg) {\n                old_files = str2int(optarg);\n                if (old_files == -1) {\n                    fprintf(stderr,\n                            \"Invalid parameter '%s' for --oldest-files option: positive integer expected\\n\",\n                            optarg);\n                    exit(1);\n                }\n  
          } else\n                old_files = DEFAULT_TOP_SIZE;\n            break;\n\n        case 'O':\n            if (optarg) {\n                old_dirs = str2int(optarg);\n                if (old_dirs == -1) {\n                    fprintf(stderr,\n                            \"Invalid parameter '%s' for --oldest-empty-dirs option: positive integer expected\\n\",\n                            optarg);\n                    exit(1);\n                }\n            } else\n                old_dirs = DEFAULT_TOP_SIZE;\n            break;\n\n        case 'U':\n            if (optarg) {\n                topuser = str2int(optarg);\n                if (topuser == -1) {\n                    fprintf(stderr,\n                            \"Invalid parameter '%s' for --topusers option: positive integer expected\\n\",\n                            optarg);\n                    exit(1);\n                }\n            } else\n                topuser = DEFAULT_TOP_SIZE;\n            break;\n\n        case 'R':\n            deferred_rm = true;\n            break;\n\n        case CLEAR_NEXT_MAINT:\n            cancel_next_maint = true;\n            get_next_maint = true;\n            break;\n        case SET_NEXT_MAINT:\n            if (optarg) {   /* optional argument */\n                /* parse date/time yyyymmddHHMM[SS] */\n                next_maint = str2date(optarg);\n                if (next_maint == (time_t)-1) {\n                    fprintf(stderr,\n                            \"Invalid date format: yyyymmdd[HH[MM[SS]]] expected\\n\");\n                    exit(1);\n                }\n            }\n            /* in all cases, display next maintenance time */\n            get_next_maint = true;\n            break;\n\n        case 'f':\n            rh_strncpy(config_file, optarg, MAX_OPT_LEN);\n            break;\n        case 'l':\n        {\n            int log_level = str2debuglevel(optarg);\n\n            if (log_level == -1) {\n                fprintf(stderr,\n     
                   \"Unsupported log level '%s'. CRIT, MAJOR, EVENT, VERB, DEBUG or FULL expected.\\n\",\n                        optarg);\n                exit(1);\n            }\n            force_debug_level(log_level);\n            break;\n        }\n        case 'r':\n            flags |= OPT_FLAG_REVERSE;\n            break;\n        case 'c':\n            flags |= OPT_FLAG_CSV;\n            break;\n        case 'q':\n            flags |= OPT_FLAG_NOHEADER;\n            break;\n        case 'h':\n            display_help(bin);\n            exit(0);\n            break;\n        case 'V':\n            display_version(bin);\n            exit(0);\n            break;\n        case 'F':\n            flags |= OPT_FLAG_NO_ACCT;\n            break;\n        case 'S':\n            flags |= OPT_FLAG_SPLITUSERGROUP;\n            break;\n        case 'J':\n            flags |= OPT_FLAG_SPLITUSERPROJ;\n            break;\n        case OPT_BY_COUNT:\n            flags |= OPT_FLAG_BY_COUNT;\n            break;\n        case OPT_BY_AVGSIZE:\n            flags |= OPT_FLAG_BY_AVGSIZE;\n            break;\n        case OPT_BY_SZ_RATIO:\n            flags |= OPT_FLAG_BY_SZRATIO;\n            /* auto-enable size profiling */\n            flags |= OPT_FLAG_SPROF;\n            /* parse range */\n            if (parse_size_range(optarg, &size_profile))\n                exit(1);\n            break;\n        case OPT_COUNT_MIN:\n            count_min = atoi(optarg);\n            break;\n\n        case OPT_SIZE_PROFILE:\n            flags |= OPT_FLAG_SPROF;\n            break;\n\n        case ':':\n        case '?':\n        default:\n            display_help(bin);\n            exit(1);\n            break;\n        }\n    }\n\n    /* check there is no extra arguments */\n    if (optind != argc) {\n        fprintf(stderr, \"Error: unexpected argument on command line: %s\\n\",\n                argv[optind]);\n        exit(1);\n    }\n\n    /* if a size range was specified, determine sort 
order:\n       default DESC, ASC for reverse */\n    if (size_profile.range_ratio_len > 0)\n        size_profile.range_ratio_sort = REVERSE(flags) ? SORT_ASC : SORT_DESC;\n\n    if (!activity && !fs_info && !user_info && !group_info\n        && !topsize && !topuser && !dump_all && !dump_user\n        && !dump_group && !class_info && !entry_info && !project_info\n        && (status_name == NULL) && (status_info_name == NULL)\n        && !topdirs && !deferred_rm && !old_dirs && !old_files\n#ifdef _LUSTRE\n        && !dump_ost\n#endif\n        && !next_maint && !get_next_maint && !cancel_next_maint) {\n        display_help(bin);\n        exit(1);\n    }\n\n    rc = rbh_init_internals();\n    if (rc != 0)\n        exit(rc);\n\n    /* get default config file, if not specified */\n    if (SearchConfig(config_file, config_file, &chgd, badcfg,\n                     MAX_OPT_LEN) != 0) {\n        fprintf(stderr, \"No config file (or too many) found matching %s\\n\",\n                badcfg);\n        exit(2);\n    } else if (chgd) {\n        fprintf(stderr, \"Using config file '%s'.\\n\", config_file);\n    }\n\n    /* only read common config (listmgr, ...) 
(mask=0) */\n    if (rbh_cfg_load(0, config_file, err_msg)) {\n        fprintf(stderr, \"Error reading configuration file '%s': %s\\n\",\n                config_file, err_msg);\n        exit(1);\n    }\n\n    if (!log_config.force_debug_level)\n        log_config.debug_level = LVL_MAJOR; /* no event message */\n\n    /* Initialize logging */\n    rc = InitializeLogs(bin);\n    if (rc) {\n        fprintf(stderr, \"Error opening log files: rc=%d, errno=%d: %s\\n\",\n                rc, errno, strerror(errno));\n        exit(rc);\n    }\n\n    if ((rc = InitFS()) != 0)\n        fprintf(stderr,\n                \"Warning: cannot access filesystem %s (%s), some reports may be incomplete or not available.\\n\",\n                global_config.fs_path, strerror(abs(rc)));\n\n    /* Initialize list manager */\n    rc = ListMgr_Init(LIF_REPORT_ONLY);\n    if (rc) {\n        DisplayLog(LVL_CRIT, REPORT_TAG,\n                   \"Error initializing list manager: %s (%d)\", lmgr_err2str(rc),\n                   rc);\n        exit(rc);\n    } else\n        DisplayLog(LVL_DEBUG, REPORT_TAG,\n                   \"ListManager successfully initialized\");\n\n    if (CheckLastFS() != 0)\n        exit(1);\n\n    /* Create database access */\n    rc = ListMgr_InitAccess(&lmgr);\n    if (rc) {\n        DisplayLog(LVL_CRIT, REPORT_TAG, \"Error %d: cannot connect to database\",\n                   rc);\n        exit(rc);\n    }\n\n    /* retrieve and display info */\n    if (activity)\n        report_activity(flags);\n\n    if (fs_info)\n        report_fs_info(flags);\n\n    if (entry_info)\n        report_entry(entry_path, flags);\n\n    if (user_info)\n        report_usergroup_info((EMPTY_STRING(user_name) ? NULL : user_name),\n                              flags);\n\n    if (group_info)\n        report_usergroup_info((EMPTY_STRING(group_name) ? 
NULL : group_name),\n                              flags | OPT_FLAG_GROUP);\n\n    if (class_info)\n        report_groupby(ATTR_INDEX_fileclass, flags);\n\n    if (project_info)\n        report_groupby(ATTR_INDEX_projid, flags);\n\n    if (topdirs)\n        report_topdirs(topdirs, flags);\n\n    if (topsize)\n        report_topsize(topsize, flags);\n\n    if (old_files)\n        report_oldest(TYPE_FILE, old_files, flags);\n\n    if (old_dirs)\n        report_oldest(TYPE_DIR, old_dirs, flags);\n\n    if (topuser)\n        report_topuser(topuser, flags);\n\n    if (deferred_rm)\n        report_deferred_rm(flags);\n\n    if (dump_all)\n        dump_entries(DUMP_ALL, 0, NULL, NULL, flags);\n\n    if (dump_user)\n        dump_entries(DUMP_USR, 0, dump_user_name, NULL, flags);\n\n    if (dump_group)\n        dump_entries(DUMP_GROUP, 0, dump_group_name, NULL, flags);\n\n#ifdef _LUSTRE\n    if (dump_ost) {\n        dump_entries(DUMP_OST, 0, ost_set_str, &dump_ost_set, flags);\n        /* free the list */\n        if (dump_ost_set.values)\n            MemFree(dump_ost_set.values);\n    }\n#endif\n\n    if (status_name != NULL) {\n        sm_instance_t *smi;\n        const char *strval;\n\n        rc = check_status_args(status_name, status_value, &strval, &smi);\n        if (rc)\n            exit(rc);\n        dump_entries(DUMP_STATUS, smi->smi_index, (char *)strval, NULL, flags);\n    }\n\n    if (status_info_name != NULL) {\n        sm_instance_t *smi;\n        const char *strval;\n\n        rc = check_status_args(status_info_name, status_info_value, &strval,\n                               &smi);\n        if (rc)\n            exit(rc);\n        report_status_info(smi->smi_index, strval, flags);\n    }\n\n    if (cancel_next_maint)\n        maintenance_set(flags, 0);\n\n    if (next_maint != 0)\n        maintenance_set(flags, next_maint);\n\n    if (get_next_maint)\n        maintenance_get(flags);\n\n    ListMgr_CloseAccess(&lmgr);\n\n    return 0;   /* for compiler 
*/\n\n}\n"
  },
  {
    "path": "src/robinhood/rbh_undelete.c",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2009-2015 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n/**\n * Command for restoring an entry that was accidentally removed from filesystem.\n */\n\n#ifdef HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n\n#include \"list_mgr.h\"\n#include \"rbh_cfg.h\"\n#include \"rbh_logs.h\"\n#include \"rbh_misc.h\"\n#include \"xplatform_print.h\"\n#include \"rbh_basename.h\"\n#include \"cmd_helpers.h\"\n\n#include <unistd.h>\n#include <getopt.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <errno.h>\n#include <pthread.h>\n\n#define LOGTAG \"Undelete\"\n\nstatic struct option option_tab[] = {\n    /* options for cancelling remove operation */\n    {\"list\", no_argument, NULL, 'L'},\n    {\"restore\", no_argument, NULL, 'R'},\n\n    {\"statusmgr\", required_argument, NULL, 's'},\n    {\"status-mgr\", required_argument, NULL, 's'},\n\n    /* config file options */\n    {\"config-file\", required_argument, NULL, 'f'},\n\n    /* log options */\n    {\"log-level\", required_argument, NULL, 'l'},\n\n    /* miscellaneous options */\n    {\"help\", no_argument, NULL, 'h'},\n    {\"version\", no_argument, NULL, 'V'},\n\n    {NULL, 0, NULL, 0}\n\n};\n\n#define SHORT_OPT_STRING    \"LRs:f:l:hV\"\n\n/* global variables */\n\nstatic lmgr_t         lmgr;\nchar                  path_filter[RBH_PATH_MAX] = \"\";\nstatic sm_instance_t *smi = NULL;\n\n/* special character sequences for displaying help */\n\n/* Bold start character sequence */\n#define _B \"\u001b[1m\"\n/* Bold end character sequence */\n#define B_ \"\u001b[m\"\n\n/* Underline start character sequence */\n#define _U 
\"\u001b[4m\"\n/* Underline end character sequence */\n#define U_ \"\u001b[0m\"\n\nstatic const char *help_string =\n    _B \"Usage:\" B_ \" %s [options] [<path>|<fid>]\\n\"\n    \"\\n\"\n    _B \"Actions:\" B_ \"\\n\"\n    \"    \" _B \"--list\" B_ \", \" _B \"-L\" B_ \"\\n\"\n    \"        List removed entries in the given directory.\\n\"\n    \"    \" _B \"--restore\" B_ \", \" _B \"-R\" B_ \"\\n\"\n    \"        Restore removed entries in the given directory.\\n\"\n    \"\\n\"\n    _B \"Module option:\" B_ \"\\n\"\n    \"    \" _B \"--status-mgr\" B_\" \" _U \"statusmgr\" U_\", \"\n           _B \"-s\" B_\" \"_U \"statusmgr\" U_\"\\n\"\n    \"\\n\"\n    _B \"Config file options:\" B_ \"\\n\"\n    \"    \" _B \"-f\" B_ \" \" _U \"file\" U_ \", \" _B \"--config-file=\" B_ _U \"file\" U_ \"\\n\"\n    \"        Path to configuration file (or short name).\\n\"\n    \"\\n\"\n    _B \"Miscellaneous options:\" B_ \"\\n\"\n    \"    \" _B \"-l\" B_ \" \" _U \"level\" U_ \", \" _B \"--log-level=\" B_ _U \"level\" U_ \"\\n\"\n    \"        Force the log verbosity level (overides configuration value).\\n\"\n    \"        Allowed values: CRIT, MAJOR, EVENT, VERB, DEBUG, FULL.\\n\"\n    \"    \" _B \"-h\" B_ \", \" _B \"--help\" B_ \"\\n\"\n    \"        Display a short help about command line options.\\n\"\n    \"    \" _B \"-V\" B_ \", \" _B \"--version\" B_ \"\\n\"\n    \"        Display version info\\n\";\n\nstatic inline void display_help(const char *bin_name)\n{\n    printf(help_string, bin_name);\n}\n\nstatic inline void display_version(const char *bin_name)\n{\n    printf(\"\\n\");\n    printf(\"Product:         \" PACKAGE_NAME \" rm cancellation tool\\n\");\n    printf(\"Version:         \" PACKAGE_VERSION \"-\" RELEASE \"\\n\");\n    printf(\"Build:           \" COMPIL_DATE \"\\n\");\n    printf(\"\\n\");\n    printf(\"Compilation switches:\\n\");\n\n/* Access by Fid ? 
*/\n#ifdef _HAVE_FID\n    printf(\"    Address entries by FID\\n\");\n#else\n    printf(\"    Address entries by path\\n\");\n#endif\n\n#ifdef HAVE_CHANGELOGS\n    printf(\"    MDT Changelogs supported\\n\");\n#else\n    printf(\"    MDT Changelogs disabled\\n\");\n#endif\n\n    printf(\"\\n\");\n#ifdef _LUSTRE\n#ifdef LUSTRE_VERSION\n    printf(\"Lustre Version: \" LUSTRE_VERSION \"\\n\");\n#else\n    printf(\"Lustre FS support\\n\");\n#endif\n#else\n    printf(\"No Lustre support\\n\");\n#endif\n\n#ifdef _MYSQL\n    printf(\"Database binding: MySQL\\n\");\n#elif defined(_SQLITE)\n    printf(\"Database binding: SQLite\\n\");\n#else\n#error \"No database was specified\"\n#endif\n    printf(\"\\n\");\n    printf(\"Report bugs to: <\" PACKAGE_BUGREPORT \">\\n\");\n    printf(\"\\n\");\n}\n\n/*\n * Append global filters on path\n * \\param do_display [in] display filters?\n * \\param initialized [in/out] indicate if the filter is initialized.\n */\nstatic int mk_path_filter(lmgr_filter_t *filter, bool do_display,\n                          bool *initialized)\n{\n    filter_value_t fv;\n    char path_regexp[RBH_PATH_MAX+10] = \"\";\n    char tmp[RBH_PATH_MAX] = \"\";\n    size_t len;\n    int rc;\n\n    /* is a filter on path specified? */\n    if (!EMPTY_STRING(path_filter)) {\n        if ((initialized != NULL) && !(*initialized)) {\n            lmgr_simple_filter_init(filter);\n            *initialized = true;\n        }\n        if (do_display)\n            printf(\"filter path: %s\\n\", path_filter);\n\n        len = strlen(path_filter);\n        /* remove last slash */\n        if (path_filter[len - 1] == '/')\n            path_filter[len - 1] = '\\0';\n\n        /* Special characters in a POSIX extended regex: .[{}()\\*+?|^$\n         *\n         * Escape POSIX ERE special characters that have no meaning in a\n         * globbing pattern. 
*/\n        rc = str_escape_charset(tmp, sizeof(tmp), path_filter, \".^$+(){}\\\\|\");\n        if (rc < 0) {\n            DisplayLog(LVL_CRIT, LOGTAG,\n                       \"Error %d: '%s' is too big to be properly escaped\",\n                       rc, path_filter);\n            return rc;\n        }\n\n        /* Translate those that have a different meaning */\n        str_subst(tmp, \"*\", \".*\");\n        str_subst(tmp, \"?\", \".\");\n\n        /* match 'path$' OR 'path/.*' */\n        snprintf(path_regexp, sizeof(path_regexp), \"%s($|/.*)\", tmp);\n        fv.value.val_str = path_regexp;\n\n        lmgr_simple_filter_add(filter, ATTR_INDEX_fullpath, RLIKE, fv, 0);\n    }\n    return 0;\n}\n\nstatic bool is_id_filter(entry_id_t *id)\n{\n    entry_id_t fid = { 0 };\n\n    if (!EMPTY_STRING(path_filter)) {\n        if (sscanf(path_filter, SFID, RFID(&fid)) != FID_SCAN_CNT)\n            return false;\n        else {\n            if (id)\n                *id = fid;\n            return true;\n        }\n    }\n    return false;\n}\n\nstatic int list_rm(void)\n{\n    int rc, index;\n    entry_id_t id;\n    attr_set_t attrs = ATTR_SET_INIT;\n    attr_mask_t mask;\n\n    static unsigned int list[] = {\n        ATTR_INDEX_rm_time,\n        ATTR_INDEX_ID,  /* id */\n        ATTR_INDEX_type,\n        ATTR_INDEX_uid,\n        ATTR_INDEX_gid,\n        ATTR_INDEX_size,\n        ATTR_INDEX_last_mod,\n        0,  /* to be set in the code: status index */\n        ATTR_INDEX_fullpath,\n    };\n    int list_cnt = sizeof(list) / sizeof(*list);\n\n    list[7] = ATTR_INDEX_FLG_STATUS | smi->smi_index;\n\n    mask = list2mask(list, list_cnt);\n    attrs.attr_mask = mask;\n\n    print_attr_list(0, list, list_cnt, NULL, false);\n\n    if (is_id_filter(&id)) {    /* 1 single entry */\n        rc = ListMgr_GetRmEntry(&lmgr, &id, &attrs);\n        if (rc == DB_SUCCESS) {\n            print_attr_values(0, list, list_cnt, &attrs, &id, false, NULL);\n        } else if (rc == 
DB_NOT_EXISTS)\n            DisplayLog(LVL_CRIT, LOGTAG,\n                       DFID \": fid not found in deferred removal list\",\n                       PFID(&id));\n        else\n            DisplayLog(LVL_CRIT, LOGTAG,\n                       \"ERROR %d in ListMgr_GetRmEntry(\" DFID \")\",\n                       rc, PFID(&id));\n        return rc;\n    } else {    /* list of entries */\n\n        struct lmgr_rm_list_t *rm_list;\n        lmgr_filter_t filter = { 0 };\n        bool filter_init = false;\n\n        /* set filters */\n        mk_path_filter(&filter, false, &filter_init);\n\n        /* list all deferred rm */\n        rm_list = ListMgr_RmList(&lmgr, filter_init ? &filter : NULL, NULL);\n\n        if (filter_init)\n            lmgr_simple_filter_free(&filter);\n\n        if (rm_list == NULL) {\n            DisplayLog(LVL_CRIT, LOGTAG,\n                       \"ERROR: Could not retrieve removed entries from \"\n                       \"database.\");\n            return -1;\n        }\n\n        index = 0;\n        while ((rc = ListMgr_GetNextRmEntry(rm_list, &id, &attrs))\n               == DB_SUCCESS) {\n            index++;\n\n            print_attr_values(0, list, list_cnt, &attrs, &id, false, NULL);\n\n            /* prepare next call */\n            ListMgr_FreeAttrs(&attrs);\n            memset(&attrs, 0, sizeof(attrs));\n            attrs.attr_mask = mask;\n        }\n\n        ListMgr_CloseRmList(rm_list);\n    }\n    return 0;\n}\n\nstatic ull_t counters[RS_COUNT] = { 0 };\n\nstatic ull_t db_err = 0;\n\nstatic const char *st_names[] = {\n    [RS_FILE_OK] = \"files\",\n    [RS_FILE_DELTA] = \"old version\",\n    [RS_FILE_EMPTY] = \"empty files\",\n    [RS_NON_FILE] = \"non-files\",\n    [RS_NOBACKUP] = \"no backup\",\n    [RS_ERROR] = \"errors\"\n};\n\nstatic void undelete_helper(const entry_id_t *id, const attr_set_t *attrs)\n{\n    entry_id_t new_id = { 0 };\n    recov_status_t st;\n    attr_set_t new_attrs = ATTR_SET_INIT;\n    int rc;\n\n 
   printf(\"Restoring '%s'...\", ATTR(attrs, fullpath));\n\n    st = smi->sm->undelete_func(smi, id, attrs, &new_id, &new_attrs, false);\n\n    counters[st]++;\n\n    switch (st) {\n    case RS_FILE_OK:\n        printf(\"\\t restore OK (file)\\n\");\n        break;\n    case RS_FILE_DELTA:\n        printf(\"\\t restored previous version (file)\\n\");\n        break;\n    case RS_FILE_EMPTY:\n        printf(\"\\t restore OK (empty file)\\n\");\n        break;\n    case RS_NON_FILE:\n        printf(\"\\t restore OK (%s)\\n\", ATTR(attrs, type));\n        break;\n    case RS_NOBACKUP:\n        printf(\"\\t cannot restore %s (no backup)\\n\", ATTR(attrs, type));\n        break;\n    case RS_ERROR:\n        printf(\"\\t ERROR\\n\");\n        break;\n    default:\n        printf(\"ERROR: UNEXPECTED STATUS %d\\n\", st);\n    }\n    /* TODO for symlinks and dir, we can implement a common recovery\n     * that consists in setting entry attributes from DB.\n     * FIXME these entries may not be matches by status managers.\n     * XXX Use create_from_attrs() */\n\n    if ((st == RS_FILE_OK) || (st == RS_FILE_DELTA) || (st == RS_FILE_EMPTY)\n        || (st == RS_NON_FILE)) {\n        /* discard entry from remove list */\n        if (ListMgr_SoftRemove_Discard(&lmgr, id) != 0) {\n            db_err++;\n            fprintf(stderr, \"Error: could not remove previous id \" DFID\n                    \" from database\\n\", PFID(id));\n        }\n\n        /* clean read-only attrs */\n        attr_mask_unset_readonly(&new_attrs.attr_mask);\n\n        /* insert or update it in the db */\n        rc = ListMgr_Insert(&lmgr, &new_id, &new_attrs, true);\n        if (rc == 0)\n            printf(\"\\tEntry successfully updated in the dabatase\\n\");\n        else {\n            db_err++;\n            fprintf(stderr, \"\\tERROR %d inserting entry in the database\\n\", rc);\n        }\n    }\n}\n\nstatic int undelete(void)\n{\n    int rc;\n    struct lmgr_rm_list_t *list;\n    entry_id_t 
id;\n    attr_set_t attrs = ATTR_SET_INIT;\n    attr_mask_t mask;\n    recov_status_t st;\n\n    /* get all POSIX + status manager mask */\n    mask = smi->sm->softrm_table_mask;\n    mask.std |= POSIX_ATTR_MASK;\n\n    attrs.attr_mask = mask;\n\n    if (is_id_filter(&id)) {    /* 1 single entry */\n        ATTR_MASK_SET(&attrs, fullpath);\n        rc = ListMgr_GetRmEntry(&lmgr, &id, &attrs);\n        if (rc == DB_SUCCESS) {\n            undelete_helper(&id, &attrs);\n        } else if (rc == DB_NOT_EXISTS)\n            DisplayLog(LVL_CRIT, LOGTAG,\n                       DFID \": fid not found in removed entries\", PFID(&id));\n        else\n            DisplayLog(LVL_CRIT, LOGTAG,\n                       \"ERROR %d in ListMgr_GetRmEntry(\" DFID \")\",\n                       rc, PFID(&id));\n        return rc;\n    } else {    /* recover a list of entries */\n\n        lmgr_filter_t filter = { 0 };\n        bool filter_init = false;\n\n        /* set filters */\n        mk_path_filter(&filter, false, &filter_init);\n\n        /* list files to be recovered */\n        list = ListMgr_RmList(&lmgr, filter_init ? 
&filter : NULL, NULL);\n\n        if (filter_init)\n            lmgr_simple_filter_free(&filter);\n\n        if (list == NULL) {\n            DisplayLog(LVL_CRIT, LOGTAG,\n                       \"ERROR: Could not retrieve removed entries from database.\");\n            return -1;\n        }\n\n        while ((rc = ListMgr_GetNextRmEntry(list, &id, &attrs)) == DB_SUCCESS) {\n            undelete_helper(&id, &attrs);\n\n            /* prepare next call */\n            ListMgr_FreeAttrs(&attrs);\n            memset(&attrs, 0, sizeof(attrs));\n            attrs.attr_mask = mask;\n        }\n        ListMgr_CloseRmList(list);\n    }\n\n    /* display summary */\n    printf(\"\\nundelete summary:\\n\");\n    for (st = RS_FILE_OK; st < RS_COUNT; st++) {\n        printf(\"\\t%9llu %s\\n\", counters[st], st_names[st]);\n    }\n    printf(\"\\t%9llu DB errors\\n\", db_err);\n\n    return 0;\n}\n\n#define MAX_OPT_LEN 1024\n\n/**\n * Main daemon routine\n */\nint main(int argc, char **argv)\n{\n    int c, option_index = 0;\n    const char *bin;\n\n    char config_file[MAX_OPT_LEN] = \"\";\n\n    enum { ACTION_NONE, ACTION_LIST, ACTION_RESTORE } action = ACTION_NONE;\n\n    int rc;\n    char err_msg[4096];\n    bool chgd = false;\n    char badcfg[RBH_PATH_MAX];\n\n    char sm_name[SM_NAME_MAX + 1] = \"\";\n\n    bin = rh_basename(argv[0]); /* supports NULL argument */\n\n    /* parse command line options */\n    while ((c = getopt_long(argc, argv, SHORT_OPT_STRING, option_tab,\n                            &option_index)) != -1) {\n        switch (c) {\n        case 'L':\n            if ((action != ACTION_NONE) && (action != ACTION_LIST))\n                fprintf(stderr,\n                        \"WARNING: only a single action (--list or --restore) is expected\\n\"\n                        \"on command line. 
'--restore' will be ignored.\\n\");\n            action = ACTION_LIST;\n            break;\n        case 'R':\n            if ((action != ACTION_NONE) && (action != ACTION_RESTORE))\n                fprintf(stderr,\n                        \"WARNING: only a single action (--list or --restore) is expected\\n\"\n                        \"on command line. '--list' will be ignored.\\n\");\n            action = ACTION_RESTORE;\n            break;\n\n        case 's':\n            if (!EMPTY_STRING(sm_name))\n                fprintf(stderr,\n                        \"WARNING: only a single status manager is expected \"\n                        \"on command line. '%s' ignored.\\n\", optarg);\n            else\n                rh_strncpy(sm_name, optarg, sizeof(sm_name));\n            break;\n\n        case 'f':\n            rh_strncpy(config_file, optarg, MAX_OPT_LEN);\n            break;\n        case 'l':\n        {\n            int log_level = str2debuglevel(optarg);\n\n            if (log_level == -1) {\n                fprintf(stderr,\n                        \"Unsupported log level '%s'. 
CRIT, MAJOR, EVENT, VERB, DEBUG or FULL expected.\\n\",\n                        optarg);\n                exit(1);\n            }\n            force_debug_level(log_level);\n            break;\n        }\n        case 'h':\n            display_help(bin);\n            exit(0);\n            break;\n        case 'V':\n            display_version(bin);\n            exit(0);\n            break;\n        case ':':\n        case '?':\n        default:\n            display_help(bin);\n            exit(1);\n            break;\n        }\n    }\n\n    /* 1 expected argument: path */\n    if (optind < argc - 1) {\n        fprintf(stderr,\n                \"Error: too many arguments on command line: expected <path|fid>\\n\");\n        exit(1);\n    } else if (optind == argc - 1)\n        rh_strncpy(path_filter, argv[optind], RBH_PATH_MAX);\n\n    rc = rbh_init_internals();\n    if (rc != 0)\n        exit(rc);\n\n    /* get default config file, if not specified */\n    if (SearchConfig(config_file, config_file, &chgd, badcfg,\n                     MAX_OPT_LEN) != 0) {\n        fprintf(stderr, \"No config file (or too many) found matching %s\\n\",\n                badcfg);\n        exit(2);\n    } else if (chgd) {\n        fprintf(stderr, \"Using config file '%s'.\\n\", config_file);\n    }\n\n    /* only read common config */\n    if (rbh_cfg_load(0, config_file, err_msg)) {\n        fprintf(stderr, \"Error reading configuration file '%s': %s\\n\",\n                config_file, err_msg);\n        exit(1);\n    }\n\n    /* XXX HOOK: Set logging to stderr */\n    strcpy(log_config.log_file, \"stderr\");\n    strcpy(log_config.report_file, \"stderr\");\n    strcpy(log_config.alert_file, \"stderr\");\n\n    /* Initialize logging */\n    rc = InitializeLogs(bin);\n    if (rc) {\n        fprintf(stderr, \"Error opening log files: rc=%d, errno=%d: %s\\n\",\n                rc, errno, strerror(errno));\n        exit(rc);\n    }\n\n    /* Initialize Filesystem access */\n    rc = 
InitFS();\n    if (rc)\n        exit(rc);\n\n    /* Initialize status managers (XXX all or just the one used for undelete?)\n     */\n    rc = smi_init_all(0);\n    if (rc)\n        exit(rc);\n\n    /* load the status manager */\n    if (!EMPTY_STRING(sm_name)) {\n        rc = load_smi(sm_name, &smi);\n        if (rc)\n            exit(rc);\n    } else {\n        /* if there is a single smi that allows undelete, use it */\n        rc = load_single_smi(&smi);\n        if (rc)\n            exit(rc);\n    }\n\n    /* Initialize list manager */\n    rc = ListMgr_Init(0);\n    if (rc) {\n        DisplayLog(LVL_CRIT, LOGTAG, \"Error initializing list manager: %s (%d)\",\n                   lmgr_err2str(rc), rc);\n        exit(rc);\n    }\n    DisplayLog(LVL_DEBUG, LOGTAG, \"ListManager successfully initialized\");\n\n    if (CheckLastFS() != 0)\n        exit(1);\n\n    /* Create database access */\n    rc = ListMgr_InitAccess(&lmgr);\n    if (rc) {\n        DisplayLog(LVL_CRIT, LOGTAG, \"Error %d: cannot connect to database\",\n                   rc);\n        exit(rc);\n    }\n\n    if (!has_deletion_policy()) {\n        DisplayLog(LVL_CRIT, LOGTAG, \"Unsupported action: no defined policy \"\n                   \"manages deleted files\");\n        exit(ENOTSUP);\n    }\n\n    /* perform the action */\n    switch (action) {\n    case ACTION_LIST:\n        rc = list_rm();\n        break;\n    case ACTION_RESTORE:\n        rc = undelete();\n        break;\n    case ACTION_NONE:\n        display_help(bin);\n        rc = 1;\n        break;\n    default:\n        fprintf(stderr, \"Unexpected action (action code=%#x)\\n\", action);\n        display_help(bin);\n        rc = EINVAL;\n        break;\n    }\n\n    ListMgr_CloseAccess(&lmgr);\n\n    return rc;\n}\n"
  },
  {
    "path": "src/tests/Makefile.am",
    "content": "AM_CFLAGS= $(CC_OPT) $(DB_CFLAGS) $(PURPOSE_CFLAGS)\nAM_LDFLAGS= -lpthread\n\n# See autotools/m4/ax_valgrind_check.m4 for documentation\n@VALGRIND_CHECK_RULES@\n#VALGRIND_SUPPRESSIONS_FILES = my-project.supp\n#EXTRA_DIST = my-project.supp\n\ncheck_PROGRAMS=test_uidgidcache test_params \\\n    test_confparam test_parse\nif LUSTRE\ncheck_PROGRAMS+=create_nostripe test_forcestripe\nendif\nTESTS=test_parsing.sh test_uidgidcache test_params test_confparam\n\nnoinst_PROGRAMS=$(check_PROGRAMS)\n\ntest_forcestripe_LDADD=$(DB_LDFLAGS) $(PURPOSE_LDFLAGS) $(FS_LDFLAGS)\n\ntest_uidgidcache_SOURCES=test_uidgidcache.c\ntest_uidgidcache_LDADD=../common/libcommontools.la\ntest_params_SOURCES=test_params.c\ntest_params_LDADD=../common/libcommontools.la\ntest_confparam_SOURCES=test_confparam.c ../common/param_utils.c\ntest_confparam_LDFLAGS=$(DB_LDFLAGS) $(PURPOSE_LDFLAGS) $(FS_LDFLAGS)\ntest_confparam_LDADD=../policies/libpolicies.la ../common/libcommontools.la\ntest_parse_SOURCES\t    = test_parse.c\ntest_parse_LDADD         =  ../cfg_parsing/libconfigparsing.la\n\n\nindent:\n\t$(top_srcdir)/scripts/indent.sh\n"
  },
  {
    "path": "src/tests/create_nostripe.c",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n#ifdef HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n\n#include <sys/types.h>\n#include <sys/stat.h>\n#include <fcntl.h>\n#include <stdio.h>\n#include <sys/types.h>\n#include <asm/types.h>\n#include <errno.h>\n#include <unistd.h>\n\n#include \"lustre_extended_types.h\"\n\n\n\nint main(int argc, char ** argv)\n{\n    int fd = open(argv[1], O_CREAT | O_LOV_DELAY_CREATE, 0644);\n    if (fd < 0)\n        fprintf(stderr, \"open error: %s\", strerror(errno));\n    else\n    {\n        printf (\"open OK\\n\");\n        close(fd);\n    }\n}\n"
  },
  {
    "path": "src/tests/path.sql",
    "content": "DROP FUNCTION IF EXISTS one_path;\nDELIMITER //\nCREATE FUNCTION one_path(param VARCHAR(128)) RETURNS VARCHAR(1024) READS SQL DATA\nBEGIN\n    DECLARE p VARCHAR(1024) DEFAULT NULL;\n    DECLARE pid VARCHAR(128) DEFAULT NULL;\n    DECLARE n VARCHAR(256) DEFAULT NULL;\n    -- return '/p' when not found\n    DECLARE EXIT HANDLER FOR NOT FOUND RETURN p;\n    SELECT parent_id, name INTO pid, p from NAMES where id=param LIMIT 1;\n    LOOP\n        SELECT parent_id, name INTO pid, n from NAMES where id=pid ;\n        SELECT CONCAT( n, '/', p) INTO p;\n    END LOOP;\nEND//\n\nDROP FUNCTION IF EXISTS this_path;\nDELIMITER //\nCREATE FUNCTION this_path(pid_arg VARCHAR(128), n_arg VARCHAR(256)) RETURNS VARCHAR(1024) READS SQL DATA\nBEGIN\n    DECLARE p VARCHAR(1024) DEFAULT NULL;\n    DECLARE pid VARCHAR(128) DEFAULT NULL;\n    DECLARE n VARCHAR(256) DEFAULT NULL;\n    -- return '/p' when not found\n    DECLARE EXIT HANDLER FOR NOT FOUND RETURN p;\n    SET pid=pid_arg;\n    SET p=n_arg;\n    LOOP\n        SELECT parent_id, name INTO pid, n from NAMES where id=pid ;\n        SELECT CONCAT( n, '/', p) INTO p;\n    END LOOP;\nEND//\n\nDROP PROCEDURE IF EXISTS all_paths;\nCREATE PROCEDURE all_paths(IN param VARCHAR(128)) READS SQL DATA\nBEGIN\n    DECLARE p VARCHAR(1024) DEFAULT '';\n    DECLARE pid VARCHAR(128) DEFAULT '';\n    DECLARE n VARCHAR(256) DEFAULT '';\n    DECLARE nb INT;\n    DECLARE cur CURSOR FOR SELECT parent_id, name from NAMES where id=param;\n    DECLARE CONTINUE HANDLER FOR NOT FOUND SET nb=0;\n    OPEN cur;\n    -- loop on all hardlinks to an object\n    alllinks: LOOP \n        SET nb=1;\n        FETCH cur INTO pid, p;\n--        SELECT nb, pid, p;\n        IF nb=0 THEN \n--            SELECT 'EXIT' as 'MSG:';\n            LEAVE alllinks;\n        END IF;\n        WHILE nb > 0 DO\n            SELECT parent_id, name INTO pid, n from NAMES where id=pid LIMIT 1;\n            SELECT FOUND_ROWS() INTO nb;\n            IF nb > 0 THEN\n              
  SELECT CONCAT( n, '/', p) INTO p;\n            END IF;\n        END WHILE;\n        SELECT p AS paths;\n\n    END LOOP alllinks;\n    CLOSE cur;\n\nEND//\n\nSELECT one_path(id), type, size from ENTRIES WHERE id='286A47DD:1000D';\nSELECT one_path('toto');\nSELECT one_path('286A47DD:1000D');\n\n-- hardlink: 286A47DD:1000D\nCALL all_paths('286A47DD:1000D');\n"
  },
  {
    "path": "src/tests/test_confparam.c",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright 2015 Cray Inc., All rights reserved.\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n#ifdef HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n\n#include <stdlib.h>\n#include <string.h>\n#include <errno.h>\n\n#include \"rbh_params.h\"\n#include \"rbh_logs.h\"\n#include \"global_config.h\"\n#include \"rbh_misc.h\"\n\n/* avoid linking with all robinhood libs */\nlog_config_t log_config = { .debug_level = LVL_DEBUG };\nglobal_config_t global_config = { .fs_path = \"somefspath\" };\n\nvoid DisplayLogFn(log_level debug_level, const char *tag, const char *format, ...)\n{\n    if (LVL_DEBUG >= debug_level)\n    {\n        va_list args;\n\n        va_start(args, format);\n        vprintf(format, args);\n        va_end(args);\n        printf(\"\\n\");\n    }\n}\n\nconst char *config_file_path(void) { return \"someconfigfile\"; }\nconst char *get_fsname(void) { return \"somefsname\"; }\n\n/* dummy ListMgr_PrintAttrPtr() function: avoid linking with all libs */\nint ListMgr_PrintAttrPtr(GString *str, db_type_e type,\n                         void *value_ptr, const char *quote)\n{\n    g_string_printf(str, \"%p\", value_ptr);\n    return 0;\n}\n\n/* Will substitute values in first column with values in second\n * column. 
*/\nstatic const char *vars[] = {\n    \"foo\", \"barbar\",\n    \"hello\", \"bye\",\n    \"marco\", \"polo\",\n    \"double\", \"{marco}\",\n    \"triple\", \"{double}\",\n    \"explorer\", \"marco\",\n    \"quote1\", \"a'b\",\n    NULL, NULL\n};\n\nstatic const char *find_vars[] = {\n    \"\", \"somepath\",\n    NULL, NULL\n};\n\nstatic const char descr[] = \"my test string\";\n\nstatic inline void assert_str_equal(const char *s1, const char *s2)\n{\n    if (!strcmp(s1, s2))\n        return;\n\n    fprintf(stderr, \"'%s' differs from '%s'\\n\", s1, s2);\n    abort();\n}\n\n/** test with braces in strict mode and non-strict mode\n * @param cmd the   original string\n * @param weak_res  result for weak mode (if different from cmd).\n */\nstatic void test_braces(const char *cmd, const char* weak_res)\n{\n    char *newcmd;\n\n    /* strict braces mode: should be an error */\n    newcmd = subst_params(cmd, descr, NULL, NULL, NULL, vars, NULL, false, true);\n    assert(newcmd == NULL);\n\n    /* weak braces mode: no error */\n    newcmd = subst_params(cmd, descr, NULL, NULL, NULL, vars, NULL, false, false);\n    assert_str_equal(newcmd, weak_res ? weak_res : cmd);\n    g_free(newcmd);\n}\n\nstatic void test_subst_params(void)\n{\n    char       *newcmd;\n    const char *cmd;\n    int         rc;\n    const attr_set_t attrs = {\n        .attr_mask = {.std = ATTR_MASK_name | ATTR_MASK_fullpath},\n        .attr_values = {\n            .name = \"somename\",\n            .fullpath = \"somepath\",\n        }\n    };\n#ifdef _HAVE_FID\n    /* oid, seq, ver */\n    entry_id_t id = { 0x1234, 0x5678, 0xabcd };\n    const char fid_str[] = \"0x1234:0x5678:0xabcd\";\n#else\n    /* fskey, inode */\n    entry_id_t id = { 0x8BC1, 12345 };\n    const char fid_str[] = \"8BC1/12345\";\n#endif\n    struct rbh_params params = { 0 };\n\n    /* convert the list to user params */\n    rc = rbh_list2params(&params, vars, true);\n    assert(rc == 0);\n\n    /*\n     * Basic checks. 
No variable.\n     */\n\n    /* Empty string */\n    newcmd = subst_params(\"\", descr, NULL, NULL, NULL, NULL, NULL, false, true);\n    if (strcmp(newcmd, \"\"))\n        abort();\n    g_free(newcmd);\n\n    /* Nothing interesting */\n    cmd = \"hello\";\n    newcmd = subst_params(cmd, descr, NULL, NULL, NULL, NULL, NULL, false, true);\n    assert_str_equal(newcmd, cmd);\n    g_free(newcmd);\n\n    /* Empty variable */\n    /* should fail if empty string is not a param */\n    newcmd = subst_params(\"{}\", descr, NULL, NULL, NULL, NULL, NULL, false, true);\n    assert(newcmd == NULL);\n\n    /* case of 'rbh-find': {} is expanded to path */\n    newcmd = subst_params(\"{}\", descr, NULL, NULL, NULL, find_vars, NULL, false, true);\n    assert_str_equal(newcmd, \"somepath\");\n    g_free(newcmd);\n\n    /* One unknown variable */\n    newcmd = subst_params(\"{hello}\", descr, NULL, NULL, NULL, NULL, NULL, false, true);\n    assert(newcmd == NULL);\n\n    /* One unknown variable with text before */\n    newcmd = subst_params(\"qwerty{hello}\", descr, NULL, NULL, NULL, NULL, NULL, false, true);\n    assert(newcmd == NULL);\n\n    /* One unknown variable with text after */\n    newcmd = subst_params(\"{hello}cvbn\", descr, NULL, NULL, NULL, NULL, NULL, false, true);\n    assert(newcmd == NULL);\n\n    /* One unknown variable with text around */\n    newcmd = subst_params(\"qwerty{hello}cvbn\", descr, NULL, NULL, NULL, NULL, NULL, false, true);\n    assert(newcmd == NULL);\n\n    /* two unknown variables */\n    newcmd = subst_params(\"{azerty}{hello}\", descr, NULL, NULL, NULL, NULL, NULL, false, true);\n    assert(newcmd == NULL);\n\n    /* two unknown variables with text around */\n    newcmd = subst_params(\"jgds{azerty}lgkfhd{hello}iub\", descr, NULL, NULL, NULL, NULL, NULL, false, true);\n    assert(newcmd == NULL);\n\n    /* string with lone { */\n    test_braces(\"qwerty{hellocvbn\", NULL);\n\n    /* string with lone } */\n    test_braces(\"qwerty}hellocvbn\", 
NULL);\n\n    /* string with 2 { and 1 } (std variable) */\n    test_braces(\"qwerty{{fsroot}cvbn\", \"qwerty{somefspathcvbn\");\n\n    /* string with 2 { */\n    test_braces(\"qwerty{{hellocvbn\", NULL);\n\n    /* string with 2 { */\n    test_braces(\"qwerty{ghfd{hellocvbn\", NULL);\n\n    /* string with inverted {} */\n    test_braces(\"qwerty}ghfd{hellocvbn\", NULL);\n\n    /* string with 2 { and 1 } (additional variable) */\n    test_braces(\"qwerty{ghfd{hello}cvbn\", \"qwerty{ghfdbyecvbn\");\n\n    /* string with 1 { and 2 }} (additional variable) */\n    test_braces(\"qwertyghfd{hello}}cvbn\", \"qwertyghfdbye}cvbn\");\n\n    /* string with } { and } (additional variable) */\n    test_braces(\"qwerty}ghfd{hello}cvbn\", \"qwerty}ghfdbyecvbn\");\n\n\n    /*\n     * With standard variables\n     */\n\n    /* One standard variable */\n    newcmd = subst_params(\"{name}\", descr, NULL, &attrs, NULL, NULL, NULL, false, true);\n    assert_str_equal(newcmd, attrs.attr_values.name);\n    g_free(newcmd);\n\n    newcmd = subst_params(\"{fid}\", \"\", &id, &attrs, NULL, NULL, NULL, false, true);\n    assert_str_equal(newcmd, fid_str);\n    g_free(newcmd);\n\n    newcmd = subst_params(\"{fsname}\", \"\", &id, &attrs, NULL, NULL, NULL, false, true);\n    assert_str_equal(newcmd, \"somefsname\");\n    g_free(newcmd);\n\n    newcmd = subst_params(\"{fsroot}\", \"\", &id, &attrs, NULL, NULL, NULL, false, true);\n    assert_str_equal(newcmd, \"somefspath\");\n    g_free(newcmd);\n\n    newcmd = subst_params(\"{cfg}\", \"\", &id, &attrs, NULL, NULL, NULL, false, true);\n    assert_str_equal(newcmd, \"someconfigfile\");\n    g_free(newcmd);\n\n    /* try to resolve fid without passing id argument */\n    newcmd = subst_params(\"{fid}\", descr, NULL, &attrs, NULL, NULL, NULL, false, true);\n    assert(newcmd == NULL);\n\n    /* try to resolve missing attribute */\n    newcmd = subst_params(\"{ost_pool}\", descr, NULL, &attrs, NULL, NULL, NULL, false, true);\n    assert(newcmd == 
NULL);\n\n    /* Partial standard variable name */\n    newcmd = subst_params(\"{nam}\", descr, NULL, &attrs, NULL, NULL, NULL, false, true);\n    assert(newcmd == NULL);\n\n    /* Standard variable name with an extra letter */\n    newcmd = subst_params(\"{namee}\", descr, NULL, &attrs, NULL, NULL, NULL, false, true);\n    assert(newcmd == NULL);\n\n    /* Two standard variables */\n    newcmd = subst_params(\"{name} {fullpath}\", descr, NULL, &attrs, NULL, NULL, NULL, false, true);\n    assert_str_equal(newcmd, \"somename somepath\");\n    g_free(newcmd);\n\n    /*\n     * With some real variables (additional params).\n     */\n\n    /* Simple replacement */\n    newcmd = subst_params(\"{foo}\", descr, NULL, NULL, NULL, vars, NULL, false, true);\n    assert_str_equal(newcmd, \"barbar\");\n    g_free(newcmd);\n\n    newcmd = subst_params(\"{hello}\", descr, NULL, NULL, NULL, vars, NULL, false, true);\n    assert_str_equal(newcmd, \"bye\");\n    g_free(newcmd);\n\n    newcmd = subst_params(\"{marco}\", descr, NULL, NULL, NULL, vars, NULL, false, true);\n    assert_str_equal(newcmd, \"polo\");\n    g_free(newcmd);\n\n    /*\n     * With some real variables (user params).\n     */\n    /* Simple replacement */\n    newcmd = subst_params(\"{foo}\", descr, NULL, NULL, &params, NULL, NULL, false, true);\n    assert_str_equal(newcmd, \"barbar\");\n    g_free(newcmd);\n\n    newcmd = subst_params(\"{hello}\", descr, NULL, NULL, &params, NULL, NULL, false, true);\n    assert_str_equal(newcmd, \"bye\");\n    g_free(newcmd);\n\n    newcmd = subst_params(\"{marco}\", descr, NULL, NULL, &params, NULL, NULL, false, true);\n    assert_str_equal(newcmd, \"polo\");\n    g_free(newcmd);\n\n    /* With quotes */\n    newcmd = subst_params(\"{foo}\", descr, NULL, NULL, NULL, vars, NULL, true, true);\n    assert_str_equal(newcmd, \"'barbar'\");\n    g_free(newcmd);\n\n    newcmd = subst_params(\"{quote1}\", descr, NULL, NULL, NULL, vars, NULL, false, true);\n    
assert_str_equal(newcmd, \"a'b\");\n    g_free(newcmd);\n\n    newcmd = subst_params(\"{quote1}\", descr, NULL, NULL, NULL, vars, NULL, true, true);\n    assert_str_equal(newcmd, \"'a'\\\\''b'\");\n    g_free(newcmd);\n\n    newcmd = subst_params(\"az {quote1}\", descr, NULL, NULL, NULL, vars, NULL, true, true);\n    assert_str_equal(newcmd, \"az 'a'\\\\''b'\");\n    g_free(newcmd);\n\n    newcmd = subst_params(\"{quote1} sx\", descr, NULL, NULL, NULL, vars, NULL, true, true);\n    assert_str_equal(newcmd, \"'a'\\\\''b' sx\");\n    g_free(newcmd);\n\n    newcmd = subst_params(\"az {quote1} sx\", descr, NULL, NULL, NULL, vars, NULL, true, true);\n    assert_str_equal(newcmd, \"az 'a'\\\\''b' sx\");\n    g_free(newcmd);\n\n    /* Non-existent variable in first column, but present in 2nd column */\n    newcmd = subst_params(\"{barbar}\", descr, NULL, NULL, NULL, vars, NULL, false, true);\n    assert(newcmd == NULL);\n\n    newcmd = subst_params(\"{bye}\", descr, NULL, NULL, NULL, vars, NULL, false, true);\n    assert(newcmd == NULL);\n\n    newcmd = subst_params(\"{polo}\", descr, NULL, NULL, NULL, vars, NULL, false, true);\n    assert(newcmd == NULL);\n\n    /* 2 variables */\n    newcmd = subst_params(\"{foo} {hello}\", descr, NULL, NULL, NULL, vars, NULL, false, true);\n    assert_str_equal(newcmd, \"barbar bye\");\n    g_free(newcmd);\n\n    /* twice the same variable */\n    newcmd = subst_params(\"{hello}{hello}\", descr, NULL, NULL, NULL, vars, NULL, false, true);\n    assert_str_equal(newcmd, \"byebye\");\n    g_free(newcmd);\n\n    /* 3 variables */\n    newcmd = subst_params(\"{marco}{hello}{foo}\", descr, NULL, NULL, NULL, vars, NULL, false, true);\n    assert_str_equal(newcmd, \"polobyebarbar\");\n    g_free(newcmd);\n\n    /* 2 + 3 variables */\n    newcmd = subst_params(\"A{marco} {hello} {marco}d{hello}w{hello}\", descr, NULL, NULL, NULL, vars, NULL, false, true);\n    assert_str_equal(newcmd, \"Apolo bye polodbyewbye\");\n    g_free(newcmd);\n\n    /* 
5 times the same variable */\n    newcmd = subst_params(\"{marco}{marco}{marco}{marco}{marco}\", descr, NULL, NULL, NULL, vars, NULL, false, true);\n    assert_str_equal(newcmd, \"polopolopolopolopolo\");\n    g_free(newcmd);\n\n    /*\n     * Mix between standard and additional variables\n     */\n\n    /* One of each */\n    newcmd = subst_params(\"{marco} {fullpath}\", descr, NULL, &attrs, NULL, vars, NULL, false, true);\n    assert_str_equal(newcmd, \"polo somepath\");\n    g_free(newcmd);\n\n    /* Several of each */\n    newcmd = subst_params(\"{marco} {fullpath} {hello} {name} \", descr, NULL, &attrs, NULL, vars, NULL, false, true);\n    assert_str_equal(newcmd, \"polo somepath bye somename \");\n    g_free(newcmd);\n\n    /*\n     * Mix between standard and user variables\n     */\n\n    /* One of each */\n    newcmd = subst_params(\"{marco} {fullpath}\", descr, NULL, &attrs, &params, NULL, NULL, false, true);\n    assert_str_equal(newcmd, \"polo somepath\");\n    g_free(newcmd);\n\n    /* Several of each */\n    newcmd = subst_params(\"{marco} {fullpath} {hello} {name} \", descr, NULL, &attrs, &params, NULL, NULL, false, true);\n    assert_str_equal(newcmd, \"polo somepath bye somename \");\n    g_free(newcmd);\n\n    /* test user params priority */\n    struct rbh_params p = {0};\n\n    rc = rbh_param_set(&p, \"fid\", \"override\", true);\n    assert(rc == 0);\n    rc = rbh_param_set(&p, \"hello\", \"goodbye\", true);\n    assert(rc == 0);\n\n    /* user params priority on std params */\n    newcmd = subst_params(\"{fid}\", descr, &id, &attrs, &p, NULL, NULL, false, true);\n    assert_str_equal(newcmd, \"override\");\n    g_free(newcmd);\n\n    /* user params priority on additional params */\n    newcmd = subst_params(\"{hello}\", descr, &id, &attrs, &p, vars, NULL, false, true);\n    assert_str_equal(newcmd, \"goodbye\");\n    g_free(newcmd);\n\n    rbh_params_free(&p);\n\n#if 0\n    /*\n     * Multiple substitutions\n     */\n\n    /* \"{triple}\" -> 
\"{double}\" --> \"{marco}\" --> \"polo\" */\n    /* TODO: is it an abuse -- should that work? Is it something we want to work? */\n    newcmd = subst_params(\"{triple} \", descr, NULL, &attrs, NULL, vars, NULL, false);\n    assert_str_equal(newcmd, \"polo\");\n    g_free(newcmd);\n#endif\n\n#if 0\n    /* \"{{explorer}}\" -> \"{marco}\" --> \"polo\" */\n    /* TODO: is it an abuse -- should that work? Is it something we want to work? */\n    newcmd = subst_params(\"{{explorer}} \", descr, NULL, &attrs, NULL, vars, NULL, false);\n    assert_str_equal(newcmd, \"polo\");\n    g_free(newcmd);\n#endif\n\n    rbh_params_free(&params);\n}\n\nstatic void test_param_mask(void)\n{\n    attr_mask_t newmask;\n    bool        err = false;\n\n    /* no variable */\n    newmask = params_mask(\"\", descr, &err);\n    assert(!err && attr_mask_is_null(newmask));\n\n    /* Empty variable */\n    newmask = params_mask(\"{}\", descr, &err);\n    assert(err);\n\n    /* Known std parameter */\n    newmask = params_mask(\"{name}\", descr, &err);\n    assert(!err && newmask.std == ATTR_MASK_name);\n\n    /* Known std parameter but not an attribute */\n    newmask = params_mask(\"{fid}\", descr, &err);\n    assert(!err && attr_mask_is_null(newmask));\n\n    /* Unknown parameter */\n    newmask = params_mask(\"{marco}\", descr, &err);\n    assert(!err && attr_mask_is_null(newmask));\n\n    /*\n     * With extra parameters\n     */\n\n    /* no variable */\n    newmask = params_mask(\"\", descr, &err);\n    assert(!err && attr_mask_is_null(newmask));\n\n    /* Empty variable */\n    newmask = params_mask(\"{}\", descr, &err);\n    assert(err);\n\n    /* Known std parameter */\n    newmask = params_mask(\"{name}\", descr, &err);\n    assert(!err && newmask.std == ATTR_MASK_name);\n\n    /* Known std parameter but not an attribute */\n    newmask = params_mask(\"{fid}\", descr, &err);\n    assert(!err && attr_mask_is_null(newmask));\n\n    /* extra parameter */\n    newmask = 
params_mask(\"{marco}\", descr, &err);\n    assert(!err && attr_mask_is_null(newmask));\n\n    /* twice the same variable */\n    newmask = params_mask(\"{fullpath}{fullpath}\", descr, &err);\n    assert(!err && newmask.std == ATTR_MASK_fullpath);\n\n    /* 2 variables */\n    newmask = params_mask(\"wertyu{fullpath}fghj {name}\", descr, &err);\n    assert(!err && (newmask.std == (ATTR_MASK_fullpath | ATTR_MASK_name)));\n\n    /* 1 known variable and 1 unknown */\n    newmask = params_mask(\"wertyu{fullpath}fghj {namee}\", descr, &err);\n    assert(!err && newmask.std == ATTR_MASK_fullpath);\n\n    /* 5 mixed variables */\n    newmask = params_mask(\"{hello}wertyu{fullpath}{marco}fghj {name}{fid}\",\n                          descr, &err);\n    assert(!err && (newmask.std == (ATTR_MASK_fullpath | ATTR_MASK_name)));\n}\n\nint main(int argc, char **argv)\n{\n    test_subst_params();\n    test_param_mask();\n\n    printf(\"good\\n\");\n\n    return 0;\n}\n"
  },
  {
    "path": "src/tests/test_forcestripe.c",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n#ifdef HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n\n#include <sys/types.h>\n#include <sys/stat.h>\n#include <fcntl.h>\n#include <stdio.h>\n#include <sys/types.h>\n#include <asm/types.h>\n#include <errno.h>\n#include <unistd.h>\n#include <stdlib.h>\n#include <linux/limits.h>\n#include <sys/xattr.h>\n\n#include \"lustre_extended_types.h\"\n\nstatic inline int lum_size(struct lov_user_md *p_lum)\n{\n    switch(p_lum->lmm_magic) {\n        case LOV_USER_MAGIC_V1:\n            return (sizeof(struct lov_user_md_v1) + p_lum->lmm_stripe_count * sizeof(struct lov_user_ost_data_v1));\n        case LOV_USER_MAGIC_V3:\n            return (sizeof(struct lov_user_md_v3) + p_lum->lmm_stripe_count * sizeof(struct lov_user_ost_data_v1));\n    }\n    return 0;\n}\n\n\nint main(int argc, char **argv)\n{\n    int            rc, fd;\n\tconst char *file1;\n\tchar file_new[PATH_MAX];\n    char           lum_buffer[4096];\n    struct lov_user_md *p_lum = ( struct lov_user_md * ) lum_buffer;\n\n\tif (argc < 2)\n\t{\n\t\tfprintf(stderr, \"usage: %s <file>\\n\", argv[0]);\n\t\texit(1);\n\t}\n    file1 = argv[1];\n\n\t/* get stripe from file 1 */\n\n    memset( lum_buffer, 0, sizeof( lum_buffer ) );\n    rc = llapi_file_get_stripe(file1, p_lum);\n    if (rc)\n    {\n        fprintf(stderr, \"llapi_file_get_stripe error: %s\\n\", strerror(-rc));\n        exit(-rc);\n    }\n\n\t/* set stripe to new file 2 (created with no stripe) */\n    sprintf(file_new, \"%s.create_nostripe\", file1);\n    fd = open(file_new, O_CREAT | O_LOV_DELAY_CREATE, 0644);\n    if (fd < 0) {\n        fprintf(stderr, \"open error: %s\", strerror(errno));\n        exit(errno);\n    }\n\n    rc = fsetxattr(fd, \"lustre.lov\", (void*)p_lum, lum_size(p_lum), 0 /* create or replace */);\n    if (rc) {\n        fprintf(stderr, \"fsetxattr error: %s\", strerror(errno));\n        exit(errno);\n    
}\n\n\t/* set stripe to file 3 (striped) */\n}\n"
  },
  {
    "path": "src/tests/test_params.c",
    "content": "#ifdef HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n\n#include \"global_config.h\"\nglobal_config_t global_config;\n\n#include \"rbh_params.h\"\n#include \"rbh_logs.h\"\n#include <stdlib.h>\n#include <string.h>\n#include <errno.h>\n\n/* avoid linking with all robinhood libs */\nlog_config_t log_config = { .debug_level = LVL_DEBUG };\n\nvoid DisplayLogFn(log_level debug_level, const char *tag, const char *format, ...)\n{\n    if (LVL_DEBUG >= debug_level)\n    {\n        va_list args;\n\n        va_start(args, format);\n        vprintf(format, args);\n        va_end(args);\n        printf(\"\\n\");\n    }\n}\n\nint main(int argc, char **argv)\n{\n    /* exclude lists */\n    const char *excl1[] = { \"foo\", NULL };\n    const char *excl2[] = { \"program\", NULL };\n    const char *excl3[] = { \"foo\", \"program\", NULL };\n\n    struct rbh_params p = {0};\n    struct rbh_params e = {0};\n    GString *dump = g_string_new(NULL);\n    int i;\n\n    /* new param, don't override */\n    if (rbh_param_set(&p, \"foo\", \"bar\", false))\n        abort();\n    /* new param, override */\n    if (rbh_param_set(&p, \"toto\", \"tutu\", true))\n        abort();\n    /* override existing */\n    if (rbh_param_set(&p, \"toto\", \"tata\", true))\n        abort();\n    /* new value must be 'tata' */\n    if (strcmp(rbh_param_get(&p, \"toto\"), \"tata\"))\n        abort();\n    /* override should fail */\n    if (!rbh_param_set(&p, \"foo\", \"truc\", false))\n        abort();\n    /* value must be 'bar' */\n    if (strcmp(rbh_param_get(&p, \"foo\"), \"bar\"))\n        abort();\n    /* set other test values */\n    if (rbh_param_set(&p, \"program\", argv[0], false))\n        abort();\n    if (rbh_param_set(&p, \"XYZ\", \"ABC\", false))\n        abort();\n\n    /* dump to CSV */\n    if (rbh_params_serialize(&p, dump, NULL, RBH_PARAM_CSV | RBH_PARAM_COMPACT))\n        abort();\n    printf(\"%s\\n\", dump->str);\n    /* compact: no space expected */\n    if 
(strchr(dump->str, ' '))\n        abort();\n    g_string_assign(dump, \"\");\n\n    /* dump to CSV (non compact) */\n    if (rbh_params_serialize(&p, dump, NULL, RBH_PARAM_CSV))\n        abort();\n    printf(\"%s\\n\", dump->str);\n    /* compact: space expected after comma */\n    if (!strstr(dump->str, \", \"))\n        abort();\n    g_string_assign(dump, \"\");\n\n    /* use exclude lists */\n    if (rbh_list2params(&e, excl1, false))\n        abort();\n    if (rbh_params_serialize(&p, dump, &e, RBH_PARAM_CSV | RBH_PARAM_COMPACT))\n        abort();\n    printf(\"%s\\n\", dump->str);\n    /* foo unexpected */\n    if (strstr(dump->str, \"foo\"))\n        abort();\n    g_string_assign(dump, \"\");\n    rbh_params_free(&e);\n\n    if (rbh_list2params(&e, excl2, false))\n        abort();\n    if (rbh_params_serialize(&p, dump, &e, RBH_PARAM_CSV | RBH_PARAM_COMPACT))\n        abort();\n    printf(\"%s\\n\", dump->str);\n    /* program unexpected */\n    if (strstr(dump->str, \"program\"))\n        abort();\n    g_string_assign(dump, \"\");\n    rbh_params_free(&e);\n\n    if (rbh_list2params(&e, excl3, false))\n        abort();\n    if (rbh_params_serialize(&p, dump, &e, RBH_PARAM_CSV | RBH_PARAM_COMPACT))\n        abort();\n    printf(\"%s\\n\", dump->str);\n    /* foo and program unexpected */\n    if (strstr(dump->str, \"foo\"))\n        abort();\n    if (strstr(dump->str, \"program\"))\n        abort();\n    g_string_assign(dump, \"\");\n    rbh_params_free(&e);\n\n    /* dump to CSV with a comma in values (comma should be escaped) */\n    if (rbh_param_set(&p, \"key\", \"val,ue\", false))\n        abort();\n    if (rbh_params_serialize(&p, dump, NULL, RBH_PARAM_CSV | RBH_PARAM_COMPACT))\n        abort();\n    if (!strstr(dump->str, \"\\\\,\"))\n        abort();\n    g_string_assign(dump, \"\");\n    rbh_params_free(&p);\n\n    /* dump to CSV with a comma in key (comma should be escaped) */\n    if (rbh_param_set(&p, \"ke,y\", \"value\", false))\n        
abort();\n    if (rbh_params_serialize(&p, dump, NULL, RBH_PARAM_CSV | RBH_PARAM_COMPACT))\n        abort();\n    if (!strstr(dump->str, \"\\\\,\"))\n        abort();\n    g_string_assign(dump, \"\");\n    rbh_params_free(&p);\n\n    /* stress tests */\n    for (i = 0; i < 10000; i++) {\n        if (rbh_param_set(&p, \"foo\", \"bar\", true))\n            abort();\n    }\n    rbh_params_free(&p);\n\n    for (i = 0; i < 10000; i++) {\n        char key[100];\n        char val[100];\n\n        sprintf(key, \"key%d\", i);\n        sprintf(val, \"bar%d\", i);\n        if (rbh_param_set(&p, key, val, true))\n            abort();\n        if ((i - 1) % 1000 == 0)\n            printf(\"set %u keys\\n\", i - 1);\n    }\n\n    /* test all values */\n    for (i = 9999; i >= 0; i--) {\n        char key[100];\n        char val[100];\n\n        sprintf(key, \"key%d\", i);\n        sprintf(val, \"bar%d\", i);\n        if (strcmp(rbh_param_get(&p, key), val))\n            abort();\n        if ((9999 - i) % 1000 == 0)\n            printf(\"verified %u keys\\n\", 9999 - i);\n    }\n\n    /* dump to CSV */\n    if (rbh_params_serialize(&p, dump, NULL, RBH_PARAM_CSV | RBH_PARAM_COMPACT))\n        abort();\n    /* truncate output at 1024 char */\n    printf(\"%.*s...\\n\", 1024, dump->str);\n    /* compact: no space expected */\n    if (strchr(dump->str, ' '))\n        abort();\n    g_string_assign(dump, \"\");\n\n    /* dump to CSV (non compact) */\n    if (rbh_params_serialize(&p, dump, NULL, RBH_PARAM_CSV))\n        abort();\n    /* truncate output at 1024 char */\n    printf(\"%.*s...\\n\", 1024, dump->str);\n    /* compact: space expected after comma */\n    if (!strstr(dump->str, \", \"))\n        abort();\n    g_string_assign(dump, \"\");\n    rbh_params_free(&p);\n\n    g_string_free(dump, TRUE);\n    exit(0);\n}\n"
  },
  {
    "path": "src/tests/test_parse.c",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2009,2015 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n#ifdef HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n\n#include \"config_parsing.h\"\n#include <errno.h>\n\nint main(int argc, char **argv)\n{\n    config_file_t  config;\n    char          *file;\n    char          *errtxt;\n\n    if ((argc > 1) && (argv[1]))\n    {\n        file = argv[1];\n    }\n    else\n    {\n        fprintf(stderr, \"Usage %s <config_file>\\n\", argv[0]);\n        exit(EINVAL);\n    }\n\n    /* Example of parsing */\n    config = rh_config_ParseFile(file);\n\n    printf(\"config_pointer = %p\\n\", config);\n\n    if (config == NULL)\n    {\n        errtxt = rh_config_GetErrorMsg();\n        fprintf(stderr, \"Error parsing %s: %s\\n\", argv[1], errtxt);\n        exit(EINVAL);\n    }\n\n    rh_config_Print(stdout, config);\n\n    printf(\"Brownsing configuration:\\n\");\n\n    int            i;\n    char          *val_a;\n    config_item_t  block, item;\n\n    for (i = 0; i < rh_config_GetNbBlocks(config); i++)\n    {\n\n        int            j;\n        char          *name;\n        char          *val;\n        bool           uniq = true;\n\n        /* display item name */\n        block = rh_config_GetBlockByIndex(config, i);\n\n        printf(\"block %s\\n\", rh_config_GetBlockName(block));\n\n        if ((val_a = rh_config_GetKeyValueByName(block, \"fs_path\", &uniq)) != NULL)\n        {\n            printf(\"%s.fs_path is defined and is %s: %s\\n\",\n                   rh_config_GetBlockName(block), uniq?\"unique\":\"not unique\", val_a);\n        }\n        else\n        {\n    
        printf(\"%s.fs_path is not defined\\n\",\n                   rh_config_GetBlockName(block));\n        }\n\n        /* browse block variables */\n        for (j = 0; j < rh_config_GetNbItems(block); j++)\n        {\n            int            extra_args;\n            char         **extra_arg_tab;\n            int            k;\n\n            item = rh_config_GetItemByIndex(block, j);\n\n            if (rh_config_ItemType(item) == CONFIG_ITEM_VAR)\n            {\n                rh_config_GetKeyValue(item, &name, &val, &extra_args);\n                if (extra_args)\n                {\n                    printf(\"> %s.%s = %s (\", rh_config_GetBlockName(block),\n                           name, val);\n                    extra_args = rh_config_GetExtraArgs(item, &extra_arg_tab);\n                    for (k = 0; k < extra_args; k++)\n                    {\n                        if (k == 0)\n                            printf(\"%s\", extra_arg_tab[k]);\n                        else\n                            printf(\", %s\", extra_arg_tab[k]);\n                    }\n                    printf(\")\\n\");\n                }\n                else\n                    printf(\"> %s.%s = %s\\n\", rh_config_GetBlockName(block),\n                           name, val);\n            }\n            else if (rh_config_ItemType(item) == CONFIG_ITEM_BLOCK)\n                printf(\"\\tsub-block = %s\\n\", rh_config_GetBlockName(item));\n            else\n                printf(\"\\tcomplex boolean expression\\n\");\n        }\n        printf(\"\\n\");\n    }\n\n    /* free and reload the file */\n    rh_config_Free(config);\n\n    config = rh_config_ParseFile(file);\n\n    printf(\"config_pointer = %p\\n\", config);\n\n    if (config == NULL)\n    {\n        errtxt = rh_config_GetErrorMsg();\n        fprintf(stderr, \"Error parsing %s twice: %s\\n\", argv[1], errtxt);\n        exit(EINVAL);\n    }\n\n    rh_config_Print(stdout, config);\n    rh_config_Free(config);\n\n 
   exit(0);\n}\n"
  },
  {
    "path": "src/tests/test_parsing.sh",
    "content": "#!/bin/bash\n\ndir=$(dirname $0)\ntstdir=$dir/tst.data\n\nfunction error\n{\n    echo \"ERROR: $*\" >&2\n    exit 1\n}\n\nfunction test_out\n{\n    grep \"$1\" $2 || error \"pattern not found: $1\"\n}\n\n$dir/test_parse $tstdir/bad.conf > /dev/null 2> /dev/null && error \"Parsing should fail\"\n$dir/test_parse $tstdir/ok.conf > /dev/null || error \"Parsing failed\"\n\n# check various parsing features (unicity, env variables, includes...)\nout=/tmp/tst.$$\nTEST_VAL=XYZ FILE_INC2=inc2.inc ./test_parse tst.data/test.conf > $out\n\ntest_out \"block1.fs_path is defined and is unique\" $out\ntest_out \"block2.fs_path is defined and is not unique\" $out \ntest_out \"block3.fs_path is not defined\" $out\ntest_out \"block4.env = XYZ\" $out\ntest_out \"block_inc1.include_var = 42\" $out\ntest_out \"block_inc2.var_env = XYZ\" $out\ntest_out \"block_inc3.subsubfile = yes\" $out\n\nrm -f $out\ntrue\n"
  },
  {
    "path": "src/tests/test_uidgidcache.c",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2009 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n/**\n *\n * \\file    $RCSfile: testuidgidcache.c,v $\n * \\author  $Author: leibovic $\n * \\date    $Date: 2008/02/15 10:37:38 $\n * \\brief   Cache user and groups relatives information.\n *\n * Cache user and groups relative information.\n *\n *\n */\n\n#ifdef HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n\n#include \"uidgidcache.h\"\n#include \"rbh_logs.h\"\n\n#include <stdio.h>\n#include <sys/time.h>\n#include <assert.h>\n#include <string.h>\n\n/* Overwrite getpwuid_r and getgrgid_r as used by the UID/GID cache,\n * so we can feed many more ids than present in the system. This test\n * and the cache only care about the names and the UID/GID, so don't\n * fill the rest of the structures. 
*/\n#define MAX_UID 100000\nint getpwuid_r(uid_t uid, struct passwd *pwd,\n               char *buf, size_t buflen, struct passwd **result)\n{\n    pwd->pw_uid = uid;\n    sprintf(buf, \"%ld\", (long)uid);\n    pwd->pw_name = buf;\n\n    if (uid >= MAX_UID)\n        *result = NULL;\n    else\n        *result = pwd;\n\n    return 0;\n}\n\n#define MAX_GID 100000\nint getgrgid_r(gid_t gid, struct group *grp,\n               char *buf, size_t buflen, struct group **result)\n{\n    grp->gr_gid = gid;\n    sprintf(buf, \"%ld\", (long)gid);\n    grp->gr_name = buf;\n\n    if (gid >= MAX_GID)\n        *result = NULL;\n    else\n        *result = grp;\n\n    return 0;\n}\n\n/* avoid linking with all robinhood libs */\nlog_config_t log_config = { .debug_level = LVL_DEBUG };\n\nvoid DisplayLogFn(log_level debug_level, const char *tag, const char *format, ...)\n{\n    if (LVL_DEBUG >= debug_level)\n    {\n        va_list args;\n\n        va_start(args, format);\n        vprintf(format, args);\n        va_end(args);\n        printf(\"\\n\");\n    }\n}\n\nint main(int argc, char **argv)\n{\n    unsigned int   i;\n    uid_t          u;\n    gid_t          g;\n    unsigned int   c;\n    struct timeval tinit, tcurr, tdiff, tlast = {0};\n    struct timeval tref_u, tref_g = {0};\n    float ratio;\n\n    InitUidGid_Cache();\n\n    printf(\"Reference test of getpwuid (%u items)\\n\", MAX_UID);\n    gettimeofday(&tinit, NULL);\n    for (i = 0; i <= MAX_UID/10; i++)\n        for (u = 0; u < 10; u++)\n            getpwuid(u);\n    gettimeofday(&tcurr, NULL);\n    timersub(&tcurr, &tinit, &tdiff);\n    tref_u = tdiff;\n    printf(\"Elapsed time: %ld.%06ld (%.1f/s)\\n\", tdiff.tv_sec, tdiff.tv_usec,\n           MAX_UID / (tdiff.tv_sec + tdiff.tv_usec/1000000.0));\n\n    printf(\"\\nReference test of getgrgid (%u items)\\n\", MAX_GID);\n    gettimeofday(&tinit, NULL);\n    for (i = 0; i <= MAX_GID/10; i++)\n        for (u = 0; u < 10; u++)\n            getgrgid(u);\n    gettimeofday(&tcurr, 
NULL);\n    timersub(&tcurr, &tinit, &tdiff);\n    tref_g = tdiff;\n    printf(\"Elapsed time: %ld.%06ld (%.1f/s)\\n\", tdiff.tv_sec, tdiff.tv_usec,\n           MAX_GID / (tdiff.tv_sec + tdiff.tv_usec/1000000.0));\n\n    printf(\"\\nTest of password cache\\n\");\n\n    gettimeofday(&tinit, NULL);\n\n    for (i = 0; i <= 10; i++)\n    {\n        c = 0;\n        for (u = 0; u < MAX_UID; u++)\n        {\n            const struct passwd *ppw;\n\n            ppw = GetPwUid(u);\n            if (ppw)\n                c++;\n        }\n        gettimeofday(&tcurr, NULL);\n        timersub(&tcurr, &tinit, &tdiff);\n        if (i == 0)\n            tlast = tdiff;\n        printf(\"loop %u, %u items: %lu.%06lu\\n\", i, c, tdiff.tv_sec, tdiff.tv_usec);\n        if (i == 0)\n        {\n           printf(\"  Insertion rate: %ld.%06ld (%.1f/s)\\n\", tdiff.tv_sec, tdiff.tv_usec,\n                  MAX_UID / (tdiff.tv_sec + tdiff.tv_usec/1000000.0));\n        }\n        else if (i == 1)\n        {\n            printf(\"  Elapsed time: %ld.%06ld (%.1f/s)\\n\", tdiff.tv_sec, tdiff.tv_usec,\n                   MAX_UID / (tdiff.tv_sec + tdiff.tv_usec/1000000.0));\n\n            ratio = (tlast.tv_sec*1000000.0 + tlast.tv_usec)/(tdiff.tv_sec*1000000.0 + tdiff.tv_usec);\n            printf(\"  SPEED-UP (vs insert): x%.2f\\n\", ratio);\n\n            ratio = (tref_u.tv_sec*1000000.0 + tref_u.tv_usec)/(tdiff.tv_sec*1000000.0 + tdiff.tv_usec);\n            printf(\"  SPEED-UP (vs ref): x%.2f\\n\", ratio);\n        }\n        tinit = tcurr;\n    }\n\n    /* Now, check that the values returned are correct */\n    for (u = 0; u < MAX_UID; u++)\n    {\n        const struct passwd *ppw;\n        char buf[50];\n\n        ppw = GetPwUid(u);\n        assert(ppw != NULL);\n        assert(ppw->pw_uid == u);\n\n        sprintf(buf, \"%ld\", (long)u);\n        assert(strcmp(ppw->pw_name, buf) == 0);\n    }\n\n    assert(GetPwUid(MAX_UID) == NULL);\n\n    printf(\"\\nTest of group cache\\n\");\n\n    
gettimeofday(&tinit, NULL);\n\n    for (i = 0; i <= 10; i++)\n    {\n        c = 0;\n        for (g = 0; g < MAX_GID; g++)\n        {\n            const struct group *pgr;\n\n            pgr = GetGrGid(g);\n            if (pgr)\n                c++;\n        }\n        gettimeofday(&tcurr, NULL);\n        timersub(&tcurr, &tinit, &tdiff);\n        if (i == 0)\n            tlast = tdiff;\n        printf(\"loop %u, %u items: %lu.%06lu\\n\", i, c, tdiff.tv_sec, tdiff.tv_usec);\n        if (i == 0)\n        {\n           printf(\"  Insertion rate: %ld.%06ld (%.1f/s)\\n\", tdiff.tv_sec, tdiff.tv_usec,\n                  MAX_GID / (tdiff.tv_sec + tdiff.tv_usec/1000000.0));\n        }\n        else if (i == 1)\n        {\n            /* compute speedup */\n            ratio = (tlast.tv_sec*1000000.0 + tlast.tv_usec)/(tdiff.tv_sec*1000000.0 + tdiff.tv_usec);\n            printf(\"  SPEED-UP (vs insert): x%.2f\\n\", ratio);\n\n            ratio = (tref_g.tv_sec*1000000.0 + tref_g.tv_usec)/(tdiff.tv_sec*1000000.0 + tdiff.tv_usec);\n            printf(\"  SPEED-UP (vs ref): x%.2f\\n\", ratio);\n\n            printf(\"  Elapsed time: %ld.%06ld (%.1f/s)\\n\", tdiff.tv_sec, tdiff.tv_usec,\n                   MAX_GID / (tdiff.tv_sec + tdiff.tv_usec/1000000.0));\n        }\n        tinit = tcurr;\n    }\n\n    /* Now, check that the values returned are correct */\n    for (g = 0; g < MAX_GID; g++)\n    {\n        const struct group *pgr;\n        char buf[50];\n\n        pgr = GetGrGid(g);\n        assert(pgr != NULL);\n        assert(pgr->gr_gid == g);\n\n        sprintf(buf, \"%ld\", (long)g);\n        assert(strcmp(pgr->gr_name, buf) == 0);\n    }\n\n    assert(GetGrGid(MAX_GID) == NULL);\n\n    printf(\"Stats:\\n\");\n    printf(\"  password cache hit=%d, miss=%d\\n\", pw_nb_get, pw_nb_set);\n    printf(\"  group cache hit=%d, miss=%d\\n\", gr_nb_get, gr_nb_set);\n\n    assert(pw_nb_get == 11 * MAX_UID);\n    assert(pw_nb_set == MAX_UID);\n    assert(gr_nb_get == 11 * 
MAX_GID);\n    assert(gr_nb_set == MAX_GID);\n\n    return 0;\n}\n"
  },
  {
    "path": "src/tests/tst.data/bad.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\nGeneral\n{\n\tfs_path =\n"
  },
  {
    "path": "src/tests/tst.data/inc2.inc",
    "content": "block_inc3 {\n    subsubfile = yes;\n}\n"
  },
  {
    "path": "src/tests/tst.data/ok.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\nGeneral\n{\n\tfs_path = /mnt/lustre;\n\tfs_type = lustre;\n}\n\n# ChangeLog Reader configuration\n# Parameters for processing MDT changelogs :\nChangeLog\n{\n    # 1 MDT block for each MDT :\n    MDT\n    {\n        # name of the first MDT\n        mdt_name  = \"MDT0000\" ;\n\n        # id of the persistent changelog reader\n        # as returned by \"lctl changelog_register\" command\n        reader_id = \"cl1\" ;\n    }\n    force_polling = TRUE;\n    polling_interval = 1s;\n    mds_has_lu543 = FALSE;\n    mds_has_lu1331 = FALSE;\n}\n\nLog\n{\n    # Log verbosity level\n    # Possible values are: CRIT, MAJOR, EVENT, VERB, DEBUG, FULL\n    debug_level = EVENT;\n\n    # Log file\n    log_file = stdout;\n\n    # File for reporting purge events\n    report_file = stdout;\n\n    # set alert_file, alert_mail or both depending on the alert method you wish\n    alert_file = \"/tmp/rh_alert.log\";\n\n    stats_interval = 10s;\n}\n\nListManager\n{\n\tMySQL\n\t{\n\t\tserver = \"localhost\";\n\t\tdb = \"robinhood_lustre\";\n        user = \"robinhood\";\n\t\t# password or password_file are mandatory\n\t\tpassword = \"robinhood\";\n        engine = innodb;\n\t}\n\n\tSQLite {\n\t        db_file = \"/tmp/robinhood_sqlite_db\" ;\n        \tretry_delay_microsec = 1000 ;\n\t}\n}\n\n# for tests with backup purpose\nbackend\n{\n    root = \"/tmp/backend\";\n    mnt_type=ext4;\n    check_mounted = FALSE;\n}\n\nFS_Scan\n{\n    # number of threads used for scanning the filesystem\n    nb_threads_scan        =     16 ;\n    Ignore { path == \"/mnt/lustre/.lustre\" }\n}\n\nEntryProcessor\n{\n    nb_threads = 16;\n#    max_batch_size = 1;\n#    STAGE_DB_APPLY_threads_max = 8;\n}\n\nFilesets {\n    Fileclass dir2 {\n        definition { tree == \"**/dir.2\" }\n    }\n    Fileclass dir3 {\n        definition { tree == \"**/dir.3\" }\n        lhsm_archive_action_params { 
cos = 1; }\n        lhsm_release_action_params { foo = 1; }\n        lhsm_archive_action_params { foo = 1; }\n    }\n}\n\ndefine_policy lhsm_archive\n{\n    status_manager = lhsm(archive);\n    scope { type == file and (status == new or status == modified) }\n    default_action = lhsm.archive;\n    default_lru_sort_attr = last_mod;\n\n#    default_action_params {\n#        foo = bar;\n#    }\n}\n\nlhsm_archive_parameters\n{\n    nb_threads = 2;\n    max_action_count = 100;\n\n    action = cmd(\"lfs hsm_archive {fullpath}\");\n\n    action_params {\n        foo = bar;\n        derive = \"{foo}\";\n    }\n}\n\nlhsm_archive_trigger\n{\n    trigger_on = periodic;\n    check_interval = 30s;\n\n    action_params {\n        foo = bar;\n    }\n}\n\nlhsm_archive_rules\n{\n    ignore { type != file }\n    ignore_fileclass = dir2;\n    rule arch_dir3 {\n        target_fileclass = dir3;\n        condition { last_mod > 10 }\n        action = cmd(\"lfs hsm_archive /mnt/lustre/.lustre/fid/{fid}\");\n        action_params {\n            cos = 42;\n            xyz = \"{fileclass} {rule}\";\n            # These values are not sent\n            lsp_pool = \"\";\n            lsp_stripe_size = 0;\n            lsp_stripe_offset = -1;\n            lsp_stripe_pattern = 0;\n            lsp_stripe_count = 2;\n            lsp_osts = [];\n\n            # But these are\n            mdt_index = -1;\n            lsp = '{\"pool\":\"{lsp_pool}\",\"stripe_size\":{lsp_stripe_size},\"stripe_offset\":{lsp_stripe_offset},\"stripe_pattern\":{lsp_stripe_pattern},\"stripe_count\":{lsp_stripe_count},\"osts\":{lsp_osts}}'; \n        }\n    }\n}\n\ndefine_policy lhsm_release\n{\n    status_manager = lhsm(release);\n    scope { type == file and status == synchro }\n    default_action = lhsm.release;\n    default_lru_sort_attr = last_access;\n}\n\nlhsm_release_parameters\n{\n    nb_threads = 2;\n}\n\nlhsm_release_trigger\n{\n    trigger_on = ost_usage;\n    check_interval = 30s;\n\n    high_threshold_pct = 
10%;\n    low_threshold_pct = 09%;\n    \n}\n\nlhsm_release_rules\n{\n    ignore { type != file or size == 0}\n\n    rule arch_dir3 {\n        target_fileclass = dir3;\n        condition { last_access > 10 }\n    }\n}\n\ndefine_policy lhsm_remove\n{\n    status_manager = lhsm(removed);\n    scope { type == file }\n    default_action = lhsm.hsm_remove;\n    default_lru_sort_attr = rm_time;\n}\n\ndefine_policy check\n {\n    status_manager = basic;\n    scope {type == file and status == \"\"}\n    default_lru_sort_attr = last_access; #TODO random;\n    default_action = cmd(\"/usr/bin/md5sum {path}\");\n}\ncheck_parameters {\n    nb_threads = 2;\n}\ncheck_trigger {\n    trigger_on = periodic;\n    check_interval = 10min;\n}\ncheck_rules {\n    rule default {\n        condition {last_access > 30s}\n    }\n}\n\ndefine_policy check2 {\n    status_manager = basic; # first basic\n    scope { type == file and status != \"ok\"}\n    default_lru_sort_attr = last_access; #random;\n    default_action = cmd(\"/usr/bin/file {path}\");\n}\ncheck2_parameters {\n    nb_threads = 2;\n}\ncheck2_trigger {\n    trigger_on = periodic;\n    check_interval = 10min;\n}\ncheck2_rules {\n    rule default {\n        condition {last_access > 30s}\n    }\n}\n\ndefine_policy cleanup {\n    status_manager = none;\n    scope { type == file }\n    default_action = common.unlink;\n    default_lru_sort_attr = last_access;\n}\ncleanup_trigger\n{\n    trigger_on = user_usage;\n    check_interval = 6h;\n    # clean when user usage > 1PB;\n    high_threshold_vol = 1PB;\n    low_threshold_vol  = 950TB;\n}\ncleanup_rules {\n    rule default {\n        condition { last_access > 1d }\n    }\n}\n\ndefine_policy backup {\n    status_manager = backup(archive);\n    scope { type == file and (status == new or status == modified) }\n    default_action = common.copy;\n    default_lru_sort_attr = creation;\n}\n\nbackup_parameters\n{\n    nb_threads = 2;\n    max_action_count = 100;\n}\n\nbackup_trigger\n{\n    
trigger_on = periodic;\n    check_interval = 30s;\n}\n\nbackup_rules\n{\n    ignore { type != file }\n    ignore_fileclass = dir2;\n    rule arch_dir3 {\n        target_fileclass = dir3;\n        condition { last_mod > 10 }\n    }\n}\n\nbackup_config {\n    root = /tmp/backend2;\n    check_mounted = no;\n}\n"
  },
  {
    "path": "src/tests/tst.data/test.conf",
    "content": "block1 {\n    # test unique value\n    fs_path = /test_dir;\n}\n\nblock2 {\n    # test duplicate value\n    fs_path = /test_dir1;\n    fs_path = /test_dir2;\n}\n\n# test absent value\nblock3 {\n    foo = bar;\n}\n\nblock4 {\n    # env var resolution\n    env = $TEST_VAL;\n}\n\n%include \"test.inc\"\n"
  },
  {
    "path": "src/tests/tst.data/test.inc",
    "content": "\nblock_inc1 {\n    # variable in include\n    include_var = 42;\n}\n\n# include with an environment variable (value: \"inc2.inc\")\n%include \"$FILE_INC2\"\n\nblock_inc2 {\n    # env var in include\n    var_env = $TEST_VAL;\n}\n"
  },
  {
    "path": "src/tools/Makefile.am",
    "content": "AM_CFLAGS= $(CC_OPT) $(DB_CFLAGS) $(PURPOSE_CFLAGS)\nAM_LDFLAGS= -lpthread\n\nDATE=`date '+%F %T'`\nMISC_FLAGS=\"-DCOMPIL_DATE=\\\"$(DATE)\\\"\"\n\nall_libs=   ../cfg_parsing/librbhcfg.la         \\\n            ../fs_scan/libfsscan.la             \\\n            ../entry_processor/libentryproc.la  \\\n            ../policies/libpolicies.la\n\nif CHANGELOGS\nall_libs += ../chglog_reader/libchglog_rd.la\nendif\n\nall_libs += ../robinhood/librbhhelpers.la ../list_mgr/liblistmgr.la \\\n            ../common/libcommontools.la ../cfg_parsing/libconfigparsing.la\n\nsbin_PROGRAMS=\n\n#Lustre 2.x only\nif LUSTRE\nif USER_LOVEA\nsbin_PROGRAMS+=read_lovea set_lovea gen_lov_objid ost_fids_remap\n\nset_lovea_CFLAGS=$(AM_CFLAGS) $(FS_CFLAGS) $(MISC_FLAGS)\nread_lovea_CFLAGS=$(AM_CFLAGS) $(FS_CFLAGS) $(MISC_FLAGS)\n\ngen_lov_objid_DEPENDENCIES=$(all_libs)\ngen_lov_objid_CFLAGS=-static $(AM_CFLAGS) $(FS_CFLAGS) $(MISC_FLAGS)\ngen_lov_objid_LDFLAGS=-static\ngen_lov_objid_LDADD=$(all_libs) $(DB_LDFLAGS) $(FS_LDFLAGS) $(PURPOSE_LDFLAGS)\n\nost_fids_remap_CFLAGS=$(AM_CFLAGS) $(FS_CFLAGS) $(MISC_FLAGS)\nost_fids_remap_LDADD=../common/basename.o\nendif\n\nif LUSTRE_HSM\nsbin_PROGRAMS+=lhsmtool_cmd\nlhsmtool_cmd_CFLAGS=$(AM_CFLAGS) $(FS_CFLAGS) $(MISC_FLAGS)\nlhsmtool_cmd_LDFLAGS=-lrt\nlhsmtool_cmd_LDADD=$(FS_LDFLAGS)\nendif\n\nendif\n"
  },
  {
    "path": "src/tools/gen_lov_objid.c",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2009, 2010 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n/**\n * Command for retrieving stats about filesystem.\n */\n\n#define TAG \"gen_lov_objid\"\n\n#ifdef HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n\n#include \"rbh_cfg.h\"\n#include \"rbh_logs.h\"\n#include \"rbh_misc.h\"\n#include \"rbh_basename.h\"\n#include <unistd.h>\n#include <getopt.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <errno.h>\n#include \"../robinhood/cmd_helpers.h\"\n#include \"../list_mgr/database.h\"\n\n#define OPT_STRING    \"l:f:o:m:\"\n\nstatic const char *help_string =\n    _B \"Usage:\" B_ \" %s [-f <cfg_file>][-l <lvl_debug>][-o <output_file>][-m <margin>]\\n\"\n    \"\\n\"\n    \"Generate a lov_objid file for MDT according to max stripe object indexes.\\n\"\n    \"Increment each index by the safety \"_U\"margin\"U_\".\\n\";\n\nstatic inline void display_help(const char *bin_name)\n{\n    printf(help_string, bin_name);\n}\n\n#define MAX_OPT_LEN 1024\n\n/**\n * Main daemon routine\n */\nint main( int argc, char **argv )\n{\n    int            c = 0;\n    const char    *bin;\n    int            rc;\n    char           err_msg[4096];\n    bool           chgd = false;\n\n    /* options */\n    char           config_file[MAX_OPT_LEN] = \"\";\n    char           badcfg[RBH_PATH_MAX];\n    bool           force_log_level = false;\n    int            log_level = 0;\n    int            margin = 0;\n    char           output_file[MAX_OPT_LEN] = \"/tmp/lov_objid\";\n\n    lmgr_t         lmgr;\n    FILE         * out;\n\n    bin = rh_basename(argv[0]);\n\n    /* parse command line 
options */\n    while ((c = getopt(argc, argv, OPT_STRING)) != -1)\n    {\n        switch (c)\n        {\n            case 'l':\n                force_log_level = true;\n                log_level = str2debuglevel(optarg);\n                if (log_level == -1)\n                {\n                    fprintf( stderr,\n                             \"Unsupported log level '%s'. CRIT, MAJOR, EVENT, VERB, DEBUG or FULL expected.\\n\",\n                             optarg );\n                    exit(1);\n                }\n                break;\n            case 'f':\n                rh_strncpy(config_file, optarg, MAX_OPT_LEN);\n                break;\n            case 'o':\n                rh_strncpy(output_file, optarg, MAX_OPT_LEN);\n                break;\n            case 'm':\n                margin = str2int(optarg);\n                if (margin < 0)\n                {\n                    fprintf( stderr,\n                             \"Invalid parameter '%s' for '-m' option: positive integer expected\\n\",\n                             optarg );\n                    exit(1);\n                }\n                break;\n            case ':':\n            case '?':\n            default:\n                display_help(bin);\n                exit( 1 );\n                break;\n        }\n    }\n\n    /* get default config file, if not specified */\n    if (SearchConfig(config_file, config_file, &chgd, badcfg, MAX_OPT_LEN) != 0)\n    {\n        fprintf(stderr, \"No config file (or too many) found matching %s\\n\", badcfg);\n        exit(2);\n    }\n    else if (chgd)\n    {\n        fprintf(stderr, \"Using config file '%s'.\\n\", config_file );\n    }\n\n    /* initialize internal resources (glib, llapi, internal resources...) 
*/\n    rc = rbh_init_internals();\n    if (rc != 0)\n        exit(rc);\n\n    /* load and set modules configuration */\n    if(rbh_cfg_load(0, config_file, err_msg))\n    {\n        fprintf(stderr, \"Error reading configuration file '%s': %s\\n\",\n                config_file, err_msg);\n        exit(1);\n    }\n\n    if (force_log_level)\n        log_config.debug_level = log_level;\n    else\n        log_config.debug_level = LVL_MAJOR; /* no event message */\n\n    /* set logging to stderr for this tool */\n    strcpy(log_config.log_file, \"stderr\");\n    strcpy(log_config.report_file, \"stderr\");\n    strcpy(log_config.alert_file, \"stderr\");\n\n    /* Initialize logging */\n    rc = InitializeLogs(bin);\n    if (rc)\n    {\n        fprintf(stderr, \"Error opening log files: rc=%d, errno=%d: %s\\n\",\n                 rc, errno, strerror(errno));\n        exit(rc);\n    }\n\n    /* Initialize list manager */\n    rc = ListMgr_Init(true);\n    if ( rc )\n    {\n        DisplayLog( LVL_CRIT, TAG, \"Error %d initializing list manager\", rc );\n        exit( rc );\n    }\n    else\n        DisplayLog( LVL_DEBUG, TAG, \"ListManager successfully initialized\" );\n\n    if ( CheckLastFS(  ) != 0 )\n        exit( 1 );\n\n    /* Create database access */\n    rc = ListMgr_InitAccess( &lmgr );\n    if ( rc )\n    {\n        DisplayLog( LVL_CRIT, TAG, \"Error %d: cannot connect to database\", rc );\n        exit( rc );\n    }\n\n    out = fopen(output_file, \"w\");\n    if (!out)\n    {\n        DisplayLog(LVL_CRIT, TAG, \"Failed to open '%s' for writing: %s\", output_file,\n                   strerror(errno));\n        return errno;\n    }\n\n    /* direct SQL request to retrieve the max object index from DB */\n    result_handle_t res;\n    /* FIXME max on the low weight 32bits of the 'objid' 64bits value */\n    rc = db_exec_sql(&lmgr.conn, \"SELECT ostidx, max(hex(cast(reverse(cast(details as binary(8))) as binary(4)))) \"\n                     \"FROM 
\"STRIPE_ITEMS_TABLE\" GROUP BY ostidx ORDER BY ostidx\", &res);\n    if (rc)\n        goto db_error;\n\n    int index = -1;\n    do\n    {\n        char *resstr[2];\n        unsigned int ostidx;\n        unsigned int objid;\n        unsigned long long objid_long;\n        resstr[0] = resstr[1] = NULL;\n\n        rc = db_next_record( &lmgr.conn, &res, resstr, 2 );\n        if (rc == DB_END_OF_LIST)\n            break;\n        else if (rc != DB_SUCCESS)\n            goto db_error;\n\n        index ++;\n\n        if (resstr[0] == NULL || resstr[1] == NULL)\n        {\n            DisplayLog(LVL_MAJOR, TAG, \"ERROR: got NULL record from DB at index %u\", index);\n            rc = EINVAL;\n            goto out;\n        }\n\n        /* resstr[0] is ost_idx */\n        if (sscanf(resstr[0], \"%u\", &ostidx) != 1)\n        {\n            DisplayLog(LVL_MAJOR, TAG, \"ERROR: cannot parse OST index '%s' at index %u\", resstr[0], index);\n            rc = EINVAL;\n            goto out;\n        }\n        else if (ostidx != index)\n        {\n            DisplayLog(LVL_MAJOR, TAG, \"Warning: OST index %u not found in database, assuming current objid=1\",\n                       index);\n            objid_long = 1 + margin;\n            printf(\"ostidx=%u, max objid=%016LX\\n\", ostidx, objid_long);\n            fwrite(&objid_long, sizeof(objid_long), 1, out);\n            continue;\n        }\n\n        /* resstr[1] is objid (hexa) */\n        if (sscanf(resstr[1], \"%X\", &objid) != 1)\n        {\n            DisplayLog(LVL_MAJOR, TAG, \"ERROR: cannot parse objid '%s' at index %u\", resstr[1], index);\n            rc = EINVAL;\n            goto out;\n        }\n\n        objid_long = objid + margin;\n        printf(\"ostidx=%u, objid=%016LX\\n\", ostidx, objid_long);\n        fwrite(&objid_long, sizeof(objid_long), 1, out);\n\n    } while(rc == 0);\n\n    fclose(out);\n    ListMgr_CloseAccess( &lmgr );\n    return 0;\n\ndb_error:\n    DisplayLog( LVL_CRIT, TAG, \"Database 
error %d\\n\", rc);\nout:\n    ListMgr_CloseAccess( &lmgr );\n    return rc;\n}\n"
  },
  {
    "path": "src/tools/lhsmtool_cmd.c",
    "content": "/*\n * GPL HEADER START\n *\n * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the GNU General Public License version 2 only,\n * as published by the Free Software Foundation.\n *\n * This program is distributed in the hope that it will be useful, but\n * WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * General Public License version 2 for more details (a copy is included\n * in the LICENSE file that accompanied this code).\n *\n * You should have received a copy of the GNU General Public License\n * version 2 along with this program; If not, see\n * http://www.gnu.org/licenses/gpl-2.0.htm\n *\n * GPL HEADER END\n */\n/*\n * (C) Copyright 2016 Commissariat a l'Energie Atomique et aux Energies\n *     Alternatives\n */\n\n/*\n * HSM copytool program for user-defined external commands.\n * Receives orders from coordinator and execute subprocesses accordingly.\n * Pass lustre file descriptor and fid as command arguments.\n *\n * Example configuration file:\n * #\n * # Each command should include the following variables related to the file\n * # to archive/restore:\n * # - {fd} will be the file descriptor number (seekable)\n * # - {fid} will be the Lustre FID\n * #\n * # Note that for restore, the file descriptor is a volatile file and thus\n * # is NOT set to the original file size.\n * #\n * # For a very basic posix copytool:\n * [commands]\n * archive = dd if=/proc/self/{fd} of=/tmp/arch/{fid}\n * restore = dd if=/tmp/arch/{fid} of=/proc/self/{fd}\n *\n */\n\n#ifdef HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n\n#ifndef _GNU_SOURCE\n#define _GNU_SOURCE\n#endif\n#include <ctype.h>\n#include <stdio.h>\n#include <errno.h>\n#include <string.h>\n#include <stdint.h>\n#include <stdlib.h>\n#include <dirent.h>\n#include <assert.h>\n#include 
<unistd.h>\n#include <getopt.h>\n#include <stddef.h>\n#include <stdbool.h>\n#include <pthread.h>\n#include <time.h>\n#include <fcntl.h>\n#include <sys/time.h>\n#include <sys/xattr.h>\n#include <sys/syscall.h>\n#include <sys/types.h>\n#include <lustre/lustreapi.h>\n\n#include <glib.h>\n\n\n/* Progress reporting period */\n#define REPORT_INTERVAL_DEFAULT 30\n\n/* Default configuration file path */\n#define CONFIG_FILE_DEFAULT\t\"/etc/lhsm_cmd.conf\"\n\n/* .ini group label under which to define the commands format strings */\n#define CFG_GROUP_COMMANDS\t\"commands\"\n\n/* Default max number of commands to execute in parallel */\n#define FANOUT_DEFAULT\t\t8\n\n/* GLib spawn flags to execute subprocesses */\n#define CMD_EXEC_FLAGS\t(G_SPAWN_SEARCH_PATH |\t\t\\\n\t\t\t G_SPAWN_DO_NOT_REAP_CHILD |\t\\\n\t\t\t G_SPAWN_LEAVE_DESCRIPTORS_OPEN)\n\n#ifndef LL_HSM_MAX_ARCHIVE\n#define LL_HSM_MAX_ARCHIVE (sizeof(uint32_t) * 8)\n#endif\n\n/* long long presentation macro used to represent FIDs */\n#ifndef LPX64\n#define LPX64 \"%#llx\"\n#endif\n\n\n/** Move HAIs along with a copy of the HAL flags */\nstruct hai_desc {\n\tunsigned long\t\t hd_flags;\n\tsize_t\t\t\t hd_datalen;\n\tchar\t\t\t hd_data[0];\n};\n\nstruct options {\n\tint\t\t\t o_daemonize;\n\tint\t\t\t o_dry_run;\n\tint\t\t\t o_abort_on_error;\n\tint\t\t\t o_verbose;\n\tint\t\t\t o_fanout;\n\tint\t\t\t o_report_int;\n\tint\t\t\t o_archive_cnt;\n\tint\t\t\t o_archive_id[LL_HSM_MAX_ARCHIVE];\n\tunsigned long long\t o_bandwidth;\n\tsize_t\t\t\t o_chunk_size;\n\tchar\t\t\t*o_config;\n\tchar\t\t\t*o_event_fifo;\n\tchar\t\t\t*o_mnt;\n\tint\t\t\t o_mnt_fd;\n};\n\n/* Everything else is zeroed */\nstatic struct options opt = {\n\t.o_verbose \t= LLAPI_MSG_INFO,\n\t.o_fanout\t= FANOUT_DEFAULT,\n\t.o_report_int\t= REPORT_INTERVAL_DEFAULT,\n\t.o_config\t= CONFIG_FILE_DEFAULT,\n};\n\n/** Commands to execute on incoming HSM action orders */\nstatic char *ct_commands[] = {\n\t[HSMA_ARCHIVE]\t= NULL,\n\t[HSMA_RESTORE]\t= 
NULL,\n\t[HSMA_CANCEL]\t= NULL,\n\t[HSMA_REMOVE]\t= NULL,\n};\n\n\nstatic int err_major;\n\nstatic char cmd_name[PATH_MAX];\nstatic char fs_name[MAX_OBD_NAME + 1];\n\nstatic struct hsm_copytool_private *ctdata;\n\nstatic GAsyncQueue\t*mqueue;\nstatic bool\t\t stop;\nstatic GRegex\t\t*fd_regex;\nstatic GRegex\t\t*fid_regex;\nstatic GRegex\t\t*ctdata_regex;\n\n\nstatic inline double ct_now(void)\n{\n\tstruct timeval tv;\n\n\tgettimeofday(&tv, NULL);\n\treturn tv.tv_sec + 0.000001 * tv.tv_usec;\n}\n\nstatic inline pid_t sys_gettid(void)\n{\n\treturn syscall(SYS_gettid);\n}\n\n#define LOG_ERROR(_rc, _format, ...)\t\t\t\t\t\\\n\tllapi_error(LLAPI_MSG_ERROR, _rc,\t\t\t\t\\\n\t\t    \"%f %s[%d]: \"_format,\t\t\t\t\\\n\t\t    ct_now(), cmd_name, sys_gettid(), ## __VA_ARGS__)\n\n#define LOG_DEBUG(_format, ...)\t\t\t\t\t\t\\\n\tllapi_error(LLAPI_MSG_DEBUG | LLAPI_MSG_NO_ERRNO, 0,\t\t\\\n\t\t    \"%f %s[%d]: \"_format,\t\t\t\t\\\n\t\t    ct_now(), cmd_name, sys_gettid(), ## __VA_ARGS__)\n\nstatic void usage(const char *name, int rc)\n{\n\tfprintf(stdout,\n\t\" Usage: %s [options] <lustre_mount_point>\\n\"\n\t\"   --daemon\t\t  Daemon mode, run in background\\n\"\n\t\"   --abort-on-error\t  Abort operation on major error\\n\"\n\t\"   -A, --archive <#>\t  Archive number (repeatable)\\n\"\n\t\"   --dry-run\t\t  Don't run, just show what would be done\\n\"\n\t\"   -f, --event-fifo <path>   Write events stream to fifo\\n\"\n\t\"   -F, --fanout <n>\t  Max parallel commands (number of threads)\\n\"\n\t\"   -q, --quiet\t\t  Produce less verbose output\\n\"\n\t\"   -u, --update-interval <s> Interval between progress reports sent\\n\"\n\t\"\t\t\t     to Coordinator\\n\"\n\t\"   -v, --verbose\t  Produce more verbose output\\n\", cmd_name);\n\texit(rc);\n}\n\nstatic int ct_parseopts(int argc, char * const *argv)\n{\n\tstruct option long_opts[] = {\n\t\t{\"abort-on-error\", no_argument,\t      &opt.o_abort_on_error, 1},\n\t\t{\"abort_on_error\", no_argument,\t      &opt.o_abort_on_error, 
1},\n\t\t{\"archive\",\t   required_argument, NULL,\t\t   'A'},\n\t\t{\"daemon\",\t   no_argument,\t      &opt.o_daemonize,\t     1},\n\t\t{\"config\",\t   required_argument, NULL,\t\t   'c'},\n\t\t{\"event-fifo\",\t   required_argument, NULL,\t\t   'f'},\n\t\t{\"event_fifo\",\t   required_argument, NULL,\t\t   'f'},\n\t\t{\"dry-run\",\t   no_argument,\t      &opt.o_dry_run,\t     1},\n\t\t{\"fanout\",\t   required_argument, NULL,\t\t   'F'},\n\t\t{\"help\",\t   no_argument,\t      NULL,\t\t   'h'},\n\t\t{\"quiet\",\t   no_argument,\t      NULL,\t\t   'q'},\n\t\t{\"update-interval\", required_argument,\tNULL,\t\t   'u'},\n\t\t{\"update_interval\", required_argument,\tNULL,\t\t   'u'},\n\t\t{\"verbose\",\t   no_argument,\t      NULL,\t\t   'v'},\n\t\t{0, 0, 0, 0}\n\t};\n\tint\t\t\t c;\n\tint\t\t\t rc;\n\n\toptind = 0;\n\twhile ((c = getopt_long(argc, argv, \"A:c:f:hqu:v\",\n\t\t\t\tlong_opts, NULL)) != -1) {\n\t\tswitch (c) {\n\t\tcase 'A':\n\t\t\tif ((opt.o_archive_cnt >= LL_HSM_MAX_ARCHIVE) ||\n\t\t\t    (atoi(optarg) >= LL_HSM_MAX_ARCHIVE)) {\n\t\t\t\trc = -E2BIG;\n\t\t\t\tLOG_ERROR(rc, \"archive number must be less\"\n\t\t\t\t\t  \"than %zu\", LL_HSM_MAX_ARCHIVE);\n\t\t\t\treturn rc;\n\t\t\t}\n\t\t\topt.o_archive_id[opt.o_archive_cnt] = atoi(optarg);\n\t\t\topt.o_archive_cnt++;\n\t\t\tbreak;\n\t\tcase 'c':\n\t\t\topt.o_config = optarg;\n\t\t\tbreak;\n\t\tcase 'f':\n\t\t\topt.o_event_fifo = optarg;\n\t\t\tbreak;\n\t\tcase 'F':\n\t\t\topt.o_fanout = atoi(optarg);\n\t\t\tif (opt.o_fanout < 1) {\n\t\t\t\trc = -EINVAL;\n\t\t\t\tLOG_ERROR(rc, \"bad value for -%c '%s'\", c,\n\t\t\t\t\t  optarg);\n\t\t\t\treturn rc;\n\t\t\t}\n\t\t\tbreak;\n\t\tcase 'h':\n\t\t\tusage(argv[0], 0);\n\t\tcase 'q':\n\t\t\topt.o_verbose--;\n\t\t\tbreak;\n\t\tcase 'u':\n\t\t\topt.o_report_int = atoi(optarg);\n\t\t\tif (opt.o_report_int < 0) {\n\t\t\t\trc = -EINVAL;\n\t\t\t\tLOG_ERROR(rc, \"bad value for -%c '%s'\", c,\n\t\t\t\t\t  optarg);\n\t\t\t\treturn rc;\n\t\t\t}\n\t\t\tbreak;\n\t\tcase 
'v':\n\t\t\topt.o_verbose++;\n\t\t\tbreak;\n\t\tcase 0:\n\t\t\tbreak;\n\t\tdefault:\n\t\t\treturn -EINVAL;\n\t\t}\n\t}\n\n\tif (argc != optind + 1) {\n\t\trc = -EINVAL;\n\t\tLOG_ERROR(rc, \"no mount point specified\");\n\t\treturn rc;\n\t}\n\n\topt.o_mnt = argv[optind];\n\topt.o_mnt_fd = -1;\n\n\treturn 0;\n}\n\nstatic int ct_path_lustre(char *buf, int sz, const char *mnt,\n\t\t\t  const lustre_fid *fid)\n{\n\treturn snprintf(buf, sz, \"%s/%s/fid/\"DFID_NOBRACE, mnt,\n\t\t\tdot_lustre_name, PFID(fid));\n}\n\nstatic int ct_begin_restore(struct hsm_copyaction_private **phcp,\n\t\t\t    const struct hsm_action_item *hai,\n\t\t\t    int mdt_index, int open_flags)\n{\n\tchar\t src[PATH_MAX];\n\tint\t rc;\n\n\trc = llapi_hsm_action_begin(phcp, ctdata, hai, mdt_index, open_flags,\n\t\t\t\t    false);\n\tif (rc < 0) {\n\t\tct_path_lustre(src, sizeof(src), opt.o_mnt, &hai->hai_fid);\n\t\tLOG_ERROR(rc, \"llapi_hsm_action_begin() on '%s' failed\", src);\n\t}\n\n\treturn rc;\n}\n\nstatic int ct_begin(struct hsm_copyaction_private **phcp,\n\t\t    const struct hsm_action_item *hai)\n{\n\t/* Restore takes specific parameters. Call the same function w/ default\n\t * values for all other operations. 
*/\n\treturn ct_begin_restore(phcp, hai, -1, 0);\n}\n\nstatic int ct_fini(struct hsm_copyaction_private **phcp,\n\t\t   const struct hsm_action_item *hai, int hp_flags, int ct_rc)\n{\n\tstruct hsm_copyaction_private\t*hcp;\n\tchar\t\t\t\t lstr[PATH_MAX];\n\tint\t\t\t\t rc;\n\n\tLOG_DEBUG(\"Action completed, notifying coordinator \"\n\t\t  \"cookie=\"LPX64\", FID=\"DFID\", hp_flags=%d err=%d\",\n\t\t  hai->hai_cookie, PFID(&hai->hai_fid), hp_flags, -ct_rc);\n\n\tct_path_lustre(lstr, sizeof(lstr), opt.o_mnt, &hai->hai_fid);\n\n\tif (phcp == NULL || *phcp == NULL) {\n\t\trc = llapi_hsm_action_begin(&hcp, ctdata, hai, -1, 0, true);\n\t\tif (rc < 0) {\n\t\t\tLOG_ERROR(rc, \"llapi_hsm_action_begin() on '%s' failed\",\n\t\t\t\t  lstr);\n\t\t\treturn rc;\n\t\t}\n\t\tphcp = &hcp;\n\t}\n\n\trc = llapi_hsm_action_end(phcp, &hai->hai_extent, hp_flags, abs(ct_rc));\n\tif (rc == -ECANCELED)\n\t\tLOG_ERROR(rc, \"completed action on '%s' has been canceled: \"\n\t\t\t  \"cookie=\"LPX64\", FID=\"DFID, lstr, hai->hai_cookie,\n\t\t\t PFID(&hai->hai_fid));\n\telse if (rc < 0)\n\t\tLOG_ERROR(rc, \"llapi_hsm_action_end on '%s' failed\", lstr);\n\telse\n\t\tLOG_DEBUG(\"llapi_hsm_action_end on '%s' ok (rc=%d)\", lstr, rc);\n\n\treturn rc;\n}\n\nstatic bool hai_data_expandable(const struct hsm_action_item *hai)\n{\n\tsize_t\tdatalen = hai->hai_len - sizeof(*hai);\n\tint\ti;\n\n\tfor (i = 0; i < datalen; i++)\n\t\tif (!isprint(hai->hai_data[i]))\n\t\t\treturn false;\n\n\treturn true;\n}\n\nstatic int ct_build_cmd(const enum hsm_copytool_action hsma, gchar **cmd,\n\t\t\tconst struct hsm_action_item *hai, int fd)\n{\n\tconst char\t*cmd_format = ct_commands[hsma];\n\tgchar\t\t*res_cmd_fd = NULL;\n\tgchar\t\t*res_cmd_fid = NULL;\n\tchar\t\t tmpstr[128];\n\tGError          *err = NULL;\n\tint              rc = 0;\n\n\tif (cmd_format == NULL)\n\t\treturn -ENOSYS;\n\n\t/* replace all {fd} placeholders by fd number */\n\tsnprintf(tmpstr, sizeof(tmpstr), \"%d\", fd);\n\tres_cmd_fd = 
g_regex_replace_literal(fd_regex, cmd_format, -1, 0,\n\t\t\t\t\t     tmpstr, 0, &err);\n\tif (err != NULL) {\n\t    rc = -EINVAL;\n\t    LOG_ERROR(rc, \"Cannot apply FD regex: %s\", err->message);\n\t    goto out_err;\n        }\n\n\t/* replace all {fid} placeholders by lustre fid */\n\tsnprintf(tmpstr, sizeof(tmpstr), DFID, PFID(&hai->hai_dfid));\n\n\tres_cmd_fid = g_regex_replace_literal(fid_regex, res_cmd_fd, -1, 0,\n\t\t\t\t\t      tmpstr, 0, &err);\n\tif (err != NULL) {\n\t    rc = -EINVAL;\n\t    LOG_ERROR(rc, \"Cannot apply FID regex: %s\", err->message);\n\t    goto out_err;\n        }\n\n\t/* replace all {ctdata} placeholders by received data blob */\n\tif (hai_data_expandable(hai))\n\t\t*cmd = g_regex_replace_literal(ctdata_regex, res_cmd_fid, -1, 0,\n\t\t\t\t\t       hai->hai_data, 0, &err);\n\telse\n\t\t*cmd = g_regex_replace_literal(ctdata_regex, res_cmd_fid, -1, 0,\n\t\t\t\t\t       \"\", 0, &err);\n\tif (err != NULL) {\n\t    rc = -EINVAL;\n\t    LOG_ERROR(rc, \"Cannot apply data regex: %s\", err->message);\n\t    goto out_err;\n        }\n\nout_err:\n        if (err != NULL)\n            g_error_free(err);\n\n\tg_free(res_cmd_fid);\n\tg_free(res_cmd_fd);\n\treturn rc;\n}\n\n\nstruct cmd_cb_args {\n\tstruct hsm_copyaction_private\t*hcp;\n\tconst struct hsm_action_item\t*hai;\n\tstruct hsm_extent\t\t he;\n\tint\t\t\t\t fd;\n\toff_t\t\t\t\t last_pos;\n\tint\t\t\t\t retcode;\n\tGMainLoop\t\t\t*loop;\n};\n\n/**\n * Report progress to the coordinator.\n * Sneak into fd, shared with child cmd, to get current position.\n * We only report progress bytes since last report (relative value).\n */\nstatic gboolean cmd_progress_timer_cb(gpointer ud)\n{\n\tstruct cmd_cb_args\t*args = ud;\n\tstruct hsm_extent\t*phe = &args->he;\n\toff_t\t\t\t pos;\n\tint\t\t\t rc;\n\n\tpos = lseek(args->fd, 0, SEEK_CUR);\n\tif (pos < 0) {\n\t\trc = -errno;\n\t\tLOG_ERROR(rc, \"cmd_progress_timer_cb: lseek failed for \"DFID,\n\t\t\t  PFID(&args->hai->hai_fid));\n\t\treturn 
FALSE;\t\t\t/* stop progress report */\n\t}\n\tif (pos > args->last_pos) {\n\t\tphe->length = pos - phe->offset;\n\t\targs->last_pos = pos;\n\t}\n\n\trc = llapi_hsm_action_progress(args->hcp, phe, phe->length, 0);\n\tif (rc) {\n\t\tLOG_ERROR(rc, \"llapi_hsm_action_progress failed for \"DFID,\n\t\t\t  PFID(&args->hai->hai_fid));\n\t\treturn FALSE;\t\t\t/* stop progress report */\n\t}\n\tphe->offset = pos;\n\n\treturn TRUE;\n}\n\nstatic void cmd_termination_cb(GPid pid, gint status, gpointer ud)\n{\n\tstruct cmd_cb_args\t*args = ud;\n\n\tif (WIFEXITED(status)) {\n\t\tif (WEXITSTATUS(status) == 0) {\n\t\t\targs->retcode = 0;\n\t\t} else {\n\t\t\tLOG_DEBUG(\"command failed with exit status %d\",\n\t\t\t\t  WEXITSTATUS(status));\n\t\t\targs->retcode = -ECHILD;\n\t\t}\n\t} else {\n\t\tLOG_DEBUG(\"command failed w/o exit status)\");\n\t\targs->retcode = -ECHILD;\n\t}\n\n\tg_spawn_close_pid(pid);\n\n\t/* Note that sources that have already been dispatched when\n\t * g_main_loop_quit() is called will still be executed. 
*/\n\tg_main_loop_quit(args->loop);\n}\n\n/**\n * Register a periodic timer callback to the thread-local context.\n */\nstatic GSource *timer_subscribe(GMainLoop *loop, GSourceFunc func,\n\t\t\t\tgpointer udata)\n{\n\tGSource\t*gsrc;\n\n\tgsrc = g_timeout_source_new_seconds(opt.o_report_int);\n\tg_source_set_callback(gsrc, func, udata, NULL);\n\tg_source_attach(gsrc, g_main_loop_get_context(loop));\n\tg_source_unref(gsrc);\n\treturn gsrc;\n}\n\n/**\n * Register a subprocess termination callback to the thread-local context.\n */\nstatic GSource *term_subscribe(GMainLoop *loop, GPid pid, GChildWatchFunc func,\n\t\t\t       gpointer udata)\n{\n\tGSource\t*gsrc;\n\n\tgsrc = g_child_watch_source_new(pid);\n\tg_source_set_callback(gsrc, (GSourceFunc)func, udata, NULL);\n\tg_source_attach(gsrc, g_main_loop_get_context(loop));\n\tg_source_unref(gsrc);\n\treturn gsrc;\n}\n\n\n/**\n * Start a new HSM copytool I/O command: archive or restore.\n */\nstatic int ct_hsm_io_cmd(const enum hsm_copytool_action hsma, GMainLoop *loop,\n\t\t\t const struct hsm_action_item *hai, const long hal_flags)\n{\n\tstruct cmd_cb_args\t *cb_args;\n\tGError\t\t\t *err = NULL;\n\tGPid\t\t\t  pid;\n\tGSource\t\t\t *timer_gsrc;\n\tGSource\t\t\t *term_gsrc;\n\tgint\t\t\t  ac;\n\tgchar\t\t\t**av = NULL;\n\tconst char\t\t *hsma_name = hsm_copytool_action2name(hsma);\n\tbool\t\t\t  ok;\n\tgchar\t\t\t *cmd = NULL;\n\tint\t\t\t  mdt_idx = -1;\n\tint\t\t\t  rc;\n\n\tcb_args = calloc(1, sizeof(*cb_args));\n\tif (cb_args == NULL) {\n\t\trc = -ENOMEM;\n\t\tLOG_ERROR(rc, \"cannot allocate context to archive \"DFID,\n\t\t\t  PFID(&hai->hai_fid));\n\t\terr_major++;\n\t\tgoto out;\n\t}\n\tcb_args->retcode\t= -1;\t/* for debugging */\n\tcb_args->fd\t\t= -1;\n\n\tif (hsma == HSMA_ARCHIVE || hsma == HSMA_REMOVE) {\n\n\t\trc = ct_begin(&cb_args->hcp, hai);\n\t\tif (rc < 0) {\n\t\t\tLOG_ERROR(rc, \"ct_begin failed for \"DFID, PFID(&hai->hai_fid));\n\t\t\terr_major++;\n\t\t\tgoto out;\n\t\t}\n\n\t} else if (hsma == 
HSMA_RESTORE) {\n\n#if HAVE_LLAPI_GET_MDT_INDEX_BY_FID\n\t\trc = llapi_get_mdt_index_by_fid(opt.o_mnt_fd, &hai->hai_fid, &mdt_idx);\n\t\tif (rc < 0) {\n\t\t\tLOG_ERROR(rc, \"cannot get MDT index for \"DFID,\n\t\t\t\t  PFID(&hai->hai_fid));\n\t\t\terr_major++;\n\t\t\tgoto out;\n\t\t}\n#endif\n\t\trc = ct_begin_restore(&cb_args->hcp, hai, mdt_idx, 0);\n\t\tif (rc < 0) {\n\t\t\tLOG_ERROR(rc, \"cannot start restore operation for \"DFID,\n\t\t\t\t  PFID(&hai->hai_fid));\n\t\t\terr_major++;\n\t\t\tgoto out;\n\t\t}\n\t}\n\n\tcb_args->hai = hai;\n\tcb_args->fd = llapi_hsm_action_get_fd(cb_args->hcp);\n\n\tct_build_cmd(hsma, &cmd, hai, cb_args->fd);\n\tif (cmd)\n\t\tLOG_DEBUG(\"Running %s command: '%s'\", hsma_name, cmd);\n\tif (opt.o_dry_run || !cmd) {\n\t\terr_major++;\n\t\tgoto out;\n\t}\n\n\tok = g_shell_parse_argv(cmd, &ac, &av, &err);\n\tif (!ok) {\n\t\tLOG_ERROR(EINVAL, \"Invalid cmd '%s': %s\", cmd, err->message);\n\t\tg_error_free(err);\n\t\terr_major++;\n\t\tgoto out;\n\t}\n\n\tok = g_spawn_async(NULL,\t\t/* working directory */\n\t\t\t   av,\t\t\t/* parsed command line */\n\t\t\t   NULL,\t\t/* environment vars */\n\t\t\t   CMD_EXEC_FLAGS,\t/* execution flags */\n\t\t\t   NULL,\t\t/* child setup function */\n\t\t\t   NULL,\t\t/* user data pointer */\n\t\t\t   &pid,\t\t/* child pid address */\n\t\t\t   &err);\t\t/* error marker */\n\n\tif (!ok) {\n\t\tLOG_ERROR(ECHILD, \"Cannot spawn subprocess: %s\", err->message);\n\t\tg_error_free(err);\n\t\terr_major++;\n\t\tgoto out;\n\t}\n\n\t/* register a periodic timer callback for progress report */\n\ttimer_gsrc = timer_subscribe(loop, cmd_progress_timer_cb, cb_args);\n\n\t/* register a subprocess termination callback */\n\tterm_gsrc = term_subscribe(loop, pid, cmd_termination_cb, cb_args);\n\n\tcb_args->he.offset = hai->hai_extent.offset;\n\tcb_args->loop      = loop;\n\n\tg_main_loop_run(loop);\n\n\t/* This loop will run again, we need to explicitly destroy sources 
*/\n\tg_source_destroy(term_gsrc);\n\tg_source_destroy(timer_gsrc);\n\nout:\n\tg_free(cmd);\n\tg_strfreev(av);\n\n\t/* Obscure voodoo forces are summoned in this function in the\n\t * restore case. Do not close the volatile before! */\n\trc = ct_fini(&cb_args->hcp, hai, 0,\n\t\t     cb_args ? cb_args->retcode : -ENOMEM);\n\n\tif (cb_args && cb_args->fd >= 0)\n\t\tclose(cb_args->fd);\n\n\tfree(cb_args);\n\n\treturn rc;\n}\n\nstatic void handler(int signal)\n{\n\tstop = true;\n\tpsignal(signal, \"exiting\");\n\t/* If we don't clean up upon interrupt, umount thinks there's a ref\n\t * and doesn't remove us from mtab (EINPROGRESS). The lustre client\n\t * does successfully unmount and the mount is actually gone, but the\n\t * mtab entry remains. So this just makes mtab happier. */\n\tllapi_hsm_copytool_unregister(&ctdata);\n\n\t/* Also remove fifo upon signal as during normal/error exit */\n\tif (opt.o_event_fifo != NULL)\n\t\tllapi_hsm_unregister_event_fifo(opt.o_event_fifo);\n\t_exit(1);\n}\n\n/* Daemon waits for messages from the kernel; run it in the background. 
*/\nstatic int ct_run(void)\n{\n\tstruct sigaction\tact;\n\tint\t\t\trc;\n\n\tif (opt.o_daemonize) {\n\t\trc = daemon(1, 1);\n\t\tif (rc < 0) {\n\t\t\trc = -errno;\n\t\t\tLOG_ERROR(rc, \"cannot daemonize\");\n\t\t\treturn rc;\n\t\t}\n\t}\n\n\tif (opt.o_event_fifo != NULL) {\n\t\trc = llapi_hsm_register_event_fifo(opt.o_event_fifo);\n\t\tif (rc < 0) {\n\t\t\tLOG_ERROR(rc, \"failed to register event fifo\");\n\t\t\treturn rc;\n\t\t}\n\t\tllapi_error_callback_set(llapi_hsm_log_error);\n\t}\n\n\trc = llapi_hsm_copytool_register(&ctdata, opt.o_mnt,\n\t\t\t\t\t opt.o_archive_cnt,\n\t\t\t\t\t opt.o_archive_id, 0);\n\tif (rc < 0) {\n\t\tLOG_ERROR(rc, \"cannot start copytool interface\");\n\t\treturn rc;\n\t}\n\n\tmemset (&act, 0, sizeof(act));\n\tact.sa_handler = &handler;\n\tif (sigaction(SIGINT, &act, NULL) < 0\n\t    || sigaction(SIGTERM, &act, NULL) < 0) {\n\t\trc = -errno;\n\t\tLOG_ERROR(rc, \"cannot set signal handler for SIGINT/SIGTERM\");\n\t\treturn rc;\n\t}\n\tact.sa_handler = SIG_IGN;\n\tif (sigaction(SIGPIPE, &act, NULL) < 0) {\n\t\trc = -errno;\n\t\tLOG_ERROR(rc, \"cannot ignore signal SIGPIPE\");\n\t\treturn rc;\n\t}\n\n\twhile (1) {\n\t\tstruct hsm_action_list\t*hal;\n\t\tstruct hsm_action_item\t*hai;\n\t\tint\t\t\t msgsize;\n\t\tint\t\t\t i = 0;\n\n\t\tLOG_DEBUG(\"waiting for message from kernel\");\n\n\t\trc = llapi_hsm_copytool_recv(ctdata, &hal, &msgsize);\n\t\tif (rc == -ESHUTDOWN) {\n\t\t\tLOG_DEBUG(\"shutting down\");\n\t\t\tbreak;\n\t\t} else if (rc < 0) {\n\t\t\tfprintf(stderr, \"cannot receive action list: %s\\n\",\n\t\t\t\tstrerror(-rc));\n\t\t\terr_major++;\n\t\t\tif (opt.o_abort_on_error)\n\t\t\t\tbreak;\n\t\t\telse\n\t\t\t\tcontinue;\n\t\t}\n\n\t\tLOG_DEBUG(\"copytool fs=%s archive#=%d item_count=%d\",\n\t\t\t  hal->hal_fsname, hal->hal_archive_id, hal->hal_count);\n\n\t\tif (strcmp(hal->hal_fsname, fs_name) != 0) {\n\t\t\trc = -EINVAL;\n\t\t\tLOG_ERROR(rc, \"'%s' invalid fs name, expecting: %s\",\n\t\t\t\t hal->hal_fsname, 
fs_name);\n\t\t\terr_major++;\n\t\t\tif (opt.o_abort_on_error)\n\t\t\t\tbreak;\n\t\t\telse\n\t\t\t\tcontinue;\n\t\t}\n\n\t\thai = hai_first(hal);\n\t\twhile (++i <= hal->hal_count) {\n\t\t\tstruct hai_desc *hd;\n\n\t\t\tif ((char *)hai - (char *)hal > msgsize) {\n\t\t\t\trc = -EPROTO;\n\t\t\t\tLOG_ERROR(rc,\n\t\t\t\t\t  \"'%s' item %d past end of message!\",\n\t\t\t\t\t  opt.o_mnt, i);\n\t\t\t\terr_major++;\n\t\t\t\tbreak;\n\t\t\t}\n\n\t\t\thd = malloc(sizeof(*hd) + hai->hai_len);\n\t\t\tif (hd == NULL) {\n\t\t\t\trc = -ENOMEM;\n\t\t\t\tLOG_ERROR(rc, \"'%s' item cannot be processed\",\n\t\t\t\t\t  opt.o_mnt);\n\t\t\t\terr_major++;\n\t\t\t\tbreak;\n\t\t\t}\n\t\t\thd->hd_flags   = hal->hal_flags;\n\t\t\thd->hd_datalen = hai->hai_len;\n\t\t\tmemcpy(hd->hd_data, hai, hai->hai_len);\n\n\t\t\tg_async_queue_push(mqueue, hd);\n\t\t\thai = hai_next(hai);\n\t\t}\n\n\t\tif (opt.o_abort_on_error && err_major) {\n\t\t\tLOG_DEBUG(\"copytool aborting on error\");\n\t\t\tbreak;\n\t\t}\n\t}\n\n\tstop = true;\n\tllapi_hsm_copytool_unregister(&ctdata);\n\tif (opt.o_event_fifo != NULL)\n\t\tllapi_hsm_unregister_event_fifo(opt.o_event_fifo);\n\n\treturn rc;\n}\n\nstatic gpointer subproc_mgr_main(gpointer data)\n{\n\tGMainContext\t*mctx;\n\tGMainLoop\t*loop;\n\n\tmctx = g_main_context_new();\n\tg_main_context_push_thread_default(mctx);\n\n\tloop = g_main_loop_new(mctx, false);\n\n\tg_async_queue_ref(mqueue);\n\n\twhile (!stop) {\n\t\tstruct hsm_action_item\t*hai;\n\t\tstruct hai_desc\t\t*hd;\n\n\t\thd = g_async_queue_pop(mqueue);\n\t\tif (hd->hd_datalen < sizeof(*hai)) {\n\t\t\tLOG_ERROR(EPROTO, \"Invalid record (ignoring)\");\n\t\t\tcontinue;\n\t\t}\n\n\t\thai = (struct hsm_action_item *)hd->hd_data;\n\t\tswitch (hai->hai_action) {\n\t\tcase HSMA_ARCHIVE:\n\t\tcase HSMA_RESTORE:\n\t\tcase HSMA_REMOVE:\n\t\t\tct_hsm_io_cmd(hai->hai_action, loop, hai, hd->hd_flags);\n\t\t\tbreak;\n\t\tcase HSMA_CANCEL:\n\t\t\tLOG_ERROR(ENOTSUP, \"Operation not 
implemented\");\n\t\t\tbreak;\n\t\t}\n\t\tfflush(stderr);\n\t\tfree(hd);\n\t}\n\n\tg_async_queue_unref(mqueue);\n\tg_main_loop_unref(loop);\n\tg_main_context_unref(mctx);\n\treturn NULL;\n}\n\n/**\n * Setup the producer thread, ie the one that reads records from coordinator\n * and publish them to the working threads.\n *\n * It keeps an fd open on the lustre filesystem root to prevent it from being\n * unmounter accidentally.\n */\nstatic int ct_producer_setup(void)\n{\n\tint\trc;\n\n\trc = llapi_search_fsname(opt.o_mnt, fs_name);\n\tif (rc < 0) {\n\t\tLOG_ERROR(rc, \"cannot find a Lustre filesystem mounted at '%s'\",\n\t\t\t opt.o_mnt);\n\t\treturn rc;\n\t}\n\n\topt.o_mnt_fd = open(opt.o_mnt, O_RDONLY);\n\tif (opt.o_mnt_fd < 0) {\n\t\trc = -errno;\n\t\tLOG_ERROR(rc, \"cannot open mount point at '%s'\",\n\t\t\t  opt.o_mnt);\n\t\treturn rc;\n\t}\n\n\treturn 0;\n}\n\n/**\n * Load configuration file.\n * This is a GLib KeyFile, similar to .ini files.\n * The command are grouped under a same group (CFG_GROUP_COMMANDS) and expressed\n * as format strings associated to the HSM operation they correspond to.\n *\n * See:\n * https://developer.gnome.org/glib/unstable/glib-Key-value-file-parser.html\n */\nstatic int ct_load_cfg_file(void)\n{\n\tGKeyFile\t*keys = g_key_file_new();\n\tGError\t\t*err  = NULL;\n\tint\t\t rc = 0;\n\n\tif (!g_key_file_load_from_file(keys, opt.o_config,\n\t\t\t\t       G_KEY_FILE_NONE, &err)) {\n\t\trc = -EINVAL;\n\t\tLOG_ERROR(rc, \"cannot load configuration at '%s': %s\",\n\t\t\t  opt.o_config, err->message);\n\t\tg_error_free(err);\n\t\tgoto out;\n\t}\n\n\t/* Although the functions below will ensure that the group exists, this\n\t * allows us to exit early with a clear error message and ignore errors\n\t * due to missing keys (no command is mandatory). 
*/\n\tif (!g_key_file_has_group(keys, CFG_GROUP_COMMANDS)) {\n\t\trc = -EINVAL;\n\t\tLOG_ERROR(rc, \"Missing group '%s'\", CFG_GROUP_COMMANDS);\n\t\tgoto out;\n\t}\n\n\t/* commands are to be freed using g_free() and can be NULL */\n\tct_commands[HSMA_ARCHIVE] = g_key_file_get_string(keys,\n\t\t\t\t\t\t\t  CFG_GROUP_COMMANDS,\n\t\t\t\t\t\t\t  \"archive\", NULL);\n\tct_commands[HSMA_RESTORE] = g_key_file_get_string(keys,\n\t\t\t\t\t\t\t  CFG_GROUP_COMMANDS,\n\t\t\t\t\t\t\t  \"restore\", NULL);\n\tct_commands[HSMA_REMOVE] = g_key_file_get_string(keys,\n\t\t\t\t\t\t\t CFG_GROUP_COMMANDS,\n\t\t\t\t\t\t\t \"remove\", NULL);\n\tct_commands[HSMA_CANCEL] = g_key_file_get_string(keys,\n\t\t\t\t\t\t\t CFG_GROUP_COMMANDS,\n\t\t\t\t\t\t\t \"cancel\", NULL);\nout:\n\tg_key_file_free(keys);\n\treturn rc;\n}\n\n/**\n * Fill in structures and spawn working threads.\n */\nstatic int ct_setup(void)\n{\n\tint i;\n\tint rc;\n\n\t/* Initialize regular expression patterns for argument substitution */\n\tfd_regex  = g_regex_new(\"{fd}\", G_REGEX_OPTIMIZE, 0, NULL);\n\tfid_regex = g_regex_new(\"{fid}\", G_REGEX_OPTIMIZE, 0, NULL);\n\tctdata_regex = g_regex_new(\"{ctdata}\", G_REGEX_OPTIMIZE, 0, NULL);\n\n#if !(GLIB_CHECK_VERSION(2, 32, 0))\n\tg_thread_init(NULL);\n#endif\n\n\tllapi_msg_set_level(opt.o_verbose);\n\trc = ct_load_cfg_file();\n\tif (rc)\n\t\treturn rc;\n\n\trc = ct_producer_setup();\n\tif (rc)\n\t\treturn rc;\n\n\t/* Start working threads and communication channel */\n\tmqueue = g_async_queue_new();\n\n\tfor (i = 0; i < opt.o_fanout; i++)\n#if !(GLIB_CHECK_VERSION(2, 32, 0))\n\t\tg_thread_create(subproc_mgr_main, NULL, false, NULL);\n#else\n\t\tg_thread_new(\"subproc_mgr_main\", subproc_mgr_main, NULL);\n#endif\n\n\treturn 0;\n}\n\n/**\n * Clear everything before exit() for the sake of sane valgrind sessions.\n */\nstatic int ct_cleanup(void)\n{\n\tif (mqueue != 
NULL)\n\t\tg_async_queue_unref(mqueue);\n\n\tg_regex_unref(ctdata_regex);\n\tg_regex_unref(fid_regex);\n\tg_regex_unref(fd_regex);\n\n\tg_free(ct_commands[HSMA_ARCHIVE]);\n\tg_free(ct_commands[HSMA_RESTORE]);\n\tg_free(ct_commands[HSMA_REMOVE]);\n\tg_free(ct_commands[HSMA_CANCEL]);\n\treturn 0;\n}\n\nint main(int argc, char **argv)\n{\n\tint\trc;\n\n\tstrncpy(cmd_name, basename(argv[0]), sizeof(cmd_name) - 1);\n\trc = ct_parseopts(argc, argv);\n\tif (rc < 0) {\n\t\tfprintf(stderr, \"try '%s --help' for more information\\n\",\n\t\t\tcmd_name);\n\t\treturn -rc;\n\t}\n\n\trc = ct_setup();\n\tif (rc < 0)\n\t\tgoto error_cleanup;\n\n\trc = ct_run();\n\nerror_cleanup:\n\tct_cleanup();\n\n\treturn -rc;\n}\n"
  },
  {
    "path": "src/tools/ost_fids_remap.c",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2013 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n/* read rbh-diff fid_remap as input, and update trusted.fid xattr for OST objects */\n\n#ifdef HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n\n#include \"rbh_basename.h\"\n\n#include <stdio.h>\n#include <sys/types.h>\n#include <sys/xattr.h>\n#include <errno.h>\n#include <stdlib.h>\n#include <sys/param.h>\n#include <search.h>\n#include <stdint.h>\n#include <inttypes.h>\n#include <unistd.h>\n#include <string.h>\n\n#include \"lustre_extended_types.h\"\n\nstatic void usage(const char *argv0)\n{\n    fprintf(stderr, \"Usage: %s <ost_index> <ost_mount_point> <fid_remap_file>\\n\",\n            rh_basename(argv0));\n    exit(1);\n}\n\n/**\n * Convert a string to an integer\n * @return -1 on error.\n */\nstatic int str2int(const char *str)\n{\n    char           suffix[256];\n    int            nb_read, value;\n\n    if (str == NULL)\n        return -1;\n\n    nb_read = sscanf(str, \"%d%s\", &value, suffix);\n\n    if (nb_read <= 0)\n        return -1;              /* invalid format */\n\n    if ((nb_read == 1) || (suffix[0] == '\\0'))\n        return value;           /* no suffix => 0K */\n    else\n        return -1;\n}\n\nint main(int argc, char ** argv)\n{\n    char buff[4096];\n    char path[PATH_MAX];\n    char xattr[4096];\n    ssize_t s;\n    int len, rc;\n    int nl = 0;\n    int errors = 0;\n    int ignored_lines = 0;\n    int ok = 0;\n    int match_ost = 0;\n    const char *ost_root, *file;\n    int idx = 0;\n    FILE *f;\n\n    if (argc != 4)\n        usage(argv[0]);\n\n    if (argv[2][0] != '/')\n    {\n  
      fprintf(stderr, \"ERROR: absolute path expected for <ost_mount_point>\\n\");\n        usage(argv[0]);\n    }\n    idx = str2int(argv[1]);\n    if (idx == -1)\n    {\n        fprintf(stderr, \"ERROR: positive integer expected for <ost_index>\\n\");\n        usage(argv[0]);\n    }\n    ost_root = argv[2];\n    file = argv[3];\n\n    f = fopen(file,\"r\");\n    if (f == NULL)\n    {\n        rc = errno;\n        fprintf(stderr,\"Failed to open %s for reading: %s\\n\",\n                file, strerror(errno));\n        exit(rc);\n    }\n\n    while(fgets(buff, 4096, f))\n    {\n        unsigned int ost_idx = 0, snum = 0;\n        uint64_t     obj_id = 0;\n        lustre_fid   oldfid = {0},\n                     newfid = {0};\n\n        nl++;\n\n        len = strlen(buff);\n        /* remove final '\\n' */\n        if (len > 0 && buff[len-1] == '\\n')\n            buff[len-1] = '\\0';\n\n        /* line format: ost_idx obj_id oldfid newfid */\n        if (sscanf(buff, \"%u %u %\"PRIu64\" [\"SFID\"] [\"SFID\"]\",\n                   &ost_idx, &snum, &obj_id, RFID(&oldfid), RFID(&newfid)) != 9)\n        {\n            fprintf(stderr, \"ERROR: Invalid line format or empty line at line %u\\n\", nl);\n            ignored_lines++;\n            continue;\n        }\n\n        if (idx != ost_idx)\n        {\n            ignored_lines++;\n            continue;\n        }\n        match_ost++;\n\n        /* build path related to object index */\n        sprintf(path,\"%s/O/0/d%u/%\"PRIu64, ost_root, (unsigned int)(obj_id % 32), obj_id);\n\n        /* get previous fid for the object */\n        s = lgetxattr(path, \"trusted.fid\", xattr, 4096);\n        if (s < 0)\n        {\n            fprintf(stderr, \"Can't check previous FID for object %\"PRIu64\" (%s): %s.\\n\",\n                    obj_id, path, strerror(errno));\n            errors ++;\n            continue;\n        }\n        if (s != sizeof(struct filter_fid))\n        {\n            fprintf(stderr, \"ERROR: 
unexpected size for fid xattr: %zu != %zu\\n\",\n                    s, sizeof(lustre_fid));\n            errors ++;\n            continue;\n        }\n        struct filter_fid *ffid = (struct filter_fid *)xattr;\n\n        // ff_parent.f_ver == file stripe number\n        oldfid.f_ver = snum;\n        newfid.f_ver = snum;\n\n        if (memcmp(&ffid->ff_parent, &oldfid, sizeof(lustre_fid)))\n        {\n            if (memcmp(&ffid->ff_parent, &newfid, sizeof(lustre_fid)) == 0)\n                fprintf(stderr, \"ERROR: new FID is already set for object %\"PRIu64\" (%s): \"\n                        \"current=\"DFID\", old=\"DFID\", new=\"DFID\"\\n\", obj_id, path,\n                        PFID(&ffid->ff_parent), PFID(&oldfid), PFID(&newfid));\n            else\n                fprintf(stderr, \"ERROR: unexpected FID for object %\"PRIu64\" (%s): \"\n                        \"current=\"DFID\", expected=\"DFID\"\\n\", obj_id, path,\n                        PFID(&ffid->ff_parent), PFID(&oldfid));\n            errors ++;\n            continue;\n        }\n        if (ffid->ff_objid != obj_id)\n        {\n            fprintf(stderr, \"ERROR: object id doesn't match! 
got: %\"PRIu64\", expected: %\"PRIu64\" (%s)\\n\",\n                    (uint64_t)ffid->ff_objid, obj_id, path);\n            errors ++;\n            continue;\n        }\n\n        /* set the filter with the right fid */\n        memcpy(&ffid->ff_parent, &newfid, sizeof(lustre_fid));\n        printf(\"objid %\"PRIu64\": \"DFID\"->\"DFID\"\\n\", obj_id, PFID(&oldfid), PFID(&newfid));\n        if (lsetxattr(path, \"trusted.fid\", ffid, sizeof(struct filter_fid), XATTR_REPLACE))\n        {\n            fprintf(stderr, \"ERROR: failed to update object's FID for object %\"PRIu64\" (%s): %s\",\n                    obj_id, path, strerror(errno));\n            errors ++;\n            continue;\n        }\n        ok ++;\n    }\n    printf(\"\\nSummary: %u input lines, %u matching ost#%u, %u success, %u ignored, %u errors\\n\",\n           nl, match_ost, idx, ok, ignored_lines, errors);\n    if (errors)\n        exit(1);\n    else\n        exit(0);\n}\n"
  },
  {
    "path": "src/tools/read_lovea.c",
    "content": "#ifdef HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n\n#include <stdio.h>\n#include <stdlib.h>\n#include <stdint.h>\n#include <sys/types.h>\n#include <sys/xattr.h>\n#include \"lustre_extended_types.h\"\n\nstatic const char * pattern2str(uint32_t pattern)\n{\n    switch(pattern)\n    {\n        case LOV_PATTERN_RAID0: return \"RAID0\";\n        case LOV_PATTERN_RAID1: return \"RAID1\";\n    }\n    return \"?\";\n}\n\nstatic int print_lov(struct lov_user_md * p_lum)\n{\n#ifdef LOV_USER_MAGIC_V3\n    struct lov_user_md_v3 *p_lum3;\n#endif\n    unsigned int   i;\n\n    /* Check protocol version number */\n    if ( p_lum->lmm_magic == LOV_USER_MAGIC_V1 )\n    {\n        printf(\"magic=%#x (LOV_USER_MAGIC_V1)\\n\", p_lum->lmm_magic);\n        printf(\"pattern=%#x (%s)\\n\", p_lum->lmm_pattern, pattern2str(p_lum->lmm_pattern));\n#ifdef HAVE_OBJ_ID\n        printf(\"object_id=%#Lx\\n\", p_lum->lmm_object_id);\n#ifdef _HAVE_FID\n        printf(\"object_seq=%#Lx\\n\", p_lum->lmm_object_seq);\n#else /* lmm_object_gr for Lustre 1.x */\n        printf(\"object_gr=%#Lx\\n\", p_lum->lmm_object_gr);\n#endif\n#else\n        printf(\"object_id=%#Lx\\n\", p_lum->lmm_oi.oi.oi_id);\n        printf(\"object_seq=%#Lx\\n\", p_lum->lmm_oi.oi.oi_seq);\n#endif\n        printf(\"stripe_size=%u\\n\", p_lum->lmm_stripe_size);\n        printf(\"stripe_count=%hu\\n\", p_lum->lmm_stripe_count);\n        printf(\"stripe_offset=%hd\\n\", p_lum->lmm_stripe_offset);\n        printf(\"stripe objects:\\n\");\n        for (i = 0; i < p_lum->lmm_stripe_count; i++)\n        {\n            printf(\"   [%u] ost_idx=%u\\n\", i, p_lum->lmm_objects[i].l_ost_idx);\n            printf(\"   [%u] ost_gen=%u\\n\", i, p_lum->lmm_objects[i].l_ost_gen);\n#ifdef HAVE_OBJ_ID\n            printf(\"   [%u] object_id=%Lu\\n\", i, p_lum->lmm_objects[i].l_object_id);\n#ifdef HAVE_OBJ_SEQ\n            printf(\"   [%u] object_seq=%Lu\\n\", i, p_lum->lmm_objects[i].l_object_seq);\n#else\n            printf(\"   
[%u] object_gr=%Lu\\n\", i, p_lum->lmm_objects[i].l_object_gr);\n#endif\n#else /* new structure (union of fid and id/seq) */\n            printf(\"   [%u] object_id=%Lu\\n\", i, p_lum->lmm_objects[i].l_ost_oi.oi.oi_id);\n            printf(\"   [%u] object_seq=%Lu\\n\", i, p_lum->lmm_objects[i].l_ost_oi.oi.oi_seq);\n#endif\n        }\n        return 0;\n    }\n#ifdef LOV_USER_MAGIC_V3\n    else if ( p_lum->lmm_magic == LOV_USER_MAGIC_V3 )\n    {\n        p_lum3 = ( struct lov_user_md_v3 * ) p_lum;\n        char pool_name[LOV_MAXPOOLNAME+1];\n\n        printf(\"magic=%#x (LOV_USER_MAGIC_V3)\\n\", p_lum3->lmm_magic);\n        printf(\"pattern=%#x (%s)\\n\", p_lum3->lmm_pattern, pattern2str(p_lum3->lmm_pattern));\n#ifdef HAVE_OBJ_ID\n        printf(\"object_id=%#Lx\\n\", p_lum3->lmm_object_id);\n#ifdef _HAVE_FID\n        printf(\"object_seq=%#Lx\\n\", p_lum3->lmm_object_seq);\n#else /* lmm_object_gr for Lustre 1.x */\n        printf(\"object_gr=%#Lx\\n\", p_lum3->lmm_object_gr);\n#endif\n#else\n        printf(\"object_id=%#Lx\\n\", p_lum3->lmm_oi.oi.oi_id);\n        printf(\"object_seq=%#Lx\\n\", p_lum3->lmm_oi.oi.oi_seq);\n#endif\n\n        printf(\"stripe_size=%u\\n\", p_lum3->lmm_stripe_size);\n        printf(\"stripe_count=%hu\\n\", p_lum3->lmm_stripe_count);\n        printf(\"stripe_offset=%hd\\n\", p_lum3->lmm_stripe_offset);\n        strncpy(pool_name, p_lum3->lmm_pool_name, LOV_MAXPOOLNAME);\n        pool_name[LOV_MAXPOOLNAME] = '\\0';\n        printf(\"pool_name=%s\\n\", pool_name);\n        printf(\"stripe objects:\\n\");\n        for (i = 0; i < p_lum3->lmm_stripe_count; i++)\n        {\n            printf(\"   [%u] ost_idx=%u\\n\", i, p_lum3->lmm_objects[i].l_ost_idx);\n            printf(\"   [%u] ost_gen=%u\\n\", i, p_lum3->lmm_objects[i].l_ost_gen);\n#ifdef HAVE_OBJ_ID\n            printf(\"   [%u] object_id=%Lu\\n\", i, p_lum3->lmm_objects[i].l_object_id);\n#ifdef HAVE_OBJ_SEQ\n            printf(\"   [%u] object_seq=%Lu\\n\", i, 
p_lum3->lmm_objects[i].l_object_seq);\n#else\n            printf(\"   [%u] object_gr=%Lu\\n\", i, p_lum3->lmm_objects[i].l_object_gr);\n#endif\n#else /* new structure (union of fid and id/seq) */\n            printf(\"   [%u] object_id=%Lu\\n\", i, p_lum3->lmm_objects[i].l_ost_oi.oi.oi_id);\n            printf(\"   [%u] object_seq=%Lu\\n\", i, p_lum3->lmm_objects[i].l_ost_oi.oi.oi_seq);\n#endif\n        }\n        return 0;\n    }\n#endif\n    else\n    {\n        fprintf(stderr, \"Error: wrong magic %#x for lov_user_md\\n\",  p_lum->lmm_magic);\n        return -1;\n    }\n}\n\n\nstatic void usage(const char * bin)\n{\n    fprintf(stderr, \"usage: %s <mdt_file>\\n\", bin);\n}\n\nint main(int argc, char ** argv)\n{\n    ssize_t len;\n    char buff[4096];\n\n    if (argc != 2)\n    {\n        usage(argv[0]);\n        exit(1);\n    }\n\n    len = lgetxattr (argv[1], XATTR_NAME_LOV, buff, 4096);\n\n    if (len < 1)\n    {\n        fprintf(stderr, \"%s: \"XATTR_NAME_LOV\" is not readable\\n\", argv[1]);\n        exit(1);\n    }\n    else if (len == 0)\n    {\n        fprintf(stderr, \"%s: \"XATTR_NAME_LOV\" is empty\\n\", argv[1]);\n        exit(1);\n    }\n    else if (len < sizeof(struct lov_user_md_v1)) /* v1 is the smaller (?) */\n    {\n        fprintf(stderr, \"%s: \"XATTR_NAME_LOV\" size too small\\n\", argv[1]);\n        exit(1);\n    }\n\n    /* attr is OK */\n    print_lov((struct lov_user_md *) buff);\n\n    exit(0);\n\n\n}\n\n"
  },
  {
    "path": "src/tools/set_lovea.c",
    "content": "/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n * vim:expandtab:shiftwidth=4:tabstop=4:\n */\n/*\n * Copyright (C) 2009, 2010 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n/* read rbh-diff as input, and set lov for MDT objects */\n\n#ifdef HAVE_CONFIG_H\n#include \"config.h\"\n#endif\n\n#include <stdio.h>\n#include <sys/types.h>\n#include <sys/xattr.h>\n#include <errno.h>\n#include <stdlib.h>\n#include <sys/param.h>\n\n#include \"lustre_extended_types.h\"\n\nstatic ssize_t hex2bin(const char * hex, void * buff)\n{\n    const char *currh = hex;\n    unsigned char *currb = buff;\n    while(*currh)\n    {\n        if (sscanf(currh,\"%2hhx\", currb) != 1)\n        {\n            fprintf(stderr,\"ERROR: could not read a byte from hex value at '%s'\\n\", currh);\n            return -1;\n        }\n        currh += 2;\n        currb ++;\n    }\n    return (currb-(unsigned char*)buff);\n}\n\nstatic int set_lov_ea(struct lov_user_md * p_lum, size_t sz_in, const char * path)\n{\n    if (p_lum->lmm_magic == LOV_USER_MAGIC_V1)\n    {\n        size_t len = sizeof(struct lov_user_md_v1) + p_lum->lmm_stripe_count*sizeof(struct lov_user_ost_data_v1);\n        if (sz_in != len)\n        {\n            fprintf(stderr, \"ERROR: wrong input size for lov_user_md_v1: %lu (%lu expected, stripe count=%u)\\n\",\n                    sz_in, len, p_lum->lmm_stripe_count);\n            return -1;\n        }\n        if (lsetxattr (path, XATTR_NAME_LOV, p_lum, len, 0 /* create or replace*/ ))\n        {\n            fprintf(stderr, \"ERROR setting \"XATTR_NAME_LOV\" xattr on %s: %s\\n\",\n                    path, strerror(errno));\n            return -1;\n        }\n    }\n#ifdef LOV_USER_MAGIC_V3\n   
 else if (p_lum->lmm_magic == LOV_USER_MAGIC_V3)\n    {\n        struct lov_user_md_v3 *p_lum3 = (struct lov_user_md_v3 *)p_lum;\n\n        size_t len = sizeof(struct lov_user_md_v3) + p_lum3->lmm_stripe_count*sizeof(struct lov_user_ost_data_v1);\n        if (sz_in != len)\n        {\n            fprintf(stderr, \"ERROR: wrong input size for lov_user_md_v3: %lu (%lu expected)\\n\",\n                    sz_in, len);\n            return -1;\n        }\n        if (lsetxattr (path, XATTR_NAME_LOV, p_lum3, len, 0 /* create or replace*/ ))\n        {\n            fprintf(stderr, \"ERROR setting \"XATTR_NAME_LOV\" xattr on %s: %s\\n\",\n                    path, strerror(errno));\n            return -1;\n        }\n    }\n#endif\n    else\n    {\n        fprintf(stderr, \"Invalid magic number %#x in lov_user_md\\n\", p_lum->lmm_magic);\n        return -1;\n    }\n    printf(\"%s: \"XATTR_NAME_LOV\" set successfully\\n\", path);\n    return 0;\n}\n\nint main(int argc, char ** argv)\n{\n    char buff[4096];\n    char path[MAXPATHLEN];\n    char lum_buff[4096];\n    int len;\n    ssize_t s;\n    char *lovea;\n    int nl = 0;\n    int errors = 0;\n    int ignored_lines = 0;\n    int ok = 0;\n    char * mdt_root;\n    FILE * lovea_stream = stdin;\n\n    if (argc != 2 && argc != 3)\n    {\n        fprintf(stderr, \"Usage: %s <mdt_mount_point> [lovea_file]\\n\", argv[0]);\n        exit(1);\n    }\n    if (argv[1][0] != '/')\n    {\n        fprintf(stderr, \"ERROR: absolute path expected for <mdt_mount_point>\\n\");\n        fprintf(stderr, \"Usage: %s <mdt_mount_point> [lovea_file]\\n\", argv[0]);\n        exit(1);\n    }\n    mdt_root = argv[1];\n    if (argc == 3)\n    {\n        lovea_stream = fopen(argv[2],\"r\");\n        if (!lovea_stream)\n        {\n            fprintf(stderr,\"Failed to open %s for reading: %s\\n\",\n                argv[2], strerror(errno));\n            exit(1);\n        }\n    }\n\n    while(fgets(buff, 4096, lovea_stream))\n    {\n        nl++;\n\n 
       len = strlen(buff);\n        /* remove final '\\n' */\n        if (len > 0 && buff[len-1] == '\\n')\n            buff[len-1] = '\\0';\n\n        /* line format: <relative path of file> <lov_ea(hex)>*/\n        lovea = strrchr(buff, ' ');\n        if (!lovea)\n        {\n            fprintf(stderr, \"ERROR: Invalid line format or empty line at line %u\\n\", nl);\n            ignored_lines++;\n            continue;\n        }\n        /* split path and lovea */\n        *lovea = '\\0';\n        lovea++;\n\n        /* convert hex buffer to binary */\n        s = hex2bin(lovea, lum_buff);\n        if (s < 0)\n        {\n            errors ++;\n            continue;\n        }\n\n        if (s < sizeof(struct lov_user_md_v1))\n        {\n            fprintf(stderr, \"ERROR: lov_ea is too small: %Lu/%Lu bytes\\n\",\n                    (unsigned long long)s, (unsigned long long)sizeof(struct lov_user_md_v1));\n            errors ++;\n            continue;\n        }\n\n        sprintf(path, \"%s/ROOT/%s\", mdt_root, buff);\n        if (set_lov_ea((struct lov_user_md *)lum_buff, s, path))\n        {\n            errors ++;\n            continue;\n        }\n        ok ++;\n    }\n    printf(\"\\nSummary: %u input lines, %u ok, %u ignored, %u errors\\n\", nl, ok, ignored_lines, errors);\n    if (errors)\n        exit(1);\n    else\n        exit(0);\n}\n"
  },
  {
    "path": "tests/Makefile.am",
    "content": "SUBDIRS=test_suite\n\npkgdatadir=@datarootdir@/@PACKAGE_NAME@/tests\n\nif COMMON_RPMS\nnobase_dist_pkgdata_DATA =                      \\\n    $(srcdir)/test_suite/README.rst             \\\n    $(srcdir)/test_suite/cfg/*.conf             \\\n    $(srcdir)/test_suite/cfg/*.sql              \\\n    $(srcdir)/test_suite/valgrind.supp          \\\n    $(srcdir)/huge_posix/cfg/*.conf\n\nnobase_dist_pkgdata_SCRIPTS =                   \\\n    $(srcdir)/test_suite/1-test_setup_lustre.sh \\\n    $(srcdir)/test_suite/1-test_setup_posix.sh  \\\n    $(srcdir)/test_suite/2-run-tests.sh         \\\n    $(srcdir)/test_suite/3-tests-lustre.sh      \\\n    $(srcdir)/test_suite/cleanup.sh             \\\n    $(srcdir)/test_suite/bench_rpc.sh           \\\n    $(srcdir)/test_suite/rm_script              \\\n    $(srcdir)/test_suite/lsetup.sh              \\\n    $(srcdir)/huge_posix/1-test_setup.sh        \\\n    $(srcdir)/huge_posix/2-run-tests.sh         \\\n    $(srcdir)/test_rpmbuild.sh                  \\\n    $(srcdir)/fill_fs.sh                        \\\n    $(srcdir)/completion.sh\nendif\n"
  },
  {
    "path": "tests/completion.sh",
    "content": "#!/bin/bash\n\ncfg=$1\nfspath=$2\noutput=$3\n\necho $cfg > $output.1\necho $fspath > $output.2\n"
  },
  {
    "path": "tests/create_files.sh",
    "content": "#/!bin/sh\n\n# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\n# this script fills a filesystem continuously\n# while migration and purges are triggered\n# by Policy Engine.\n\nROOT=$1\n\nif [[ -z $ROOT ]]; then\n    echo \"Usage: $0 <path>\";\n    exit 1;\nfi\n\nMAX_DEPTH=4\nSUBDIRS=30 # subdirs at each level\nLEAVES=30 # nbr of files at lower level\nFILE_SZ_MB=2 # file size\n\nTOTAL_FILES=0\nTIME_START=`date +%s.%N`\n\nfunction mksubtree\n{\n    local DIR=$1\n    local LVL=$2\n    local d\n    local f\n\n    if (( $LVL >= MAX_DEPTH )); then\n        for f in `seq 1 $LEAVES`; do\n           #echo \"Creating file $DIR/file.$f...\"\n           touch $DIR/file.$f\n\t   if (( $? != 0 )); then\n\t\techo \"ERROR $!\"\n           fi\n\t   ((TOTAL_FILES=$TOTAL_FILES+1))\n\t    if (( $(($TOTAL_FILES % 1000)) == 0 )); then\n\t\tnow=`date +%s.%N`\n\t\tsec=`echo $now - $TIME_START | bc -l`\n\t\techo \"$TOTAL_FILES files created in $sec s\"\n\t    fi\n        done\n    else\n        for d in `seq 1 $SUBDIRS`; do\n            mkdir -p $DIR/dir.$d\n            mksubtree $DIR/dir.$d $(( $LVL + 1 ))\n        done\n    fi\n}\n\nwhile (( 1 )); do\n\n     mksubtree $ROOT 1\n\ndone\n"
  },
  {
    "path": "tests/fill_fs.sh",
    "content": "\nROOT=$1\nCOUNT=$2\n\nif [[ -z \"$ROOT\" || -z \"$COUNT\" ]]; then\n\techo \"Usage: $0 <dir> <ino_count>\"\n\texit 1\nfi\n\nif [ ! -d $ROOT ]; then\n\techo \"Missing directory: $ROOT\"\n\texit 1\nfi\n\nifree=`df -i $ROOT | tail -1 | awk '{ print $(NF-2) }'`\nicnt=$(( $COUNT * 105 / 100 )) # add 5% for dirs\nif (($ifree <= $icnt)); then\n\tilimit=$(( $ifree * 100 / 105 ))\n\techo \"Not enough free inodes: setting limit to $ilimit\"\n\tCOUNT=$ilimit\nfi\n\n# if count < 100, no dir level\n# if count < 10k, 1 single dir level\n# if count > 10k, 2 dir levels\n\nif (( $COUNT <= 100 )); then\n\tfor f in `seq 1 $COUNT`; do touch $ROOT/file.$f; done;\nelif (( $COUNT <= 10000 )); then\n\tfpd=$(( $COUNT/100 ))\n\tfor d in `seq 1 100`; do\n\t\techo  \"$ROOT/dir.$d\"\n\t\tmkdir -p \"$ROOT/dir.$d\" || exit 1\n\t\tfor f in `seq 1 $fpd`; do\n\t\t\ttouch \"$ROOT/dir.$d/file.$f\" || exit 1\n\t\tdone\n\tdone\nelse\n\tfpd=$(( $COUNT/10000 ))\n\tfor d in `seq 1 95`; do for s in `seq 1 95`; do\n\t\techo  \"$ROOT/dir.$d/subdir.$s\"\n\t\tmkdir -p $ROOT/dir.$d/subdir.$s || exit 1\n\t\tfor f in `seq 1 $fpd`; do\n\t\t\ttouch $ROOT/dir.$d/subdir.$s/file.$f || exit 1\n\t\tdone\n\tdone; done\nfi\n"
  },
  {
    "path": "tests/gprof-helper.c",
    "content": "/* gprof-helper.c -- preload library to profile pthread-enabled programs\n *\n * Authors: Sam Hocevar <sam at zoy dot org>\n *          Daniel Jnsson <danieljo at fagotten dot org>\n *\n *  This program is free software; you can redistribute it and/or\n *  modify it under the terms of the Do What The Fuck You Want To\n *  Public License as published by Banlu Kemiyatorn. See\n *  http://sam.zoy.org/projects/COPYING.WTFPL for more details.\n *\n * Compilation example:\n * gcc -shared -fPIC gprof-helper.c -o gprof-helper.so -lpthread -ldl\n *\n * Usage example:\n * LD_PRELOAD=./gprof-helper.so your_program\n */\n\n#define _GNU_SOURCE\n#include <sys/time.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <dlfcn.h>\n#include <pthread.h>\n\nstatic void * wrapper_routine(void *);\n\n/* Original pthread function */\nstatic int (*pthread_create_orig)(pthread_t *__restrict,\n                                  __const pthread_attr_t *__restrict,\n                                  void *(*)(void *),\n                                  void *__restrict) = NULL;\n\n/* Library initialization function */\nvoid wooinit(void) __attribute__((constructor));\n\nvoid wooinit(void)\n{\n    pthread_create_orig = dlsym(RTLD_NEXT, \"pthread_create\");\n    fprintf(stderr, \"pthreads: using profiling hooks for gprof\\n\");\n    if(pthread_create_orig == NULL)\n    {\n        char *error = dlerror();\n        if(error == NULL)\n        {\n            error = \"pthread_create is NULL\";\n        }\n        fprintf(stderr, \"%s\\n\", error);\n        exit(EXIT_FAILURE);\n    }\n}\n\n/* Our data structure passed to the wrapper */\ntypedef struct wrapper_s\n{\n    void * (*start_routine)(void *);\n    void * arg;\n\n    pthread_mutex_t lock;\n    pthread_cond_t  wait;\n\n    struct itimerval itimer;\n\n} wrapper_t;\n\n/* The wrapper function in charge for setting the itimer value */\nstatic void * wrapper_routine(void * data)\n{\n    /* Put user data in thread-local variables 
*/\n    void * (*start_routine)(void *) = ((wrapper_t*)data)->start_routine;\n    void * arg = ((wrapper_t*)data)->arg;\n\n    /* Set the profile timer value */\n    setitimer(ITIMER_PROF, &((wrapper_t*)data)->itimer, NULL);\n\n    /* Tell the calling thread that we don't need its data anymore */\n    pthread_mutex_lock(&((wrapper_t*)data)->lock);\n    pthread_cond_signal(&((wrapper_t*)data)->wait);\n    pthread_mutex_unlock(&((wrapper_t*)data)->lock);\n\n    /* Call the real function */\n    return start_routine(arg);\n}\n\n/* Our wrapper function for the real pthread_create() */\nint pthread_create(pthread_t *__restrict thread,\n                   __const pthread_attr_t *__restrict attr,\n                   void * (*start_routine)(void *),\n                   void *__restrict arg)\n{\n    wrapper_t wrapper_data;\n    int i_return;\n\n    /* Initialize the wrapper structure */\n    wrapper_data.start_routine = start_routine;\n    wrapper_data.arg = arg;\n    getitimer(ITIMER_PROF, &wrapper_data.itimer);\n    pthread_cond_init(&wrapper_data.wait, NULL);\n    pthread_mutex_init(&wrapper_data.lock, NULL);\n    pthread_mutex_lock(&wrapper_data.lock);\n\n    /* The real pthread_create call */\n    i_return = pthread_create_orig(thread,\n                                   attr,\n                                   &wrapper_routine,\n                                   &wrapper_data);\n\n    /* If the thread was successfully spawned, wait for the data\n     * to be released */\n    if(i_return == 0)\n    {\n        pthread_cond_wait(&wrapper_data.wait, &wrapper_data.lock);\n    }\n\n    pthread_mutex_unlock(&wrapper_data.lock);\n    pthread_mutex_destroy(&wrapper_data.lock);\n    pthread_cond_destroy(&wrapper_data.wait);\n\n    return i_return;\n}\n"
  },
  {
    "path": "tests/huge_posix/1-test_setup.sh",
    "content": "#!/bin/sh\n\nCFG_SCRIPT=\"../../scripts/rbh-config\"\n\nservice mysqld start\n\n$CFG_SCRIPT test_db  robinhood_test robinhood || $CFG_SCRIPT create_db robinhood_test localhost robinhood\n$CFG_SCRIPT empty_db robinhood_test\n\nLOOP_FILE=/tmp/rbh.loop.huge_cont\nMNT_PT=/tmp/mnt.rbh_huge\n\nCONT_SIZE_MB=4000\n\necho \"Checking test filesystem...\"\n\nif [[ ! -d $MNT_PT ]]; then\n    echo \"creating $MNT_PT\"\n    mkdir $MNT_PT || exit 1\nfi\n\nif [[ ! -s $LOOP_FILE ]]; then\n    echo \"creating big file container $LOOP_FILE...\"\n\n    # check available size\n    avail=`df -k /tmp | xargs | awk '{print $(NF-2)}'`\n    if (( $avail < $(( $CONT_SIZE_MB * 1024 )) )); then\n\techo \"Not enough space available in /tmp: free=$avail KB , needed=$(( $CONT_SIZE_MB * 1024 )) KB\"\n\texit 1\n    fi\n\n    dd if=/dev/zero of=$LOOP_FILE bs=1M count=$CONT_SIZE_MB || exit 1\n    echo \"formatting as ext3...\"\n    mkfs.ext3 -F $LOOP_FILE -i 1024 || exit 1\nfi\n\n# mount\nmnted=`mount | grep $MNT_PT | grep loop | wc -l`\nif (( $mnted == 0 )); then\n    mount -o loop -t ext3 $LOOP_FILE $MNT_PT || exit 1\nfi\n\n# fill it when plenty inodes\nino=`df -i $MNT_PT | xargs | awk '{print $(NF-2)}'`\necho \"$ino free inodes\"\n# take 10% margin\nino=$(( $ino * 9/10 ))\n\necho \"creating $ino inodes...\"\n../fill_fs.sh $MNT_PT $ino || exit 1\n\n"
  },
  {
    "path": "tests/huge_posix/2-run-tests.sh",
    "content": "#/bin/sh\n\nROOT=\"/tmp/mnt.rbh_huge\"\nBKROOT=\"/tmp/backend\"\nRBH_OPT=\"\"\nDB=robinhood_test\n\nXML=\"test_report.xml\"\nTMPXML_PREFIX=\"/tmp/report.xml.$$\"\nTMPERR_FILE=\"/tmp/err_str.$$\"\n\nTEMPLATE_DIR='../../doc/templates'\n\nif [[ -z \"$PURPOSE\" || $PURPOSE = \"TMP_FS_MGR\" ]]; then\n\tis_backup=0\n\tRH=\"../../src/robinhood/robinhood $RBH_OPT\"\n\tREPORT=\"../../src/robinhood/rbh-report $RBH_OPT\"\n\tCMD=robinhood\n\tPURPOSE=\"TMP_FS_MGR\"\nelif [[ $PURPOSE = \"BACKUP\" ]]; then\n\tis_backup=1\n\tRH=\"../../src/robinhood/rbh-backup $RBH_OPT\"\n\tREPORT=\"../../src/robinhood/rbh-backup-report $RBH_OPT\"\n\tCMD=rbh-backup\nfi\n\nPROC=$CMD\nCFG_SCRIPT=\"../../scripts/rbh-config\"\nCLEAN=\"rh_scan.log rh_migr.log rh_rm.log rh.pid rh_purge.log rh_report.log report.out rh_syntax.log\"\n\nSUMMARY=\"/tmp/test_${PROC}_summary.$$\"\n\nERROR=0\nRC=0\nSKIP=0\nSUCCESS=0\nDO_SKIP=0\n\nfunction error_reset\n{\n\tERROR=0\n\tDO_SKIP=0\n\tcp /dev/null $TMPERR_FILE\n}\n\nfunction error\n{\n\techo \"ERROR $@\"\n\t((ERROR=$ERROR+1))\n\n\tif (($junit)); then\n\t \tgrep -i error *.log >> $TMPERR_FILE\n\t\techo \"ERROR $@\" >> $TMPERR_FILE\n\tfi\n}\n\nfunction set_skipped\n{\n\tDO_SKIP=1\n}\n\nfunction clean_logs\n{\n\tfor f in $CLEAN; do\n\t\tif [ -s $f ]; then\n\t\t\tcp /dev/null $f\n\t\tfi\n\tdone\n}\n\n\nfunction clean_db\n{\n\techo \"Destroying any running instance of robinhood...\"\n\tpkill robinhood\n\tpkill rbh-backup\n\n\tif [ -f rh.pid ]; then\n\t\techo \"killing remaining robinhood process...\"\n\t\tkill `cat rh.pid`\n\t\trm -f rh.pid\n\tfi\n\n\tsleep 1\n\techo \"Cleaning robinhood's DB...\"\n\t$CFG_SCRIPT empty_db $DB > /dev/null\n}\n\n############################## TEST SECTION #########################\n\nfunction scan_progress\n{\n\tcfg=$1\n\t# wait for command to start\n\tsleep 1\n\twhile pgrep $CMD >/dev/null ; do\n\t\t#$REPORT -f $cfg -i --csv | grep \"Total\"\n\t\t$REPORT -f $cfg -a --csv > /tmp/report.out\n\t\tentries=`grep entries_scanned 
/tmp/report.out | cut -d ',' -f 2 | tr -d ' '`\n\t\t[ -z $entries ] && entries=0\n\t\tspeed=`grep scan_current_speed /tmp/report.out | cut -d ',' -f 2 | tr -d ' '`\n\t\t[ -z $speed ] && speed=0\n\t\techo -ne \"\\r$entries entries scanned @ $speed entries/sec        \"\n\t\tsleep 10\n\tdone\n\techo\n}\n\nfunction test_scan_report\n{\n\tconfig_file=$1\n\tpolicy_str=\"$2\"\n\n\tclean_logs\n\n\techo \"1-Scanning...\"\n\tscan_progress ./cfg/$config_file &\n\tscan_t0=`date +%s.%N`\n\t$RH -f ./cfg/$config_file --scan -l DEBUG -L rh_scan.log  --once || error \"scanning filesystem\"\n\tscan_t1=`date +%s.%N`\n\n\t# wait for progress function to end\n\twait %1\n\n\t# get stats from log:\n\tcompl_line=`grep \"Full scan of $ROOT completed\" rh_scan.log | cut -d '|' -f 2`\n\techo $compl_line\n\n\tduration=`echo $compl_line | awk '{print $NF}' | sed -e \"s/[ s]//g\"`\n\tdur_1=`echo \"$scan_t1 - $scan_t0\" | bc -l`\n\tdur_1=`printf \"%.2f\" $dur_1`\n\techo \"Duration: scan=$duration, total=$dur_1\"\n\n\tcp /dev/null rh_scan.log\n\n\t# testing second scan time\n\techo \"2-Second scan...\"\n\tscan_progress ./cfg/$config_file &\n\tscan_t0=`date +%s.%N`\n\t$RH -f ./cfg/$config_file --scan -l DEBUG -L rh_scan.log  --once || error \"scanning filesystem\"\n\tscan_t1=`date +%s.%N`\n\n\t# wait for progress function to end\n\twait %1\n\n\t# get stats from log:\n\tcompl_line=`grep \"Full scan of $ROOT completed\" rh_scan.log | cut -d '|' -f 2`\n\techo $compl_line\n\n\tduration_2=`echo $compl_line | awk '{print $NF}' | sed -e \"s/[ s]//g\"`\n\tdur_2=`echo \"$scan_t1 - $scan_t0\" | bc -l`\n\tdur_2=`printf \"%.2f\" $dur_2`\n\techo \"Duration: scan=$duration_2, total=$dur_2\"\n\n\techo \"3-Compare with find -ls...\"\n\tscan_t0=`date +%s.%N`\n\tfind $ROOT -ls > /dev/null\n\tscan_t1=`date +%s.%N`\n\tdur_3=`echo \"$scan_t1 - $scan_t0\" | bc -l`\n\tdur_3=`printf \"%.2f\" $dur_3`\n\techo \"Duration: scan=$dur_3\"\n\n\techo \"`date`; scan1: $duration, $dur_1 ; scan2: $duration_2, $dur_2; find: 
$dur_3\" >> perf_history.log\n\n\t# duration of report commands:\n\tfor opt in \"--fs-info\" \"--class-info\" \"--user-info=root\" \"--group-info=root\" \"--top-dirs\" \"--top-size\" \"--top-purge\" \"--top-rmdir\" \"--top-users\"; do\n\t\treport_t0=`date +%s.%N`\n\t\t$REPORT -f ./cfg/$config_file $opt > /dev/null\n\t\treport_t1=`date +%s.%N`\n\t\tdiff=`echo \"$report_t1 - $report_t0\" | bc -l`\n\t\tdiff_ms=`printf \"%.3f\" $diff`\n\t\tdiff_s=`printf \"%.0f\" $diff`\n\t\techo \"Report time for $opt: $diff_ms sec\"\n\t\techo \"`date`; report $opt: $diff_ms\" >> perf_history.log\n\t\t(( $diff_s > 1 )) && echo \"$opt is slow!\"\n\tdone\n\n}\n\n######################### END OF TEST FUNCTIONS #####################\n\nonly_test=\"\"\nquiet=0\njunit=0\n\nwhile getopts qj o\ndo\tcase \"$o\" in\n\tq)\tquiet=1;;\n\tj)\tjunit=1;;\n\t[?])\tprint >&2 \"Usage: $0 [-q] [-j] test_nbr ...\"\n\t\texit 1;;\n\tesac\ndone\nshift $(($OPTIND-1))\n\nif [[ -n \"$1\" ]]; then\n\tonly_test=$1\nfi\n\n# initialize tmp files for XML report\nfunction junit_init\n{\n\tcp /dev/null $TMPXML_PREFIX.stderr\n\tcp /dev/null $TMPXML_PREFIX.stdout\n\tcp /dev/null $TMPXML_PREFIX.tc\n}\n\n# report a success for a test\nfunction junit_report_success # (class, test_name, time)\n{\n\tclass=\"$1\"\n\tname=\"$2\"\n\ttime=\"$3\"\n\n\t# remove quotes in name\n\tname=`echo \"$name\" | sed -e 's/\"//g'`\n\n\techo \"<testcase classname=\\\"$class\\\" name=\\\"$name\\\" time=\\\"$time\\\" />\" >> $TMPXML_PREFIX.tc\n}\n\n# report a failure for a test\nfunction junit_report_failure # (class, test_name, time, err_type)\n{\n\tclass=\"$1\"\n\tname=\"$2\"\n\ttime=\"$3\"\n\terr_type=\"$4\"\n\n\t# remove quotes in name\n\tname=`echo \"$name\" | sed -e 's/\"//g'`\n\n\techo \"<testcase classname=\\\"$class\\\" name=\\\"$name\\\" time=\\\"$time\\\">\" >> $TMPXML_PREFIX.tc\n\techo -n \"<failure type=\\\"$err_type\\\"><![CDATA[\" >> $TMPXML_PREFIX.tc\n\tcat $TMPERR_FILE\t>> $TMPXML_PREFIX.tc\n\techo \"]]></failure>\" \t>> 
$TMPXML_PREFIX.tc\n\techo \"</testcase>\" \t>> $TMPXML_PREFIX.tc\n}\n\nfunction junit_write_xml # (time, nb_failure, tests)\n{\n\ttime=$1\n\tfailure=$2\n\ttests=$3\n\n\tcp /dev/null $XML\n#\techo \"<?xml version=\\\"1.0\\\" encoding=\\\"UTF-8\\\" ?>\" > $XML\n\techo \"<?xml version=\\\"1.0\\\" encoding=\\\"ISO8859-2\\\" ?>\" > $XML\n\techo \"<testsuite name=\\\"robinhood.PosixTests\\\" errors=\\\"0\\\" failures=\\\"$failure\\\" tests=\\\"$tests\\\" time=\\\"$time\\\">\" >> $XML\n\tcat $TMPXML_PREFIX.tc \t\t>> $XML\n\techo -n \"<system-out><![CDATA[\" >> $XML\n\tcat $TMPXML_PREFIX.stdout \t>> $XML\n\techo \"]]></system-out>\"\t\t>> $XML\n\techo -n \"<system-err><![CDATA[\" >> $XML\n\tcat $TMPXML_PREFIX.stderr \t>> $XML\n\techo \"]]></system-err>\" \t>> $XML\n\techo \"</testsuite>\"\t\t>> $XML\n}\n\n\nfunction cleanup\n{\n\techo \"cleanup...\"\n        if (( $quiet == 1 )); then\n                clean_db | tee \"rh_test.log\" | egrep -i -e \"OK|ERR|Fail|skip|pass\"\n        else\n                clean_db\n        fi\n}\n\nfunction run_test\n{\n\tindex=$1\n\tfunc=$2\n\tdesc=$4\n\tshift\n\n\tindex_clean=`echo $index | sed -e 's/[a-z]//'`\n\n\tif [[ -z $only_test || \"$only_test\" = \"$index\" || \"$only_test\" = \"$index_clean\" ]]; then\n\t\tcleanup\n\t\techo\n\t\techo \"==== TEST #$index $func ($desc) ====\"\n\n\t\terror_reset\n\n\t\tt0=`date \"+%s.%N\"`\n\n\t\tif (($junit == 1)); then\n\t\t\t# markup in log\n\t\t\techo \"==== TEST #$index $func ($desc) ====\" >> $TMPXML_PREFIX.stdout\n\t\t\techo \"==== TEST #$index $func ($desc) ====\" >> $TMPXML_PREFIX.stderr\n\t\t\t\"$@\" 2>> $TMPXML_PREFIX.stderr >> $TMPXML_PREFIX.stdout\n\t\telif (( $quiet == 1 )); then\n\t\t\t\"$@\" 2>&1 > rh_test.log\n\t\t\tegrep -i -e \"OK|ERR|Fail|skip|pass\" rh_test.log\n\t\telse\n\t\t\t\"$@\"\n\t\tfi\n\n\t\tt1=`date \"+%s.%N\"`\n\t\tdur=`echo \"($t1-$t0)\" | bc -l`\n\t\techo \"duration: $dur sec\"\n\n\t\tif (( $DO_SKIP )); then\n\t\t\techo \"(TEST #$index : skipped)\" >> 
$SUMMARY\n\t\t\tSKIP=$(($SKIP+1))\n\t\telif (( $ERROR > 0 )); then\n\t\t\techo \"TEST #$index : *FAILED*\" >> $SUMMARY\n\t\t\tRC=$(($RC+1))\n\t\t\tif (( $junit )); then\n\t\t\t\tjunit_report_failure \"robinhood.$PURPOSE.Posix\" \"Test #$index: $desc\" \"$dur\" \"ERROR\"\n\t\t\tfi\n\t\telse\n\t\t\techo \"TEST #$index : OK\" >> $SUMMARY\n\t\t\tSUCCESS=$(($SUCCESS+1))\n\t\t\tif (( $junit )); then\n\t\t\t\tjunit_report_success \"robinhood.$PURPOSE.Posix\" \"Test #$index: $desc\" \"$dur\"\n\t\t\tfi\n\n\t\tfi\n\tfi\n}\n\n# clear summary\ncp /dev/null $SUMMARY\n\n#init xml report\nif (( $junit )); then\n\tjunit_init\n\ttinit=`date \"+%s.%N\"`\nfi\n\n\n######### TEST LIST ###########\n\n# syntax: run_test function  config\tdescr\targs\n# e.g.\n# run_test 218\ttest_rmdir \trmdir.conf  \"rmdir policies\"\t16 32\n\nrun_test\t1\ttest_scan_report common.conf \"scan and reports on large FS\"\nrun_test\t2\ttest_scan_report innodb.conf \"scan and reports on large FS (innodb)\"\n\n\n\necho\necho \"========== TEST SUMMARY ($PURPOSE) ==========\"\ncat $SUMMARY\necho \"=============================================\"\n\n#init xml report\nif (( $junit )); then\n\ttfinal=`date \"+%s.%N\"`\n\tdur=`echo \"($tfinal-$tinit)\" | bc -l`\n\techo \"total test duration: $dur sec\"\n\tjunit_write_xml \"$dur\" $RC $(( $RC + $SUCCESS ))\n\trm -f $TMPXML_PREFIX.stderr $TMPXML_PREFIX.stdout $TMPXML_PREFIX.tc\nfi\n\nrm -f $SUMMARY\nif (( $RC > 0 )); then\n\techo \"$RC tests FAILED, $SUCCESS successful, $SKIP skipped\"\nelse\n\techo \"All tests passed ($SUCCESS successful, $SKIP skipped)\"\nfi\nrm -f $TMPERR_FILE\nexit $RC\n"
  },
  {
    "path": "tests/huge_posix/cfg/common.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\nGeneral\n{\n\tfs_path = \"/tmp/mnt.rbh_huge\";\n\tfs_type = ext3;\n}\n\nLog\n{\n    # Log verbosity level\n    # Possible values are: CRIT, MAJOR, EVENT, VERB, DEBUG, FULL\n    debug_level = EVENT;\n\n    # Log file\n    log_file = stdout;\n\n    # File for reporting purge events\n    report_file = \"/dev/null\";\n\n    # set alert_file, alert_mail or both depending on the alert method you wish\n    alert_file = \"/dev/null\";\n\n    # stats update interval (in DB)\n    stats_interval = 10s;\n\n}\n\nListManager\n{\n\tMySQL\n\t{\n\t\tserver = \"localhost\";\n\t\tdb = \"robinhood_test\";\n\t\tuser = \"robinhood\";\n\t\t# password or password_file are mandatory\n\t\tpassword = \"robinhood\";\n        engine = InnoDB;\n\t}\n\n\tSQLite {\n\t        db_file = \"/tmp/robinhood_sqlite_db\" ;\n        \tretry_delay_microsec = 1000 ;\n\t}\n\n#\taccounting = no;\n}\n\n# for tests with backup purpose\nBackend\n{\n\topt_string = \"root=/tmp/backend\";\n}\n\n# Entry Processor configuration\nEntryProcessor\n{\n    # nbr of worker threads for processing pipeline tasks\n    nb_threads = 9 ;\n\n    # Max number of operations in the Entry Processor pipeline.\n    # If the number of pending operations exceeds this limit,\n    # info collectors are suspended until this count decreases\n#    max_pending_operations = 100000 ;\n    max_pending_operations = 10000 ;\n\n    # Optionnaly specify a maximum thread count for each stage of the pipeline:\n    # <stagename>_threads_max = <n> (0: use default)\n    # STAGE_GET_FID_threads_max\t= 8 ;\n    # STAGE_GET_INFO_DB_threads_max\t= 8 ;\n    # STAGE_GET_INFO_FS_threads_max\t= 8 ;\n    # STAGE_REPORTING_threads_max\t= 8 ;\n    STAGE_DB_APPLY_threads_max\t= 8;\n\n    # if set to FALSE, classes will only be matched\n    # at policy application time (not during a scan or reading changelog)\n    match_classes = TRUE;\n}\n\n"
  },
  {
    "path": "tests/huge_posix/cfg/innodb.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\nGeneral\n{\n\tfs_path = \"/tmp/mnt.rbh_huge\";\n\tfs_type = ext3;\n}\n\nLog\n{\n    # Log verbosity level\n    # Possible values are: CRIT, MAJOR, EVENT, VERB, DEBUG, FULL\n    debug_level = EVENT;\n\n    # Log file\n    log_file = stdout;\n\n    # File for reporting purge events\n    report_file = \"/dev/null\";\n\n    # set alert_file, alert_mail or both depending on the alert method you wish\n    alert_file = \"/dev/null\";\n\n    # stats update interval (in DB)\n    stats_interval = 10s;\n\n}\n\nListManager\n{\n\tMySQL\n\t{\n\t\tserver = \"localhost\";\n\t\tdb = \"robinhood_test\";\n\t\tuser = \"robinhood\";\n\t\t# password or password_file are mandatory\n\t\tpassword = \"robinhood\";\n\t\tengine = InnoDB;\n\t}\n\n\tSQLite {\n\t        db_file = \"/tmp/robinhood_sqlite_db\" ;\n        \tretry_delay_microsec = 1000 ;\n\t}\n\n#\taccounting = no;\n}\n\n# for tests with backup purpose\nBackend\n{\n\topt_string = \"root=/tmp/backend\";\n}\n\n# Entry Processor configuration\nEntryProcessor\n{\n    # nbr of worker threads for processing pipeline tasks\n    nb_threads = 9 ;\n\n    # Max number of operations in the Entry Processor pipeline.\n    # If the number of pending operations exceeds this limit,\n    # info collectors are suspended until this count decreases\n#    max_pending_operations = 100000 ;\n    max_pending_operations = 10000 ;\n\n    # Optionnaly specify a maximum thread count for each stage of the pipeline:\n    # <stagename>_threads_max = <n> (0: use default)\n    # STAGE_GET_FID_threads_max\t= 8 ;\n    # STAGE_GET_INFO_DB_threads_max\t= 8 ;\n    # STAGE_GET_INFO_FS_threads_max\t= 8 ;\n    # STAGE_REPORTING_threads_max\t= 8 ;\n    STAGE_DB_APPLY_threads_max\t= 8;\n\n    # if set to FALSE, classes will only be matched\n    # at policy application time (not during a scan or reading changelog)\n    match_classes = TRUE;\n}\n\n"
  },
  {
    "path": "tests/shook_configure.sh",
    "content": "#!/bin/sh\n\nSHOOK_DIR=~/shook.git\nexport CFLAGS=\"-g -I$SHOOK_DIR/src/server -I$SHOOK_DIR/src/common\"\nexport LDFLAGS=\"-L$SHOOK_DIR/src/server/.libs\"\n./configure --with-purpose=HSM_LITE\n"
  },
  {
    "path": "tests/test_compil_switches.sh",
    "content": "#!/bin/sh\n\npurp_list=$*\n\nif [[ -z $purp_list ]]; then\n\techo \"Usage: $0 <purpose list>\"\n\texit 1\nfi\n\n# count NB procs\nNB_PROC=`cat /proc/cpuinfo | grep processor | tail -1 | cut -d ':' -f 2 | tr -d ' '`\n((NB_PROC=$NB_PROC+1))\n\necho \"Compilation using $NB_PROC processors\"\n\nERRORS=\"\"\n\nfor purp in $purp_list; do\nfor lustre in \"--enable-lustre\" \"--disable-lustre\"; do\n\n# default per purpose and DB\nconfig_cmd=\"./configure --with-purpose=$purp $lustre\"\n\nif [[ $purp = \"LUSTRE_HSM\" && $lustre = \"--disable-lustre\" ]]; then echo \"skipping conflicting switches: $config_cmd\"; continue; fi\n\necho \"TEST: $config_cmd\"\n\n(CFLAGS=\"$CFLAGS_OPT\" $config_cmd && make -j $NB_PROC ) 2>&1 | grep -v Werror | grep -v \"unused variable\" | grep -v \"not used\" | egrep -i 'error|warning' \\\n\t\t&& ( echo FAILED; ERRORS=\"$ERRORS Error using compilation switches:$config_cmd\\n\" )\n\nmake clean 2>&1 >/dev/null\n\ndone\ndone\n\nif [[ -n $ERRORS ]]; then\n\techo \"$ERRORS\"\n\texit 1\nfi\n\necho \"Building rpms\"\n\nERRORS=\"\"\n\nfor purp in $purp_list; do\nfor lustre in \"--enable-lustre\" \"--disable-lustre\"; do\n\n# default per purpose and DB\nconfig_cmd=\"./configure --with-purpose=$purp $lustre\"\n\nif [[ $purp = \"LUSTRE_HSM\" && $lustre = \"--disable-lustre\" ]]; then echo \"skipping conflicting switches: $config_cmd\"; continue; fi\n\necho \"TEST: $config_cmd\"\n\n(CFLAGS=\"$CFLAGS_OPT\" $config_cmd && make rpm ) 2>&1 | grep -v Werror | grep -v \"unused variable\" |  grep -v \"not used\" | egrep -i 'error|warning' \\\n\t\t&& ( echo FAILED; ERRORS=\"$ERRORS Error using compilation switches:$config_cmd\\n\" )\n\nmake clean 2>&1 >/dev/null\n\ndone\ndone\n\nif [[ -n $ERRORS ]]; then\n\techo \"$ERRORS\"\n\texit 1\nfi\n\necho \"Now testing advanced compilation switches\"\n\n\n# advanced switches\n\nfor purp in $purp_list; do\nfor lustre in \"--enable-lustre\" \"--disable-lustre\"; do\nfor fid in \"--disable-fid-support\" 
\"--enable-fid-support\"; do\nfor chglog in \"--disable-changelogs\" \"--enable-changelogs\"; do\nfor db in MYSQL SQLITE; do\nfor mdsstat in \"--disable-mds-stat\" \"--enable-mds-stat\"; do\n\nconfig_cmd=\"./configure --with-db=$db --with-purpose=$purp $lustre $fid $chglog $mdsstat\"\n\n# skip conflicting options\nif [[ $lustre = \"--disable-lustre\" && $fid = \"--enable-fid-support\" ]]; then echo \"skipping conflicting switches: $config_cmd\"; continue; fi\nif [[ $lustre = \"--disable-lustre\" && $chglog = \"--enable-changelogs\" ]]; then echo \"skipping conflicting switches: $config_cmd\"; continue; fi\nif [[ $lustre = \"--disable-lustre\" && $mdsstat = \"--enable-mds-stat\" ]]; then echo \"skipping conflicting switches: $config_cmd\"; continue; fi\nif [[ $fid = \"--disable-fid-support\" && $chglog = \"--enable-changelogs\" ]]; then echo \"skipping conflicting switches: $config_cmd\"; continue; fi\nif [[ $purp = \"LUSTRE_HSM\" && $lustre = \"--disable-lustre\" ]]; then echo \"skipping conflicting switches: $config_cmd\"; continue; fi\nif [[ $purp = \"LUSTRE_HSM\" && $fid = \"--disable-fid-support\" ]]; then echo \"skipping conflicting switches: $config_cmd\"; continue; fi\nif [[ $purp = \"LUSTRE_HSM\" && $chglog = \"--disable-changelogs\" ]]; then echo \"skipping conflicting switches: $config_cmd\"; continue; fi\n\necho \"TEST: $config_cmd\"\n\n(CFLAGS=\"$CFLAGS_OPT\" $config_cmd && make -j $NB_PROC ) 2>&1 | grep -v Werror | grep -v \"unused variable\" | grep -v \"not used\" | egrep -i 'error|warning' \\\n\t\t&& ( echo FAILED; ERRORS=\"$ERRORS Error using compilation switches:$config_cmd\\n\" )\n\nmake clean 2>&1 >/dev/null\n\ndone\ndone\ndone\ndone\ndone\ndone\n\necho -e $ERRORS\n\n"
  },
  {
    "path": "tests/test_rpmbuild.sh",
    "content": "#!/bin/bash\nset -e\nset -x\n\ntopdir=$(readlink -m $(dirname $0)/..)\ncd $topdir\nrpmdir=$topdir/rpms\n\nfunction error\n{\n\techo $* >&2\n\texit 1\n}\n\nfunction clean_rpms\n{\n\tlocal dir=$1\n\t# clean rpms\n\tfind $dir/RPMS -type f -name \"*.rpm\" -delete\n}\n\nfunction check_posix_rpm\n{\n\tlocal dir=$1\n\n\t# check that posix RPM was generated (default RPM build)\n\tlocal rpm=$(ls -t $dir/RPMS/*/robinhood-posix*)\n\t[ -z \"$rpm\" ] && error \"robinhood-posix not found\"\n\n\trpm -qpR $rpm | grep lustre && error \"nothing expected about lustre\" || true\n}\n\nfunction check_lustre_rpm\n{\n\tlocal dir=$1\n\n\t# check that Lustre RPM was generated\n\tlocal rpm=$(ls -t $dir/RPMS/*/robinhood-lustre*)\n\t[ -z \"$rpm\" ] && error \"robinhood-lustre not found\"\n\n\trpm -qpR $rpm | grep \"^lustre\" || error \"expected requirement about lustre\"\n\trpm -qpl $rpm | grep mod_lhsm || error \"No lhsm module?\"\n\trpm -qpl $rpm | grep mod_backup || error \"No backup module?\"\n}\n\t\n\n############# MAIN #############\n\n# clean all RPMS\nfind $rpmdir -type f -name \"*.rpm\" -delete\n\n### check SRPM behaviors ###\n./configure --enable-dist\nmake srpm\n\n# get rpm name\nsrpm=$(ls -t $rpmdir/SRPMS/*.src.rpm | head -n 1)\n[ -z \"$srpm\" ] && error \"src rpm not found\"\n\n# check that default build is POSIX\nrpmbuild --rebuild $srpm --define=\"_topdir $rpmdir\"\n\ncheck_posix_rpm $rpmdir\nclean_rpms $rpmdir\n\n# now tests for Lustre\n\n# is there any package providing lustre-client?\np=$(rpm -q --whatprovides lustre-client | grep -v \"no package\") || true\nif [ -z \"$p\" ]; then\n\tp=$(rpm -q lustre | grep -v \"is not installed\") || true\n\tif [ -z \"$p\" ]; then\n\t\terror \"No lustre package found\"\n\tfi\n\n\t# explicit lustre package name\n\trpmbuild --rebuild $srpm --define=\"_topdir $rpmdir\" --with lustre --define \"lpackage lustre\"\nelse\n\t# default is lustre-client\n\trpmbuild --rebuild $srpm --define=\"_topdir $rpmdir\" --with 
lustre\nfi\n\ncheck_lustre_rpm $rpmdir\nclean_rpms $rpmdir\n\n### check 'make rpm' behaviors ###\n./configure --disable-lustre \nmake rpm\n\ncheck_posix_rpm $rpmdir\nclean_rpms $rpmdir\n\n./configure --enable-lustre\nmake rpm\n\ncheck_lustre_rpm $rpmdir\nclean_rpms $rpmdir\n\n"
  },
  {
    "path": "tests/test_suite/1-test_setup_lustre.sh",
    "content": "#!/bin/bash\n\n# Test whether the testsuite is running from the source tree or has\n# been installed.\nif [ -d \"../../src/robinhood\" ]; then\n    RBH_TEST_INSTALLED=0\nelse\n    RBH_TEST_INSTALLED=1\nfi\n\nif [ $RBH_TEST_INSTALLED = 0 ]; then\n    CFG_SCRIPT=\"../../scripts/rbh-config\"\nelse\n    CFG_SCRIPT=\"rbh-config\"\nfi\n\nBKROOT=\"/tmp/backend\"\n\nif [ -z \"$LFS\" ]; then\n\tLFS=lfs\n\tLCTL=lctl\n\tCOPYTOOL=lhsmtool_posix\nelse\n\tlutils_dir=$(dirname $LFS)\n\tLCTL=$lutils_dir/lctl\n\tCOPYTOOL=$lutils_dir/lhsmtool_posix\nfi\n\nfunction start_service {\n    if [ -x /usr/bin/systemctl ]; then\n        #RHEL7\n        systemctl start $1\n    else\n        #RHEL6 or less\n        service $1 start\n    fi\n}\n\nif rpm -q mariadb; then\n\tstart_service mariadb\nelse\n\tstart_service mysqld\nfi\n\n$CFG_SCRIPT test_db  robinhood_lustre robinhood || $CFG_SCRIPT create_db robinhood_lustre localhost robinhood\n$CFG_SCRIPT empty_db robinhood_lustre\n$CFG_SCRIPT repair_db robinhood_lustre\n\nif [[ -z \"$NOLOG\" || $NOLOG = \"0\" ]]; then\n\t$CFG_SCRIPT enable_chglogs lustre\nfi\n\nif [[ $PURPOSE = \"LUSTRE_HSM\" ]]; then\n\n\tfor mdt in $(lctl list_param mdt.*) ; do\n\t\techo -n \"checking coordinator status on $(basename $mdt): \"\n\t\tstatus=`lctl get_param -n $mdt.hsm_control`\n\t\techo $status\n\n\t\tif [[ $status != \"enabled\" ]]; then\n\t\t\t$LCTL set_param $mdt.hsm_control=enabled\n\t\t\tsleep 2\n\t\tfi\n\n\t\t$LCTL set_param $mdt.hsm.grace_delay=10\n\t\t$LCTL set_param $mdt.hsm.loop_period=1\n\t\t$LCTL set_param $mdt.hsm.max_requests=8\n\tdone\n\n    # start copytool on a distinct mount point\n    [ -d \"/mnt/lustre2\" ] || mkdir \"/mnt/lustre2\"\n    # test if lustre2 is mounted\n    mount | grep \"/mnt/lustre2 \"\n    if (( $? 
!= 0 )); then\n        mnt_str=$(mount | grep \"/mnt/lustre \" | awk '{print $1}' | head -n 1)\n        if [[ -z \"$mnt_str\" ]]; then\n            echo \"/mnt/lustre is not mounted\"\n            exit 1\n        fi\n        mnt_opt=$(mount | grep \"/mnt/lustre \" | sed -e 's/.*(\\([^)]*\\))/\\1/' | head -n 1 | sed -e \"s/rw,//\" | sed -e \"s/seclabel,//\")\n        mount -t lustre -o $mnt_opt $mnt_str /mnt/lustre2 || exit 1\n    fi\n\n\techo \"Checking if copytool is already running...\"\n\tif (( `pgrep lhsmtool_posix | wc -l` > 0 )); then\n\t\techo \"Already running\"\n\telse\n\t\tmkdir -p $BKROOT\n\t\t$COPYTOOL --hsm_root=$BKROOT --no-shadow --daemon /mnt/lustre2 2>/dev/null &\n\tfi\nfi\n\n# workaround for statahead issues\nlctl set_param llite.lustre-*.statahead_max=0\n\n# lazy statfs make 'df' tests fail\nlctl set_param llite.lustre-*.lazystatfs=0\nlctl set_param llite.lustre-*.statfs_max_age=0\n\n# create testuser\ngetent passwd testuser || useradd testuser || exit 1\n\n# create testgroup\ngetent group testgroup || groupadd testgroup || exit 1\n"
  },
  {
    "path": "tests/test_suite/1-test_setup_posix.sh",
    "content": "#!/bin/bash\n\n# Test whether the testsuite is running from the source tree or has\n# been installed.\nif [ -d \"../../src/robinhood\" ]; then\n    RBH_TEST_INSTALLED=0\nelse\n    RBH_TEST_INSTALLED=1\nfi\n\nif [ $RBH_TEST_INSTALLED = 0 ]; then\n    CFG_SCRIPT=\"../../scripts/rbh-config\"\nelse\n    CFG_SCRIPT=\"rbh-config\"\nfi\n\n\nfunction start_service {\nif [ -x /usr/bin/systemctl ]; then\n\t#RHEL7\n\tsystemctl start $1\nelse\n\t#RHEL6 or less\n\tservice $1 start\nfi\n}\n\nif rpm -q mariadb; then\n\tstart_service mariadb\nelse\n\tstart_service mysqld\nfi\n\n$CFG_SCRIPT test_db  robinhood_test robinhood || $CFG_SCRIPT create_db robinhood_test localhost robinhood\n$CFG_SCRIPT empty_db robinhood_test\n$CFG_SCRIPT repair_db robinhood_test\n\n\nLOOP_FILE=/tmp/rbh.loop.cont\nMNT_PT=/tmp/mnt.rbh\n\necho \"Checking test filesystem...\"\n\nif [[ ! -d $MNT_PT ]]; then\n    echo \"creating $MNT_PT\"\n    mkdir $MNT_PT || exit 1\nfi\n\nmnted=`mount | grep $MNT_PT | grep loop | wc -l`\n\nif (( $mnted == 0 )); then\n    if [[ ! -s $LOOP_FILE ]]; then\n        echo \"creating file container $LOOP_FILE...\"\n        dd if=/dev/zero of=$LOOP_FILE bs=1M count=400 || exit 1\n        echo \"formatting as ext4...\"\n        mkfs.ext4 -F $LOOP_FILE || exit 1\n    else\n        # make sure it is consistent\n        mkfs.ext4 -F $LOOP_FILE || exit 1\n    fi\n\n    mount -o loop,user_xattr -t ext4 $LOOP_FILE $MNT_PT || exit 1\nfi\n\ndf $MNT_PT\n\n# create testuser\ngetent passwd testuser || useradd testuser || exit 1\n\n# create testgroup\ngetent group testgroup || groupadd testgroup || exit 1\n"
  },
  {
    "path": "tests/test_suite/2-run-tests.sh",
    "content": "#!/bin/bash\n# -*- mode: shell; sh-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\n[ -z \"$LFS\" ] && LFS=lfs\n[ -z \"$TESTOPEN\" ] && TESTOPEN=/usr/lib64/lustre/tests/openfile\n\nif [ -z \"$POSIX_MODE\" ]; then\n    export RH_ROOT=\"/mnt/lustre\"\n    export FS_TYPE=lustre\n    export RH_DB=robinhood_lustre\n    echo \"Lustre test mode\"\nelse\n    export RH_ROOT=\"/tmp/mnt.rbh\"\n    export FS_TYPE=ext4\n    export RH_DB=robinhood_test\n    echo \"POSIX test mode\"\n    # force no log\n    NOLOG=1\nfi\n\nBKROOT=\"/tmp/backend\"\nRBH_OPT=\"\"\n\n# Test whether the testsuite is running from the source tree or has\n# been installed.\nif [ -d \"../../src/robinhood\" ]; then\n    CFG_SCRIPT=\"../../scripts/rbh-config\"\n    RBH_BINDIR=\"../../src/robinhood\"\n    RBH_SBINDIR=\"../../src/robinhood\"\n    RBH_MODDIR=$(readlink -m \"../../src/modules/.libs\")\n    export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$RBH_MODDIR\n    RBH_TEMPLATE_DIR=\"../../doc/templates\"\n    RBH_TESTS_DIR=\"..\"\n    RBH_CFG_DIR=\"./cfg\"\nelse\n    CFG_SCRIPT=\"rbh-config\"\n    RBH_BINDIR=${RBH_BINDIR:-\"/usr/bin\"}\n    RBH_SBINDIR=${RBH_SBINDIR:-\"/usr/sbin\"}\n    RBH_INSTALL_DIR=${RBH_INSTALL_DIR:-\"/usr/share/robinhood\"}\n    RBH_TESTS_DIR=\"${RBH_INSTALL_DIR}/tests\"\n    RBH_CFG_DIR=\"$RBH_TESTS_DIR/test_suite/cfg\"\n    RBH_TEMPLATE_DIR=\"$RBH_INSTALL_DIR/doc/templates\"\nfi\n\nexport RBH_TEST_POLICIES=\"$(pwd)/test_policies.inc\"\nexport RBH_TEST_LAST_ACCESS_ONLY_ATIME=${RH_TEST_LAST_ACCESS_ONLY_ATIME:-no}\nexport RBH_NUM_UIDGID=${RBH_NUM_UIDGID:-no}\n\n# Retrieve the testuser UID and testgroup GID, as we may need them\n# later\nif [[ $RBH_NUM_UIDGID = \"yes\" ]]; then\n    testuser_str=$(getent passwd testuser | cut -d: -f3)\n    testgroup_str=$(getent group testgroup | cut -d: -f3)\n    root_str=0\nelse\n    testuser_str=testuser\n    testgroup_str=testgroup\n    
root_str=root\nfi\n\nXML=\"test_report.xml\"\nTMPXML_PREFIX=\"/tmp/report.xml.$$\"\nTMPERR_FILE=\"/tmp/err_str.$$\"\n\nif [[ ! -d $RH_ROOT ]]; then\n\techo \"Creating directory $RH_ROOT\"\n\tmkdir -p \"$RH_ROOT\"\nelse\n\techo \"Creating directory $RH_ROOT\"\nfi\n\nSYNC_OPT=\"--run=migration(all) --force-all\"\nPURGE_OPT=\"--run=purge(all,target-usage=0)\"\n\nif [ -z ${WITH_VALGRIND+x} ]; then\n    VALGRIND=\nelse\n    # Run all executables under valgrind. Each instance will create a\n    # valgrind log file, vg-test_<test number>-<pid>.log\n    rm -f vg-test_*.log\n    export G_SLICE=always-malloc,debug-blocks\n    export G_DEBUG=fatal-warnings,fatal-criticals,gc-friendly\n    VALGRIND=\"valgrind --gen-suppressions=all --suppressions=valgrind.supp --leak-check=full --log-file=vg-test_%q{index}-%p.log\"\nfi\n\nRH=\"$VALGRIND $RBH_SBINDIR/robinhood $RBH_OPT\"\nREPORT=\"$VALGRIND $RBH_SBINDIR/rbh-report $RBH_OPT\"\nFIND=\"$VALGRIND $RBH_BINDIR/rbh-find\"\nDU=\"$VALGRIND $RBH_BINDIR/rbh-du\"\nDIFF=\"$VALGRIND $RBH_SBINDIR/rbh-diff\"\nUNDELETE=\"$VALGRIND $RBH_SBINDIR/rbh-undelete\"\n#IMPORT=\"$VALGRIND $RBH_SBINDIR/rbh-import $RBH_OPT\"\nCMD=robinhood\nARCH_STR=\"migration success for\"\nREL_STR=\"purge success for\"\nHSMRM_STR=\"hsm_remove success for\"\n\n#default: TMP_FS_MGR\nif [[ -z \"$PURPOSE\" || $PURPOSE = \"TMP\"* ]]; then\n    is_lhsm=0\n    is_hsmlite=0\n    shook=0\n    extra_dir=0\n    PURPOSE=\"TMP_FS_MGR\"\n    STATUS_MGR=none\n    # get include for this flavor\n    cp -f ${RBH_TEMPLATE_DIR}/includes/tmpfs.inc $RBH_TEST_POLICIES || exit 1\n    # change policy names to the test framework names\n    sed -e \"s/cleanup/purge/\" -i $RBH_TEST_POLICIES\nelif [[ $PURPOSE = \"LUSTRE_HSM\" ]]; then\n    is_lhsm=1\n    is_hsmlite=0\n    shook=0\n    extra_dir=0\n    STATUS_MGR=lhsm\n    # get include for this flavor\n    cp -f ${RBH_TEMPLATE_DIR}/includes/lhsm.inc $RBH_TEST_POLICIES || exit 1\n    # change policy names to the test framework names\n    sed -e 
\"s/lhsm_archive/migration/\" -i $RBH_TEST_POLICIES\n    sed -e \"s/lhsm_release/purge/\" -i $RBH_TEST_POLICIES\n    sed -e \"s/lhsm_remove/hsm_remove/\" -i $RBH_TEST_POLICIES\n\nelif [[ $PURPOSE = \"BACKUP\" ]]; then\n    is_lhsm=0\n    shook=0\n    is_hsmlite=1\n    extra_dir=0\n    STATUS_MGR=backup\n    # get include for this flavor\n    cp -f ${RBH_TEMPLATE_DIR}/includes/backup.inc $RBH_TEST_POLICIES || exit 1\n    # change policy names to the test framework names\n    sed -e \"s/backup_archive/migration/\" -i $RBH_TEST_POLICIES\n    sed -e \"s/backup_remove/hsm_remove/\" -i $RBH_TEST_POLICIES\n    # append a basic purge policy to run some purge tests\n    cat >> $RBH_TEST_POLICIES << EOF\ndefine_policy purge {\n    scope { type != directory }\n    status_manager = none;\n    default_action = common.unlink;\n    default_lru_sort_attr = none;\n}\nEOF\n    mkdir -p $BKROOT\nelif [[ $PURPOSE = \"SHOOK\" ]]; then\n    is_lhsm=0\n    is_hsmlite=1\n    shook=1\n    extra_dir=3\n    STATUS_MGR=shook\n    # get include for this flavor\n    cp -f ${RBH_TEMPLATE_DIR}/includes/shook.inc $RBH_TEST_POLICIES || exit 1\n    # change policy names to the test framework names\n    sed -e \"s/shook_archive/migration/\" -i $RBH_TEST_POLICIES\n    sed -e \"s/shook_release/purge/\" -i $RBH_TEST_POLICIES\n    sed -e \"s/shook_remove/hsm_remove/\" -i $RBH_TEST_POLICIES\n    mkdir -p $BKROOT\nelse\n    echo \"Invalid PURPOSE '$PURPOSE'\" >&2\n    exit 1\nfi\n\n# Compatibility macros. Some lfs setstripe options changed in Lustre\n# 2.3 (7a454853).\n#\n# --size|-s became --stripe-size|-S                (use $LFS_SS_SZ_OPT)\n# --index|-i|--offset|-o became --stripe-index|-i  (no macro, use -i)\n# --count|-c became --stripe-count|-c              (no macro, use -c)\n$LFS setstripe 2>&1 | grep stripe-index > /dev/null\nif [ $? 
-eq 0 ]; then\n    LFS_SS_SZ_OPT=\"--stripe-size\"\nelse\n    LFS_SS_SZ_OPT=\"--size\"\nfi\n\nfunction flush_data\n{\n    if [[ -n \"$SYNC\" ]]; then\n      # if the agent is on the same node as the writer, we are not sure\n      # data has been flushed to OSTs\n      echo \"Flushing data to OSTs\"\n      sync\n    fi\n}\n\nfunction clean_caches\n{\n    echo 3 > /proc/sys/vm/drop_caches\n    lctl set_param ldlm.namespaces.lustre-*.lru_size=clear > /dev/null\n}\n\nfunction wait_stable_df\n{\n       sync\n       clean_caches\n\n       $LFS df $RH_ROOT > /tmp/lfsdf.1\n       df $RH_ROOT > /tmp/df.1\n       while (( 1 )); do\n               # check df & lfs df evolution after some time\n               sleep 5\n               $LFS df $RH_ROOT > /tmp/lfsdf.2\n               df $RH_ROOT > /tmp/df.2\n               diff /tmp/lfsdf.1 /tmp/lfsdf.2 > /dev/null && \\\n                    diff /tmp/df.1 /tmp/df.2 > /dev/null && break\n               echo \"waiting for df update...\"\n               mv -f /tmp/lfsdf.2 /tmp/lfsdf.1\n               mv -f /tmp/df.2 /tmp/df.1\n       done\n}\n\n# Prints all, or part of Lustre's version\n#\n# lustre_version {all,major}\nfunction lustre_version\n{\n    local version_file=\"/sys/fs/lustre/version\"\n    # Support for older versions of Lustre\n    [ -f \"$version_file\" ] || version_file=\"${version_file/\\/sys//proc}\"\n\n    local version=\"$(grep -o \"[1-9].*\" \"$version_file\" 2>/dev/null)\"\n    if [ -z \"$version\" ]; then\n        printf \"Unable to determine Lustre's version\\n\" >&2\n        return 1\n    fi\n\n    case \"${1:-all}\" in\n    all)\n        printf \"$version\"\n        ;;\n    major)\n        printf \"${version%%.*}\"\n        ;;\n    *)\n        printf \"Invalid argument: '$1', {all,major} expected\\n\" >&2\n        return 64\n        ;;\n    esac\n}\n\nLVERSION=\"$(lustre_version)\"\nif [ -z \"$POSIX_MODE\" ]; then\n    lustre_major=$(lustre_version major) || exit 1\nelse\n    # avoid failing comparisons for 
POSIX mode\n    lustre_major=0\nfi\n\nif [[ -z \"$NOLOG\" || $NOLOG = \"0\" ]]; then\n\tno_log=0\nelse\n\tno_log=1\nfi\n\nPROC=$CMD\n\nLOGS=(rh_chglogs.log rh_migr.log rh_rm.log rh.pid rh_purge.log rh_report.log\n      rh_syntax.log recov.log rh_scan.log /tmp/rh_alert.log rh_rmdir.log\n      rh.log)\n\nSUMMARY=\"/tmp/test_${PROC}_summary.$$\"\n\nNB_ERROR=0\nRC=0\nSKIP=0\nSUCCESS=0\nDO_SKIP=0\n\nfunction error_reset\n{\n\tNB_ERROR=0\n\tDO_SKIP=0\n\tcp /dev/null $TMPERR_FILE\n}\n\nfunction error\n{\n    grep_ctx_opt=\"-B 5 -A 1\"\n\n\techo \"ERROR $@\"\n    # prefilter false errors\n    grep -v \"[ (]0 errors\" *.log | grep -v \"LastScanErrors\" | grep -i $grep_ctx_opt error\n\tNB_ERROR=$(($NB_ERROR+1))\n\n\tif (($junit)); then\n        # prefilter false errors\n        grep -v \"[ (]0 errors\" *.log | grep -v \"LastScanErrors\" | grep -i $grep_ctx_opt error  >> $TMPERR_FILE\n\t\techo \"ERROR $@\" >> $TMPERR_FILE\n\tfi\n\n    # exit on error\n    [ \"$EXIT_ON_ERROR\" = \"1\" ] && exit 1\n\n    # avoid displaying the same log many times\n    [ \"$DEBUG\" = \"1\" ] || clean_logs\n}\n\nfunction set_skipped\n{\n\tDO_SKIP=1\n}\n\nfunction clean_logs\n{\n    local f\n\tfor f in \"${LOGS[@]}\"; do\n\t\tif [ -s \"$f\" ]; then\n\t\t\tcp /dev/null \"$f\"\n\t\tfi\n\tdone\n}\n\nfunction list_actions\n{\n    if [[ -n \"$MDS\" ]]; then\n        ssh $MDS lctl get_param -n 'mdt.lustre-*.hsm.actions' | \\\n            egrep -v \"SUCCEED|CANCELED\"\n    else\n        lctl get_param -n 'mdt.lustre-*.hsm.actions' | \\\n            egrep -v \"SUCCEED|CANCELED\"\n    fi\n}\n\nfunction count_actions\n{\n    list_actions | wc -l\n}\n\nfunction wait_done\n{\n\tmax_sec=$1\n\tsec=0\n\n\taction_count=$(count_actions)\n\n\tif (( $action_count > 0 )); then\n\t\techo \"Current actions:\"\n\t\tlist_actions\n\n\t\techo -n \"Waiting for copy requests to end.\"\n\t\twhile (( $action_count > 0 )) ; do\n\t\t\techo -n \".\"\n\t\t\tsleep 1;\n\t\t\t((sec=$sec+1))\n\t\t\t(( $sec > $max_sec )) && return 
1\n\t\t\taction_count=$(count_actions)\n\t\tdone\n\t\tlist_actions\n\t\techo \" Done ($sec sec)\"\n\tfi\n\n\treturn 0\n}\n\n# Wait for a file to reach a given HSM state\n# arg 1 = full file name\n# arg 2 = HSM state (such as 0x00000001)\nfunction wait_hsm_state {\n    # Poll state for 10 seconds\n    LIM=$((`date +%s`+10))\n    while :\n    do\n        if [ `date +%s` -ge $LIM ]; then\n            error \"HSM state for file \\\"$1\\\" isn't $2\"\n            break # if EXIT_ON_ERROR is not set, error won't exit and this\n                  # will loop forever. So we break here.\n        fi\n\n        $LFS hsm_state $1 | grep --quiet $2 && break\n        sleep .5\n    done\n}\n\n# wait filesystem usage percentage returned by 'df' to be under a given value\nfunction wait_low_usage\n{\n    local target=$1\n    local timeout=30\n    local i=0\n\n    while (( $i < $timeout )); do\n        clean_caches\n        u=$(fs_usage)\n\n        if (( $u > $target )); then\n            echo \"filesystem is still ${u}% full. waiting for df update...\"\n            sleep 1\n        else\n            return 0\n        fi\n        ((i++))\n    done\n    echo \"Timeout $timeout reached without reaching usage target $target\"\n    return 1\n}\n\n# wait a specific OST to be under a given usage percentage\nfunction wait_low_OST_usage\n{\n    local ost=$1\n    local target=$2\n    local timeout=30\n    local i=0\n\n    while (( $i < $timeout )); do\n        clean_caches\n\n\t    u=$($LFS df $RH_ROOT | grep ${ost}_ | awk '{print $5}' | tr -d '%')\n        if (( $u > $target )); then\n            echo \"OST ${ost} is still ${u}% full. 
waiting for lfs df update...\"\n            sleep 1\n        else\n            return 0\n        fi\n        ((i++))\n    done\n    echo \"Timeout $timeout reached without reaching usage target $target for\n$ost\"\n    return 1\n}\n\n\n# wait filsystem usage percentage returned by 'df' to be above a given value\nfunction wait_high_usage\n{\n    local target=$1\n    local timeout=30\n    local i=0\n\n    while (( $i < $timeout )); do\n        u=$(fs_usage)\n\n        if (( $u < $target )); then\n            echo \"filesystem is ${u}% full, still under ${target}%. waiting for df update...\"\n            clean_caches\n            sleep 1\n        else\n            return 0\n        fi\n        ((i++))\n    done\n    echo \"Timeout $timeout reached without reaching usage target $target\"\n    return 1\n}\n\n# Wait filesystem inode usage returned by 'df' and 'lfs df' to be above a given\n# value.\n# The function checks both df and lfs df in the case of lustre.\nfunction wait_high_inodes\n{\n    local target=$1\n    local timeout=30\n    local i=0\n\n    while (( $i < $timeout )); do\n        u=$(inode_usage)\n\n        if (( $u < $target )); then\n            echo \"inode usage is ${u}, still under ${target}. waiting for df update...\"\n            clean_caches\n            sleep 1\n        elif [ -z \"$POSIX_MODE\" ]; then\n            # for lustre, also check lfs df -i\n            lu=$(lfs df -i $RH_ROOT | grep \"$RH_ROOT\" | tail -n 1 | awk '{print $3}')\n            if (( $lu < $target )); then\n                echo \"Lustre inode usage $lu still under ${target}. 
Waiting for lfs df update...\"\n                clean_caches\n                sleep 1\n            else\n                return 0\n            fi\n        else\n            return 0\n        fi\n        ((i++))\n    done\n    echo \"Timeout $timeout reached without reaching usage target $target\"\n    return 1\n}\n\n\nfunction clean_fs\n{\n\tif (( $is_lhsm != 0 )); then\n\t\techo \"Cancelling agent actions...\"\n\t\tif [[ -n \"$MDS\" ]]; then\n\t\t\tssh $MDS \"lctl set_param mdt.lustre-*.hsm_control=purge\"\n\t\telse\n\t\t\tlctl set_param \"mdt.lustre-*.hsm_control=purge\"\n\t\tfi\n\n\t\techo \"Waiting for end of data migration...\"\n\t\twait_done 60\n\tfi\n\n    [ \"$DEBUG\" = \"1\" ] && echo \"Cleaning filesystem...\"\n\tif [[ -n \"$RH_ROOT\" ]]; then\n\t\t find \"$RH_ROOT\" -mindepth 1 -delete 2>/dev/null\n\tfi\n\n\tif (( $is_hsmlite + $is_lhsm != 0 )); then\n\t\tif [[ -n \"$BKROOT\" ]]; then\n\t\t\t[ \"$DEBUG\" = \"1\" ] && echo \"Cleaning backend content...\"\n\t\t\tfind \"$BKROOT\" -mindepth 1 -delete 2>/dev/null\n\t\tfi\n\tfi\n\n\t[ \"$DEBUG\" = \"1\" ] && echo \"Destroying any running instance of robinhood...\"\n\tpkill robinhood\n\n\tif [ -f rh.pid ]; then\n\t\techo \"killing remaining robinhood process...\"\n\t\tkill `cat rh.pid`\n\t\trm -f rh.pid\n\tfi\n\n\tpgrep robinhood && sleep 1 && pkill -9 robinhood\n\t[ \"$DEBUG\" = \"1\" ] && echo \"Cleaning robinhood's DB...\"\n\t$CFG_SCRIPT empty_db $RH_DB > /dev/null\n\n\t[ \"$DEBUG\" = \"1\" ] && echo \"Cleaning changelogs...\"\n\tif (( $no_log==0 )); then\n\t   $LFS changelog_clear lustre-MDT0000 cl1 0\n\tfi\n}\n\nfunction ensure_init_backend()\n{\n\tmnted=`mount | grep $BKROOT | grep loop | wc -l`\n    if (( $mnted == 0 )); then\n        LOOP_FILE=/tmp/rbh_backend.loop.cont\n        if [[ ! 
-s $LOOP_FILE ]]; then\n            echo \"creating file container $LOOP_FILE...\"\n            dd if=/dev/zero of=$LOOP_FILE bs=1M count=400 || return 1\n            echo \"formatting as ext4...\"\n            mkfs.ext4 -q -F $LOOP_FILE || return 1\n        fi\n\n        echo \"Mounting $LOOP_FILE as $BKROOT\"\n        mount -o loop -t ext4 $LOOP_FILE $BKROOT || return 1\n    \techo \"Cleaning backend content...\"\n\t\tfind \"$BKROOT\" -mindepth 1 -delete 2>/dev/null\n    fi\n    return 0\n}\n\n\nfunction kill_from_pidfile\n{\n    if [ -f rh.pid ]; then\n        kill $(cat rh.pid)\n        sleep 1\n        # wait a second until it stops\n        if [ -f rh.pid ]; then\n            kill -9 $(cat rh.pid)\n            rm -f rh.pid\n        fi\n    fi\n    # security: drop old process\n\tpkill -9 $PROC\n}\n\nPOOL1=ost0\nPOOL2=ost1\nPOOL_CREATED=0\n\nfunction create_pools\n{\n  if [[ -n \"$MDS\" ]]; then\n\tdo_mds=\"ssh $MDS\"\n  else\n\tdo_mds=\"\"\n  fi\n\n  (($POOL_CREATED != 0 )) && return\n  $do_mds $LFS pool_list lustre | grep lustre.$POOL1 && POOL_CREATED=1\n  $do_mds $LFS pool_list lustre | grep lustre.$POOL2 && ((POOL_CREATED=$POOL_CREATED+1))\n  (($POOL_CREATED == 2 )) && return\n\n  $do_mds lctl pool_new lustre.$POOL1 || error \"creating pool $POOL1\"\n  $do_mds lctl pool_add lustre.$POOL1 lustre-OST0000 || error \"adding OST0000 to pool $POOL1\"\n  $do_mds lctl pool_new lustre.$POOL2 || error \"creating pool $POOL2\"\n  $do_mds lctl pool_add lustre.$POOL2 lustre-OST0001 || error \"adding OST0001 to pool $POOL2\"\n\n  $do_mds $LFS pool_list lustre.$POOL1\n  $do_mds $LFS pool_list lustre.$POOL2\n\n  POOL_CREATED=1\n}\n\nfunction check_db_error\n{\n        grep DB_REQUEST_FAILED $1 && error \"DB request error\"\n}\n\nfunction get_id\n{\n    local p=\"$1\"\n\n    if (( $lustre_major >= 2 )); then\n         $LFS path2fid \"$p\" | tr -d '[]'\n    else\n         stat -c \"/%i\" \"$p\"\n    fi\n}\n\nfunction create_nostripe\n{\n    local f=$1\n    $TESTOPEN -f 
O_RDWR:O_CREAT:O_LOV_DELAY_CREATE -m 0644 \"$f\" || return 1\n    $LFS getstripe \"$f\" | grep \"no stripe info\" || error \"$f should not have stripe info\"\n}\n\nfunction migration_test\n{\n\tconfig_file=$1\n\texpected_migr=$2\n\tsleep_time=$3\n\tpolicy_str=\"$4\"\n\n\tif (( $is_lhsm + $is_hsmlite == 0 )); then\n\t\techo \"HSM test only: skipped\"\n\t\tset_skipped\n\t\treturn 1\n\tfi\n\n\tclean_logs\n\tif (( $no_log == 0 )); then\n\t\techo \"Initial scan of empty filesystem\"\n\t\t$RH -f $RBH_CFG_DIR/$config_file --scan -l DEBUG -L rh_chglogs.log  --once || error \"\"\n\tfi\n\n\t# create and fill 10 files\n\n\techo \"1-Modifing files...\"\n\tfor i in a `seq 1 10`; do\n\t\tdd if=/dev/zero of=$RH_ROOT/file.$i bs=1M count=10 >/dev/null 2>/dev/null || error \"writing file.$i\"\n\tdone\n\n\techo \"2-Reading changelogs...\"\n\t# read changelogs\n\tif (( $no_log )); then\n\t\t$RH -f $RBH_CFG_DIR/$config_file --scan -l DEBUG -L rh_chglogs.log  --once || error \"\"\n\telse\n\t\t$RH -f $RBH_CFG_DIR/$config_file --readlog -l DEBUG -L rh_chglogs.log  --once || error \"\"\n\tfi\n    \tcheck_db_error rh_chglogs.log\n\n\techo \"3-Applying migration policy ($policy_str)...\"\n\t# start a migration files should not be migrated this time\n\t$RH -f $RBH_CFG_DIR/$config_file --run=migration --target=all -l DEBUG -L rh_migr.log || error \"\"\n\n\tnb_migr=`grep \"$ARCH_STR\" rh_migr.log | wc -l`\n\tif (($nb_migr != 0)); then\n\t\terror \"********** TEST FAILED: No migration expected, $nb_migr started\"\n\telse\n\t\techo \"OK: no files migrated\"\n\tfi\n\n\tif (( $is_lhsm != 0 )); then\n\t    $REPORT -f $RBH_CFG_DIR/$config_file --status-info $STATUS_MGR --csv -q --count-min=1 > report.out\n        [ \"$DEBUG\" = \"1\" ] && cat report.out\n        check_status_count report.out file archiving 0\n        check_status_count report.out file archived 0\n    fi\n\n\techo \"4-Sleeping $sleep_time seconds...\"\n\tsleep $sleep_time\n\n\techo \"3-Applying migration policy again 
($policy_str)...\"\n\t$RH -f $RBH_CFG_DIR/$config_file --run=migration --target=all -l DEBUG -L rh_migr.log\n\n\tnb_migr=`grep \"$ARCH_STR\" rh_migr.log | wc -l`\n\tif (($nb_migr != $expected_migr)); then\n\t\terror \"********** TEST FAILED: $expected_migr migrations expected, $nb_migr started\"\n\telse\n\t\techo \"OK: $nb_migr files migrated\"\n\tfi\n\n\tif (( $is_lhsm != 0 )); then\n\t    $REPORT -f $RBH_CFG_DIR/$config_file --status-info $STATUS_MGR --csv -q --count-min=1 > report.out\n        [ \"$DEBUG\" = \"1\" ] && cat report.out\n        check_status_count report.out file archiving $expected_migr\n\n\t\twait_done 60 || error \"Migration timeout\"\n        # get completion log\n        $RH -f $RBH_CFG_DIR/$config_file --readlog --once -l DEBUG -L rh_chglogs.log || error \"readlog\"\n\n        # should be archived now\n\t    $REPORT -f $RBH_CFG_DIR/$config_file --status-info $STATUS_MGR --csv -q --count-min=1 > report.out\n        [ \"$DEBUG\" = \"1\" ] && cat report.out\n        check_status_count report.out file synchro $expected_migr\n    fi\n\n    rm -f report.out\n}\n\n# Create a file with a UUID, archive it and delete. Make sure that its\n# UUID makes it to the ENTRIES table. 
Do the same test with a file\n# without UUID to test some bad paths.\nfunction archive_uuid1\n{\n    config_file=$1\n\n    if (( $is_lhsm == 0 )); then\n        echo \"Lustre/HSM test only: skipped\"\n        set_skipped\n        return 1\n    fi\n\n    clean_logs\n\n    echo \"Initial scan of empty filesystem\"\n    $RH -f $RBH_CFG_DIR/$config_file --scan -l DEBUG -L rh_chglogs.log  --once || error \"\"\n\n    # create 2 files\n    echo \"1-Creating files...\"\n    for i in a `seq 1 3`; do\n        rm -f $RH_ROOT/file.$i\n        dd if=/dev/zero of=$RH_ROOT/file.$i bs=1k count=1 >/dev/null 2>/dev/null || error \"writing file.$i\"\n    done\n\n    # Set a fake UUID only on the first file\n    local fake_uuid=\"8bc54fd0-5a7e-49f2-ad32-adc4147d31a2\"\n    setfattr -n trusted.lhsm.uuid -v \"$fake_uuid\" $RH_ROOT/file.1\n    getfattr -n trusted.lhsm.uuid $RH_ROOT/file.1 || error \"UUID wasn't set\"\n\n    # Set bad (too small) UUID on 2nd file\n    local bad_uuid=\"arbitrary_id_$$\"\n    setfattr -n trusted.lhsm.uuid -v \"$bad_uuid\" $RH_ROOT/file.2\n    getfattr -n trusted.lhsm.uuid $RH_ROOT/file.2 || error \"UUID wasn't set\"\n\n    local fid1=$(get_id \"$RH_ROOT/file.1\")\n    local fid2=$(get_id \"$RH_ROOT/file.2\")\n    local fid3=$(get_id \"$RH_ROOT/file.3\")\n\n    echo \"2- scan filesystem\"\n    $RH -f $RBH_CFG_DIR/$config_file --scan -l DEBUG -L rh_chglogs.log  --once || error \"\"\n    egrep -e \"UUID size is too small \\(([0-9]*)\\) for fid $fid2\" rh_chglogs.log ||\n\terror \"UUID of file.2 should had been considered as bad\"\n    $REPORT -f $RBH_CFG_DIR/$config_file -e $fid1 | egrep \"lhsm\\.uuid\\s+:\\s+$fake_uuid\" || error \"UUID not found in ENTRIES for file1\"\n    $REPORT -f $RBH_CFG_DIR/$config_file -e $fid2 | grep \"lhsm\\.uuid\" && error \"UUID found in ENTRIES for file2\"\n    $REPORT -f $RBH_CFG_DIR/$config_file -e $fid3 | grep \"lhsm\\.uuid\" && error \"UUID found in ENTRIES for file3\"\n\n    echo \"3-Test rbh-find with UUID\"\n    $FIND -f 
$RBH_CFG_DIR/$config_file -printf \"%p %Rm{lhsm.uuid}\\\\n\" | grep \"$fake_uuid\" || error \"UUID not found by rbh-find for file1\"\n\n    rm -f report.out\n}\n\n# Create a file with a UUID, archive it and delete. Make sure that its\n# UUID makes it to the ENTRIES and SOFT_RM tables. Do the same test\n# for a file without UUID to test some bad paths.\nfunction archive_uuid2\n{\n    config_file=$1\n\n    if (( $is_lhsm == 0 )); then\n        echo \"Lustre/HSM test only: skipped\"\n        set_skipped\n        return 1\n    fi\n\n    clean_logs\n\n    echo \"Initial scan of empty filesystem\"\n    $RH -f $RBH_CFG_DIR/$config_file --scan -l DEBUG -L rh_chglogs.log  --once || error \"\"\n\n    # create 2 files\n    echo \"1-Creating files...\"\n    for i in a `seq 1 2`; do\n        rm -f $RH_ROOT/file.$i\n        dd if=/dev/zero of=$RH_ROOT/file.$i bs=1k count=1 >/dev/null 2>/dev/null || error \"writing file.$i\"\n    done\n\n    # Set a fake UUID only on the first file\n    local fake_uuid=\"2363c3ed-5a7e-49f2-ad32-adc4147d31a2\"\n    setfattr -n trusted.lhsm.uuid -v \"$fake_uuid\" $RH_ROOT/file.1\n    getfattr -n trusted.lhsm.uuid $RH_ROOT/file.1 || error \"UUID wasn't set\"\n\n    local fid1=$(get_id \"$RH_ROOT/file.1\")\n    local fid2=$(get_id \"$RH_ROOT/file.2\")\n\n    echo \"2-Reading changelogs...\"\n    $RH -f $RBH_CFG_DIR/$config_file --readlog -l DEBUG -L rh_chglogs.log  --once || error \"\"\n    check_db_error rh_chglogs.log\n\n    echo \"3-Archiving the files\"\n    $LFS hsm_archive $RH_ROOT/file.1 || error \"executing lfs hsm_archive\"\n    $LFS hsm_archive $RH_ROOT/file.2 || error \"executing lfs hsm_archive\"\n\n    wait_hsm_state $RH_ROOT/file.1 0x00000009\n    wait_hsm_state $RH_ROOT/file.2 0x00000009\n\n    echo \"4-Reading changelogs...\"\n    $RH -f $RBH_CFG_DIR/$config_file --readlog -l DEBUG -L rh_chglogs.log  --once || error \"\"\n\n    $REPORT -f $RBH_CFG_DIR/$config_file -e $fid1 | egrep \"lhsm\\.uuid\\s+:\\s+$fake_uuid\" || error \"UUID not 
found in ENTRIES for file1\"\n    $REPORT -f $RBH_CFG_DIR/$config_file -e $fid2 | grep \"lhsm\\.uuid\" && error \"UUID found in ENTRIES for file2\"\n\n    echo \"5-Test soft rm\"\n    rm -f $RH_ROOT/file.1\n    rm -f $RH_ROOT/file.2\n\n    $RH -f $RBH_CFG_DIR/$config_file --readlog -l DEBUG -L rh_chglogs.log  --once || error \"\"\n\n    mysql $RH_DB -e \"SELECT lhsm_uuid FROM SOFT_RM WHERE id='$fid1'\" | grep \"$fake_uuid\" || error \"UUID not found in SOFT_RM for file1\"\n    mysql $RH_DB -e \"SELECT lhsm_uuid FROM SOFT_RM WHERE id='$fid2'\" | grep \"NULL\" || error \"UUID found in SOFT_RM for file2\"\n\n    echo \"6-Test undelete\"\n    $UNDELETE -f $RBH_CFG_DIR/$config_file -L | grep 'file'\n    $UNDELETE -f $RBH_CFG_DIR/$config_file -R $RH_ROOT/file.1\n\n    $RH -f $RBH_CFG_DIR/$config_file --readlog -l DEBUG -L rh_chglogs.log  --once || error \"\"\n\n    getfattr -n trusted.lhsm.uuid $RH_ROOT/file.1 || error \"UUID wasn't set\"\n    getfattr -n trusted.lhsm.uuid $RH_ROOT/file.1 | grep \"$fake_uuid\" || error \"Bad UUID undeleted\"\n\n    local fid1=$(get_id \"$RH_ROOT/file.1\")\n    $REPORT -f $RBH_CFG_DIR/$config_file -e $fid1 | egrep \"lhsm\\.uuid\\s+:\\s+$fake_uuid\" || error \"UUID not found in ENTRIES for file1\"\n    mysql $RH_DB -e \"SELECT lhsm_uuid FROM SOFT_RM WHERE id='$fid2'\" | grep \"NULL\" || error \"UUID found in SOFT_RM for file2\"\n\n    echo \"7-Test rbh-find with UUID\"\n    $FIND -f $RBH_CFG_DIR/$config_file -printf \"%p %Rm{lhsm.uuid}\\\\n\" | grep \"$fake_uuid\" || error \"UUID not found by rbh-find for file1\"\n\n    rm -f report.out\n}\n\n# migrate a single file\nfunction migration_test_single\n{\n\tconfig_file=$1\n\texpected_migr=$2\n\tsleep_time=$3\n\tpolicy_str=\"$4\"\n\n\tif (( $is_lhsm + $is_hsmlite == 0 )); then\n\t\techo \"HSM test only: skipped\"\n\t\tset_skipped\n\t\treturn 1\n\tfi\n\n\tclean_logs\n\n\t# create and fill 10 files\n\n\techo \"1-Modifing files...\"\n\tfor i in a `seq 1 10`; do\n\t\tdd if=/dev/zero 
of=$RH_ROOT/file.$i bs=1M count=10 >/dev/null 2>/dev/null || error \"writing file.$i\"\n\tdone\n\n\tcount=0\n\techo \"2-Trying to migrate files before we know them...\"\n\tfor i in a `seq 1 10`; do\n\t\t$RH -f $RBH_CFG_DIR/$config_file --run=migration --target=file:$RH_ROOT/file.$i -L rh_migr.log -l DEBUG 2>/dev/null\n\t\tgrep \"$RH_ROOT/file.$i\" rh_migr.log | grep \"not known in database\" && count=$(($count+1))\n\tdone\n\n\tif (( $count == $expected_migr )); then\n\t\techo \"OK: all $expected_migr files are not known in database\"\n\telse\n\t\terror \"$count files are not known in database, $expected_migr expected\"\n\tfi\n\n\tcp /dev/null rh_migr.log\n\tsleep 1\n\n\techo \"3-Reading changelogs...\"\n\t# read changelogs\n\tif (( $no_log )); then\n\t\t$RH -f $RBH_CFG_DIR/$config_file --scan -l DEBUG -L rh_chglogs.log  --once 2>/dev/null || error \"\"\n\telse\n\t\t$RH -f $RBH_CFG_DIR/$config_file --readlog -l DEBUG -L rh_chglogs.log  --once 2>/dev/null || error \"\"\n\tfi\n    \tcheck_db_error rh_chglogs.log\n\n\tcount=0\n\tcp /dev/null rh_migr.log\n\techo \"4-Applying migration policy ($policy_str)...\"\n\t# files should not be migrated this time: do not match policy\n\tfor i in a `seq 1 10`; do\n\t\t$RH -f $RBH_CFG_DIR/$config_file --run=migration --target=file:$RH_ROOT/file.$i -l DEBUG -L rh_migr.log 2>/dev/null\n\t\tgrep \"$RH_ROOT/file.$i\" rh_migr.log | grep \"doesn't match condition for policy rule\" && count=$(($count+1))\n\tdone\n\n\tif (( $count == $expected_migr )); then\n\t\techo \"OK: all $expected_migr files are not eligible for migration\"\n\telse\n\t\terror \"$count files are not eligible, $expected_migr expected\"\n\tfi\n\n\tnb_migr=`grep \"$ARCH_STR\" rh_migr.log | wc -l`\n\tif (($nb_migr != 0)); then\n\t\terror \"********** TEST FAILED: No migration expected, $nb_migr started\"\n\telse\n\t\techo \"OK: no files migrated\"\n\tfi\n\n\tcp /dev/null rh_migr.log\n\techo \"4-Sleeping $sleep_time seconds...\"\n\tsleep $sleep_time\n\n\tcount=0\n\techo 
\"5-Applying migration policy again ($policy_str)...\"\n\tfor i in a `seq 1 10`; do\n\t\t$RH -f $RBH_CFG_DIR/$config_file --run=migration --target=file:$RH_ROOT/file.$i -l DEBUG -L rh_migr.log 2>/dev/null\n\t\tgrep \"$RH_ROOT/file.$i\" rh_migr.log | grep \"$ARCH_STR\" && count=$(($count+1))\n\tdone\n\n\tif (( $count == $expected_migr )); then\n\t\techo \"OK: all $expected_migr files have been migrated successfully\"\n\telse\n\t\terror \"$count files migrated, $expected_migr expected\"\n\tfi\n\n\tnb_migr=`grep \"$ARCH_STR\" rh_migr.log | wc -l`\n\tif (($nb_migr != $expected_migr)); then\n\t\terror \"********** TEST FAILED: $expected_migr migrations expected, $nb_migr started\"\n\telse\n\t\techo \"OK: $nb_migr files migrated\"\n\tfi\n}\n\n\n\n# migrate a symlink\nfunction migrate_symlink\n{\n\tconfig_file=$1\n\tsleep_time=$2\n\tpolicy_str=\"$3\"\n\n\tif (( $is_hsmlite == 0 )); then\n\t\techo \"Backup test only: skipped\"\n\t\tset_skipped\n\t\treturn 1\n\tfi\n\n\tclean_logs\n\n\t# create a symlink\n\n\techo \"1-Create a symlink\"\n\tln -s \"this is a symlink\" \"$RH_ROOT/link.1\" || error \"creating symlink\"\n\n\techo \"2-Reading changelogs...\"\n\t# read changelogs\n\tif (( $no_log )); then\n\t\t$RH -f $RBH_CFG_DIR/$config_file --scan -l DEBUG -L rh_chglogs.log  --once 2>/dev/null || error \"reading chglog\"\n\telse\n\t\t$RH -f $RBH_CFG_DIR/$config_file --readlog -l DEBUG -L rh_chglogs.log  --once 2>/dev/null || error \"reading chglog\"\n\tfi\n    \tcheck_db_error rh_chglogs.log\n\n\tcount=0\n\techo \"3-Applying migration policy ($policy_str)...\"\n\t# files should not be migrated this time: do not match policy\n\t$RH -f $RBH_CFG_DIR/$config_file  --run=migration --target=file:$RH_ROOT/link.1 -l DEBUG -L rh_migr.log 2>/dev/null\n\tnb_migr=`grep \"$ARCH_STR\" rh_migr.log | wc -l`\n\tif (($nb_migr != 0)); then\n\t\terror \"********** TEST FAILED: No migration expected, $nb_migr started\"\n\telse\n\t\techo \"OK: no entries migrated\"\n\tfi\n\n\tcp /dev/null 
rh_migr.log\n\techo \"4-Sleeping $sleep_time seconds...\"\n\tsleep $sleep_time\n\n\tcount=0\n\techo \"5-Applying migration policy again ($policy_str)...\"\n\t$RH -f $RBH_CFG_DIR/$config_file --run=migration --target=file:$RH_ROOT/link.1 -l DEBUG -L rh_migr.log 2>/dev/null\n\n\tnb_migr=`grep \"$ARCH_STR\" rh_migr.log | wc -l`\n\tif (($nb_migr != 1)); then\n\t\terror \"********** TEST FAILED: 1 migration expected, $nb_migr started\"\n\telse\n\t\techo \"OK: $nb_migr files migrated\"\n\tfi\n\n\techo \"6-Scanning...\"\n\t$RH -f $RBH_CFG_DIR/$config_file --scan -l DEBUG -L rh_chglogs.log  --once 2>/dev/null || error \"reading chglog\"\n    \tcheck_db_error rh_chglogs.log\n\n\t$REPORT -f $RBH_CFG_DIR/$config_file --dump-status=$STATUS_MGR:synchro --csv -q > report.out\n        [ \"$DEBUG\" = \"1\" ] && cat report.out\n    \tcount=$(wc -l report.out | awk '{print $1}')\n\tif  (($count == 1)); then\n\t\techo \"OK: 1 synchro symlink\"\n\telse\n\t\terror \"1 symlink is expected to be synchro (found $count)\"\n\tfi\n    \trm -f report.out\n\n\tcp /dev/null rh_migr.log\n\techo \"7-Applying migration policy again ($policy_str)...\"\n\t$RH -f $RBH_CFG_DIR/$config_file --run=migration --target=file:$RH_ROOT/link.1 -l DEBUG -L rh_migr.log\n\n\tif grep \" 1 entries skipped\" rh_migr.log; then\n\t\techo \"OK: entry skipped\"\n\telse\n\t\terror \"1 entry should be skipped\"\n\tfi\n}\n\n# helper for test_rmdir\n# run rmdir_empty and rmdir_recurse policies\n# and check the correct amount of directories is matched\nfunction run_rmdirs\n{\n    config_file=$1\n    policy_str=\"$2\"\n    expect_empty=$3\n    expect_recurs=$4\n\n    echo \"Applying rmdir_empty policy ($policy_str)...\"\n    $RH -f $RBH_CFG_DIR/$config_file --run=rmdir_empty --target=all --once -l FULL -L rh_purge.log\n    [ \"$DEBUG\" = \"1\" ] && grep \"SELECT ENTRIES\" rh_purge.log\n    grep \"Policy run summary\" rh_purge.log | grep rmdir_empty\n    cnt_empty=$(grep \"Policy run summary\" rh_purge.log | grep rmdir_empty | 
cut -d ';' -f 3 | awk '{print $1}')\n    :> rh_purge.log\n\n    echo \"Applying rmdir_recurse policy ($policy_str)...\"\n    $RH -f $RBH_CFG_DIR/$config_file --run=rmdir_recurse --target=all --once -l FULL -L rh_purge.log\n    [ \"$DEBUG\" = \"1\" ] && grep \"SELECT ENTRIES\" rh_purge.log\n    grep \"Policy run summary\" rh_purge.log | grep rmdir_recurse\n    cnt_recurs=$(grep \"Policy run summary\" rh_purge.log | grep rmdir_recurse | cut -d ';' -f 3 | awk '{print $1}')\n    :> rh_purge.log\n\n    if (( $cnt_empty == $expect_empty )); then\n        echo \"OK: $cnt_empty empty directories removed\"\n    else\n        error \"$cnt_empty empty directories removed ($expect_empty expected)\"\n    fi\n    if (( $cnt_recurs == $expect_recurs )); then\n        echo \"OK: $cnt_recurs directories removed recursively\"\n    else\n        error \"$cnt_recurs directories removed recursively ($expect_recurs expected)\"\n    fi\n}\n\n# test rmdir policies\nfunction test_rmdir\n{\n    config_file=$1\n    sleep_time=$2\n    policy_str=\"$3\"\n\n    clean_logs\n\n    EMPTY=empty\n    NONEMPTY=smthg\n    RECURSE=remove_me\n    export MATCH_PATH=\"$RH_ROOT/$RECURSE.*\"\n\n    # initial scan\n    $RH -f $RBH_CFG_DIR/$config_file --scan --once -l DEBUG -L rh_chglogs.log\n       check_db_error rh_chglogs.log\n\n    echo \"Create test directories\"\n\n    # create 3 empty directories\n    mkdir \"$RH_ROOT/$EMPTY.1\" \"$RH_ROOT/$EMPTY.2\" \"$RH_ROOT/$EMPTY.3\" || error \"creating empty directories\"\n    # create non-empty directories\n    mkdir \"$RH_ROOT/$NONEMPTY.1\" \"$RH_ROOT/$NONEMPTY.2\" \"$RH_ROOT/$NONEMPTY.3\" || error \"creating directories\"\n    touch \"$RH_ROOT/$NONEMPTY.1/f\" \"$RH_ROOT/$NONEMPTY.2/f\" \"$RH_ROOT/$NONEMPTY.3/f\" || error \"populating directories\"\n    # create \"deep\" directories for testing recurse rmdir\n    mkdir \"$RH_ROOT/$RECURSE.1\"  \"$RH_ROOT/$RECURSE.2\" || error \"creating directories\"\n    mkdir \"$RH_ROOT/$RECURSE.1/subdir.1\" 
\"$RH_ROOT/$RECURSE.1/subdir.2\" || error \"creating directories\"\n    touch \"$RH_ROOT/$RECURSE.1/subdir.1/file.1\" \"$RH_ROOT/$RECURSE.1/subdir.1/file.2\" \"$RH_ROOT/$RECURSE.1/subdir.2/file\" || error \"populating directories\"\n\n    echo \"Reading changelogs...\"\n    # read changelogs\n    if (( $no_log )); then\n        $RH -f $RBH_CFG_DIR/$config_file --scan -l DEBUG -L rh_chglogs.log  --once 2>/dev/null || error \"reading chglog\"\n    else\n        $RH -f $RBH_CFG_DIR/$config_file --readlog -l DEBUG -L rh_chglogs.log  --once 2>/dev/null || error \"reading chglog\"\n    fi\n    check_db_error rh_chglogs.log\n\n    run_rmdirs $config_file \"$policy_str\" 0 0\n    echo \"Sleeping $sleep_time seconds...\"\n    sleep $sleep_time\n\n    run_rmdirs $config_file \"$policy_str\" 0 2\n    echo \"Sleeping $sleep_time seconds...\"\n    sleep $sleep_time\n\n    run_rmdirs $config_file \"$policy_str\" 3 0\n}\n\nfunction test_lru_policy\n{\n\tconfig_file=$1\n\texpected_migr_1=$2\n\texpected_migr_2=$3\n    sleep_time=$4\n\tpolicy_str=\"$5\"\n\n    nb_expected_migr_1=$(echo $expected_migr_1 | wc -w)\n    nb_expected_migr_2=$(echo $expected_migr_2 | wc -w)\n    cr_sleep=5\n\n\tif (( $is_lhsm + $is_hsmlite == 0 )); then\n\t\techo \"HSM test only: skipped\"\n\t\tset_skipped\n\t\treturn 1\n\tfi\n\n\tclean_logs\n\t# initial scan\n\t$RH -f $RBH_CFG_DIR/$config_file --scan --once -l DEBUG -L rh_chglogs.log\n   \tcheck_db_error rh_chglogs.log\n\n\t# create test tree with 10 files\n    # time | files         |  0   1   2   3   4   5   6   7   8   9\n    # -------------------------------------------------------------\n    #  0   | creation      |  x   x   x   x\n    #  5s  | creation      |                  x   x   x   x   x   x\n    # 10s  | modification  |          x   x   x       x   x\n    # 15s  | access        |      x           x   x       x\n    # 20s  | 1st archive   |  $expected_migr_1\n    # +$4  | 2nd archive   |  $expected_migr_2\n    # size (M)             |  1   2   
3+  4+  5+  1   2+  3+  4   5\n    #\t\t\tdesc=\t4 9 3 8 2=7 6 1\n\n\techo \"1-Creating test files...\"\n    # creation times\n\techo -n \"  Creating files 0 1 2 3, \"\n\tfor i in {0..3}; do\n\t\tdd if=/dev/zero of=$RH_ROOT/file.$i bs=1M count=$((1+i%5)) >/dev/null 2>/dev/null || error \"writing file.$i\"\n\tdone\n\techo \"sleeping $cr_sleep seconds...\"\n    sleep $cr_sleep\n\techo -n \"  Creating files 4 5 6 7 8 9, \"\n\tfor i in {4..9}; do\n\t\tdd if=/dev/zero of=$RH_ROOT/file.$i bs=1M count=$((1+i%5)) >/dev/null 2>/dev/null || error \"writing file.$i\"\n\tdone\n\techo \"sleeping $cr_sleep seconds...\"\n    sleep $cr_sleep\n    # modification times\n\techo -n \"  Modifying files 2 3 4 6 7, \"\n\tfor i in 2 3 4 6 7; do\n\t    echo \"data\" >> $RH_ROOT/file.$i || error \"modifying file.$i\"\n\tdone\n\techo \"sleeping $cr_sleep seconds...\"\n    sleep $cr_sleep\n    # update last access times\n\techo -n \"  Reading files 1 4 5 7, \"\n    for i in 1 4 5 7; do\n \t    cat $RH_ROOT/file.$i >/dev/null || error \"reading file.$i\"\n    done\n\techo \"sleeping $cr_sleep seconds...\"\n    sleep $cr_sleep\n\n\techo \"2-Reading changelogs...\"\n\t# read changelogs\n    # TODO: creation time is different when scanning (ctime at discovery time) and when reading\n    # changelogs (changelog timestamp)\n\tif (( $no_log )); then\n\t\t$RH -f $RBH_CFG_DIR/$config_file --scan -l DEBUG -L rh_chglogs.log  --once || error \"\"\n\telse\n\t\t$RH -f $RBH_CFG_DIR/$config_file --readlog -l DEBUG -L rh_chglogs.log  --once || error \"\"\n\tfi\n    check_db_error rh_chglogs.log\n    # md_update of entries must be > 0 for policy application\n    sleep 1\n\n\techo \"3-Applying migration policy ($policy_str)...\"\n\t# start a migration files should not be migrated this time\n\n\t$RH -f $RBH_CFG_DIR/$config_file --run=migration --target=all -l FULL -L rh_migr.log   || error \"\"\n    [ \"$DEBUG\" = \"1\" ] && grep \"SELECT ENTRIES\" rh_migr.log\n\n    # Retrieve the names of migrated files.\n    
migr=`egrep -o \"$ARCH_STR '[^']+'\" rh_migr.log | sed \"s/.*'\\(.*\\)'/\\1/\" | \\\n        awk -F. '{print $NF}' | sort | tr '\\n' ' ' | xargs` # xargs does the trimming\n    nb_migr=$(echo $migr | wc -w)\n\tif [[ \"$migr\" != \"$expected_migr_1\" ]]; then\n        error \"********** TEST FAILED: $nb_expected_migr_1 migration expected ${expected_migr_1:+(files $expected_migr_1)}, $nb_migr started ${migr:+(files $migr)}\"\n\telse\n\t\techo \"OK: $nb_expected_migr_1 files migrated\"\n\tfi\n\n\techo \"4-Sleeping $sleep_time seconds...\"\n\tsleep $sleep_time\n    # empty log file to prevent from counting previous action twice\n    :> rh_migr.log\n\n\techo \"5-Applying migration policy again ($policy_str)...\"\n\t$RH -f $RBH_CFG_DIR/$config_file --run=migration --target=all -l DEBUG -L rh_migr.log\n    [ \"$DEBUG\" = \"1\" ] && grep \"SELECT ENTRIES\" rh_migr.log\n\n    # Retrieve the names of migrated files.\n    #   \"2015/02/18 15:54:31 [17821/5] migration | migration success for '/mnt/lustre/file.1', matching rule 'default', creation_time 41s ago, size=1.00 MB\"\n    migr=`egrep -o \"$ARCH_STR '[^']+'\" rh_migr.log | sed \"s/.*'\\(.*\\)'/\\1/\" | \\\n        awk -F. 
'{print $NF}' | sort | tr '\\n' ' ' | xargs` # xargs does the trimming\n    nb_migr=$(echo $migr | wc -w)\n\tif [[ \"$migr\" != \"$expected_migr_2\" ]]; then\n        error \"********** TEST FAILED: $nb_expected_migr_2 migration expected ${expected_migr_2:+(files $expected_migr_2)}, $nb_migr started ${migr:+(files $migr)}\"\n\telse\n        echo \"OK: $nb_migr files migrated\"\n\tfi\n}\n\nfunction lru_order_of\n{\n    l=\"$1\"\n    f=\"$2\"\n    grep \"$REL_STR\" $l | grep -n \"'$f'\" | cut -d ':' -f 1\n}\n\n\nfunction test_purge_lru\n{\n\tconfig_file=$1\n    export SORT_PARAM=$2\n\tpolicy_str=\"$3\"\n\n\tclean_logs\n\n    # initial scan\n    $RH -f $RBH_CFG_DIR/$config_file --scan -l DEBUG -L rh_chglogs.log  --once || error \"\"\n\n    # create 6 files\n  \tfor i in {1..6}; do\n\t\tdd if=/dev/zero of=$RH_ROOT/file.$i bs=1M count=1 >/dev/null 2>/dev/null || error \"writing file.$i\"\n        sleep 1\n\tdone\n\n    # access 4 files\n  \tfor i in {1..4}; do\n\t\tdd if=$RH_ROOT/file.$i of=/dev/null bs=1M count=1 >/dev/null 2>/dev/null || error \"reading file.$i\"\n        sleep 1\n\tdone\n\n \t# read changelogs\n\tif (( $no_log )); then\n\t\t$RH -f $RBH_CFG_DIR/$config_file --scan -l DEBUG -L rh_chglogs.log  --once || error \"\"\n\telse\n\t\t$RH -f $RBH_CFG_DIR/$config_file --readlog -l DEBUG -L rh_chglogs.log  --once || error \"\"\n\tfi\n\n\t# flush data for HSM flavors\n    if (( ($is_hsmlite != 0) || ($is_lhsm != 0) )); then\n\t\techo \"Archiving files\"\n\t\t$RH -f $RBH_CFG_DIR/$config_file $SYNC_OPT -l DEBUG  -L rh_migr.log || error \"archiving files\"\n\n        if (( $is_lhsm != 0 )); then\n    \t\techo \"Waiting for end of data migration...\"\n    \t\twait_done 60\n\n            # archive is asynchronous: read changelog to get the archive status\n            $RH -f $RBH_CFG_DIR/$config_file --readlog -l DEBUG -L rh_chglogs.log  --once || error \"\"\n        fi\n\tfi\n\n    # md_update for purge must be > previous md updates\n    sleep 1\n\n    $RH -f 
$RBH_CFG_DIR/$config_file $PURGE_OPT --no-limit --once -l DEBUG \\\n        -L rh_purge.log || error \"purging files\"\n\n    # if sorted: order should be 5 6 1 2 3 4\n    exp_rank=(3 4 5 6 1 2)\n    # if not: can be any order\n\n    if [[ $SORT_PARAM != \"none\" ]]; then\n      \tfor i in {1..6}; do\n            idx=$(($i-1))\n            rank=$(lru_order_of rh_purge.log $RH_ROOT/file.$i)\n            echo \"file.$i purge rank #${exp_rank[$idx]}\"\n            [[ $rank == ${exp_rank[$idx]} ]] || error \"file.$i should have been purged in #${exp_rank[$idx]} (got $rank)\"\n        done\n\n        # DB request must have access time criteria\n        grep \"new request\" rh_purge.log | grep access || error \"access should be in request criteria\"\n\n    else\n        # all entries must be found\n        cnt=$(grep \"$REL_STR\" rh_purge.log | wc -l)\n        [[ $cnt == 6 ]] || error \"All entries should have been purged\"\n\n        # DB request must not have access time criteria\n        grep \"new request\" rh_purge.log | grep access && error \"access shouldn't be in request criteria\"\n    fi\n\n}\n\nfunction test_suspend_on_error\n{\n\tconfig_file=$1\n    sleep_time=$2\n\tpolicy_str=\"$3\"\n\n\tif (( $is_lhsm + $is_hsmlite == 0 )); then\n\t\techo \"HSM test only: skipped\"\n\t\tset_skipped\n\t\treturn 1\n\tfi\n\n\tclean_logs\n\n    # must reach 50% error with at least 5 errors\n    nb_files_ok=5\n    nb_files_error=10 # must stop migrating before\n    # migrating by creation order, so create them in order\n    # to increase the error rate sightly\n    # and reach this condition before the whole migration is finished\n\techo \"1-Creating test files...\"\n\tfor i in $(seq 1 ${nb_files_ok}); do\n\t\tdd if=/dev/zero of=$RH_ROOT/file.$i bs=1M count=1 >/dev/null 2>/dev/null || error \"writing file.$i\"\n\tdone\n    sleep 1\n\tfor i in $(seq 1 ${nb_files_error}); do\n\t\tdd if=/dev/zero of=$RH_ROOT/file.$i.fail bs=1M count=1 >/dev/null 2>/dev/null || error \"writing 
file.$i.fail\"\n\tdone\n\n    # read fs content\n\tif (( $no_log )); then\n\t\t$RH -f $RBH_CFG_DIR/$config_file --scan -l DEBUG -L rh_chglogs.log  --once || error \"\"\n\telse\n\t\t$RH -f $RBH_CFG_DIR/$config_file --readlog -l DEBUG -L rh_chglogs.log  --once || error \"\"\n\tfi\n    check_db_error rh_chglogs.log\n\n\techo \"2-Sleeping $sleep_time sec...\"\n    sleep $sleep_time\n\n\techo \"3-Applying migration policy ($policy_str)...\"\n\n\t$RH -f $RBH_CFG_DIR/$config_file --run=migration --force --target=all -l DEBUG -L rh_migr.log   || error \"\"\n\n    [ \"$DEBUG\" = \"1\" ] && grep action_params rh_migr.log\n    nb_fail_match=$(count_action_params rh_migr.log arg=fail)\n    nb_ok_match=$(count_action_params rh_migr.log arg=ok)\n\n    echo \"$nb_fail_match failed copies, $nb_ok_match successful copies\"\n    (($nb_ok_match == $nb_files_ok)) || error \"expected $nb_files_ok successful copies (got $nb_ok_match)\"\n    # migration should have been stopped before migrating all\n    (($nb_fail_match == $nb_files_error)) && error \"migration should have stopped before migrating all\"\n    grep \"suspending policy run\" rh_migr.log || error \"migration should have been suspended\"\n}\n\n\n\n\nfunction xattr_test\n{\n\tconfig_file=$1\n\tsleep_time=$2\n\tpolicy_str=\"$3\"\n\n\tif (( $is_lhsm + $is_hsmlite == 0 )); then\n\t\techo \"HSM test only: skipped\"\n\t\tset_skipped\n\t\treturn 1\n\tfi\n\n\tclean_logs\n\tif (( $no_log == 0 )); then\n\t\techo \"Initial scan of empty filesystem\"\n\t\t$RH -f $RBH_CFG_DIR/$config_file --scan -l DEBUG -L rh_chglogs.log  --once || error \"\"\n\tfi\n\n\t# create and fill 10 files\n\techo \"1-Modifing files...\"\n\tfor i in `seq 1 3`; do\n\t\tdd if=/dev/zero of=$RH_ROOT/file.$i bs=1M count=10 >/dev/null 2>/dev/null || error \"writing file.$i\"\n\tdone\n\n\techo \"2-Setting xattrs...\"\n\techo \"$RH_ROOT/file.1: xattr.user.foo=1\"\n\tsetfattr -n user.foo -v 1 $RH_ROOT/file.1\n\techo \"$RH_ROOT/file.2: xattr.user.bar=1\"\n\tsetfattr -n 
user.bar -v 1 $RH_ROOT/file.2\n\techo \"$RH_ROOT/file.3: none\"\n\n\t# read changelogs\n\tif (( $no_log )); then\n\t\techo \"2-Scanning...\"\n\t\t$RH -f $RBH_CFG_DIR/$config_file --scan -l DEBUG -L rh_chglogs.log  --once || error \"\"\n\telse\n\t\techo \"2-Reading changelogs...\"\n\t\t$RH -f $RBH_CFG_DIR/$config_file --readlog -l DEBUG -L rh_chglogs.log  --once || error \"\"\n\tfi\n    \tcheck_db_error rh_chglogs.log\n\n\techo \"3-Applying migration policy ($policy_str)...\"\n\t# start a migration files should not be migrated this time\n\t$RH -f $RBH_CFG_DIR/$config_file --run=migration --target=all -l DEBUG -L rh_migr.log   || error \"\"\n\n\tnb_migr=`grep \"$ARCH_STR\" rh_migr.log | wc -l`\n\tif (($nb_migr != 0)); then\n\t\terror \"********** TEST FAILED: No migration expected, $nb_migr started\"\n\telse\n\t\techo \"OK: no files migrated\"\n\tfi\n\n\techo \"4-Sleeping $sleep_time seconds...\"\n\tsleep $sleep_time\n\n\techo \"3-Applying migration policy again ($policy_str)...\"\n\t$RH -f $RBH_CFG_DIR/$config_file --run=migration --target=all -l DEBUG -L rh_migr.log\n\n\tnb_migr=`grep \"$ARCH_STR\" rh_migr.log |  wc -l`\n\tif (($nb_migr != 3)); then\n\t\terror \"********** TEST FAILED: $expected_migr migrations expected, $nb_migr started\"\n\telse\n\t\techo \"OK: $nb_migr files migrated\"\n\n        # checking policy rule\n        nb_migr_arch1=$(count_action_params rh_migr.log class=xattr_bar)\n        nb_migr_arch2=$(count_action_params rh_migr.log class=xattr_foo)\n        nb_migr_arch3=`grep \"matches the condition for policy rule 'default'\" rh_migr.log | wc -l`\n\n        if (( $nb_migr_arch1 != 1 || $nb_migr_arch2 != 1 || $nb_migr_arch3 != 1 )); then\n            error \"********** wrong policy cases: 1x$nb_migr_arch1/2x$nb_migr_arch2/3x$nb_migr_arch3 (1x1/2x1/3x1 expected)\"\n            cp rh_migr.log /tmp/xattr_test.$$\n            echo \"Log saved as /tmp/xattr_test.$$\"\n        else\n            echo \"OK: 1 file for each policy case\"\n        fi\n\n  
      # checking archive nums\n        nb_migr_arch1=$(count_action_params rh_migr.log archive_id=1)\n        nb_migr_arch2=$(count_action_params rh_migr.log archive_id=2)\n        nb_migr_arch3=$(count_action_params rh_migr.log archive_id=3)\n\n        if (( $nb_migr_arch1 != 1 || $nb_migr_arch2 != 1 || $nb_migr_arch3 != 1 )); then\n            error \"********** wrong archive_ids: 1x$nb_migr_arch1/2x$nb_migr_arch2/3x$nb_migr_arch3 (1x1/2x1/3x1 expected)\"\n        else\n            echo \"OK: 1 file to each archive_id\"\n        fi\n\tfi\n\n}\n\nfunction link_unlink_remove_test\n{\n\tconfig_file=$1\n\texpected_rm=$2\n\tsleep_time=$3\n\tpolicy_str=\"$4\"\n    cl_delay=6 # time between action and its impact on rbh-report\n\n\tif (( $is_lhsm + $is_hsmlite == 0 )); then\n\t\techo \"HSM test only: skipped\"\n\t\tset_skipped\n\t\treturn 1\n\tfi\n\tif (( $no_log )); then\n\t\techo \"changelog disabled: skipped\"\n\t\tset_skipped\n\t\treturn 1\n\tfi\n\n\tclean_logs\n\n\techo \"1-Start reading changelogs in background...\"\n\t# read changelogs\n\t$RH -f $RBH_CFG_DIR/$config_file --readlog -l DEBUG -L rh_chglogs.log  --detach --pid-file=rh.pid || error \"\"\n\n\tsleep 1\n\n\t# write file.1 and force immediate migration\n\techo \"2-Writing data to file.1...\"\n\tdd if=/dev/zero of=$RH_ROOT/file.1 bs=1M count=10 >/dev/null 2>/dev/null || error \"writing file.1\"\n\n\tsleep $cl_delay\n\n\tif (( $is_lhsm != 0 )); then\n\t\techo \"3-Archiving file....1\"\n\t\tflush_data\n\t\t$LFS hsm_archive $RH_ROOT/file.1 || error \"executing lfs hsm_archive\"\n\n\t\techo \"3bis-Waiting for end of data migration...\"\n\t\twait_done 60 || error \"Migration timeout\"\n\telif (( $is_hsmlite != 0 )); then\n\t\t$RH -f $RBH_CFG_DIR/$config_file $SYNC_OPT -l DEBUG  -L rh_migr.log || error \"executing $CMD --sync\"\n\tfi\n\n\t# create links on file.1 files\n\techo \"4-Creating hard links to $RH_ROOT/file.1...\"\n\tln $RH_ROOT/file.1 $RH_ROOT/link.1 || error \"ln\"\n\tln $RH_ROOT/file.1 
$RH_ROOT/link.2 || error \"ln\"\n\n\tsleep 1\n\n\t# removing all files\n        echo \"5-Removing all links to file.1...\"\n\trm -f $RH_ROOT/link.* $RH_ROOT/file.1\n\n\tsleep $cl_delay\n\n\techo \"Checking report...\"\n\t$REPORT -f $RBH_CFG_DIR/$config_file --deferred-rm --csv -q > rh_report.log\n\tnb_ent=`wc -l rh_report.log | awk '{print $1}'`\n\tif (( $nb_ent != $expected_rm )); then\n\t\terror \"Wrong number of deferred rm reported: $nb_ent\"\n\tfi\n\tgrep \"$RH_ROOT/file.1\" rh_report.log > /dev/null || error \"$RH_ROOT/file.1 not found in deferred rm list\"\n\n\t# deferred remove delay is not reached: nothing should be removed\n\techo \"6-Performing HSM remove requests (before delay expiration)...\"\n\t$RH -f $RBH_CFG_DIR/$config_file --run=hsm_remove --target=all --force -l DEBUG -L rh_rm.log  || error \"hsm_remove\"\n\n\tnb_rm=`grep \"$HSMRM_STR\" rh_rm.log | wc -l`\n\tif (($nb_rm != 0)); then\n\t\techo \"********** test failed: no removal expected, $nb_rm done\"\n\telse\n\t\techo \"OK: no rm done\"\n\tfi\n\n\techo \"7-Sleeping $sleep_time seconds...\"\n\tsleep $sleep_time\n\n\techo \"8-Performing HSM remove requests (after delay expiration)...\"\n\t$RH -f $RBH_CFG_DIR/$config_file --run=hsm_remove --target=all --force -l DEBUG -L rh_rm.log  || error \"hsm_remove\"\n\n\tnb_rm=`grep \"$HSMRM_STR\" rh_rm.log | wc -l`\n\tif (($nb_rm != $expected_rm)); then\n\t\terror \"********** TEST FAILED: $expected_rm removals expected, $nb_rm done\"\n\telse\n\t\techo \"OK: $nb_rm files removed from archive\"\n\tfi\n\n    grep \"Performing new request with a limit\" rh_rm.log && error \"No request splitting is expected for SOFT_RM table\"\n\n\t# kill event handler\n\tpkill -9 $PROC\n\n}\n\nfunction test_hsm_remove\n{\n    config_file=$1\n    expected_rm=$2\n    sleep_time=$3\n    policy_str=\"$4\"\n\n    if (( $is_lhsm + $is_hsmlite == 0 )); then\n        echo \"HSM test only: skipped\"\n        set_skipped\n        return 1\n    fi\n    if (( $no_log )); then\n        
echo \"changelog disabled: skipped\"\n        set_skipped\n        return 1\n    fi\n\n    clean_logs\n\n    nb_files=$((2*$expected_rm))\n\n    # write 2 x expected_rm\n    echo \"Writing $nb_files files...\"\n    for i in $(seq 1 $nb_files); do\n        dd if=/dev/zero of=$RH_ROOT/file.$i bs=1M count=1 >/dev/null 2>/dev/null || error \"writing file.$i\"\n    done\n\n    # initial scan (files are known as 'new')\n    $RH -f $RBH_CFG_DIR/$config_file --readlog -l DEBUG -L rh_scan.log  --once || error \"\"\n    check_db_error rh_scan.log\n\n    # create 2 more files that robinhood won't know before their removal\n    # (1 archived, 1 not archived)\n    dd if=/dev/zero of=$RH_ROOT/file.a bs=1M count=1 >/dev/null 2>/dev/null || error \"writing file.a\"\n    dd if=/dev/zero of=$RH_ROOT/file.b bs=1M count=1 >/dev/null 2>/dev/null || error \"writing file.b\"\n\n    extra=0\n    tolerance=0\n    extra_list=()\n    extra_excl=()\n    # archive them all\n    if (( $is_lhsm != 0 )); then\n        extra=1 # +1 for file.a\n        extra_list=(a) # should be in softrm\n        extra_excl=(b) # shouldn't be in softrm\n        echo \"Archiving $expected_rm files...\"\n        flush_data\n        for i in $(seq 1 $expected_rm) a; do\n            $LFS hsm_archive $RH_ROOT/file.$i || error \"executing lfs hsm_archive\"\n        done\n\n        echo \"Waiting for end of data migration...\"\n        wait_done 60 || error \"Migration timeout\"\n    elif (( $is_hsmlite != 0 )); then\n        # allow 2 extra entries in SOFTRM (robinhood may doubt about file.a and file.b)\n        tolerance=2\n        for i in $(seq 1 $expected_rm); do\n            $RH -f $RBH_CFG_DIR/$config_file --run=migration --target=file:$RH_ROOT/file.$i --ignore-conditions -l DEBUG  -L rh_migr.log || error \"migrating $RH_ROOT/file.$i\"\n        done\n    fi\n\n    # removing all files\n    echo \"Removing all files\"\n    rm -f $RH_ROOT/file.*\n\n    # make sure rm operations are in the changelog\n    sleep 1\n\n  
  # robinhood reads the log but entries no longer exist: make sure it takes the right decision in this case\n    $RH -f $RBH_CFG_DIR/$config_file --readlog --once -l DEBUG -L rh_chglogs.log  --once || error \"reading changelogs\"\n    check_db_error rh_chglogs.log\n\n    echo \"Checking report...\"\n    $REPORT -f $RBH_CFG_DIR/$config_file --deferred-rm --csv -q > rh_report.log\n    [ \"$DEBUG\" = \"1\" ] && cat rh_report.log\n    nb_ent=`wc -l rh_report.log | awk '{print $1}'`\n    if (( $nb_ent > $expected_rm  + $extra + $tolerance )) || (( $nb_ent < $expected_rm  + $extra )); then\n        error \"Wrong number of deferred rm reported: $nb_ent / $expected_rm  + $extra (tolerance $tolerance)\"\n    fi\n    for i in $(seq 1 $expected_rm) $extra_list; do\n        grep \"$RH_ROOT/file.$i\" rh_report.log || error \"$RH_ROOT/file.$i not found in deferred rm list\"\n    done\n\n    for i in $(seq $(($expected_rm+1)) $nb_files) $extra_excl; do\n        grep \"$RH_ROOT/file.$i\" rh_report.log && error \"$RH_ROOT/file.$i shouldn't be in deferred rm list\"\n    done\n\n    # deferred remove delay is not reached: nothing should be removed\n    echo \"Performing HSM remove requests (before delay expiration)...\"\n    $RH -f $RBH_CFG_DIR/$config_file --run=hsm_remove --target=all --force -l DEBUG -L rh_rm.log  || error \"hsm_remove\"\n\n    nb_rm=`grep \"$HSMRM_STR\" rh_rm.log | wc -l`\n    if (($nb_rm != 0)); then\n        echo \"********** test failed: no removal expected, $nb_rm done\"\n    else\n        echo \"OK: no rm done\"\n    fi\n\n    echo \"Sleeping $sleep_time seconds...\"\n    sleep $sleep_time\n\n    echo \"Performing HSM remove requests (after delay expiration)...\"\n    $RH -f $RBH_CFG_DIR/$config_file --run=hsm_remove --target=all --force -l DEBUG -L rh_rm.log  || error \"hsm_remove\"\n\n    nb_rm=`grep \"$HSMRM_STR\" rh_rm.log | wc -l`\n    if (( $nb_rm > $expected_rm  + $extra + $tolerance )) || (( $nb_rm < $expected_rm  + $extra )); then\n        error 
\"********** TEST FAILED: $expected_rm+$extra removals expected (tolerance $tolerance), $nb_rm done\"\n    else\n        echo \"OK: $nb_rm files removed from archive\"\n    fi\n\n    grep \"Performing new request with a limit\" rh_rm.log && error \"No request splitting is expected for SOFT_RM table\"\n\n}\n\n# test that hsm_remove requests are sent to the right archive\nfunction test_lhsm_remove\n{\n    config_file=$1\n    nb_archive=$2\n    sleep_time=$3\n    policy_str=\"$4\"\n\n    if (( $is_lhsm == 0 )); then\n        echo \"Lustre/HSM test only: skipped\"\n        set_skipped\n        return 1\n    fi\n    if (( $no_log )); then\n        echo \"changelog disabled: skipped\"\n        set_skipped\n        return 1\n    fi\n\n    clean_logs\n\n    local default_archive=$(lctl get_param -n mdt.lustre-MDT0000.hsm.default_archive_id)\n\n    # create nb_archive + 3 more files to test:\n    # - hsm_archive with no option\n    # - hsm_archive with -a 0\n    # - file that will be deleted before robinhood gets its archive_id\n\n    id=()\n    name=()\n    echo \"Writing files...\"\n    for i in $(seq 1 $nb_archive) no_opt 0 x ; do\n        dd if=/dev/zero of=$RH_ROOT/file.$i bs=1M count=1 >/dev/null 2>/dev/null || error \"writing file.$i\"\n        name+=( \"$i\" )\n        id+=( \"$(get_id \"$RH_ROOT/file.$i\")\" )\n    done\n\n    # initial scan (files are known as 'new')\n    $RH -f $RBH_CFG_DIR/$config_file --scan -l DEBUG -L rh_scan.log  --once || error \"\"\n    check_db_error rh_scan.log\n\n    # archive then\n    echo \"Archiving files...\"\n    flush_data\n    for i in $(seq 1 $nb_archive); do\n        $LFS hsm_archive -a $i $RH_ROOT/file.$i || error \"executing lfs hsm_archive\"\n    done\n    $LFS hsm_archive $RH_ROOT/file.no_opt || error \"executing lfs hsm_archive\"\n    $LFS hsm_archive -a 0 $RH_ROOT/file.0 || error \"executing lfs hsm_archive\"\n\n    echo \"Waiting for end of data migration...\"\n    wait_done 60 || error \"Migration timeout\"\n\n    # 
make sure rm operations are in the changelog\n    sleep 1\n\n    # robinhood reads the archive_id\n    $RH -f $RBH_CFG_DIR/$config_file --readlog --once -l DEBUG -L rh_chglogs.log  --once || error \"reading changelogs\"\n    check_db_error rh_chglogs.log\n\n    # now archive and remove the last file\n    $LFS hsm_archive -a 2 $RH_ROOT/file.x || error \"executing lfs hsm_archive\"\n    echo \"Waiting for end of data migration...\"\n    wait_done 60 || error \"Migration timeout\"\n\n    echo \"Removing all files\"\n    rm -f $RH_ROOT/file.*\n\n    # make sure rm operations are in the changelog\n    sleep 1\n\n    # read unlink records\n    $RH -f $RBH_CFG_DIR/$config_file --readlog --once -l DEBUG -L rh_chglogs.log  --once || error \"reading changelogs\"\n    check_db_error rh_chglogs.log\n\n\n    echo \"Checking report...\"\n    $REPORT -f $RBH_CFG_DIR/$config_file --deferred-rm --csv -q > rh_report.log\n\n    nb_ent=`wc -l rh_report.log | awk '{print $1}'`\n    if (( $nb_ent != $nb_archive + 3 )); then\n        error \"Wrong number of deferred rm reported: $nb_ent\"\n    fi\n\n    for i in $(seq 1 ${#id[@]}); do\n        n=${name[$((i-1))]}\n        fid=${id[$((i-1))]}\n        grep \"$fid\" rh_report.log | grep $RH_ROOT/file.$n || error \"$RH_ROOT/file.$n ($fid) not found in deferred rm list\"\n    done\n\n    echo \"Applying deferred remove operations\"\n    $RH -f $RBH_CFG_DIR/$config_file --run=hsm_remove --target=all --force-all -l DEBUG -L rh_rm.log  || error \"hsm_remove\"\n\n    for i in $(seq 1 ${#id[@]}); do\n        n=${name[$((i-1))]}\n        fid=${id[$((i-1))]}\n\n        echo $n\n        # specific cases\n        if [[ \"$n\" == \"0\" ]] || [[ \"$n\" == \"no_opt\" ]]; then\n            # robinhood should know the entry was in default archive\n            grep \"action REMOVE\" rh_rm.log | grep $fid | grep \"archive_id=$default_archive\" ||\n                error \"REMOVE action for $RH_ROOT/file.$n ($fid) should be sent to default archive 
$default_archive\"\n        elif [[ \"$n\" == \"x\" ]]; then\n            # robinhood doesn't know in was archive was the entry\n            # send to archive 0 (must be interpreted by coordinator as a broadcast to all archives)\n            grep \"action REMOVE\" rh_rm.log | grep $fid | grep \"archive_id=0\" ||\n                error \"REMOVE action for $RH_ROOT/file.$n ($fid) should be sent to archive 0 (broadcast)\"\n        else\n            # should be send to archive $i\n            grep \"action REMOVE\" rh_rm.log | grep \"$fid\" | grep \"archive_id=$i\" ||\n                error \"REMOVE action for $RH_ROOT/file.$n ($fid) should be sent to archive_id $i\"\n        fi\n    done\n\n    nb_rm=`grep \"$HSMRM_STR\" rh_rm.log | wc -l`\n    if (($nb_rm != $nb_archive + 3)); then\n        error \"********** TEST FAILED: $nb_archive + 3 removals expected, $nb_rm done\"\n    else\n        echo \"OK: $nb_rm files removed from archive\"\n    fi\n\n    grep \"Performing new request with a limit\" rh_rm.log && error \"No request splitting is expected for SOFT_RM table\"\n}\n\nfunction populate\n{\n\tlocal entries=$1\n\tlocal i\n\tfor i in `seq 1 $entries`; do\n\t\t((dir_c=$i % 10))\n\t\t((subdir_c=$i % 100))\n\t\tdir=$RH_ROOT/dir.$dir_c/subdir.$subdir_c\n\t\tmkdir -p $dir || error \"creating directory $dir\"\n\t\techo \"file.$i\" > $dir/file.$i || error \"creating file $dir/file.$i\"\n\tdone\n}\n\n\nfunction mass_softrm\n{\n\tconfig_file=$1\n\tsleep_time=$2\n\tentries=$3\n\tpolicy_str=\"$4\"\n\n\tif (( $is_lhsm + $is_hsmlite == 0 )); then\n\t\techo \"HSM test only: skipped\"\n\t\tset_skipped\n\t\treturn 1\n\tfi\n\n\tclean_logs\n\n\t# populate filesystem\n\techo \"1-Populating filesystem...\"\n\tpopulate $entries\n\n\t# how many subdirs in dir.1?\n\tnbsubdirs=$( ls $RH_ROOT/dir.1 | grep subdir | wc -l )\n\n\techo \"2-Initial scan...\"\n\t$RH -f $RBH_CFG_DIR/$config_file --scan --once -l DEBUG -L rh_scan.log || error \"scanning filesystem\"\n    \tcheck_db_error 
 rh_scan.log\n\n\tgrep \"Full scan of\" rh_scan.log | tail -1\n\n\tsleep 1\n\n\t# archiving files\n\techo \"3-Archiving files...\"\n\n\tif (( $is_lhsm != 0 )); then\n\t\tflush_data\n\t\t$RH -f $RBH_CFG_DIR/$config_file $SYNC_OPT -l DEBUG -L rh_migr.log || error \"flushing data to backend\"\n\n\t\techo \"3bis-Waiting for end of data migration...\"\n\t\twait_done 120 || error \"Migration timeout\"\n\t\techo \"update db content...\"\n\t\t$RH -f $RBH_CFG_DIR/$config_file --readlog --once -l DEBUG -L rh_chglogs.log || error \"reading chglog\"\n\n\telif (( $is_hsmlite != 0 )); then\n\t\t$RH -f $RBH_CFG_DIR/$config_file $SYNC_OPT -l DEBUG -L rh_migr.log || error \"flushing data to backend\"\n\tfi\n\tgrep \"Migration summary\" rh_migr.log\n\n\techo \"Checking stats after 1st scan...\"\n\t$REPORT -f $RBH_CFG_DIR/$config_file --status-info $STATUS_MGR --csv -q --count-min=1 | grep -v ' dir,' > fsinfo.1\n\tcat fsinfo.1\n\t$REPORT -f $RBH_CFG_DIR/$config_file --deferred-rm --csv -q > deferred.1\n\t(( `wc -l fsinfo.1 | awk '{print $1}'` == 1 )) || error \"a single file status is expected after data migration\"\n\tstatus=`cat fsinfo.1 | cut -d \",\" -f 2 | tr -d ' '`\n\tnb=`cat fsinfo.1 | grep synchro | cut -d \",\" -f 3 | tr -d ' '`\n\t[[ -n $nb ]] || nb=0\n\t[[ \"$status\" == \"synchro\" ]] || error \"status expected after data migration: synchro, got $status\"\n\t(( $nb == $entries )) || error \"$entries entries expected, got $nb\"\n\t(( `wc -l deferred.1 | awk '{print $1}'`==0 )) || error \"no deferred rm expected after first scan\"\n\trm -f fsinfo.1 deferred.1\n\n\t# removing some files\n        echo \"4-Removing files in $RH_ROOT/dir.1...\"\n\trm -rf \"$RH_ROOT/dir.1\" || error \"removing files in $RH_ROOT/dir.1\"\n\n\t# at least 1 second must be elapsed since last entry change (sync)\n\tsleep 1\n\n\techo \"5-Update DB with a new scan...\"\n\t$RH -f $RBH_CFG_DIR/$config_file --scan --once -l DEBUG -L rh_scan.log || error \"scanning filesystem\"\n    \tcheck_db_error 
rh_scan.log\n\n\tgrep \"Full scan of\" rh_scan.log | tail -1\n\n\techo \"Checking stats after 2nd scan...\"\n\t$REPORT -f $RBH_CFG_DIR/$config_file --status-info $STATUS_MGR --csv -q --count-min=1 | grep -v ' dir,' > fsinfo.2\n\tcat fsinfo.2\n\t$REPORT -f $RBH_CFG_DIR/$config_file --deferred-rm --csv -q > deferred.2\n\t# 100 files were in the removed directory\n\t(( `wc -l fsinfo.2 | awk '{print $1}'` == 1 )) || error \"a single file status is expected after data migration\"\n\tstatus=`cat fsinfo.2 | cut -d \",\" -f 2 | tr -d ' '`\n\tnb=`cat fsinfo.2 | grep synchro | cut -d \",\" -f 3 | tr -d ' '`\n\t[[ \"$status\" == \"synchro\" ]] || error \"status expected after data migration: synchro, got $status\"\n\t(( $nb == $entries - 100 )) || error $(($entries - 100)) \" entries expected, got $nb\"\n\tnb=`wc -l deferred.2 | awk '{print $1}'`\n\t((expect=100 + $nbsubdirs + 1))\n\t(( $nb == $expect )) || error \"$expect deferred rm expected after first scan, got $nb\"\n\trm -f fsinfo.2 deferred.2\n\n}\n\nfunction purge_test\n{\n\tconfig_file=$1\n\texpected_purge=$2\n\tsleep_time=$3\n\tpolicy_str=\"$4\"\n\n\tclean_logs\n\n\t# initial scan\n\t$RH -f $RBH_CFG_DIR/$config_file --scan --once -l DEBUG -L rh_chglogs.log\n    \tcheck_db_error rh_chglogs.log\n\n\t# fill 10 files and archive them\n\n\techo \"1-Modifing files...\"\n\tfor i in a `seq 1 10`; do\n\t\tdd if=/dev/zero of=$RH_ROOT/file.$i bs=1M count=10 >/dev/null 2>/dev/null || error \"$? 
writing file.$i\"\n\n\t\tif (( $is_lhsm != 0 )); then\n\t\t\tflush_data\n\t\t\t$LFS hsm_archive $RH_ROOT/file.$i || error \"lfs hsm_archive\"\n\t\tfi\n\tdone\n\tif (( $is_lhsm != 0 )); then\n\t\twait_done 60 || error \"Copy timeout\"\n\tfi\n\n\tsleep 1\n\tif (( $no_log )); then\n\t\techo \"2-Scanning the FS again to update file status (after 1sec)...\"\n\t\t$RH -f $RBH_CFG_DIR/$config_file --scan -l DEBUG -L rh_chglogs.log  --once || error \"\"\n\telse\n\t\techo \"2-Reading changelogs to update file status (after 1sec)...\"\n\t\t$RH -f $RBH_CFG_DIR/$config_file --readlog -l DEBUG -L rh_chglogs.log  --once || error \"\"\n\n\t\tif (($is_lhsm != 0)); then\n\t\t\t((`grep \"archive,rc=0\" rh_chglogs.log | wc -l` == 11)) || error \"Not enough archive events in changelog!\"\n\t\tfi\n\tfi\n    \tcheck_db_error rh_chglogs.log\n\n\t# use robinhood for flushing\n\tif (( $is_hsmlite != 0 )); then\n\t\techo \"2bis-Archiving files\"\n\t\t$RH -f $RBH_CFG_DIR/$config_file $SYNC_OPT -l DEBUG  -L rh_migr.log || error \"executing migration policy\"\n\t\tarch_count=`grep \"$ARCH_STR\" rh_migr.log | wc -l`\n\t\t(( $arch_count == 11 )) || error \"11 archive commands expected\"\n\tfi\n\n\techo \"3-Applying purge policy ($policy_str)...\"\n\t# no purge expected here\n\t$RH -f $RBH_CFG_DIR/$config_file --run=purge --target=all --no-limit -l DEBUG -L rh_purge.log  || error \"\"\n\n        nb_purge=`grep \"$REL_STR\" rh_purge.log | wc -l`\n\n        if (($nb_purge != 0)); then\n                error \"********** TEST FAILED: No release actions expected, $nb_purge done\"\n        else\n                echo \"OK: no file released\"\n        fi\n\n\techo \"4-Sleeping $sleep_time seconds...\"\n\tsleep $sleep_time\n\n\techo \"5-Applying purge policy again ($policy_str)...\"\n\t$RH -f $RBH_CFG_DIR/$config_file $PURGE_OPT -l DEBUG -L rh_purge.log --once || error \"\"\n\n    nb_purge=`grep \"$REL_STR\" rh_purge.log | wc -l`\n\n    if (($nb_purge != $expected_purge)); then\n            error 
\"********** TEST FAILED: $expected_purge release actions expected, $nb_purge done\"\n    else\n            echo \"OK: $nb_purge files released\"\n    fi\n\n\t# stop RH in background\n#\tkill %1\n}\n\nfunction test_custom_purge\n{\n    config_file=$1\n    sleep_time=$2\n    policy_str=\"$3\"\n\n    clean_logs\n\n    # initial scan\n    echo \"Populating filesystem...\"\n    for i in `seq 1 10`; do\n        dd if=/dev/zero of=$RH_ROOT/file.$i bs=1M count=10 >/dev/null 2>/dev/null || error \"writing file.$i\"\n    done\n    # create malicious file names to test vulnerability\n    touch \"$RH_ROOT/foo1 \\`pkill -9 $CMD\\`\" || error \"couldn't create file\"\n    touch \"$RH_ROOT/foo2 ; exit 1\" || error \"couldn't create file\"\n    touch \"$RH_ROOT/foo3' ';' 'exit' '1'\" || error \"couldn't create file\"\n\n    echo \"Initial scan...\"\n    $RH -f $RBH_CFG_DIR/$config_file --scan --once -l DEBUG -L rh_scan.log\n    check_db_error rh_scan.log\n\n    if (( $is_lhsm != 0 )); then\n        # Archive files to be able to release them afterward\n        flush_data\n        $RH -f $RBH_CFG_DIR/$config_file $SYNC_OPT -l DEBUG -L rh_migr.log ||\n            error \"flushing data to backend\"\n    \tgrep \"run summary\" rh_migr.log\n\n        echo \"Waiting for end of data migration...\"\n        wait_done 120 || error \"Migration timeout\"\n        echo \"update db content...\"\n        $RH -f $RBH_CFG_DIR/$config_file --readlog --once -l DEBUG -L rh_chglogs.log ||\n            error \"reading chglog\"\n\n    elif (( $shook != 0 )); then\n\t# need archive before release\n        echo \"Archiving data...\"\n        $RH -f $RBH_CFG_DIR/$config_file $SYNC_OPT -l DEBUG -L rh_migr.log ||\n            error \"flushing data to backend\"\n    \tgrep \"run summary\" rh_migr.log\n    fi\n\n    echo \"Sleeping $sleep_time seconds...\"\n    sleep $sleep_time\n\n    if [ -z \"$POSIX_MODE\" ]; then\n        fsname=$(df $RH_ROOT/. 
| xargs | awk '{print $(NF-5)}' |\n                 awk -F '/' '{print $(NF)}')\n    else\n        fsname=$(df $RH_ROOT/. | xargs | awk '{print $(NF-5)}')\n    fi\n    if (( $no_log == 0 )); then\n        # get fids of entries\n        fids=()\n        for i in `seq 1 10`; do\n            fids[$i]=$(get_id \"$RH_ROOT/file.$i\")\n        done\n        i=11\n        for f in  \"$RH_ROOT/foo1 \\`pkill -9 $CMD\\`\" \\\n                  \"$RH_ROOT/foo2 ; exit 1\" \\\n                  \"$RH_ROOT/foo3' ';' 'exit' '1'\" ; do\n            fids[$i]=$(get_id \"$f\")\n            ((i=$i+1))\n        done\n        [ \"$DEBUG\" = \"1\" ] && echo \"fsname=$fsname, fids=${fids[*]}\"\n    fi\n\n    echo \"Applying purge policy ($policy_str)...\"\n    # NB: Lustre reports less space used as expected so the purge trigger\n    # target is not high enough to purge all file => use the no-limit option\n    $RH -f $RBH_CFG_DIR/$config_file $PURGE_OPT --no-limit -l FULL \\\n        -L rh_purge.log --once || error \"purging files\"\n    check_db_error rh_purge.log\n\n    nb_purge=`grep \"$REL_STR\" rh_purge.log | wc -l`\n    if (($nb_purge != 13)); then\n        error \"********** TEST FAILED: 13 purge actions expected, $nb_purge done\"\n    else\n        echo \"OK: 13 actions done\"\n    fi\n\n    # checking that the custom command was called for each file\n    for  i in `seq 1 10`; do\n        line=$(grep \"action: cmd\" rh_purge.log | grep 'rm_script' | grep $RH_ROOT/file.$i)\n        if [ -z \"$line\" ]; then\n            error \"No action found on $RH_ROOT/file.$i\"\n            continue\n        fi\n        # split args\n    #2016/05/10 10:17:08 [5529/4] purge | [0x200000400:0x133ac:0x0]: action: cmd(./rm_script lustre 0x200000400:0x133ac:0x0 /mnt/lustre/file.1)\n        args=($(echo \"$line\" | sed -e \"s/.*rm_script//\" -e \"s/)$//\"))\n        fn=${args[0]}\n        id=${args[1]}\n        p=${args[2]}\n        [ \"$DEBUG\" = \"1\" ] && echo \"action: fsname=$fn, fid=$id, 
path=$p\"\n\n        [ $fn = $fsname ] || error \"invalid fsname $fn != $fsname\"\n        # only compare fids for lustre 2.x\n        if (( $no_log == 0 )); then\n            [ $id = ${fids[$i]} ] || error \"invalid fid $id != ${fids[$i]}\"\n        fi\n        [ $p = $RH_ROOT/file.$i ] || error \"invalid path $p != $RH_ROOT/file.$i\"\n\n        [ -f $RH_ROOT/file.$i ] && error \"$RH_ROOT/file.$i still exists after purge command\"\n    done\n\n    # same test for special file names\n    i=11\n    for f in  \"$RH_ROOT/foo1 \\`pkill -9 $CMD\\`\" \"$RH_ROOT/foo2 ; exit 1\" \"$RH_ROOT/foo3' ';' 'exit' '1'\" ; do\n        f0=$(echo \"$f\" | awk '{print $1}')\n        line=$(grep \"action: cmd\" rh_purge.log | grep 'rm_script' | grep \"$f0\")\n        if [ -z \"$line\" ]; then\n            error \"No action found on $f\"\n            continue\n        fi\n        # split args\n        args=($(echo \"$line\" | sed -e \"s/.*rm_script//\" -e \"s/)$//\"))\n        fn=${args[0]}\n        id=${args[1]}\n        unset args[0]\n        unset args[1]\n        p=${args[@]}\n        [ \"$DEBUG\" = \"1\" ] && echo \"action: fsname=$fn, fid=$id, path=$p\"\n\n        [ $fn = $fsname ] || error \"invalid fsname $fn != $fsname\"\n        # only compare fids for lustre 2.x\n        if (( $no_log == 0 )); then\n            [ $id = ${fids[$i]} ] || error \"invalid fid $id != ${fids[$i]}\"\n        fi\n        [ \"$p\" = \"$f\" ] || error \"invalid path $p != $f\"\n\n        [ -f \"$f\" ] && error \"$f still exists after purge command\"\n        ((i=$i+1))\n    done\n\n    return 0\n}\n\n\nfunction test_default\n{\n\tconfig_file=$1\n\tpolicy_str=\"$2\"\n\n\tclean_logs\n\n    # matrix of files (m)igration/(p)urge:\n    #       *.A  *.B  *.C\n    #   X*        m\n    #   Y*   p    m/p   p\n    #   Z*        m\n    for pre in X Y Z; do\n        for suf in A B C; do\n            touch $RH_ROOT/$pre.$suf || error \"touch $RH_ROOT/$pre.$suf\"\n        done\n    done\n\n    # wait for entries to 
be eligible\n    sleep 1\n\n\t# initial scan\n    $RH -f $RBH_CFG_DIR/$config_file --scan --once -l DEBUG -L rh_chglogs.log || error \"Initial scan\"\n    check_db_error rh_chglogs.log\n\n\t# archive the file (if applicable)\n\tif (( $is_hsmlite + $is_lhsm != 0 )); then\n        $RH -f $RBH_CFG_DIR/$config_file $SYNC_OPT -l DEBUG -L rh_migr.log || error \"Migration\"\n\n        # check archived files\n        # *.B files must be archived. other files should be.\n        nb_b=$(grep \"$ARCH_STR\" rh_migr.log | grep -E \"$RH_ROOT/[XYZ]\\.B\"| wc -l)\n        nb_ac=$(grep \"$ARCH_STR\" rh_migr.log | grep -E \"$RH_ROOT/[XYZ]\\.[AC]\"| wc -l)\n\n        [ \"$DEBUG\" = \"1\" ] && grep \"$ARCH_STR\" rh_migr.log\n\n        (( $nb_b != 3 )) && error \"unexpected number of migrated *.B files: $nb_b != 3\"\n        (( $nb_ac != 0 )) && error \"unexpected number of migrated *.[AC] files: $nb_ac != 0\"\n    fi\n\n    # purge the files (if applicable)\n    if (( ($is_hsmlite == 0) || ($shook != 0) )); then\n\n        if (($is_lhsm != 0)); then\n    \t\twait_done 60 || error \"Migration timeout\"\n\n            # read changelogs to be aware of migration success\n            :> rh_chglogs.log\n            $RH -f $RBH_CFG_DIR/$config_file --readlog --once  -l DEBUG -L rh_chglogs.log || error \"reading changelog\"\n            check_db_error rh_chglogs.log\n        fi\n\n        # wait for entries to be eligible\n        sleep 1\n\n        $RH -f $RBH_CFG_DIR/$config_file $PURGE_OPT --once -l DEBUG -L rh_purge.log || error \"Purge\"\n\n        # check purged files\n        # tmpfs: all Y* files can be purged\n        # hsm: only archived files can be purged: Y.B\n\t    if (( $is_lhsm + $shook != 0 )); then\n            nb_purge=1\n            purge_pat=\"Y.B\"\n        else\n            nb_purge=3\n            purge_pat=\"Y*\"\n        fi\n        other=$(( 9 - $nb_purge ))\n\n        nbp=$(grep \"$REL_STR\" rh_purge.log | grep -E \"$RH_ROOT/$purge_pat\"| wc -l)\n        nbnp=$(grep 
\"$REL_STR\" rh_purge.log | grep -vE \"$RH_ROOT/$purge_pat\" | wc -l)\n\n        [ \"$DEBUG\" = \"1\" ] && grep \"$REL_STR\" rh_purge.log\n\n        (( $nbp != $nb_purge )) && error \"unexpected number of purged files matching $purge_pat : $nbp != $nb_purge\"\n        (( $nbnp != 0 )) && error \"unexpected number of purged files matching $purge_pat: $nbnp != 0\"\n    fi\n\n\n\t# stop RH in background\n#\tkill %1\n}\n\nfunction test_undelete\n{\n    local config_file=\"$1\"\n    local policy_str=\"$2\"\n\n    clean_logs\n\n    if (( $is_hsmlite + $is_lhsm == 0 )); then\n        echo \"No undelete for this flavor\"\n        set_skipped\n        return 1\n    fi\n\n    # Using two level of directories allows to fully test the mkdir_recurse()\n    # function defined in src\n    local files=()\n    for path in dir0/dir{1,2}/file{1,2}; do\n        files+=( \"$RH_ROOT/$path\" )\n    done\n\n    mkdir -p \"$RH_ROOT\"/dir0/dir{1,2} || error \"mkdir\"\n    for f in \"${files[@]}\"; do\n        echo 123 > \"$f\" || error \"write\"\n    done\n    local sz1=$(stat -c '%s' \"${files[0]}\")\n    local fid=$(get_id \"$RH_ROOT/dir0/dir1/file1\")\n\n    # initial scan + archive all\n    $RH -f \"$RBH_CFG_DIR/$config_file\" --readlog --once $SYNC_OPT -l DEBUG -L rh_chglogs.log || error \"Initial scan and sync\"\n    check_db_error rh_chglogs.log\n\n    if (( $is_lhsm != 0 )); then\n        wait_done 60 || error \"Copy timeout\"\n\n        # archive is asynchronous: read the changelog to get archive completion status\n        $RH -f \"$RBH_CFG_DIR/$config_file\" --readlog --once -l DEBUG -L rh_chglogs.log || error \"Reading changelog\"\n    fi\n\n    # remove all and read the changelog\n    rm -rf \"$RH_ROOT/dir0\"\n    $RH -f \"$RBH_CFG_DIR/$config_file\" --readlog --once -l DEBUG -L rh_chglogs.log || error \"Reading changelog\"\n    check_db_error rh_chglogs.log\n\n    # list all deleted entries\n    # details about output format:\n    #   - last field of each line is last entry 
path in filesystem\n    #   - test suite uses a single status manager at once: '-s' option not needed\n    $UNDELETE -f \"$RBH_CFG_DIR/$config_file\" -L | grep 'file' | awk '{print $(NF)}' > rh_report.log\n    [ \"$DEBUG\" = \"1\" ] && cat rh_report.log\n    diff <(sort rh_report.log) <(printf '%s\\n' \"${files[@]}\" | sort) ||\n        error \"undelete list does not match the expected output\"\n\n    # list all deleted entried from dir1\n    $UNDELETE -f \"$RBH_CFG_DIR/$config_file\" -L \"$RH_ROOT/dir0/dir1\" | grep \"file\" | awk '{print $(NF)}' > rh_report.log\n    diff <(sort rh_report.log) <(printf '%s\\n' \"${files[@]:0:2}\" | sort) ||\n        error \"undelete list does not match the expected output\"\n\n    # query a single file by path\n    $UNDELETE -f \"$RBH_CFG_DIR/$config_file\" -L \"$RH_ROOT/dir0/dir1/file2\" \\\n        > rh_report.log || error \"list softrm by path\"\n    grep \"$RH_ROOT/dir0/dir1/file2\" rh_report.log || error \"entry missing in report\"\n\n    # query a single file by fid\n    $UNDELETE -f \"$RBH_CFG_DIR/$config_file\" -L \"$fid\" > rh_report.log ||\n        error \"list softrm by fid\"\n    grep \"$RH_ROOT/dir0/dir1/file1\" rh_report.log || error \"entry missing in report\"\n\n    # recover all deleted entries from dir2\n    local undeleted_files=( \"${files[@]:2}\" )\n    $UNDELETE -f \"$RBH_CFG_DIR/$config_file\" -R \"$RH_ROOT/dir0/dir2\" | grep Restoring | cut -d \"'\" -f 2 > rh_report.log\n    diff <(sort rh_report.log) \\\n        <(printf '%s\\n' \"${undeleted_files[@]}\" | sort) ||\n        error \"list of undeleted file does not match the expected output\"\n\n    # query a single file by path\n    $UNDELETE -f \"$RBH_CFG_DIR/$config_file\" -R \"$RH_ROOT/dir0/dir1/file2\" || error \"undelete by path\"\n    undeleted_files+=( \"$RH_ROOT/dir0/dir1/file2\" )\n\n    # query a single file by fid\n    $UNDELETE -f \"$RBH_CFG_DIR/$config_file\" -R \"$fid\" || error \"undelete by fid\"\n    undeleted_files+=( 
\"$RH_ROOT/dir0/dir1/file1\" )\n\n    for f in \"${undeleted_files[@]}\"; do\n        [ -f \"$f\"  ] || error \"Missing $f in FS after undelete\"\n    done\n\n    # check final size\n    local sz2=$(stat -c '%s' \"${undeleted_files[0]}\")\n    (( $sz1 == $sz2 )) || error \"final size $sz2 doesn't match $sz1\"\n\n    # Lustre/HSM specific checks\n    if (( $is_lhsm != 0 )); then\n        # files must be imported as 'released'\n        for f in \"${undeleted_files[@]}\"; do\n            $LFS hsm_state \"$f\" | grep released ||\n                error \"$f should be released\"\n        done\n\n        # check if restore command succeeds\n        for f in \"${undeleted_files[@]}\"; do\n            $LFS hsm_restore \"$f\" || error \"hsm_restore\"\n        done\n        wait_done 60 || error \"Restore timeout\"\n\n        # check final size\n        sz2=$(stat -c '%s' \"${undeleted_files[0]}\")\n        (( $sz1 == $sz2 )) || error \"final size $sz2 doesn't match $sz1\"\n\n        # files must be online now\n        for f in \"${undeleted_files[@]}\"; do\n            $LFS hsm_state \"$f\" | grep released && error \"$f should be online\"\n        done\n    fi\n}\n\nfunction purge_size_filesets\n{\n\tconfig_file=$1\n\tsleep_time=$2\n\tcount=$3\n\tpolicy_str=\"$4\"\n\n\tclean_logs\n\n\t# initial scan\n\t$RH -f $RBH_CFG_DIR/$config_file --scan --once -l DEBUG -L rh_chglogs.log\n    \tcheck_db_error rh_chglogs.log\n\n\t# fill 3 files of different sizes and mark them archived non-dirty\n\n\tj=1\n\tfor size in 0 1 10 200; do\n\t\techo \"1.$j-Writing files of size \" $(( $size*10 )) \"kB...\"\n\t\t((j=$j+1))\n\t\tfor i in `seq 1 $count`; do\n\t\t\tdd if=/dev/zero of=$RH_ROOT/file.$size.$i bs=10k count=$size >/dev/null 2>/dev/null || error \"writing file.$size.$i\"\n\n\t\t\tif (( $is_lhsm != 0 )); then\n\t\t\t\tflush_data\n\t\t\t\t$LFS hsm_archive $RH_ROOT/file.$size.$i || error \"lfs hsm_archive\"\n\t\t\t\twait_done 60 || error \"Copy 
timeout\"\n\t\t\tfi\n\t\tdone\n\tdone\n\n\tsleep 1\n\tif (( $no_log )); then\n\t\techo \"2-Scanning...\"\n\t\t$RH -f $RBH_CFG_DIR/$config_file --scan -l DEBUG -L rh_chglogs.log  --once || error \"\"\n\telse\n\t\techo \"2-Reading changelogs to update file status (after 1sec)...\"\n\t\t$RH -f $RBH_CFG_DIR/$config_file --readlog -l DEBUG -L rh_chglogs.log  --once || error \"\"\n\tfi\n    \tcheck_db_error rh_chglogs.log\n\n\tif (( $is_hsmlite != 0 )); then\n\t\t$RH -f $RBH_CFG_DIR/$config_file $SYNC_OPT -l DEBUG  -L rh_migr.log || error \"executing $CMD --sync\"\n    fi\n\n\techo \"3-Sleeping $sleep_time seconds...\"\n\tsleep $sleep_time\n\n\techo \"4-Applying purge policy ($policy_str)...\"\n\t# no purge expected here\n\t$RH -f $RBH_CFG_DIR/$config_file $PURGE_OPT -l DEBUG -L rh_purge.log --once || error \"\"\n\n\t# counting each matching policy $count of each\n\tfor policy in very_small mid_file default; do\n\t        nb_purge=`grep 'matching rule' rh_purge.log | grep $policy | wc -l`\n\t\tif (($nb_purge != $count)); then\n\t\t\terror \"********** TEST FAILED: $count release actions expected matching rule $policy, $nb_purge done\"\n\t\telse\n\t\t\techo \"OK: $nb_purge files released matching rule $policy\"\n\t\tfi\n\tdone\n\n\t# stop RH in background\n#\tkill %1\n}\n\nfunction test_maint_mode\n{\n\tconfig_file=$1\n\twindow=$2 \t\t# in seconds\n\tmigr_policy_delay=$3  \t# in seconds\n\tdelay_min=$4  \t\t# in seconds\n\tpolicy_str=\"$5\"\n\n\tif (( $is_lhsm + $is_hsmlite == 0 )); then\n\t\techo \"HSM test only: skipped\"\n\t\tset_skipped\n\t\treturn 1\n\tfi\n\n\tclean_logs\n\n    # initial scan\n    $RH -f $RBH_CFG_DIR/$config_file --scan -l DEBUG -L rh_chglogs.log  --once || error \"scanning filesystem\"\n\n\t# writing data\n\techo \"1-Writing files...\"\n\tfor i in `seq 1 4`; do\n\t\techo \"file.$i\" > $RH_ROOT/file.$i || error \"creating file $RH_ROOT/file.$i\"\n\tdone\n\tt0=`date +%s`\n\n\t# read changelogs\n\tif (( $no_log )); then\n\t\techo 
\"2-Scanning...\"\n\t\t$RH -f $RBH_CFG_DIR/$config_file --scan -l DEBUG -L rh_chglogs.log  --once || error \"scanning filesystem\"\n\telse\n\t\techo \"2-Reading changelogs...\"\n\t\t$RH -f $RBH_CFG_DIR/$config_file --readlog -l DEBUG -L rh_chglogs.log  --once || error \"reading changelogs\"\n\tfi\n    \tcheck_db_error rh_chglogs.log\n\n    \t# migrate (nothing must be migrated, no maint mode reported)\n\t$RH -f $RBH_CFG_DIR/$config_file --run=migration --target=all -l DEBUG -L rh_migr.log   || error \"executing --run=migration action\"\n\tgrep \"Maintenance time\" rh_migr.log && error \"No maintenance mode expected\"\n\tgrep \"Currently in maintenance mode\" rh_migr.log && error \"No maintenance mode expected\"\n\n\t# set maintenance mode (due is window +10s)\n\tmaint_time=`perl -e \"use POSIX; print strftime(\\\"%Y%m%d%H%M%S\\\" ,localtime($t0 + $window + 10))\"`\n\t$REPORT -f $RBH_CFG_DIR/$config_file --next-maintenance=$maint_time || error \"setting maintenance time\"\n\n\t# right now, migration window is in the future\n\t$RH -f $RBH_CFG_DIR/$config_file --run=migration --target=all -l DEBUG -L rh_migr.log   || error \"executing --run=migration action\"\n\tgrep \"maintenance window will start in\" rh_migr.log || error \"Future maintenance not report in the log\"\n\n\t# sleep enough to be in the maintenance window\n\tsleep 11\n\n\t# split maintenance window in 4\n\t((delta=$window / 4))\n\t(( $delta == 0 )) && delta=1\n\n\tarch_done=0\n\n\t# start migrations while we do not reach maintenance time\n\twhile (( `date +%s` < $t0 + $window + 10 )); do\n\t\tcp /dev/null rh_migr.log\n\t\t$RH -f $RBH_CFG_DIR/$config_file --run=migration --target=all -l DEBUG -L rh_migr.log   || error \"executing --run=migration action\"\n\t\tgrep \"Currently in maintenance mode\" rh_migr.log || error \"Should be in maintenance window now\"\n\n\t\t# check that files are migrated after min_delay and before the policy delay\n\t\tif grep \"$ARCH_STR\" rh_migr.log ; 
then\n\t\t\tarch_done=1\n\t\t\tnow=`date +%s`\n\t\t\t# delay_min must be elapsed\n\t\t\t(( $now >= $t0 + $delay_min )) || error \"file migrated before dealy min\"\n\t\t\t# migr_policy_delay must not been reached\n\t\t\t(( $now < $t0 + $migr_policy_delay )) || error \"file already reached policy delay\"\n\t\tfi\n\t\tsleep $delta\n\tdone\n\tcp /dev/null rh_migr.log\n\n\t(($arch_done == 1)) || error \"Files have not been migrated during maintenance window\"\n\n\t(( `date +%s` > $t0 + $window + 15 )) || sleep $(( $t0 + $window + 15 - `date +%s` ))\n\t# shouldn't be in maintenance now\n\t$RH -f $RBH_CFG_DIR/$config_file --run=migration --target=all -l DEBUG -L rh_migr.log   || error \"executing --run=migration action\"\n\tgrep \"Maintenance time is in the past\" rh_migr.log || error \"Maintenance window should be in the past now\"\n}\n\n# test reporting function with path filter\nfunction test_rh_report\n{\n\tconfig_file=$1\n\tdircount=$2\n\tsleep_time=$3\n\tdescr_str=\"$4\"\n\n\tclean_logs\n\n\tfor i in `seq 1 $dircount`; do\n\t\tmkdir $RH_ROOT/dir.$i\n\t\techo \"1.$i-Writing files to $RH_ROOT/dir.$i...\"\n\t\t# write i MB to each directory\n\t\tfor j in `seq 1 $i`; do\n\t\t\tdd if=/dev/zero of=$RH_ROOT/dir.$i/file.$j bs=1M count=1 >/dev/null 2>/dev/null || error \"writing $RH_ROOT/dir.$i/file.$j\"\n\t\tdone\n\tdone\n\n\techo \"1bis. 
Wait for IO completion...\"\n\tsync\n\n\tif (( $no_log )); then\n\t\techo \"2-Scanning...\"\n\t\t$RH -f $RBH_CFG_DIR/$config_file --scan -l DEBUG -L rh_chglogs.log  --once || error \"\"\n\telse\n\t\techo \"2-Reading changelogs...\"\n\t\t$RH -f $RBH_CFG_DIR/$config_file --readlog -l DEBUG -L rh_chglogs.log  --once || error \"\"\n\tfi\n    \tcheck_db_error rh_chglogs.log\n\n\techo \"3.Checking reports...\"\n\tfor i in `seq 1 $dircount`; do\n\t    # posix FS do some block preallocation, so we don't know the exact space used:\n    \t# compare with 'du -b' instead.\n        if [ -n \"$POSIX_MODE\" ]; then\n\t\t    real=`du -b -c $RH_ROOT/dir.$i/* | grep total | awk '{print $1}'`\n    \t\t#real=`echo \"$real*512\" | bc -l`\n        else\n            real=$(($i*1024*1024))\n        fi\n\t\t$REPORT -f $RBH_CFG_DIR/$config_file -l MAJOR --csv -U 1 -P \"$RH_ROOT/dir.$i/*\" > rh_report.log\n\t\tused=`tail -n 1 rh_report.log | cut -d \",\" -f 3`\n\t\tif (( $used != $real )); then\n\t\t\terror \": $used != $real\"\n\t\telse\n\t\t\techo \"OK: space used by files in $RH_ROOT/dir.$i is $real bytes\"\n        fi\n\tdone\n}\n\n#test report using accounting table\nfunction test_rh_acct_report\n{\n        config_file=$1\n        dircount=$2\n        # used in acct.conf\n        export ACCT_SWITCH=$3\n        descr_str=\"$4\"\n\n        clean_logs\n\n        for i in `seq 1 $dircount`; do\n                mkdir $RH_ROOT/dir.$i\n                echo \"1.$i-Writing files to $RH_ROOT/dir.$i...\"\n                # write i MB to each directory\n                for j in `seq 1 $i`; do\n                        dd if=/dev/zero of=$RH_ROOT/dir.$i/file.$j bs=1M count=1 >/dev/null 2>/dev/null || error \"$? writing $RH_ROOT/dir.$i/file.$j\"\n                done\n        done\n\n        echo \"1bis. 
Wait for IO completion...\"\n        sync\n\n        echo \"2-Scanning...\"\n        $RH -f $RBH_CFG_DIR/$config_file --scan -l DEBUG -L rh_scan.log  --once || error \"scanning filesystem\"\n\tcheck_db_error rh_scan.log\n\n        echo \"3.Checking reports...\"\n        $REPORT -f $RBH_CFG_DIR/$config_file -l MAJOR --csv --force-no-acct --top-user > rh_no_acct_report.log\n        $REPORT -f $RBH_CFG_DIR/$config_file -l MAJOR --csv --top-user > rh_acct_report.log\n\n        nbrowacct=` awk -F ',' 'END {print NF}' rh_acct_report.log`;\n        nbrownoacct=` awk -F ',' 'END {print NF}' rh_no_acct_report.log`;\n        for i in `seq 1 $nbrowacct`; do\n                rowchecked=0;\n                for j in `seq 1 $nbrownoacct`; do\n                        if [[ `cut -d \",\" -f $i rh_acct_report.log` == `cut -d \",\" -f $j rh_no_acct_report.log`  ]]; then\n                                rowchecked=1\n                                break\n                        fi\n                done\n                if (( $rowchecked == 1 )); then\n                        echo \"Row `awk -F ',' 'NR == 1 {print $'$i';}' rh_acct_report.log | tr -d ' '` OK\"\n                else\n                        error \"Row `awk -F ',' 'NR == 1 {print $'$i';}' rh_acct_report.log | tr -d ' '` is different with acct \"\n                fi\n        done\n        rm -f rh_no_acct_report.log\n        rm -f rh_acct_report.log\n}\n\n#test --split-user-groups option\nfunction test_rh_report_split_user_group\n{\n        config_file=$1\n        dircount=$2\n        option=$3\n        descr_str=\"$4\"\n\n        clean_logs\n\n        for i in `seq 1 $dircount`; do\n                mkdir $RH_ROOT/dir.$i || error \"creating directory $RH_ROOT/dir.$i\"\n                echo \"1.$i-Writing files to $RH_ROOT/dir.$i...\"\n                # write i MB to each directory\n                for j in `seq 1 $i`; do\n                        dd if=/dev/zero of=$RH_ROOT/dir.$i/file.$j bs=1M count=1 >/dev/null 
2>/dev/null || error \"writing $RH_ROOT/dir.$i/file.$j\"\n                done\n        done\n\n        echo \"1bis. Wait for IO completion...\"\n        sync\n\n        echo \"2-Scanning...\"\n        $RH -f $RBH_CFG_DIR/$config_file --scan -l DEBUG -L rh_scan.log  --once || error \"scanning filesystem\"\n\tcheck_db_error rh_scan.log\n\n        echo \"3.Checking reports...\"\n        $REPORT -f $RBH_CFG_DIR/$config_file -l MAJOR --csv --user-info $option | head --lines=-2 > rh_report_no_split.log\n        $REPORT -f $RBH_CFG_DIR/$config_file -l MAJOR --csv --user-info --split-user-groups $option | head --lines=-2 > rh_report_split.log\n\n        nbrow=` awk -F ',' 'END {print NF}' rh_report_split.log`\n        nb_uniq_user=`sed \"1d\" rh_report_split.log | cut -d \",\" -f 1 | uniq | wc -l `\n        for i in `seq 1 $nb_uniq_user`; do\n                check=1\n                user=`sed \"1d\" rh_report_split.log | awk -F ',' '{print $1;}' | uniq | awk 'NR=='$i'{ print }'`\n                for j in `seq 1 $nbrow`; do\n                        curr_row=`sed \"1d\" rh_report_split.log | awk -F ',' 'NR==1 { print $'$j'; }' | tr -d ' '`\n                        curr_row_label=` awk -F ',' 'NR==1 { print $'$j'; }' rh_report_split.log | tr -d ' '`\n                        if [[ \"$curr_row\" =~ \"^[0-9]*$\" && \"$curr_row_label\" != \"avg_size\" ]]; then\n\t\t\t\tif [[ `grep -e \"dir\" rh_report_split.log` ]]; then\n\t\t\t\t\tsum_split_dir=`egrep -e \"^$user.*dir.*\" rh_report_split.log | awk -F ',' '{array[$1]+=$'$j'}END{for (name in array) {print array[name]}}'`\n\t\t\t\t\tsum_no_split_dir=`egrep -e \"^$user.*dir.*\" rh_report_no_split.log | awk -F ',' '{array[$1]+=$'$((j-1))'}END{for (name in array) {print array[name]}}'`\n\t\t\t\t\tsum_split_file=`egrep -e \"^$user.*file.*\" rh_report_split.log | awk -F ',' '{array[$1]+=$'$j'}END{for (name in array) {print array[name]}}'`\n\t\t\t\t\tsum_no_split_file=`egrep -e \"^$user.*file.*\" rh_report_no_split.log | awk -F ',' 
'{array[$1]+=$'$((j-1))'}END{for (name in array) {print array[name]}}'`\n                                        if (( $sum_split_dir != $sum_no_split_dir || $sum_split_file != $sum_no_split_file )); then\n\t\t\t\t\t\terror \"Unexpected value: dircount=$sum_split_dir/$sum_no_split_dir, filecount: $sum_split_file/$sum_no_split_file\"\n\t\t\t\t\t\techo \"Split report: \"\n\t\t\t\t\t\tcat rh_report_split.log\n\t\t\t\t\t\techo \"Summed report: \"\n\t\t\t\t\t\tcat rh_report_no_split.log\n                                                check=0\n                                        fi\n\t\t\t\telse\n                                        sum_split=`egrep -e \"^$user\" rh_report_split.log | awk -F ',' '{array[$1]+=$'$j'}END{for (name in array) {print array[name]}}'`\n                                        sum_no_split=`egrep -e \"^$user\" rh_report_no_split.log | awk -F ',' '{array[$1]+=$'$((j-1))'}END{for (name in array) {print array[name]}}'`\n\t\t\t\t\tif (( $sum_split != $sum_no_split )); then\n\t\t\t\t\t\terror \"Unexpected value: filecount: $sum_split/$sum_no_split\"\n\t\t\t\t\t\techo \"Split report: \"\n\t\t\t\t\t\tcat rh_report_split.log\n\t\t\t\t\t\techo \"Summed report: \"\n\t\t\t\t\t\tcat rh_report_no_split.log\n                                        \tcheck=0\n                                \tfi\n\t\t\t\tfi\n                        fi\n                done\n                if (( $check == 1 )); then\n                        echo \"Report for user $user: OK\"\n                else\n                        error \"Report for user $user is wrong\"\n                fi\n        done\n\n        rm -f rh_report_no_split.log\n        rm -f rh_report_split.log\n\n}\n\n#test acct table and triggers creation\nfunction test_acct_table\n{\n        config_file=$1\n        dircount=$2\n        # used in acct.conf\n        export ACCT_SWITCH=$3\n        descr_str=\"$4\"\n\n        clean_logs\n\n        for i in `seq 1 $dircount`; do\n\t        mkdir $RH_ROOT/dir.$i\n    
            echo \"1.$i-Writing files to $RH_ROOT/dir.$i...\"\n                # write i MB to each directory\n                for j in `seq 1 $i`; do\n                        dd if=/dev/zero of=$RH_ROOT/dir.$i/file.$j bs=1M count=1 >/dev/null 2>/dev/null || error \"writing $RH_ROOT/dir.$i/file.$j\"\n                done\n        done\n\n        echo \"1bis. Wait for IO completion...\"\n        sync\n\n        echo \"2-Scanning...\"\n        $RH -f $RBH_CFG_DIR/$config_file --scan -l VERB -L rh_scan.log  --once || error \"scanning filesystem\"\n\tcheck_db_error rh_scan.log\n\n        if [[ \"$ACCT_SWITCH\" != \"no\" ]]; then\n            echo \"3.Checking acct table and triggers creation\"\n            grep -q \"Table ACCT_STAT created successfully\" rh_scan.log && echo \"ACCT table creation: OK\" || error \"creating ACCT table\"\n            grep -q \"Trigger ACCT_ENTRY_INSERT created successfully\" rh_scan.log && echo \"ACCT_ENTRY_INSERT trigger creation: OK\" || error \"creating ACCT_ENTRY_INSERT trigger\"\n            grep -q \"Trigger ACCT_ENTRY_UPDATE created successfully\" rh_scan.log && echo \"ACCT_ENTRY_INSERT trigger creation: OK\" || error \"creating ACCT_ENTRY_UPDATE trigger\"\n            grep -q \"Trigger ACCT_ENTRY_DELETE created successfully\" rh_scan.log && echo \"ACCT_ENTRY_INSERT trigger creation: OK\" || error \"creating ACCT_ENTRY_DELETE trigger\"\n        else\n            echo \"3. 
Checking no ACCT table or trigger have been created\"\n            grep -q \"Table ACCT_STAT created successfully\" rh_scan.log && error \"table ACCT created\"\n            grep -q \"Trigger ACCT_ENTRY_INSERT created successfully\" rh_scan.log && error \"ACCT_ENTRY_INSERT trigger created\"\n            grep -q \"Trigger ACCT_ENTRY_UPDATE created successfully\" rh_scan.log && error \"ACCT_ENTRY_INSERT trigger created\"\n            grep -q \"Trigger ACCT_ENTRY_DELETE created successfully\" rh_scan.log && error \"ACCT_ENTRY_INSERT trigger created\"\n        fi\n}\n\n#test accounting with borderline cases (NULL fields, etc...)\nfunction test_acct_borderline\n{\n    config_file=$1\n    export ACCT_SWITCH=$2\n\n    :>rh.log\n\n    # create DB schema\n    $RH -f $RBH_CFG_DIR/$config_file --alter-db -L rh.log\n\n    # insert 2 records with NULL uid, gid, size...\n    mysql $RH_DB -e \"INSERT INTO ENTRIES (id, type) VALUES ('id1','file')\" || error \"INSERT ERROR\"\n    mysql $RH_DB -e \"INSERT INTO ENTRIES (id, type) VALUES ('id2','file')\" || error \"INSERT ERROR\"\n    $REPORT -f $RBH_CFG_DIR/$config_file -u '*' -S --csv -q --szprof > rh_report.log || error \"report error\"\n    [ \"$DEBUG\" = \"1\" ] && cat rh_report.log && echo \"------\"\n\n    line_values=($(grep \"unknown,    unknown\" rh_report.log | tr ',' ' '))\n    [[ \"${line_values[3]}\" == 2 ]] || error \"expected count: 2\"\n    [[ \"${line_values[4]}\" == 0 ]] || error \"expected size: 0\"\n    [[ \"${line_values[7]}\" == 2 ]] || error \"expected count for size0: 2\"\n    [[ \"${line_values[8]}\" == 0 ]] || error \"expected count for size1-31: 0\"\n\n    # change size of this record (to sz32)\n    mysql $RH_DB -e \"UPDATE ENTRIES SET size=123 WHERE id='id1'\" || error \"UPDATE ERROR\"\n    $REPORT -f $RBH_CFG_DIR/$config_file -u '*' -S --csv -q --szprof > rh_report.log || error \"report error\"\n    [ \"$DEBUG\" = \"1\" ] && cat rh_report.log && echo \"------\"\n\n    line_values=($(grep \"unknown,    
unknown\" rh_report.log | tr ',' ' '))\n    [[ \"${line_values[3]}\" == 2 ]] || error \"expected count: 2\"\n    [[ \"${line_values[4]}\" == 123 ]] || error \"expected size: 123\"\n    [[ \"${line_values[7]}\" == 1 ]] || error \"expected count for size0: 1\"\n    [[ \"${line_values[9]}\" == 1 ]] || error \"expected count for size32-1K: 1\"\n\n    # change size of this record (to sz1M)\n    mysql $RH_DB -e \"UPDATE ENTRIES SET size=2000000 WHERE id='id1'\" || error \"UPDATE ERROR\"\n    $REPORT -f $RBH_CFG_DIR/$config_file -u '*' -S --csv -q --szprof > rh_report.log || error \"report error\"\n    [ \"$DEBUG\" = \"1\" ] && cat rh_report.log && echo \"------\"\n\n    line_values=($(grep \"unknown,    unknown\" rh_report.log | tr ',' ' '))\n    [[ \"${line_values[3]}\" == 2 ]] || error \"expected count: 2\"\n    [[ \"${line_values[4]}\" == 2000000 ]] || error \"expected size: 2000000\"\n    [[ \"${line_values[7]}\" == 1 ]] || error \"expected count for size0: 1\"\n    [[ \"${line_values[9]}\" == 0 ]] || error \"expected count for size32-1K: 0\"\n    [[ \"${line_values[12]}\" == 1 ]] || error \"expected count for size1M-32M: 1\"\n\n    # change record owner\n    mysql $RH_DB -e \"UPDATE ENTRIES SET uid='foo' WHERE id='id1'\" || error \"UPDATE ERROR\"\n    $REPORT -f $RBH_CFG_DIR/$config_file -u '*' -S --csv -q --szprof > rh_report.log || error \"report error\"\n    [ \"$DEBUG\" = \"1\" ] && cat rh_report.log && echo \"------\"\n\n    # only id2 remains with unknown uid/unknown gid\n    line_values=($(grep \"unknown,    unknown\" rh_report.log | tr ',' ' '))\n    [[ \"${line_values[3]}\" == 1 ]] || error \"expected count: 1\"\n    [[ \"${line_values[4]}\" == 0 ]] || error \"expected size: 0\"\n    [[ \"${line_values[7]}\" == 1 ]] || error \"expected count for size0: 1\"\n    [[ \"${line_values[9]}\" == 0 ]] || error \"expected count for size32-1K: 0\"\n    [[ \"${line_values[12]}\" == 0 ]] || error \"expected count for size1M-32M: 0\"\n\n    # only id1 is now foo/unknown 
gid\n    line_values=($(grep \" foo,    unknown\" rh_report.log | tr ',' ' '))\n    [[ \"${line_values[3]}\" == 1 ]] || error \"expected count: 1\"\n    [[ \"${line_values[4]}\" == 2000000 ]] || error \"expected size: 2000000\"\n    [[ \"${line_values[7]}\" == 0 ]] || error \"expected count for size0: 0\"\n    [[ \"${line_values[9]}\" == 0 ]] || error \"expected count for size32-1K: 0\"\n    [[ \"${line_values[12]}\" == 1 ]] || error \"expected count for size1M-32M: 1\"\n\n    # change record group\n    mysql $RH_DB -e \"UPDATE ENTRIES SET gid='bar' WHERE id='id1'\" || error \"UPDATE ERROR\"\n    $REPORT -f $RBH_CFG_DIR/$config_file -u '*' -S --csv -q --szprof > rh_report.log || error \"report error\"\n    [ \"$DEBUG\" = \"1\" ] && cat rh_report.log && echo \"------\"\n\n    # only id1 is now foo/bar\n    line_values=($(grep \" foo,        bar\" rh_report.log | tr ',' ' '))\n    [[ \"${line_values[3]}\" == 1 ]] || error \"expected count: 1\"\n    [[ \"${line_values[4]}\" == 2000000 ]] || error \"expected size: 2000000\"\n    [[ \"${line_values[7]}\" == 0 ]] || error \"expected count for size0: 0\"\n    [[ \"${line_values[9]}\" == 0 ]] || error \"expected count for size32-1K: 0\"\n    [[ \"${line_values[12]}\" == 1 ]] || error \"expected count for size1M-32M: 1\"\n\n    # nothing remains as foo/unknown (no report line, or 0)\n    line_values=($(grep \" foo,    unknown\" rh_report.log | tr ',' ' '))\n    [ -z \"$line_values\" ] || [[ \"${line_values[3]}\" == 0 ]] || error \"no entries expected for foo/unknown (${line_values[3]})\"\n\n    # delete records\n    mysql $RH_DB -e \"DELETE FROM ENTRIES WHERE id='id1'\" || error \"DELETE ERROR\"\n    mysql $RH_DB -e \"DELETE FROM ENTRIES WHERE id='id2'\" || error \"DELETE ERROR\"\n    $REPORT -f $RBH_CFG_DIR/$config_file -u '*' -S --csv -q --szprof > rh_report.log || error \"report error\"\n    [ \"$DEBUG\" = \"1\" ] && cat rh_report.log && echo \"------\"\n\n    # nothing remains as foo/bar or unknown/unknown\n    
line_values=($(grep \" foo,        bar\" rh_report.log | tr ',' ' '))\n    [ -z \"$line_values\" ] || [[ \"${line_values[3]}\" == 0 ]] || error \"no entries expected for foo/bar (${line_values[3]})\"\n    line_values=($(grep \"unknown,    unknown\" rh_report.log | tr ',' ' '))\n    [ -z \"$line_values\" ] || [[ \"${line_values[3]}\" == 0 ]] || error \"no entries expected for unknown/unknown (${line_values[3]})\"\n\n    # Add new records and check ACCT is correctly populated\n    mysql $RH_DB -e \"INSERT INTO ENTRIES (id, type) VALUES ('id3','file')\" || error \"INSERT ERROR\"\n    mysql $RH_DB -e \"INSERT INTO ENTRIES (id, type) VALUES ('id4','file')\" || error \"INSERT ERROR\"\n    mysql $RH_DB -e \"INSERT INTO ENTRIES (id, type, size) VALUES ('id5','file', 123)\" || error \"INSERT ERROR\"\n    mysql $RH_DB -e \"INSERT INTO ENTRIES (id, type, uid, gid, size) VALUES ('id6','file','foo','bar', 456)\" || error \"INSERT ERROR\"\n    mysql $RH_DB -e \"INSERT INTO ENTRIES (id, type, uid, gid, size) VALUES ('id7','file','foo','bar', 123456)\" || error \"INSERT ERROR\"\n    mysql $RH_DB -e \"DROP TABLE ACCT_STAT\" || error \"DROP ERROR\"\n\n    :>rh.log\n\n    $RH -f $RBH_CFG_DIR/$config_file --alter-db -L rh.log\n    # check if ACCT_STAT has been populated\n    grep \"Populating accounting table\" rh.log || error \"ACCT_STAT should have been populated\"\n\n    $REPORT -f $RBH_CFG_DIR/$config_file -u '*' -S --csv -q --szprof > rh_report.log || error \"report error\"\n    [ \"$DEBUG\" = \"1\" ] && cat rh_report.log && echo \"------\"\n\n    # check records\n    line_values=($(grep \"unknown,    unknown\" rh_report.log | tr ',' ' '))\n    [[ \"${line_values[3]}\" == 3 ]] || error \"expected count: 3\"\n    [[ \"${line_values[4]}\" == 123 ]] || error \"expected size: 123\"\n    [[ \"${line_values[7]}\" == 2 ]] || error \"expected count for size0: 2\"\n    [[ \"${line_values[9]}\" == 1 ]] || error \"expected count for size32-1K: 1\"\n\n    line_values=($(grep \" foo,        
bar\" rh_report.log | tr ',' ' '))\n    [[ \"${line_values[3]}\" == 2 ]] || error \"expected count: 2\"\n    [[ \"${line_values[4]}\" == 123912 ]] || error \"expected size: 123912\"\n    [[ \"${line_values[7]}\" == 0 ]] || error \"expected count for size0: 0\"\n    [[ \"${line_values[9]}\" == 1 ]] || error \"expected count for size32-1K: 1\"\n    [[ \"${line_values[11]}\" == 1 ]] || error \"expected count for size32K-1M: 1\"\n}\n\n#test dircount reports\nfunction test_dircount_report\n{\n\tconfig_file=$1\n\tdircount=$2\n\tdescr_str=\"$3\"\n\temptydir=5\n\n\tclean_logs\n\n\t# initial scan\n\t$RH -f $RBH_CFG_DIR/$config_file --scan -l DEBUG -L rh_chglogs.log  --once 2>/dev/null || error \"reading chglog\"\n\tcheck_db_error rh_chglogs.log\n\n\t# create several dirs with different entry count (+10 for each)\n\n    match_empty1=0\n    match_dir1=0\n\tfor i in `seq 1 $dircount`; do\n                mkdir $RH_ROOT/dir.$i\n                [[ $i == 1* ]] && ((match_dir1++))\n                echo \"1.$i-Creating files in $RH_ROOT/dir.$i...\"\n                # write i MB to each directory\n                for j in `seq 1 $((10*$i))`; do\n                        dd if=/dev/zero of=$RH_ROOT/dir.$i/file.$j bs=1 count=$i 2>/dev/null || error \"creating $RH_ROOT/dir.$i/file.$j\"\n                done\n        done\n\n    echo \"1bis. 
Creating empty directories...\"\n    # create 5 empty dirs\n    for i in `seq 1 $emptydir`; do\n        mkdir $RH_ROOT/empty.$i\n        [[ $i == 1* ]] && ((match_empty1++))\n    done\n\n\n\t# read changelogs\n\tif (( $no_log )); then\n\t\techo \"2-Scanning...\"\n\t\t$RH -f $RBH_CFG_DIR/$config_file --scan -l DEBUG -L rh_chglogs.log  --once 2>/dev/null || error \"reading chglog\"\n\telse\n\t\techo \"2-Reading changelogs...\"\n\t\t$RH -f $RBH_CFG_DIR/$config_file --readlog -l DEBUG -L rh_chglogs.log  --once 2>/dev/null || error \"reading chglog\"\n\tfi\n\tcheck_db_error rh_chglogs.log\n\n\techo \"3.Checking dircount report...\"\n\t# dircount+1 because $RH_ROOT may be returned\n\t$REPORT -f $RBH_CFG_DIR/$config_file --topdirs=$((dircount+1)) --csv > report.out\n\n    [ \"$DEBUG\" = \"1\" ] && cat report.out\n\n\t# check that dircount is right for each dir\n\n\t# check if $RH_ROOT is in topdirs. If so, check its position\n\tis_root=0\n\tline=`grep \"$RH_ROOT,\" report.out`\n\t[[ -n $line ]] && is_root=1\n\tif (( ! $is_root )); then\n\t\tid=`stat -c \"%D/%i\" $RH_ROOT/. 
| tr '[:lower:]' '[:upper:]'`\n\t\tline=`grep \"$id,\" report.out`\n\t\t[[ -n $line ]] && is_root=1\n\tfi\n\tif (( $is_root )); then\n\t\troot_rank=`echo $line | cut -d ',' -f 1 | tr -d ' '`\n\t\techo \"FS root $RH_ROOT was returned in top dircount (rank=$root_rank)\"\n\tfi\n\tfor i in `seq 1 $dircount`; do\n\t\tline=`grep \"$RH_ROOT/dir.$i,\" report.out` || error \"$RH_ROOT/dir.$i not found in report\"\n\t\trank=`echo $line | cut -d ',' -f 1 | tr -d ' '`\n\t\tcount=`echo $line | cut -d ',' -f 3 | tr -d ' '`\n\t\tavg=`echo $line | cut -d ',' -f 4 | tr -d ' '`\n        [ \"$DEBUG\" = \"1\" ] && echo \"rank=$rank, count=$count, avg_sz=$avg\"\n\t\t# if expected_rank >= root_rank, shift expected rank\n\t\t(($is_root )) && (($rank >= $root_rank)) && rank=$rank-1\n\t\t(($rank == $(( 20 - $i +1 )) )) || error \"Invalid rank $rank for dir.$i\"\n\t\t(($count == $(( 10 * $i )) )) || error \"Invalid dircount $count for dir.$i\"\n\t\t(($avg == $i)) || error \"Invalid avg size $avg for dir.$i ($i expected)\"\n\tdone\n\n    echo \"3b. Checking topdirs + filterpath\"\n\t$REPORT -f $RBH_CFG_DIR/$config_file --topdirs=$((dircount+1)) --filter-path=\"$RH_ROOT/dir.1\" --csv -q > report.out\n    [ \"$DEBUG\" = \"1\" ] && echo && cat report.out\n    # only one line expected\n    lines=$(wc -l report.out | awk '{print $1}')\n    (( $lines == 1 )) || error \"1 single dir expected in output (found $lines)\"\n    line=`grep \"$RH_ROOT/dir.1,\" report.out` || error \"$RH_ROOT/dir.1 not found in report\"\n\n    $REPORT -f $RBH_CFG_DIR/$config_file --topdirs=$((dircount+1)) --filter-path=\"$RH_ROOT/dir.1*\" --csv -q > report.out\n    [ \"$DEBUG\" = \"1\" ] && echo && cat report.out\n    lines=$(wc -l report.out | awk '{print $1}')\n    (( $lines == $match_dir1 )) || error \"$match_dir1 expected in output (found $lines)\"\n\n    echo \"4. 
Check empty dirs...\"\n    # check empty dirs\n    $REPORT -f $RBH_CFG_DIR/$config_file --oldest-empty-dirs --csv > report.out\n    [ \"$DEBUG\" = \"1\" ] && echo && cat report.out\n    for i in `seq 1 $emptydir`; do\n        grep \"$RH_ROOT/empty.$i\" report.out > /dev/null || error \"$RH_ROOT/empty.$i not found in empty dirs\"\n    done\n\n    # test with filterpath\n    $REPORT -f $RBH_CFG_DIR/$config_file --oldest-empty-dirs --csv -q --filter-path=\"$RH_ROOT/empty.1\" > report.out\n    [ \"$DEBUG\" = \"1\" ] && echo && cat report.out\n    # only one line expected\n    lines=$(wc -l report.out | awk '{print $1}')\n    (( $lines == 1 )) || error \"1 single dir expected in output (found $lines)\"\n    line=`grep \"$RH_ROOT/empty.1,\" report.out` || error \"$RH_ROOT/empty.1 not found in report\"\n\n    $REPORT -f $RBH_CFG_DIR/$config_file --oldest-empty-dirs --csv -q --filter-path=\"$RH_ROOT/empty.1*\" > report.out\n    [ \"$DEBUG\" = \"1\" ] && echo && cat report.out\n    lines=$(wc -l report.out | awk '{print $1}')\n    (( $lines == $match_empty1 )) || error \"$match_empty1 expected in output (found $lines)\"\n\n    [ \"$DEBUG\" = \"1\" ] || rm -f report.out\n}\n\n# test report options: avg_size, by-count, count-min and reverse\nfunction    test_sort_report\n{\n    config_file=$1\n    dummy=$2\n    descr_str=\"$3\"\n\n    clean_logs\n\n    # get 3 different users (from /etc/passwd)\n    if [[ $RBH_NUM_UIDGID = \"yes\" ]]; then\n        users=( $(head -n 3 /etc/passwd | cut -d ':' -f 3) )\n    else\n        users=( $(head -n 3 /etc/passwd | cut -d ':' -f 1) )\n    fi\n\n    echo \"1-Populating filesystem with test files...\"\n\n    # populate the filesystem with data of these users\n    for i in `seq 0 2`; do\n        u=${users[$i]}\n        mkdir $RH_ROOT/dir.$u || error \"creating directory  $RH_ROOT/dir.$u\"\n        if (( $i == 0 )); then\n            # first user:  20 files of size 1k to 20k\n            for f in `seq 1 20`; do\n                dd 
if=/dev/zero of=$RH_ROOT/dir.$u/file.$f bs=1k count=$f 2>/dev/null || error \"writing $f KB to $RH_ROOT/dir.$u/file.$f\"\n            done\n        elif (( $i == 1 )); then\n            # second user: 10 files of size 10k to 100k\n            for f in `seq 1 10`; do\n                dd if=/dev/zero of=$RH_ROOT/dir.$u/file.$f bs=10k count=$f 2>/dev/null || error \"writing $f x10 KB to $RH_ROOT/dir.$u/file.$f\"\n            done\n        else\n            # 3rd user:    5 files of size 100k to 500k\n            for f in `seq 1 5`; do\n                dd if=/dev/zero of=$RH_ROOT/dir.$u/file.$f bs=100k count=$f 2>/dev/null || error \"writing $f x100 KB to $RH_ROOT/dir.$u/file.$f\"\n            done\n        fi\n        chown -R $u $RH_ROOT/dir.$u || error \"changing owner of $RH_ROOT/dir.$u\"\n    done\n\n    # flush data to OSTs\n    sync\n\n    # scan!\n    echo \"2-Scanning...\"\n    $RH -f $RBH_CFG_DIR/$config_file --scan -l VERB -L rh_scan.log  --once || error \"scanning filesystem\"\n    check_db_error rh_scan.log\n\n    echo \"3-checking reports...\"\n\n    # sort users by volume\n    $REPORT -f $RBH_CFG_DIR/$config_file -l MAJOR --csv -q --top-user > report.out || error \"generating topuser report by volume\"\n    first=$(head -n 1 report.out | cut -d ',' -f 2 | tr -d ' ')\n    last=$(tail -n 1 report.out | cut -d ',' -f 2 | tr -d ' ')\n    [ $first = ${users[2]} ] || error \"first user expected in top volume: ${users[2]} (got $first)\"\n    [ $last = ${users[0]} ] || error \"last user expected in top volume: ${users[0]} (got $last)\"\n\n    # sort users by volume (reverse)\n    $REPORT -f $RBH_CFG_DIR/$config_file -l MAJOR --csv -q --top-user --reverse > report.out || error \"generating topuser report by volume (reverse)\"\n    first=$(head -n 1 report.out | cut -d ',' -f 2 | tr -d ' ')\n    last=$(tail -n 1 report.out | cut -d ',' -f 2 | tr -d ' ')\n    [ $first = ${users[0]} ] || error \"first user expected in top volume: ${users[0]} (got $first)\"\n    [ 
$last = ${users[2]} ] || error \"last user expected in top volume: ${users[2]} (got $last)\"\n\n    # sort users by count\n    $REPORT -f $RBH_CFG_DIR/$config_file -l MAJOR --csv -q --top-user --by-count > report.out || error \"generating topuser report by count\"\n    first=$(head -n 1 report.out | cut -d ',' -f 2 | tr -d ' ')\n    last=$(tail -n 1 report.out | cut -d ',' -f 2 | tr -d ' ')\n    [ $first = ${users[0]} ] || error \"first user expected in top count: ${users[0]} (got $first)\"\n    [ $last = ${users[2]} ] || error \"last user expected in top count: ${users[2]} (got $last)\"\n\n    # sort users by count (reverse)\n    $REPORT -f $RBH_CFG_DIR/$config_file -l MAJOR --csv -q --top-user --by-count --reverse > report.out || error \"generating topuser report by count (reverse)\"\n    first=$(head -n 1 report.out | cut -d ',' -f 2 | tr -d ' ')\n    last=$(tail -n 1 report.out | cut -d ',' -f 2 | tr -d ' ')\n    [ $first = ${users[2]} ] || error \"first user expected in top count: ${users[2]} (got $first)\"\n    [ $last = ${users[0]} ] || error \"last user expected in top count: ${users[0]} (got $last)\"\n\n    # sort users by avg size\n    $REPORT -f $RBH_CFG_DIR/$config_file -l MAJOR --csv -q --top-user --by-avgsize > report.out || error \"generating topuser report by avg size\"\n    first=$(head -n 1 report.out | cut -d ',' -f 2 | tr -d ' ')\n    last=$(tail -n 1 report.out | cut -d ',' -f 2 | tr -d ' ')\n    [ $first = ${users[2]} ] || error \"first user expected in top avg size: ${users[2]} (got $first)\"\n    [ $last = ${users[0]} ] || error \"last user expected in top avg size: ${users[0]} (got $last)\"\n\n    # sort users by avg size (reverse)\n    $REPORT -f $RBH_CFG_DIR/$config_file -l MAJOR --csv -q --top-user --by-avgsize --reverse > report.out || error \"generating topuser report by avg size (reverse)\"\n    first=$(head -n 1 report.out | cut -d ',' -f 2 | tr -d ' ')\n    last=$(tail -n 1 report.out | cut -d ',' -f 2 | tr -d ' ')\n    [ $first = 
${users[0]} ] || error \"first user expected in top avg size: ${users[0]} (got $first)\"\n    [ $last = ${users[2]} ] || error \"last user expected in top avg size: ${users[2]} (got $last)\"\n\n    # filter users by min count\n    # only user 0 and 1 have 10 entries or more\n    $REPORT -f $RBH_CFG_DIR/$config_file -l MAJOR --csv -q --top-user --count-min=10 > report.out || error \"generating topuser with at least 10 entries\"\n    (( $(wc -l report.out | awk '{print$1}') == 2 )) || error \"only 2 users expected with more than 10 entries\"\n    egrep \"^\\s+[0-9]+,\\s+${users[2]},\" report.out && error \"${users[2]} is not expected to have more than 10 entries\"\n\n    rm -f report.out\n}\n\nfunction count_action_params # log, pattern\n{\n    grep action_params $1 | grep \"$2\" | wc -l\n}\n\nfunction path_test\n{\n\tconfig_file=$1\n\tsleep_time=$2\n\tpolicy_str=\"$3\"\n\n\tif (( $is_lhsm + $is_hsmlite == 0 )); then\n\t\techo \"hsm test only: skipped\"\n\t\tset_skipped\n\t\treturn 1\n\tfi\n\n\tclean_logs\n\tif (( $no_log == 0 )); then\n\t\techo \"Initial scan of empty filesystem\"\n\t\t$RH -f $RBH_CFG_DIR/$config_file --scan -l DEBUG -L rh_chglogs.log  --once || error \"\"\n\tfi\n\n\t# create test tree\n\tmkdir -p $RH_ROOT/dir1\n\tmkdir -p $RH_ROOT/dir1/subdir1\n\tmkdir -p $RH_ROOT/dir1/subdir2\n\tmkdir -p $RH_ROOT/dir1/subdir3/subdir4\n\t# 2 matching files for fileclass absolute_path\n\techo \"data\" > $RH_ROOT/dir1/subdir1/A\n\techo \"data\" > $RH_ROOT/dir1/subdir2/A\n\t# 2 unmatching\n\techo \"data\" > $RH_ROOT/dir1/A\n\techo \"data\" > $RH_ROOT/dir1/subdir3/subdir4/A\n\n\tmkdir -p $RH_ROOT/dir2\n\tmkdir -p $RH_ROOT/dir2/subdir1\n\t# 2 matching files for fileclass absolute_tree\n\techo \"data\" > $RH_ROOT/dir2/X\n\techo \"data\" > $RH_ROOT/dir2/subdir1/X\n\n\tmkdir -p $RH_ROOT/one_dir/dir3\n\tmkdir -p $RH_ROOT/other_dir/dir3\n\tmkdir -p $RH_ROOT/yetanother_dir\n\tmkdir -p $RH_ROOT/dir3\n\tmkdir -p $RH_ROOT/one_dir/one_dir/dir3\n\t# 2 matching files for fileclass 
path_depth2\n\techo \"data\" > $RH_ROOT/one_dir/dir3/X\n\techo \"data\" > $RH_ROOT/other_dir/dir3/Y\n\t# 2 unmatching files for fileclass path_depth2\n\techo \"data\" > $RH_ROOT/dir3/X\n\techo \"data\" > $RH_ROOT/one_dir/one_dir/dir3/X\n\n\tmkdir -p $RH_ROOT/one_dir/dir4/subdir1\n\tmkdir -p $RH_ROOT/other_dir/dir4/subdir1\n\tmkdir -p $RH_ROOT/dir4\n\tmkdir -p $RH_ROOT/one_dir/one_dir/dir4\n\t# 3 matching files for fileclass tree_depth2\n\techo \"data\" > $RH_ROOT/one_dir/dir4/subdir1/X\n\techo \"data\" > $RH_ROOT/other_dir/dir4/subdir1/X\n    echo \"data\" > $RH_ROOT/yetanother_dir/dir4 # tree root should match too!\n\t# unmatching files for fileclass tree_depth2\n\techo \"data\" > $RH_ROOT/dir4/X\n\techo \"data\" > $RH_ROOT/one_dir/one_dir/dir4/X\n\n\tmkdir -p $RH_ROOT/dir5\n\tmkdir -p $RH_ROOT/subdir/dir5\n\t# 2 matching files for fileclass relative_path\n\techo \"data\" > $RH_ROOT/dir5/A\n\techo \"data\" > $RH_ROOT/dir5/B\n\t# 2 unmatching files for fileclass relative_path\n\techo \"data\" > $RH_ROOT/subdir/dir5/A\n\techo \"data\" > $RH_ROOT/subdir/dir5/B\n\n\tmkdir -p $RH_ROOT/dir6/subdir\n\tmkdir -p $RH_ROOT/subdir/dir6\n\t# 3 matching files for fileclass relative_tree\n\techo \"data\" > $RH_ROOT/dir6/A\n\techo \"data\" > $RH_ROOT/dir6/subdir/A\n    echo \"data\" > $RH_ROOT/file.6 # tree root should match too!\n\t# 2 unmatching files for fileclass relative_tree\n\techo \"data\" > $RH_ROOT/subdir/dir6/A\n\techo \"data\" > $RH_ROOT/subdir/dir6/B\n\n\n\tmkdir -p $RH_ROOT/dir7/subdir\n\tmkdir -p $RH_ROOT/dir71/subdir\n\tmkdir -p $RH_ROOT/subdir/subdir/dir7\n\tmkdir -p $RH_ROOT/subdir/subdir/dir72\n\t# 3 matching files for fileclass any_root_tree\n\techo \"data\" > $RH_ROOT/dir7/subdir/file\n\techo \"data\" > $RH_ROOT/subdir/subdir/dir7/file\n    echo \"data\" > $RH_ROOT/yetanother_dir/dir7 # tree root should match too!\n\t# 2 unmatching files for fileclass any_root_tree\n\techo \"data\" > $RH_ROOT/dir71/subdir/file\n\techo \"data\" > 
$RH_ROOT/subdir/subdir/dir72/file\n\n\tmkdir -p $RH_ROOT/dir8\n\tmkdir -p $RH_ROOT/dir81/subdir\n\tmkdir -p $RH_ROOT/subdir/subdir/dir8\n\t# 2 matching files for fileclass any_root_path\n\techo \"data\" > $RH_ROOT/dir8/file.1\n\techo \"data\" > $RH_ROOT/subdir/subdir/dir8/file.1\n\t# 3 unmatching files for fileclass any_root_path\n\techo \"data\" > $RH_ROOT/dir8/file.2\n\techo \"data\" > $RH_ROOT/dir81/file.1\n\techo \"data\" > $RH_ROOT/subdir/subdir/dir8/file.2\n\n\tmkdir -p $RH_ROOT/dir9/subdir/dir10/subdir\n\tmkdir -p $RH_ROOT/dir9/subdir/dir10x/subdir\n\tmkdir -p $RH_ROOT/dir91/subdir/dir10\n\t# 3 matching files for fileclass any_level_tree\n\techo \"data\" > $RH_ROOT/dir9/subdir/dir10/file\n\techo \"data\" > $RH_ROOT/dir9/subdir/dir10/subdir/file\n\techo \"data\" > $RH_ROOT/dir9/subdir/dir10x/dir10  # tree root should match too!\n\t# 2 unmatching files for fileclass any_level_tree\n\techo \"data\" > $RH_ROOT/dir9/subdir/dir10x/subdir/file\n\techo \"data\" > $RH_ROOT/dir91/subdir/dir10/file\n\n\tmkdir -p $RH_ROOT/dir11/subdir/subdir\n\tmkdir -p $RH_ROOT/dir11x/subdir\n\t# 2 matching files for fileclass any_level_path\n\techo \"data\" > $RH_ROOT/dir11/subdir/file\n\techo \"data\" > $RH_ROOT/dir11/subdir/subdir/file\n\t# 2 unmatching files for fileclass any_level_path\n\techo \"data\" > $RH_ROOT/dir11/subdir/file.x\n\techo \"data\" > $RH_ROOT/dir11x/subdir/file\n\n\n\techo \"1bis-Sleeping $sleep_time seconds...\"\n\tsleep $sleep_time\n\n\t# read changelogs\n\tif (( $no_log )); then\n\t\techo \"2-Scanning...\"\n\t\t$RH -f $RBH_CFG_DIR/$config_file --scan -l DEBUG -L rh_chglogs.log  --once || error \"\"\n\telse\n\t\techo \"2-Reading changelogs...\"\n\t\t$RH -f $RBH_CFG_DIR/$config_file --readlog -l DEBUG -L rh_chglogs.log  --once || error \"\"\n\tfi\n\tcheck_db_error rh_chglogs.log\n\n\n\techo \"3-Applying migration policy ($policy_str)...\"\n\t# start a migration files should not be migrated this time\n\t$RH -f $RBH_CFG_DIR/$config_file --run=migration 
--target=all -l DEBUG -L rh_migr.log   || error \"\"\n\n\t# count the number of file for each policy\n\tnb_pol1=$(count_action_params rh_migr.log class=absolute_path)\n\tnb_pol2=$(count_action_params rh_migr.log  class=absolute_tree)\n\tnb_pol3=$(count_action_params rh_migr.log  class=path_depth2)\n\tnb_pol4=$(count_action_params rh_migr.log  class=tree_depth2)\n\tnb_pol5=$(count_action_params rh_migr.log  class=relative_path)\n\tnb_pol6=$(count_action_params rh_migr.log  class=relative_tree)\n\n\tnb_pol7=$(count_action_params rh_migr.log  class=any_root_tree)\n\tnb_pol8=$(count_action_params rh_migr.log  class=any_root_path)\n\tnb_pol9=$(count_action_params rh_migr.log  class=any_level_tree)\n\tnb_pol10=$(count_action_params rh_migr.log  class=any_level_path)\n\n\tnb_unmatch=$(count_action_params rh_migr.log  class=unmatch)\n\n\t(( $nb_pol1 == 2 )) || error \"********** TEST FAILED: wrong count of matching files for policy 'absolute_path': $nb_pol1\"\n\t(( $nb_pol2 == 2 )) || error \"********** TEST FAILED: wrong count of matching files for policy 'absolute_tree': $nb_pol2\"\n\t(( $nb_pol3 == 2 )) || error \"********** TEST FAILED: wrong count of matching files for policy 'path_depth2': $nb_pol3\"\n\t(( $nb_pol4 == 3 )) || error \"********** TEST FAILED: wrong count of matching files for policy 'tree_depth2': $nb_pol4\"\n\t(( $nb_pol5 == 2 )) || error \"********** TEST FAILED: wrong count of matching files for policy 'relative_path': $nb_pol5\"\n\t(( $nb_pol6 == 3 )) || error \"********** TEST FAILED: wrong count of matching files for policy 'relative_tree': $nb_pol6\"\n\n\t(( $nb_pol7 == 3 )) || error \"********** TEST FAILED: wrong count of matching files for policy 'any_root_tree': $nb_pol7\"\n\t(( $nb_pol8 == 2 )) || error \"********** TEST FAILED: wrong count of matching files for policy 'any_root_path': $nb_pol8\"\n\t(( $nb_pol9 == 3 )) || error \"********** TEST FAILED: wrong count of matching files for policy 'any_level_tree': $nb_pol9\"\n\t(( $nb_pol10 == 
2 )) || error \"********** TEST FAILED: wrong count of matching files for policy 'any_level_path': $nb_pol10\"\n\t(( $nb_unmatch == 19 )) || error \"********** TEST FAILED: wrong count of unmatching files: $nb_unmatch\"\n\n\t(( $nb_pol1 == 2 )) && (( $nb_pol2 == 2 )) && (( $nb_pol3 == 2 )) && (( $nb_pol4 == 3 )) \\\n        \t&& (( $nb_pol5 == 2 )) && (( $nb_pol6 == 3 )) && (( $nb_pol7 == 3 )) \\\n\t\t&& (( $nb_pol8 == 2 )) && (( $nb_pol9 == 3 )) && (( $nb_pol10 == 2 )) \\\n\t\t&& (( $nb_unmatch == 19 )) \\\n\t\t&& echo \"OK: test successful\"\n}\n\n\n\nfunction update_test\n{\n    config_file=$1\n    event_updt_min=$2\n    update_period=$3\n    policy_str=\"$4\"\n\n    init=`date \"+%s\"`\n\n    LOG=rh_chglogs.log\n\n    if (( $no_log )); then\n        echo \"changelog disabled: skipped\"\n        set_skipped\n        return 1\n    fi\n\n    for i in `seq 1 3`; do\n        # force emptying the log\n        $LFS changelog_clear lustre-MDT0000 cl1 0\n\n        t=$(( `date \"+%s\"` - $init ))\n        echo \"loop 1.$i: many 'touch' within $event_updt_min sec (t=$t)\"\n        clean_logs\n\n        # start log reader (DEBUG level displays needed attrs)\n        $RH -f $RBH_CFG_DIR/$config_file --readlog -l DEBUG -L $LOG --detach \\\n            --pid-file=rh.pid 2>/dev/null || error \"\"\n\n        start=`date \"+%s\"`\n        # generate a lot of MTIME events within 'event_updt_min'\n        # => must only update once\n        while (( `date \"+%s\"` - $start < $event_updt_min - 2 )); do\n            touch $RH_ROOT/file\n            sleep 0.01\n        done\n\n        # force flushing log\n        sleep 1\n        pkill $PROC\n        sleep 1\n\n        # make sure all remaining records are read\n        $RH -f $RBH_CFG_DIR/$config_file --readlog --once -l DEBUG -L $LOG \\\n            2>/dev/null || error \"\"\n\n        t=$(( `date \"+%s\"` - $init ))\n\n        nb_getattr=`grep getattr=1 $LOG | wc -l`\n        egrep -e \"getattr=1|needed because\" $LOG\n        
echo \"nb attr update: $nb_getattr\"\n\n        expect_attr=1\n        (( $shook != 0 && $i == 1 )) && expect_attr=4 # .shook dir, .shook/restripe dir, .shook/locks dir\n\n        (( $nb_getattr == $expect_attr )) || error \"********** TEST FAILED: wrong count of getattr: $nb_getattr (t=$t), expected=$expect_attr\"\n        # the path may be retrieved at the first loop (at creation)\n        # but not during the next loop (as long as elapsed time < update_period)\n        if (( $i > 1 )) && (( `date \"+%s\"` - $init < $update_period )); then\n            nb_getpath=`grep getpath=1 $LOG | wc -l`\n            grep \"getpath=1\" $LOG\n            echo \"nb path update: $nb_getpath\"\n            (( $nb_getpath == 0 )) || error \"********** TEST FAILED: wrong count of getpath: $nb_getpath (t=$t), expected=0\"\n        fi\n\n        # wait for 5s to be fully elapsed\n        while (( `date \"+%s\"` - $start <= $event_updt_min )); do\n            sleep 0.1\n        done\n    done\n\n    init=`date \"+%s\"`\n\n    for i in `seq 1 3`; do\n        # force emptying the log\n        $LFS changelog_clear lustre-MDT0000 cl1 0\n\n        echo \"loop 2.$i: many 'rename' within $event_updt_min sec\"\n        clean_logs\n\n        # start log reader (DEBUG level displays needed attrs)\n        $RH -f $RBH_CFG_DIR/$config_file --readlog -l DEBUG -L $LOG \\\n            --detach --pid-file=rh.pid 2>/dev/null || error \"\"\n\n        start=`date \"+%s\"`\n        # generate a lot of TIME events within 'event_updt_min'\n        # => must only update once\n        while (( `date \"+%s\"` - $start < $event_updt_min - 2 )); do\n            mv $RH_ROOT/file $RH_ROOT/file.2\n            sleep 0.01\n            mv $RH_ROOT/file.2 $RH_ROOT/file\n            sleep 0.01\n        done\n\n        # force flushing log\n        sleep 1\n        pkill $PROC\n        sleep 1\n\n        # make sure all remaining records are read\n        $RH -f $RBH_CFG_DIR/$config_file --readlog --once -l DEBUG -L 
$LOG \\\n            2>/dev/null || error \"\"\n\n        nb_getpath=`grep getpath=1 $LOG | wc -l`\n        echo \"nb path update: $nb_getpath\"\n        # no getpath expected as rename records provide name info\n        (( $nb_getpath == 0 )) || error \"********** TEST FAILED: wrong count of getpath: $nb_getpath\"\n\n        # attributes may be retrieved at the first loop (at creation)\n        # but not during the next loop (as long as elapsed time < update_period)\n        if (( $i > 1 )) && (( `date \"+%s\"` - $init < $update_period )); then\n            nb_getattr=`grep getattr=1 $LOG | wc -l`\n            echo \"nb attr update: $nb_getattr\"\n            (( $nb_getattr == 0 )) || error \"********** TEST FAILED: wrong count of getattr: $nb_getattr\"\n        fi\n    done\n\n    # force emptying the log\n    $LFS changelog_clear lustre-MDT0000 cl1 0\n\n    echo \"Waiting $update_period seconds...\"\n    clean_logs\n\n    # check that getattr+getpath are performed after update_period, even if the event is not related:\n    $RH -f $RBH_CFG_DIR/$config_file --readlog -l DEBUG -L $LOG --detach \\\n        --pid-file=rh.pid 2>/dev/null || error \"\"\n    sleep $update_period\n\n    if (( $is_lhsm != 0 )); then\n        # chg something different that path or POSIX attributes\n        $LFS hsm_set --noarchive $RH_ROOT/file\n    else\n        touch $RH_ROOT/file\n    fi\n\n    # force flushing log\n    sleep 1\n    pkill $PROC\n    sleep 1\n\n    nb_getattr=`grep getattr=1 $LOG | wc -l`\n    echo \"nb attr update: $nb_getattr\"\n    (( $nb_getattr == 1 )) || error \"********** TEST FAILED: wrong count of getattr: $nb_getattr\"\n    nb_getpath=`grep getpath=1 $LOG | wc -l`\n    echo \"nb path update: $nb_getpath\"\n    (( $nb_getpath == 1 )) || error \"********** TEST FAILED: wrong count of getpath: $nb_getpath\"\n\n    if (( $is_lhsm != 0 )); then\n        # also check that the status is to be retrieved\n        nb_getstatus=`grep \"getstatus(lhsm)\" $LOG | wc -l`\n    
    echo \"nb status update: $nb_getstatus\"\n        (( $nb_getstatus == 1 )) || error \"********** TEST FAILED: wrong count of getstatus: $nb_getstatus\"\n    fi\n\n    # kill remaining event handler\n    sleep 1\n    pkill -9 $PROC\n}\n\nfunction periodic_class_match_migr\n{\n\tconfig_file=$1\n\tupdate_period=$2\n\tpolicy_str=\"$3\"\n\n\tif (( $is_lhsm + $is_hsmlite == 0 )); then\n\t\techo \"HSM test only: skipped\"\n\t\tset_skipped\n\t\treturn 1\n\tfi\n\n\tclean_logs\n\n\t#create test tree\n\ttouch $RH_ROOT/ignore1\n\ttouch $RH_ROOT/whitelist1\n\ttouch $RH_ROOT/migrate1\n\ttouch $RH_ROOT/default1\n\n\t# scan\n\t$RH -f $RBH_CFG_DIR/$config_file --scan --once -l DEBUG -L rh_chglogs.log\n\tcheck_db_error rh_chglogs.log\n\n\t# now apply policies\n\t$RH -f $RBH_CFG_DIR/$config_file --run=migration --target=all --dry-run -l FULL -L rh_migr.log  || error \"\"\n\n\t#we must have 4 lines like this: Entry xxx matches target file class\n\tnb_updt=`grep \"matches target file class\" rh_migr.log | wc -l`\n\tnb_whitelist=`grep \"matches ignored target\" rh_migr.log | wc -l`\n\tnb_migr_match=`grep \"matches the condition for policy rule 'migr_match'\" rh_migr.log | wc -l`\n\tnb_default=`grep \"matches the condition for policy rule 'default'\" rh_migr.log | wc -l`\n\n\t(( $nb_updt == 1 )) || error \"********** TEST FAILED: wrong count of matched fileclasses: $nb_updt/1\"\n\t(( $nb_whitelist == 2 )) || error \"********** TEST FAILED: wrong count of ignored entries : $nb_whitelist/2\"\n\t(( $nb_migr_match == 1 )) || error \"********** TEST FAILED: wrong count of files matching 'migr_match': $nb_migr_match\"\n\t(( $nb_default == 1 )) || error \"********** TEST FAILED: wrong count of files matching 'default': $nb_default\"\n\n        (( $nb_updt == 1 )) && (( $nb_whitelist == 2 ))  && (( $nb_migr_match == 1 )) && (( $nb_default == 1 )) \\\n\t\t&& echo \"OK: fileclass matching successful\"\n\n\techo \"Waiting $update_period sec...\"\n\tsleep $update_period\n\n\t# rematch entries: 
should update all fileclasses\n\tclean_logs\n\t$RH -f $RBH_CFG_DIR/$config_file --run=migration --target=all --dry-run -l FULL -L rh_migr.log  || error \"\"\n\n\tnb_updt=`grep \"matches target file class\" rh_migr.log | wc -l`\n\tnb_whitelist=`grep \"matches ignored target\" rh_migr.log | wc -l`\n\n\t(( $nb_updt == 1 )) || error \"********** TEST FAILED: wrong count of matched fileclasses: $nb_updt/1\"\n\t(( $nb_whitelist == 2 )) || error \"********** TEST FAILED: wrong count of ignored entries : $nb_whitelist/2\"\n\n    (( $nb_updt == 1 )) && (( $nb_whitelist == 2 )) && echo \"OK: all fileclasses updated\"\n}\n\nfunction policy_check_migr\n{\n    # check that migr fileclasses are properly matched at scan time,\n    # then at application time\n\tconfig_file=$1\n\tupdate_period=$2\n\tpolicy_str=\"$3\"\n\n\tif (( $is_lhsm + $is_hsmlite == 0 )); then\n\t\techo \"HSM test only: skipped\"\n\t\tset_skipped\n\t\treturn 1\n\tfi\n\n\tclean_logs\n\n\t#create test tree\n\ttouch $RH_ROOT/ignore1\n\ttouch $RH_ROOT/whitelist1\n\ttouch $RH_ROOT/migrate1\n\ttouch $RH_ROOT/default1\n\n    echo \"1. 
scan...\"\n\t# scan\n\t$RH -f $RBH_CFG_DIR/$config_file --scan --once -l DEBUG -L rh_chglogs.log || error \"scanning\"\n\tcheck_db_error rh_chglogs.log\n    # check that all files have been properly matched\n\n    $REPORT -f $RBH_CFG_DIR/$config_file --dump -q  > report.out\n    [ \"$DEBUG\" = \"1\" ] && cat report.out\n    st1=`grep ignore1 report.out | cut -d ',' -f 5 | tr -d ' '`\n    st2=`grep whitelist1 report.out  | cut -d ',' -f 5 | tr -d ' '`\n    st3=`grep migrate1 report.out  | cut -d ',' -f 5 | tr -d ' '`\n    st4=`grep default1 report.out  | cut -d ',' -f 5 | tr -d ' '`\n\n    [ \"$st1\" = \"to_be_ignored\" ] || error \"file should be in class 'to_be_ignored'\"\n    [ \"$st2\" = \"\" ] || error \"file should not match a class\"\n    [ \"$st3\" = \"to_be_migr\" ] || error \"file should be in class 'to_be_migr'\"\n    [ \"$st4\" = \"\" ] || error \"file should not match a class\"\n\n    echo \"2. migrate...\"\n\n\t# now apply policies\n\t$RH -f $RBH_CFG_DIR/$config_file --run=migration --target=all --dry-run -l FULL -L rh_migr.log  || error \"running migration\"\n\n    $REPORT -f $RBH_CFG_DIR/$config_file --dump -q  > report.out\n    [ \"$DEBUG\" = \"1\" ] && cat report.out\n    st1=`grep ignore1 report.out | cut -d ',' -f 5 | tr -d ' '`\n    st2=`grep whitelist1 report.out  | cut -d ',' -f 5 | tr -d ' '`\n    st3=`grep migrate1 report.out  | cut -d ',' -f 5 | tr -d ' '`\n    st4=`grep default1 report.out  | cut -d ',' -f 5 | tr -d ' '`\n\n    [ \"$st1\" = \"to_be_ignored\" ] || error \"file should be in class 'to_be_ignored'\"\n    [ \"$st2\" = \"\" ] || error \"file should not match a class\"\n    [ \"$st3\" = \"to_be_migr\" ] || error \"file should be in class 'to_be_migr'\"\n    [ \"$st4\" = \"\" ] || error \"file should not match a class\"\n\n\t#we must have 4 lines like this: \"Need to update fileclass (not set)\"\n\tnb_migr_match=`grep \"matches the condition for policy rule 'migr_match'\" rh_migr.log | wc -l`\n\tnb_default=`grep \"matches the 
condition for policy rule 'default'\" rh_migr.log | wc -l`\n\n\t(( $nb_migr_match == 1 )) || error \"********** TEST FAILED: wrong count of files matching 'migr_match': $nb_migr_match\"\n\t(( $nb_default == 1 )) || error \"********** TEST FAILED: wrong count of files matching 'default': $nb_default\"\n\n    (( $nb_migr_match == 1 )) && (( $nb_default == 1 )) \\\n\t\t&& echo \"OK: initial fileclass matching successful\"\n\n\t# rematch entries\n\tclean_logs\n\t$RH -f $RBH_CFG_DIR/$config_file --run=migration --target=all --dry-run -l FULL -L rh_migr.log  || error \"running $RH --run=migration\"\n\n    # check effectively migrated files\n    m1_arch=`grep \"$ARCH_STR\" rh_migr.log | grep migrate1 | wc -l`\n    d1_arch=`grep \"$ARCH_STR\" rh_migr.log | grep default1 | wc -l`\n    w1_arch=`grep \"$ARCH_STR\" rh_migr.log | grep whitelist1 | wc -l`\n    i1_arch=`grep \"$ARCH_STR\" rh_migr.log | grep ignore1 | wc -l`\n\n    (( $w1_arch == 0 )) || error \"whitelist1 should not have been migrated\"\n    (( $i1_arch == 0 )) || error \"ignore1 should not have been migrated\"\n    (( $m1_arch == 1 )) || error \"migrate1 should have been migrated\"\n    (( $d1_arch == 1 )) || error \"default1 should have been migrated\"\n\n    (( $w1_arch == 0 )) && (( $i1_arch == 0 )) && (( $m1_arch == 1 )) \\\n    && (( $d1_arch == 1 )) && echo \"OK: All expected files migrated\"\n\n    rm -f report.out\n}\n\nfunction policy_check_purge\n{\n    # check that purge fileclasses are properly matched at scan time,\n    # then at application time\n    config_file=$1\n    update_period=$2\n    policy_str=\"$3\"\n\n    stf=5\n\n    clean_logs\n\n    #create test tree\n    touch $RH_ROOT/ignore1\n    touch $RH_ROOT/whitelist1\n    touch $RH_ROOT/purge1\n    touch $RH_ROOT/default1\n\n    echo \"1. 
scan...\"\n    # scan\n    $RH -f $RBH_CFG_DIR/$config_file --scan --once -l DEBUG -L rh_chglogs.log || error \"scanning\"\n    check_db_error rh_chglogs.log\n    # check that all files have been properly matched\n\n    $REPORT -f $RBH_CFG_DIR/$config_file --dump -q  > report.out\n    [ \"$DEBUG\" = \"1\" ] && cat report.out\n\n    st1=`grep ignore1 report.out | cut -d ',' -f $stf | tr -d ' '`\n    st2=`grep whitelist1 report.out  | cut -d ',' -f $stf | tr -d ' '`\n    st3=`grep purge1 report.out  | cut -d ',' -f $stf | tr -d ' '`\n    st4=`grep default1 report.out  | cut -d ',' -f $stf | tr -d ' '`\n\n    [ \"$st1\" = \"to_be_ignored\" ] || error \"file should be in class 'to_be_ignored'\"\n    [ \"$st2\" = \"\" ] || error \"file should not match a fileclass\"\n    [ \"$st3\" = \"to_be_released\" ] || error \"file should be in class 'to_be_released'\"\n    [ \"$st4\" = \"\" ] || error \"file should not match a fileclass\"\n\n    if (( $is_lhsm + $is_hsmlite > 0 )); then\n        echo \"1bis. migrate...\"\n\n        # now apply policies\n        if (( $is_lhsm != 0 )); then\n                flush_data\n                $RH -f $RBH_CFG_DIR/$config_file $SYNC_OPT -l DEBUG -L rh_migr.log || error \"flushing data to backend\"\n\n                echo \"1ter. 
Waiting for end of data migration...\"\n                wait_done 120 || error \"Migration timeout\"\n        echo \"update db content...\"\n        $RH -f $RBH_CFG_DIR/$config_file --readlog --once -l DEBUG -L rh_chglogs.log || error \"reading chglog\"\n\n        elif (( $is_hsmlite != 0 )); then\n                $RH -f $RBH_CFG_DIR/$config_file $SYNC_OPT -l DEBUG -L rh_migr.log || error \"flushing data to backend\"\n        fi\n\n        # check that release class is still correct\n        $REPORT -f $RBH_CFG_DIR/$config_file --dump -q  > report.out\n        [ \"$DEBUG\" = \"1\" ] && cat report.out\n\n        st1=`grep ignore1 report.out | cut -d ',' -f $stf | tr -d ' '`\n        st2=`grep whitelist1 report.out  | cut -d ',' -f $stf | tr -d ' '`\n        st3=`grep purge1 report.out  | cut -d ',' -f $stf | tr -d ' '`\n        st4=`grep default1 report.out  | cut -d ',' -f $stf | tr -d ' '`\n\n        [ \"$st1\" = \"to_be_ignored\" ] || error \"file should be in class 'to_be_ignored'\"\n        [ \"$st2\" = \"\" ] || error \"file should not match a fileclass\"\n        [ \"$st3\" = \"to_be_released\" ] || error \"file should be in class 'to_be_released'\"\n        [ \"$st4\" = \"\" ] || error \"file should not match a fileclass\"\n    fi\n    sleep 1\n    echo \"2. 
purge/release...\"\n\n    # now apply policies\n    $RH -f $RBH_CFG_DIR/$config_file $PURGE_OPT -l FULL -L rh_purge.log --once || error \"running purge\"\n\n    $REPORT -f $RBH_CFG_DIR/$config_file --dump -q  > report.out\n    [ \"$DEBUG\" = \"1\" ] && cat report.out\n\n    st1=`grep ignore1 report.out | cut -d ',' -f $stf | tr -d ' '`\n    st2=`grep whitelist1 report.out  | cut -d ',' -f $stf | tr -d ' '`\n\n    [ \"$st1\" = \"to_be_ignored\" ] || error \"file should be in class 'to_be_ignored'\"\n    [ \"$st2\" = \"\" ] || error \"file should not match a fileclass: $st2\"\n\n    #we must have 2 lines like this: \"Need to update fileclass (not set)\"\n    nb_purge_match=`grep \"matches the condition for policy rule 'purge_match'\" rh_purge.log | wc -l`\n    nb_default=`grep \"matches the condition for policy rule 'default'\" rh_purge.log | wc -l`\n\n    (( $nb_purge_match == 1 )) || error \"********** TEST FAILED: wrong count of files matching 'purge_match': $nb_purge_match\"\n    (( $nb_default == 1 )) || error \"********** TEST FAILED: wrong count of files matching 'default': $nb_default\"\n\n    (( $nb_purge_match == 1 )) && (( $nb_default == 1 )) \\\n        && echo \"OK: initial fileclass matching successful\"\n\n    # check effectively purged files\n    p1_arch=`grep \"$REL_STR\" rh_purge.log | grep purge1 | wc -l`\n    d1_arch=`grep \"$REL_STR\" rh_purge.log | grep default1 | wc -l`\n    w1_arch=`grep \"$REL_STR\" rh_purge.log | grep whitelist1 | wc -l`\n    i1_arch=`grep \"$REL_STR\" rh_purge.log | grep ignore1 | wc -l`\n\n    (( $w1_arch == 0 )) || error \"whitelist1 should not have been purged\"\n    (( $i1_arch == 0 )) || error \"ignore1 should not have been purged\"\n    (( $p1_arch == 1 )) || error \"purge1 should have been purged\"\n    (( $d1_arch == 1 )) || error \"default1 should have been purged\"\n\n    (( $w1_arch == 0 )) && (( $i1_arch == 0 )) && (( $p1_arch == 1 )) \\\n        && (( $d1_arch == 1 )) && echo \"OK: All expected purge actions 
triggered\"\n\n    if (( $is_lhsm + $shook > 0 )); then\n        st1=$(grep purge1 report.out | cut -d ',' -f 6 | tr -d ' ')\n        st2=$(grep default1 report.out | cut -d ',' -f 6 | tr -d ' ')\n\n        [ \"$st1\" = \"released\" ] || [ \"$st1\" = \"release_pending\" ] ||\n            error \"purge1 should be 'released' or 'release_pending' (actual: $st1)\"\n        [ \"$st2\" = \"released\" ] || [ \"$st2\" = \"release_pending\" ] ||\n            error \"default1 should be 'released' or 'release_pending' (actual: $st2)\"\n    else\n        # entries should be removed\n        grep purge1 report.out && error \"purge1 should have been removed from DB\"\n        grep default1 report.out && error \"default1 should have been removed from DB\"\n\n        [ -f $RH_ROOT/purge1 ] && error \"purge1 should have been removed from filesystem\"\n        [ -f $RH_ROOT/default1 ] && error \"default1 should have been removed from filesystem\"\n    fi\n\n    rm -f report.out\n\n    # check that purge fileclasses are properly matched at scan time,\n    # then at application time\n    return 0\n}\n\n\nfunction periodic_class_match_purge\n{\n\tconfig_file=$1\n\tupdate_period=$2\n\tpolicy_str=\"$3\"\n\n\tclean_logs\n\n\techo \"Writing and archiving files...\"\n\t#create test tree of archived files\n\tfor file in ignore1 whitelist1 purge1 default1 ; do\n\t\ttouch $RH_ROOT/$file\n\n\t\tif (( $is_lhsm != 0 )); then\n\t\t\tflush_data\n\t\t\t$LFS hsm_archive $RH_ROOT/$file\n\t\tfi\n\tdone\n\tif (( $is_lhsm != 0 )); then\n\t\twait_done 60 || error \"Copy timeout\"\n\tfi\n\n\techo \"FS Scan...\"\n\tif (( $is_hsmlite != 0 )); then\n\t\t$RH -f $RBH_CFG_DIR/$config_file --scan $SYNC_OPT -l DEBUG  -L rh_migr.log || error \"executing $CMD --sync\"\n\t\tcheck_db_error rh_migr.log\n\t    else\n   \t\t$RH -f $RBH_CFG_DIR/$config_file --scan --once -l DEBUG -L rh_chglogs.log || error \"executing $CMD --scan\"\n\t\tcheck_db_error rh_chglogs.log\n\tfi\n\n\t# now apply policies\n\t$RH -f 
$RBH_CFG_DIR/$config_file $PURGE_OPT --dry-run -l FULL -L rh_purge.log  || error \"\"\n\n\tnb_updt=`grep \"matches target file class\" rh_purge.log | wc -l`\n\tnb_whitelist=`grep \"matches ignored target\" rh_purge.log | wc -l`\n\tnb_purge_match=`grep \"matches the condition for policy rule 'purge_match'\" rh_purge.log | wc -l`\n\tnb_default=`grep \"matches the condition for policy rule 'default'\" rh_purge.log | wc -l`\n\n\t# we must have 4 lines like this: \"Need to update fileclass (not set)\"\n\t(( $nb_updt == 1 )) || error \"********** TEST FAILED: wrong count of matched fileclasses: $nb_updt/1\"\n\t(( $nb_whitelist == 2 )) || error \"********** TEST FAILED: wrong count of ignored entries : $nb_whitelist/2\"\n\t(( $nb_purge_match == 1 )) || error \"********** TEST FAILED: wrong count of files matching 'purge_match': $nb_purge_match\"\n\t(( $nb_default == 1 )) || error \"********** TEST FAILED: wrong count of files matching 'default': $nb_default\"\n\n        (( $nb_updt == 1 )) && (( $nb_whitelist == 2 )) && (( $nb_purge_match == 1 )) && (( $nb_default == 1 )) \\\n\t\t&& echo \"OK: initial fileclass matching successful\"\n\n\t# TMP_FS_MGR:  whitelisted status is always checked at scan time (?)\n\t# \t2 entries are new (default and to_be_released)\n\tif (( $is_lhsm + $is_hsmlite == 0 )); then\n\t\talready=0\n\t\tnew=2\n\telse\n\t\talready=0\n\t\tnew=0\n\tfi\n\n\t# update db content and rematch entries: should update all fileclasses\n\tclean_logs\n\t$RH -f $RBH_CFG_DIR/$config_file --scan --once -l DEBUG -L rh_chglogs.log\n\tcheck_db_error rh_chglogs.log\n\n\techo \"Waiting $update_period sec...\"\n\tsleep $update_period\n\n\t$RH -f $RBH_CFG_DIR/$config_file $PURGE_OPT --dry-run -l FULL -L rh_purge.log  || error \"\"\n\n\tnb_updt=`grep \"matches target file class\" rh_purge.log | wc -l`\n\tnb_whitelist=`grep \"matches ignored target\" rh_purge.log | wc -l`\n\n\t(( $nb_updt == 1 )) || error \"********** TEST FAILED: wrong count of matched fileclasses: 
$nb_updt/1\"\n\t(( $nb_whitelist == 2 )) || error \"********** TEST FAILED: wrong count of ignored entries : $nb_whitelist/2\"\n\n    (( $nb_updt == 1 )) && (( $nb_whitelist == 2 )) && echo \"OK: all fileclasses updated\"\n}\n\nfunction wait_for_size_update()\n{\n    local file=$1\n    local expected_size=$2\n    local size=$($FIND \"$file\" -f $RBH_CFG_DIR/$config_file -printf \"%s\\n\")\n    local count=0\n\n    while [[ $size != $expected_size ]]; do\n        [ \"$DEBUG\" = \"1\" ] &&\n            $FIND \"$file\" -f $RBH_CFG_DIR/$config_file -ls\n\n        sleep .5\n        size=$($FIND \"$file\" -f $RBH_CFG_DIR/$config_file -printf \"%s\\n\")\n        if ((count++ == 30)); then\n            break\n        fi\n    done\n}\n\nfunction test_size_updt\n{\n\tconfig_file=$1\n\tevent_read_delay=$2\n\tpolicy_str=\"$3\"\n    cl_delay=6 # time between action and its impact on rbh-report\n\n\tinit=`date \"+%s\"`\n\n\tLOG=rh_chglogs.log\n\n\tif (( $no_log )); then\n\t\techo \"changelog disabled: skipped\"\n\t\tset_skipped\n\t\treturn 1\n\tfi\n\n    # create a log reader\n\t$RH -f $RBH_CFG_DIR/$config_file --readlog -l DEBUG -L $LOG --detach ||\n        error \"starting chglog reader\"\n    sleep 1\n\n    # create a small file and write it (20 bytes, incl \\n)\n    echo \"qqslmdkqslmdkqlmsdk\" > $RH_ROOT/file\n    wait_for_size_update $RH_ROOT/file 20\n    size=$($FIND $RH_ROOT/file -f $RBH_CFG_DIR/$config_file -printf \"%s\\n\")\n    if (( $size != 20 )); then\n        error \"unexpected size value: $size != 20 (is Lustre version < 2.3?)\"\n    fi\n\n    # now appending the file (+20 bytes, incl \\n)\n    echo \"qqslmdkqslmdkqlmsdk\" >> $RH_ROOT/file\n    wait_for_size_update $RH_ROOT/file 40\n    size=$($FIND $RH_ROOT/file -f $RBH_CFG_DIR/$config_file -printf \"%s\\n\")\n    if (( $size != 40 )); then\n        error \"unexpected size value: $size != 40\"\n    fi\n\n    pkill -9 $PROC\n}\n\n#used by test_action_params\nfunction check_action_param # $log $id $name 
$value\n{\n    local log=$1\n    local id=$2\n    local name=$3\n    local val=$4\n\n    line=$(grep 'action_params:' $log | grep \"\\[$id\\]\" | grep \" $name=\")\n    if [ -z \"$line\" ]; then\n        error \"parameter '$name' not found for $id\"\n        return 1\n    fi\n\n    local found=$(echo \"$line\" | sed -e \"s/.* $name=\\([^,]*\\).*/\\1/\")\n    if [[ \"$val\" == \"$found\" ]]; then\n        echo \"OK: $id: $name = $val\"\n        return 0\n    else\n        error \"$id: invalid value for parameter '$name': got '$found', '$val' expected.\"\n        return 1\n    fi\n}\n\n#used by test_action_params\nfunction check_rule_and_class #  $log $path $rule $class\n{\n    local log=$1\n    local path=$2\n    local rule=$3\n    local class=$4\n\n    if [[ -n \"$class\" ]]; then\n        grep \"success for '$path', matching rule '$rule' (fileset=$class)\" $log || error \"action success not found for $path, rule $rule, class $class\"\n    else\n        grep \"success for '$path', matching rule '$rule',\" $log || error \"action success not found for $path, rule $rule\"\n    fi\n}\n\n\n#used by test_action_params\nfunction check_action_patterns # $log $id $pattern...\n{\n    local log=$1\n    local id=$2\n    shift 2\n\n    local act=$(grep 'action:' $log | grep \"\\[$id\\]\"| sed -e \"s/.*cmd(\\(.*\\))$/\\1/\" | tr -d \"'\")\n\n    for pattern in \"$@\"; do\n        echo \"$act\" | grep -- \"$pattern\" || error \"pattern '$pattern' not found in cmd '$act'\"\n    done\n}\n\n#used by test_action_params\nfunction check_action_function # $log $id $name\n{\n    local log=$1\n    local id=$2\n    local name=$3\n\n    grep \"action:\" $log | grep \"\\[$id\\]\" | grep $name || error \"action $name expected\"\n}\n\nfunction test_action_params\n{\n    config_file=$1\n\n\tclean_logs\n\n    if (( $is_lhsm == 0 )); then\n       echo \"This test uses Lustre/HSM actions. 
Skipping for current test mode $PURPOSE.\"\n       set_skipped\n       return 1\n    fi\n\n    # create 1 file in each class + default\n    touch $RH_ROOT/file.1a\n    touch $RH_ROOT/file.1b\n    touch $RH_ROOT/file.2\n    touch $RH_ROOT/file.3\n    touch $RH_ROOT/file.4\n\n    id1a=$(get_id $RH_ROOT/file.1a)\n    id1b=$(get_id $RH_ROOT/file.1b)\n    id2=$(get_id $RH_ROOT/file.2)\n    id3=$(get_id $RH_ROOT/file.3)\n    id4=$(get_id $RH_ROOT/file.4)\n    idroot=$(get_id $RH_ROOT)\n\n\t$RH -f $RBH_CFG_DIR/$config_file --scan --once -l DEBUG -L rh_scan.log || error \"scan error\"\n    check_db_error rh_scan.log\n\n    # check classinfo report\n    $REPORT -f $RBH_CFG_DIR/$config_file --class-info -q > rh_report.log || error \"report error\"\n    # find_valueInCSVreport $logFile $typeValues $countValues $colSearch\n    find_valueInCSVreport rh_report.log class1a 1  2 || error \"invalid count for class1a\"\n    find_valueInCSVreport rh_report.log class1b 1  2 || error \"invalid count for class1b\"\n    find_valueInCSVreport rh_report.log class2  1  2 || error \"invalid count for class2\"\n    find_valueInCSVreport rh_report.log class3  1  2 || error \"invalid count for class3\"\n\n    # run migration policy (force run to ignore rule time condition)\n    $RH -f $RBH_CFG_DIR/$config_file --run=migration --force-all -l DEBUG -L rh_migr.log || error \"policy run error\"\n    check_db_error rh_migr.log\n\n    [ \"$DEBUG\" = \"1\" ] && egrep -e 'action:|action_params:' rh_migr.log\n\n    # check selected action and action_params for each file\n\n    # file.1a (class1a, rule migr1)\n    #   'prio = 4' from fileclass\n    #   'cos = 2' from rule\n    #   'archive_id = 1' (policy default)\n    #   'mode = trigger' from trigger\n    # action: lfs hsm_archive -a {archive_id} {fullpath} --data cos={cos},class={fileclass}\"\n    ## BUG: the whole argument should be quoted (not the replaced part) in \"cos={cos}\").\n    check_action_param rh_migr.log $id1a prio 4\n    
check_action_param rh_migr.log $id1a cos 2\n    check_action_param rh_migr.log $id1a archive_id 1\n    check_action_param rh_migr.log $id1a mode trigger\n    check_rule_and_class rh_migr.log $RH_ROOT/file.1a \"migr1\" \"class1a\"\n    check_action_patterns rh_migr.log $id1a \"-a 1\" \"$RH_ROOT/file.1a\" \"cos=2\" \"class=class1a\"\n\n    # file.1b (class1b, rule migr1)\n    #   'prio = 5' from fileclass\n    #   'cos = 2' from rule\n    #   'archive_id = 1' (policy default)\n    #   'mode = trigger' from trigger\n    check_action_param rh_migr.log $id1b prio 5\n    check_action_param rh_migr.log $id1b cos 2\n    check_action_param rh_migr.log $id1b archive_id 1\n    check_action_param rh_migr.log $id1b mode trigger\n    check_rule_and_class rh_migr.log $RH_ROOT/file.1b \"migr1\" \"class1b\"\n    check_action_patterns rh_migr.log $id1b \"-a 1\" \"$RH_ROOT/file.1b\" \"cos=2\" \"class=class1b\"\n\n    # file.2 (class2, rule migr2)\n    #   'cos = 4' from fileclass (override 'cos = 3' from rule).\n    #   'archive_id = 1' (policy default)\n    #   'mode = trigger' from trigger\n    # action: lhsm.archive (rule)\n    check_action_param rh_migr.log $id2 cos 4\n    check_action_param rh_migr.log $id2 archive_id 1\n    check_action_param rh_migr.log $id2 mode trigger\n    check_rule_and_class rh_migr.log $RH_ROOT/file.2 \"migr2\" \"class2\"\n    check_action_function rh_migr.log \"$id2\" \"lhsm.archive\"\n\n    # file.3 (class3, rule migr3)\n    #   'archive_id = 2' (override 'archive_id = 1' from policy)\n    #   'cos = 1' (policy default)\n    #   'mode = over1' (override 'mode = trigger' from trigger)\n    # action from policy: lfs hsm_archive -a {archive_id} /mnt/lustre/.lustre/fid/{fid} --data cos={cos}\n    check_action_param rh_migr.log $id3 cos 1\n    check_action_param rh_migr.log $id3 archive_id 2\n    check_action_param rh_migr.log $id3 mode over1\n    check_action_patterns rh_migr.log $id3 \"-a 2\" \"$RH_ROOT/.lustre/fid/$id3\" \"cos=1\"\n\n    # file.4 (no 
class, rule default)\n    #   'archive_id = 1' (policy default)\n    #   'cos = 1' (policy default)\n    #   'mode = over2' (override 'mode = trigger' from trigger)\n    check_action_param rh_migr.log $id4 cos 1\n    check_action_param rh_migr.log $id4 archive_id 1\n    check_action_param rh_migr.log $id4 mode over2\n    check_rule_and_class rh_migr.log $RH_ROOT/file.4 \"default\" \"\"\n    check_action_patterns rh_migr.log $id4 \"-a 1\" \"$RH_ROOT/.lustre/fid/$id4\" \"cos=1\"\n\n    if (($is_lhsm != 0)); then\n\t\twait_done 60 || error \"Copy timeout\"\n    fi\n\n    # check copy completion\n    $RH -f $RBH_CFG_DIR/$config_file --readlog --once -l EVENT -L rh_chglogs.log || error \"readlog\"\n    check_db_error rh_chglogs.log\n\n\tclean_logs\n\n    # now purge all (not by trigger)\n    ## BUG: don't quote again if an argument is already quoted\n    ## BUG: don't redirect &1 and &2 if the command already does\n    $RH -f $RBH_CFG_DIR/$config_file $PURGE_OPT --force-all -l DEBUG -L rh_purge.log || error \"policy run error\"\n    check_db_error rh_purge.log\n\n    [ \"$DEBUG\" = \"1\" ] && egrep -e 'action:|action_params:' rh_purge.log\n\n    # no mode expected (not triggered)\n    grep \"action_params:\" rh_purge.log | grep \" mode=\" && error \"no mode parameter expected\"\n\n    # file.1a (class1a, rule purge1)\n    #   'arg = 2' from rule\n    # action: rm -f {fullpath}\n    check_action_param rh_purge.log $id1a arg 2\n    check_rule_and_class rh_purge.log $RH_ROOT/file.1a \"purge1\" \"class1a\"\n    check_action_patterns rh_purge.log $id1a \"rm -f\" \"$RH_ROOT/file.1a\"\n\n    # file.1b (class1b, rule purge1)\n    #   'arg = 55' from fileclass\n    # action: rm -f {fullpath}\n    check_action_param rh_purge.log $id1b arg 55\n    check_rule_and_class rh_purge.log $RH_ROOT/file.1b \"purge1\" \"class1b\"\n    check_action_patterns rh_purge.log $id1b \"rm -f\" \"$RH_ROOT/file.1b\"\n\n    # file.2 (class2, rule purge2)\n    #   'arg = 3' from rule\n    # action: echo 
'{fid}' '{rule}' '{arg}'  >> /tmp/purge.log\n    check_action_param rh_purge.log $id2 arg 3\n    check_action_param rh_purge.log $id2 grouping \"$idroot\"\n    check_rule_and_class rh_purge.log $RH_ROOT/file.2 \"purge2\" \"class2\"\n    check_action_patterns rh_purge.log $id2 \"echo\" \"$id2\" \"purge2\" \"$idroot\" \"3\"\n    # FIXME grep the line in /tmp/purge.log\n\n    # file.3 (class3, rule purge3)\n    #   'arg = 1' from policy\n    # action: lhsm.release (policy definition default)\n    check_action_param rh_purge.log $id3 arg 1\n    check_rule_and_class rh_purge.log $RH_ROOT/file.3 \"purge3\" \"class3\"\n    check_action_function rh_purge.log \"$id3\" \"lhsm.release\"\n\n    # file.4 (rule default)\n    #   'arg = 4' from rule\n    # action: lhsm.release (policy definition default)\n    check_action_param rh_purge.log $id4 arg 4\n    check_rule_and_class rh_purge.log $RH_ROOT/file.4 \"default\" \"\"\n    check_action_patterns rh_purge.log $id4 \"echo\" \"$id4\" \"default\" \"4\"\n}\n\nfunction test_nlink_crit\n{\n    config_file=$1\n\n    logfile=rh_purge.log\n    rm -f $logfile\n\n    # create test entries\n    touch $RH_ROOT/file.1\n    touch $RH_ROOT/file.2\n    ln $RH_ROOT/file.2 $RH_ROOT/file.3\n\n    # scan, then run the cleanup policy\n    $RH -f $RBH_CFG_DIR/$config_file --scan --once 2>&1 > /dev/null\n    $RH -f $RBH_CFG_DIR/$config_file --run=cleanup -L $logfile -l FULL -O 2>/dev/null\n\n    check_rule_and_class $logfile $RH_ROOT/file.1 \"file_cleanup\" \"single\"\n\n    # it may be file.2 or file.3 (they are 2 hardlinks): test both\n    grep \"success for '$RH_ROOT/file.2', matching rule 'link_cleanup' (fileset=dual)\" $logfile ||\n    grep \"success for '$RH_ROOT/file.3', matching rule 'link_cleanup' (fileset=dual)\" $logfile ||\n        error \"action success not found for file.2 or file.3, rule link_cleanup, class dual\"\n\n    # update fileclass and run cleanup again\n    :> $logfile\n    $RH -f $RBH_CFG_DIR/$config_file --scan --once 2>&1 > 
/dev/null\n    $RH -f $RBH_CFG_DIR/$config_file --run=cleanup -L $logfile -l FULL -O 2>/dev/null\n\n    # last link reference should be removed\n    grep \"success for '$RH_ROOT/file.2', matching rule 'file_cleanup' (fileset=single)\" $logfile ||\n    grep \"success for '$RH_ROOT/file.3', matching rule 'file_cleanup' (fileset=single)\" $logfile ||\n        error \"action success not found for file.2 or file.3, rule file_cleanup, class single\"\n}\n\nfunction test_iname\n{\n    config_file=$1\n    clean_logs\n\n    # create 2 files with deferent case\n    touch $RH_ROOT/file\n    touch $RH_ROOT/File\n    touch $RH_ROOT/x\n\n    # file matches fmin_name and both f*_iname\n    # File matches fmaj_name and both f*_iname\n    # x matches other_name\n\n    $RH -f $RBH_CFG_DIR/$config_file --scan --once -l DEBUG -L rh_scan.log || error \"scan error\"\n    check_db_error rh_scan.log\n\n    # check classinfo report\n    $REPORT -f $RBH_CFG_DIR/$config_file --class-info -q | grep -v special > rh_report.log || error \"report error\"\n    # find_valueInCSVreport $logFile $typeValues $countValues $colSearch\n    find_valueInCSVreport rh_report.log fmaj_name+fmaj_iname+fmin_iname   1  2 || error \"invalid count for fmaj_name\"\n    find_valueInCSVreport rh_report.log fmin_name+fmaj_iname+fmin_iname   1  2 || error \"invalid count for fmin_name\"\n    find_valueInCSVreport rh_report.log other_name  1  2 || error \"invalid count for other_name\"\n}\n\nfunction test_copy\n{\n    config_file=$1\n    clean_logs\n\n    echo 123 > $RH_ROOT/file.1\n    echo 123 > $RH_ROOT/file.2\n    echo 123 > $RH_ROOT/file.3\n    echo 123 > $RH_ROOT/file.4\n    echo 123 > $RH_ROOT/file.5\n    mkdir $RH_ROOT/one_dir\n    ln -s \"$RH_ROOT/one_dir\" $RH_ROOT/one_link\n\n    $RH -f $RBH_CFG_DIR/$config_file --scan --once -l DEBUG -L rh_scan.log || error \"scan error\"\n    check_db_error rh_scan.log\n    sleep 1\n\n    $RH -f $RBH_CFG_DIR/$config_file --run=copy --target=all -l DEBUG -L rh_migr.log || 
error \"run error\"\n    check_db_error rh_migr.log\n\n    # expect file1 to be copied to a compressed file\n    grep \"copy success for '$RH_ROOT/file.1', matching rule 'copy_compress'\" rh_migr.log || error \"no copy of file.1\"\n    file $RH_ROOT/file.1.gz | grep compressed || error \"file.1.gz should be compressed\"\n    grep \"copy success for '$RH_ROOT/file.2', matching rule 'copy_mkdir'\" rh_migr.log || error \"no copy of file.2\"\n    (( $(find $RH_ROOT/backup -name file.2 | wc -l) == 1 )) || error \"file.2 backup not found\"\n    grep \"Error applying action on entry $RH_ROOT/file.3\" rh_migr.log || error \"copy of file.3 should have failed\"\n    (( $(ls $RH_ROOT/backup/*/file.3 | wc -l) == 0 )) || error \"no backup copy of file.3 expected\"\n    grep \"copy success for '$RH_ROOT/file.5', matching rule 'copy_link_to_dir'\" rh_migr.log || error \"no copy of file.5\"\n    (( $(find $RH_ROOT/one_dir -name file.5 | wc -l) == 1 )) || error \"file.5 backup not found\"\n}\n\n# helper for test_move\nfunction check_trash_count\n{\n    local src=\"$1\"\n    local c=\"$2\"\n    local trashed=\"$RH_ROOT/.trash/$src\"\n\n    if [ -e \"$src\" ]; then\n        error \"$src should have been moved to trash\"\n    fi\n\n    local n=$(ls -1 \"${trashed}\"* | wc -l)\n    (($n==$c)) || error \"Expect $c files ${trashed}* but found $n\"\n}\n\n# helper for test_move\nfunction check_nottrashed\n{\n    local src=\"$1\"\n    local trashed=\"$2\"\n\n    if [ ! 
-e \"$src\" ]; then\n        error \"$src should NOT have been moved to trash\"\n    fi\n\n    local n=$(ls ${trashed}* | wc -l)\n    (($n==0)) || error \"No ${trashed}* file expected in trash but found $n\"\n}\n\nfunction test_move\n{\n    config_file=$1\n    clean_logs\n\n    local trash_files=($RH_ROOT/dir.1/project.1/user.1/file1.log\n                       $RH_ROOT/dir.2/project.1/user.1/file2.log\n                       $RH_ROOT/dir.2/project.2/user.1/file3.log\n                       $RH_ROOT/dir.3/project.3/file4.log)\n    local trash_over=($RH_ROOT/dir.1/project.1/user.1/file.1\n                      $RH_ROOT/dir.1/project.3/file4.1)\n    local std_files=($RH_ROOT/dir.1/project.1/user.1/file.a\n                       $RH_ROOT/dir.2/project.1/user.1/file.b\n                       $RH_ROOT/dir.2/project.2/user.1/file.c\n                       $RH_ROOT/dir.3/project.3/file.d)\n\n    echo \"populate...\"\n    # create files to be trashed\n    for f in \"${trash_files[@]}\" \"${trash_over[@]}\" \"${std_files[@]}\"; do\n        mkdir -p $(dirname \"$f\")\n        echo qsdmlkqslkd > $f\n    done\n\n    echo \"scan...\"\n    $RH -f $RBH_CFG_DIR/$config_file --scan --once -l DEBUG -L rh_scan.log \\\n        2>/dev/null || error \"scan error\"\n    check_db_error rh_scan.log\n    sleep 1\n\n    echo \"trash...\"\n    $RH -f $RBH_CFG_DIR/$config_file --run=trash --target=all -l DEBUG -L \\\n        rh_purge.log 2>/dev/null  || error \"run error\"\n    check_db_error rh_purge.log\n\n    # check that std files are still in the FS tree and not in trash\n    # check that trash files are only in trash\n    for f in \"${trash_files[@]}\" \"${trash_over[@]}\"; do\n        grep \"trash success\" rh_purge.log | grep \"$f\" ||\n            error \"No trash success found for $f\"\n        check_trash_count \"$f\" 1\n    done\n    for f in \"${std_files[@]}\"; do\n        grep \"trash success\" rh_purge.log | grep \"$f\" &&\n            error \"Trash success found for $f\"\n     
   check_nottrashed \"$f\" \"$RH_ROOT/.trash/$f\"\n    done\n\n    # recreate trashed files and trash them again\n    echo \"create new files...\"\n    for f in \"${trash_files[@]}\" \"${trash_over[@]}\"; do\n        echo qsdmlkqslkd > $f\n    done\n\n    $RH -f $RBH_CFG_DIR/$config_file --scan --once -l DEBUG -L rh_scan.log \\\n        2>/dev/null || error \"scan error\"\n    check_db_error rh_scan.log\n    sleep 1\n\n    echo \"trash again...\"\n    :> rh_purge.log\n    $RH -f $RBH_CFG_DIR/$config_file --run=trash --target=all -l DEBUG -L \\\n        rh_purge.log 2>/dev/null || error \"run error\"\n    check_db_error rh_purge.log\n\n    # check that std files are still in the FS tree and not in trash\n    # check that trash files are only in trash\n    for f in \"${trash_files[@]}\"; do\n        grep \"trash success\" rh_purge.log | grep \"$f\" || error \"No trash success found for $f\"\n        # should find 2 trashed files now\n        check_trash_count \"$f\" 2\n    done\n    for f in \"${trash_over[@]}\"; do\n        grep \"trash success\" rh_purge.log | grep \"$f\" || error \"No trash success found for $f\"\n        # should have overwritten the previous file in trash\n        check_trash_count \"$f\" 1\n    done\n    for f in \"${std_files[@]}\"; do\n        grep \"trash success\" rh_purge.log | grep \"$f\" && error \"Trash success found for $f\"\n        check_nottrashed \"$f\" \"$RH_ROOT/.trash/$f\"\n    done\n}\n\nfunction test_manual_run\n{\n\tconfig_file=$1\n    run_interval=$2\n    flavor=$3\n\n\tif (( $is_lhsm + $shook == 0 )); then\n\t\techo \"HSM test only: skipped\"\n\t\tset_skipped\n\t\treturn 1\n\tfi\n\n\tclean_logs\n\n    # create test files (2 for each rule)\n    for i in 1 2 3 4 5 11 12 13 14 15 ; do\n        touch $RH_ROOT/file.$i\n    done\n\n    # initial scan\n\t$RH -f $RBH_CFG_DIR/$config_file --scan --once -l DEBUG -L rh_scan.log ||\n        error \"scan error\"\n    check_db_error rh_scan.log\n\n    # policy rules specifies last_mod >= 
1\n    sleep 1\n\n    case \"$flavor\" in\n    run)\n        cmd=\"--run\" # run with arguments\n        bgr=1       # background run?\n        nb_run_migr=2  # nb migration run expected\n        nb_run_purge=2 # nb purge run expected\n        nb_items_migr=10 # nb migration actions\n        ;;\n    run_all)\n        cmd=\"--run=all\"\n        bgr=1\n        nb_run_migr=2\n        nb_run_purge=2\n        nb_items_migr=10\n        ;;\n    run_migr)\n        cmd=\"--run=migration\"\n        bgr=1\n        nb_run_migr=2\n        nb_run_purge=0\n        nb_items_migr=10\n        ;;\n    run_migr_tgt)\n        cmd=\"--run=migration(target=class:file2)\"\n        bgr=0\n        nb_run_migr=1\n        nb_run_purge=0\n        nb_items_migr=2\n        ;;\n    run_migr_usage)\n         cmd=\"--run=migration(target=class:file1,target-usage=0%)\"\n         bgr=0\n         nb_run_migr=1\n         nb_run_purge=0\n         nb_items_migr=2\n        ;;\n    run_both)\n        # run the 2 policies as one-shot cmds (first tgt1, then tgt2?)\n        cmd=\"--run=migration(user:root),purge(target=class:file1)\"\n        bgr=0\n        nb_run_migr=1\n        nb_run_purge=1\n        nb_items_migr=10\n        ;;\n    ## --run=policy(limits) => to be added with later patch (daemon)\n    ## --run=policy1(target1,limit1),policy1(target2,limit2) => to be added with later patch (once shot)\n\n    esac\n\n    echo \"run options: $cmd\"\n    if [ $bgr = 1 ]; then\n        $RH -f $RBH_CFG_DIR/$config_file $cmd -l DEBUG -L rh_migr.log --detach \\\n            --pid-file=rh.pid || error \"starting background run\"\n\n        # sleep 1.5 the run interval (2 runs should be started)\n        sleep $((3*$run_interval/2))\n        kill_from_pidfile\n    else\n        $RH -f $RBH_CFG_DIR/$config_file $cmd -l DEBUG -L rh_migr.log || \\\n            error \"starting run\"\n    fi\n\n    c=$(grep migration rh_migr.log | grep \"Starting policy run\" | wc -l)\n    (( c == $nb_run_migr )) || error 
\"$nb_run_migr migration runs expected (found: $c)\"\n    c=$(grep purge rh_migr.log | grep \"Starting policy run\" | wc -l)\n    (( c == $nb_run_purge )) || error \"$nb_purge purge runs expected (found: $c)\"\n\n    c=$(grep migration rh_migr.log | grep \"Executing policy action\" | wc -l)\n    (( c == $nb_items_migr )) || error \"$nb_items_migr migration actions expected (found: $c)\"\n}\n\n# test policy limits at all levels (trigger, policy parameters, manual run...)\nfunction test_limits\n{\n\tconfig_file=$1\n    flavor=$2\n\n\tif (( $is_lhsm + $is_hsmlite == 0 )); then\n\t\techo \"HSM test only: skipped\"\n\t\tset_skipped\n\t\treturn 1\n\tfi\n\n\tclean_logs\n\n    # create test files (2 for each rule)\n    for i in 1 2 3 4 5 11 12 13 14 15 ; do\n        # 1MB each\n        dd if=/dev/zero of=$RH_ROOT/file.$i bs=1M count=1 2>/dev/null\n    done\n\n    # 0 for no limit\n    export trig_cnt=0\n    export trig_vol=0\n    export param_cnt=0\n    export param_vol=0\n\n    case \"$flavor\" in\n    trig_cnt)\n        # check that a count limit specified in a trigger is taken into account\n        run_opt=\"--run=migration --once\"\n        export trig_cnt=5\n        ;;\n    trig_vol)\n        # check that a size limit specified in a trigger is taken into account\n        run_opt=\"--run=migration --once\"\n        export trig_vol=\"5MB\"\n        ;;\n    param_cnt)\n        # check that a count limit specified in a policy parameter is taken into account\n        run_opt=\"--run=migration --once\"\n        export param_cnt=5\n        ;;\n    param_vol)\n        # check that a size limit specified in a policy parameter is taken into account\n        run_opt=\"--run=migration --once\"\n        export param_vol=\"5MB\"\n        ;;\n    run_cnt)\n        # check that a count limit specified in a manual run is taken into account\n        run_opt=\"--run=migration(all,max-count=5)\"\n        ;;\n    run_vol)\n        # check that a size limit specified in a manual run is 
taken into account\n        run_opt=\"--run=migration(all,max-vol=5MB)\"\n        ;;\n    trig_param)\n        # check that if a limit is specified in trigger+policy parameter, we take the min\n        run_opt=\"--run=migration --once\"\n        export trig_vol=\"7MB\"\n        export param_vol=\"5MB\"\n        ;;\n    trig_run)\n        # check that if a limit is specified in trigger+command line, we take the min\n        run_opt=\"--run=migration(all,max-count=5)\"\n        export trig_vol=\"7MB\"\n        ;;\n    param_run)\n        # check that if a limit is specified in command line+policy parameter, we take the min\n        run_opt=\"--run=migration(all,max-vol=7MB)\"\n        export param_cnt=\"5\"\n        ;;\n    esac\n\n    # initial scan\n\t$RH -f $RBH_CFG_DIR/$config_file --scan --once -l DEBUG -L rh_scan.log ||\n        error \"scan error\"\n    check_db_error rh_scan.log\n\n    # policy rules specifies last_mod >= 1\n    sleep 1\n\n    # we should get 5 actions / 5MB in all cases\n    $RH -f $RBH_CFG_DIR/$config_file $run_opt -l DEBUG -L rh_migr.log || \\\n        error \"starting run\"\n\n    [ \"$DEBUG\" = \"1\" ] && grep \"run summary\" rh_migr.log\n\n    c=$(grep \"run summary\" rh_migr.log | cut -d \";\" -f 3 | awk '{print $1}')\n\n    (( c == 5 )) || error \"5 actions expected (got $c)\"\n}\n\n# test limits using max_per_run scheduler\nfunction test_sched_limits\n{\n    config_file=$1\n    flavor=$2\n\n    if (( $is_lhsm + $is_hsmlite == 0 )); then\n        echo \"HSM test only: skipped\"\n        set_skipped\n        return 1\n    fi\n\n    clean_logs\n\n    # create test files (2 for each rule)\n    for i in 1 2 3 4 5 11 12 13 14 15 ; do\n        # 1MB each\n        dd if=/dev/zero of=$RH_ROOT/file.$i bs=1M count=1 2>/dev/null\n    done\n\n    # 0 for no limit\n    export trig_cnt=0\n    export trig_vol=0\n    export param_cnt=0\n    export param_vol=0\n    export sched_max_cnt=0;\n    export sched_max_vol=0;\n\n    case \"$flavor\" in\n    
sched_max_cnt)\n        run_opt=\"--run=migration --once\"\n        export trig_vol=\"7MB\"\n        export param_vol=\"6MB\"\n        export sched_max_cnt=\"5\";\n        ;;\n    sched_max_vol)\n        run_opt=\"--run=migration(all,max-count=6)\"\n        export trig_vol=\"7MB\"\n        export sched_max_vol=\"5MB\";\n        ;;\n    trigger)\n        run_opt=\"--run=migration --once\"\n        export trig_vol=\"5MB\"\n        export param_vol=\"6MB\"\n        export sched_max_cnt=7\n        ;;\n    param)\n        run_opt=\"--run=migration(all,max-vol=7MB)\"\n        export param_cnt=\"5\"\n        export sched_max_vol=\"6MB\";\n        ;;\n    cmd)\n        run_opt=\"--run=migration(all,max-vol=5MB)\"\n        export param_cnt=7\n        export sched_max_cnt=6\n    esac\n\n    # initial scan\n    $RH -f $RBH_CFG_DIR/$config_file --scan --once -l DEBUG -L rh_scan.log ||\n        error \"scan error\"\n    check_db_error rh_scan.log\n\n    # policy rules specifies last_mod >= 1\n    sleep 1\n\n    # we should get 5 actions / 5MB in all cases\n    $RH -f $RBH_CFG_DIR/$config_file $run_opt -l DEBUG -L rh_migr.log || \\\n        error \"starting run\"\n\n    [ \"$DEBUG\" = \"1\" ] && grep \"run summary\" rh_migr.log\n\n    c=$(grep \"run summary\" rh_migr.log | cut -d \";\" -f 3 | awk '{print $1}')\n\n    (( c == 5 )) || error \"5 actions expected (got $c)\"\n}\n\nfunction test_sched_ratelim\n{\n    local config_file=$1\n\n\tif (( $is_lhsm + $is_hsmlite == 0 )); then\n\t\techo \"HSM test only: skipped\"\n\t\tset_skipped\n\t\treturn 1\n\tfi\n\n    clean_logs\n\n    # Create test files (10KB each)\n    echo \"Create test files...\"\n    for i in {1..5}; do\n        dd if=/dev/zero of=$RH_ROOT/file1.$i bs=10K count=1 2>/dev/null\n    done\n    for i in {1..3}; do\n        dd if=/dev/zero of=$RH_ROOT/file2.$i bs=10K count=1 2>/dev/null\n        dd if=/dev/zero of=$RH_ROOT/file3.$i bs=10K count=1 2>/dev/null\n    done\n\n    # Limit processing to 2 files and 100KB per 
second\n    export ratelim_capacity=1\n    export ratelim_size=\"50KB\"\n    export ratelim_refill=\"500\"\n\n    # Initial scan\n    echo \"Initial scan...\"\n    $RH -f $RBH_CFG_DIR/$config_file --scan --once -l DEBUG -L rh_scan.log \\\n        2>>rh_scan.log || error \"scan error\"\n    check_db_error rh_scan.log\n\n    # migrate 10K files, must be limited to 2/sec\n    $RH -f $RBH_CFG_DIR/$config_file --run=migration \\\n        --target=class:test1 --once -l DEBUG -L rh_migr.log \\\n        2>>rh_migr.log || error \"starting run\"\n    # Throttling based on entry count should have been reported\n    # (but not on size)\n    grep \"Throttling after $ratelim_capacity actions\" rh_migr.log ||\n        error \"expected throttling on action count\"\n    grep \"Throttling after [0-9.]* KB\" rh_migr.log &&\n        error \"unexpected throttling on size\"\n\n    grep \"run summary\" rh_migr.log || error \"Found no policy run summary\"\n    # extract info from run_summary\n    sum=$(grep \"run summary\" rh_migr.log | cut -d '|' -f 2 | cut -d ':' -f 2)\n    time=$(echo $sum | cut -d ';' -f 1 | sed -e s/.*=// -e s/s$// -e s/^0//)\n    # 5 entries to be migrated @2/sec => 3 sec run\n    ((time >= 3)) || error \"Unexpected migration run time: $time < 3\"\n\n    :> rh_migr.log\n\n    # Limit processing to 10 files and 10KB per second\n    export ratelim_capacity=5\n    export ratelim_size=\"5KB\"\n\n    # migrate 10K files, must be limited to 10K/sec (1 file)\n    $RH -f $RBH_CFG_DIR/$config_file --run=migration \\\n        --target=class:test2 --once -l DEBUG -L rh_migr.log \\\n        2>>rh_migr.log || error \"starting run\"\n    # Throttling based on copy size should have been reported\n    # (but not on count)\n    grep \"Throttling after $ratelim_capacity actions\" rh_migr.log &&\n        error \"unexpected throttling on action count\"\n    grep \"Throttling after [0-9.]* KB\" rh_migr.log ||\n        error \"expected throttling on size\"\n\n    grep \"run summary\" 
rh_migr.log || error \"Found no policy run summary\"\n    # extract info from run_summary\n    sum=$(grep \"run summary\" rh_migr.log | cut -d '|' -f 2 | cut -d ':' -f 2)\n    time=$(echo $sum | cut -d ';' -f 1 | sed -e s/.*=// -e s/s$// -e s/^0//)\n    # 3 entries to be migrated @1/sec => at least 3 sec run\n    # (may be longer if the machine is loaded)\n    ((time >= 3)) || error \"Unexpected migration run time: $time < 3\"\n\n    :> rh_migr.log\n    # test the behavior when files are larger than the size limit\n    export ratelim_capacity=5\n    export ratelim_size=\"512\"\n\n    # migrate 10K files, must be limited to 10K/sec (1 file)\n    $RH -f $RBH_CFG_DIR/$config_file --run=migration \\\n        --target=class:test3 --once -l DEBUG -L rh_migr.log \\\n        2>>rh_migr.log || error \"starting run\"\n    # Throttling based on copy size should have been reported\n    # (but not on count)\n    grep \"Throttling after $ratelim_capacity actions\" rh_migr.log &&\n        error \"unexpected throttling on action count\"\n    grep \"Throttling after $ratelim_size\" rh_migr.log ||\n        error \"expected throttling on size\"\n\n    grep \"run summary\" rh_migr.log || error \"Found no policy run summary\"\n    # extract info from run_summary\n    sum=$(grep \"run summary\" rh_migr.log | cut -d '|' -f 2 | cut -d ':' -f 2)\n    time=$(echo $sum | cut -d ';' -f 1 | sed -e s/.*=// -e s/s$// -e s/^0//)\n    # 3 entries to be migrated @1/sec => at least 3 sec run\n    # (may be longer if the machine is loaded)\n    ((time >= 3)) || error \"Unexpected migration run time: $time < 3\"\n\n    (( $NB_ERROR == 0 )) && echo OK\n}\n\nfunction test_basic_sm\n{\n    local config_file=$1\n\n    clean_logs\n\n    local nb_files_ok=10\n    local nb_files_error=5\n    local nb_all=$(( $nb_files_ok + $nb_files_error ))\n\n    # create 2 sets of files:\n    # - for file.<i> the action will succeed\n    # - for file.<i>.fail the action will fail\n    echo \"1-Creating test files...\"\n   
 for i in $(seq ${nb_files_ok}); do\n        dd if=/dev/zero of=$RH_ROOT/file.$i bs=1M count=1 2>/dev/null ||\n            error \"writing file.$i\"\n    done\n    for i in $(seq ${nb_files_error}); do\n        dd if=/dev/zero of=$RH_ROOT/file.$i.fail bs=1M count=1 2>/dev/null ||\n            error \"writing file.$i.fail\"\n    done\n\n    echo \"2-Scanning\"\n    $RH -f $RBH_CFG_DIR/$config_file --scan --once -l VERB -L rh_scan.log ||\n        error \"scan error\"\n    check_db_error rh_scan.log\n\n    echo \"3-Checking initial 'basic' status\"\n    $REPORT -f $RBH_CFG_DIR/$config_file --status-info=touch --csv -q \\\n            --count-min=1 > rh_report.log\n    [ \"$DEBUG\" = \"1\" ] && cat rh_report.log\n    check_status_count rh_report.log file \"ok\" 0\n    check_status_count rh_report.log file \"failed\" 0\n    check_status_count rh_report.log file \"\" $nb_all\n\n    # make sure md_update of scan < now\n    sleep 1\n    echo \"4-Running basic policy\"\n    $RH -f $RBH_CFG_DIR/$config_file --run=touch --once -l VERB -L rh_migr.log ||\n        error \"policy run error\"\n    check_db_error rh_migr.log\n\n    # check policy actions are executed on all entries\n    local actions=$(grep \"Executing policy action\" rh_migr.log | wc -l)\n    (($actions == $nb_all)) || error \"$nb_all actions expected\"\n\n    echo \"5-Checking final 'basic' status\"\n    $REPORT -f $RBH_CFG_DIR/$config_file --status-info=touch --csv -q \\\n            --count-min=1 > rh_report.log\n    [ \"$DEBUG\" = \"1\" ] && cat rh_report.log\n    check_status_count rh_report.log file \"ok\" $nb_files_ok\n    check_status_count rh_report.log file \"failed\" $nb_files_error\n\n    return 0\n}\n\nfunction test_modeguard_sm_dir\n{\n    local config_file=$1\n\n    clean_logs\n\n    local nb_dir_ok=10\n    local nb_dir_invalid=5\n    local nb_all=$(( $nb_dir_ok + $nb_dir_invalid ))\n\n    # test_modeguard_dir.conf will set mode 2000 and clear 0002\n\n    # create 2 sets of directories:\n    # - 
dir.<i>.ok with the setgid bit and not writable by other\n    # - dir.<i>.invalid without the setgid bit or writable by other\n    echo \"1-Creating test directories...\"\n    for i in $(seq ${nb_dir_ok}); do\n        mkdir $RH_ROOT/dir.$i.ok || error \"creating dir.$i.ok\"\n        chmod g+s,o-w $RH_ROOT/dir.$i.ok || error \"chmod dir.$i.ok\"\n    done\n    for i in $(seq 1 2 ${nb_dir_invalid}); do\n        mkdir $RH_ROOT/dir.$i.invalid || error \"creating dir.$i.invalid\"\n        chmod g-s,o-w $RH_ROOT/dir.$i.invalid || error \"chmod dir.$i.invalid\"\n    done\n    for i in $(seq 2 2 ${nb_dir_invalid}); do\n        mkdir $RH_ROOT/dir.$i.invalid || error \"creating dir.$i.invalid\"\n        chmod g+s,o+w $RH_ROOT/dir.$i.invalid || error \"chmod dir.$i.invalid\"\n    done\n\n    echo \"2-Scanning\"\n    $RH -f $RBH_CFG_DIR/$config_file --scan --once -l VERB -L rh_scan.log ||\n        error \"scan error\"\n    check_db_error rh_scan.log\n\n    echo \"3-Checking initial 'modeguard' status\"\n    $REPORT -f $RBH_CFG_DIR/$config_file --status-info=modeguard --csv -q \\\n            --count-min=1 > rh_report.log\n    [ \"$DEBUG\" = \"1\" ] && cat rh_report.log\n    check_status_count rh_report.log \"dir\" \"ok\" $nb_dir_ok\n    check_status_count rh_report.log \"dir\" \"invalid\" $((nb_dir_invalid+extra_dir))\n\n    # make sure md_update of scan < now\n    sleep 1\n    echo \"4-Running modeguard policy\"\n    $RH -f $RBH_CFG_DIR/$config_file --run=modeguard --once -l VERB \\\n        -L rh_migr.log || error \"policy run error\"\n    check_db_error rh_migr.log\n\n    # check policy actions are executed on invalid entries\n    local actions=$(grep \"Executing policy action\" rh_migr.log | wc -l)\n    [ \"$DEBUG\" = \"1\" ] && cat rh_migr.log\n    (($actions == $nb_dir_invalid)) || error \"$nb_dir_invalid actions expected\"\n\n    if (( $no_log )); then\n        echo \"5-Scanning\"\n        $RH -f $RBH_CFG_DIR/$config_file --scan --once -l VERB -L rh_scan.log ||\n         
   error \"scan error\"\n        check_db_error rh_scan.log\n    else\n        echo \"5-Reading changelogs\"\n        $RH -f $RBH_CFG_DIR/$config_file --readlog --once -l VERB \\\n            -L rh_chglogs.log || error \"readlog error\"\n        check_db_error rh_chglogs.log\n    fi\n\n    echo \"6-Checking final 'modeguard' status\"\n    $REPORT -f $RBH_CFG_DIR/$config_file --status-info=modeguard --csv -q \\\n            --count-min=1 > rh_report.log\n    [ \"$DEBUG\" = \"1\" ] && cat rh_report.log\n    check_status_count rh_report.log dir \"ok\" $nb_all\n    check_status_count rh_report.log dir \"invalid\" $extra_dir\n\n    return 0\n}\n\nfunction test_modeguard_sm_file\n{\n    local config_file=$1\n\n    clean_logs\n\n    local nb_file_ok=10\n    local nb_file_invalid=5\n    local nb_all=$(( $nb_file_ok + $nb_file_invalid ))\n\n    # test_modeguard_file.conf will clear mode bits 0007\n\n    # create 2 sets of files:\n    # - dir.<i>.ok with mode 770\n    # - dir.<i>.invalid either with mode 771 or 777\n    echo \"1-Creating test files...\"\n    for i in $(seq ${nb_file_ok}); do\n        touch $RH_ROOT/file.$i.ok || error \"creating file.$i.ok\"\n        chmod 0770 $RH_ROOT/file.$i.ok || error \"chmod file.$i.ok\"\n    done\n    for i in $(seq 1 2 ${nb_file_invalid}); do\n        touch $RH_ROOT/file.$i.invalid || error \"creating file.$i.ok\"\n        chmod 0771 $RH_ROOT/file.$i.invalid || error \"chmod file.$i.ok\"\n    done\n    for i in $(seq 2 2 ${nb_file_invalid}); do\n        touch $RH_ROOT/file.$i.invalid || error \"creating file.$i.ok\"\n        chmod 0777 $RH_ROOT/file.$i.invalid || error \"chmod file.$i.ok\"\n    done\n\n    echo \"2-Scanning\"\n    $RH -f $RBH_CFG_DIR/$config_file --scan --once -l VERB -L rh_scan.log ||\n        error \"scan error\"\n    check_db_error rh_scan.log\n\n    echo \"3-Checking initial 'modeguard' status\"\n    $REPORT -f $RBH_CFG_DIR/$config_file --status-info=modeguard --csv -q \\\n            --count-min=1 > 
rh_report.log\n    [ \"$DEBUG\" = \"1\" ] && cat rh_report.log\n    check_status_count rh_report.log file \"ok\" $nb_file_ok\n    check_status_count rh_report.log file \"invalid\" $nb_file_invalid\n\n    # make sure md_update of scan < now\n    sleep 1\n    echo \"4-Running modeguard policy\"\n    $RH -f $RBH_CFG_DIR/$config_file --run=modeguard --once -l VERB \\\n        -L rh_migr.log || error \"policy run error\"\n    check_db_error rh_migr.log\n\n    # check policy actions are executed on invalid entries\n    local actions=$(grep \"Executing policy action\" rh_migr.log | wc -l)\n    [ \"$DEBUG\" = \"1\" ] && cat rh_migr.log\n    (($actions == $nb_file_invalid)) || error \"$nb_file_invalid actions expected\"\n\n    if (( $no_log )); then\n        echo \"5-Scanning\"\n        $RH -f $RBH_CFG_DIR/$config_file --scan --once -l VERB -L rh_scan.log ||\n            error \"scan error\"\n        check_db_error rh_scan.log\n    else\n        echo \"5-Reading changelogs\"\n        $RH -f $RBH_CFG_DIR/$config_file --readlog --once -l VERB \\\n            -L rh_chglogs.log || error \"readlog error\"\n        check_db_error rh_chglogs.log\n    fi\n\n    echo \"6-Checking final 'modeguard' status\"\n    $REPORT -f $RBH_CFG_DIR/$config_file --status-info=modeguard --csv -q \\\n            --count-min=1 > rh_report.log\n    [ \"$DEBUG\" = \"1\" ] && cat rh_report.log\n    check_status_count rh_report.log file \"ok\" $nb_all\n    check_status_count rh_report.log file \"invalid\" 0\n\n    return 0\n}\n\nfunction action_executed_on\n{\n    local target=\"$1\"\n    local log_file=\"$2\"\n\n    grep \"Executing policy action\" \"$log_file\" | grep \"$target\"\n}\n\nfunction assert_action_on\n{\n    action_executed_on \"$1\" \"$2\" ||\n        error \"Action expected on '$1'\"\n}\n\nfunction assert_no_action_on\n{\n    action_executed_on \"$1\" \"$2\" &&\n        error \"No action expected on '$1'\"\n}\n\n# test pre check matching behaviors\nfunction test_prepost_sched\n{\n    
config_file=$1\n    export pre_sched=$2\n    export post_sched=$3\n    export sched=$4\n\n    if (( $is_lhsm + $is_hsmlite == 0 )); then\n        echo \"HSM test only: skipped\"\n        set_skipped\n        return 1\n    fi\n\n    # only pre_sched or post_sched != none\n    local check_mode=$pre_sched\n    [ $pre_sched = \"none\" ] && check_mode=$post_sched\n\n    clean_logs\n\n    mkdir $RH_ROOT/subdir\n    # create test files (1 for each rule)\n    # make sure these files match the policy condition (older than 1day)\n    touch -d \"now-1day\" $RH_ROOT/file.{2..4}\n    touch -d \"now-1day\" $RH_ROOT/subdir/file.1\n\n    # initial scan\n    $RH -f $RBH_CFG_DIR/$config_file --scan --once -l DEBUG \\\n        -L rh_scan.log 2>/dev/null || error \"scan error\"\n    check_db_error rh_scan.log\n\n    # file 4 is younger than previously created ones\n    touch $RH_ROOT/file.4\n    # change file depth\n    mv $RH_ROOT/file.2 $RH_ROOT/subdir/file.2\n\n    $RH -f $RBH_CFG_DIR/$config_file --run=migration --once -l FULL \\\n        -L rh_migr.log 2>/dev/null\n\n    case \"$check_mode\" in\n    none)\n        # depth criteria is not checked => files in subdir are migrated\n        assert_action_on \"subdir/file.1\" rh_migr.log\n        assert_action_on \"file.2\" rh_migr.log\n        assert_action_on \"file.3\" rh_migr.log\n        grep \"Updating info about\" rh_migr.log &&\n            error \"No attr update expected\"\n        ;;\n\n    cache_only)\n        # mtime is not refreshed => file.4 is migrated\n        assert_action_on \"file.4\" rh_migr.log\n        # depth is from DB (move not taken into account) => file 2 is migrated\n        assert_action_on \"file.2\" rh_migr.log\n        # depth comes from DB, it is checked\n        assert_no_action_on \"subdir/file.1\" rh_migr.log\n        # action should run on file.3\n        assert_action_on \"file.3\" rh_migr.log\n        grep \"Updating info about\" rh_migr.log &&\n            error \"No attr update expected\"\n      
  ;;\n\n    # no path update for file2\n    auto_update_attrs)\n        assert_action_on \"file.2\" rh_migr.log\n        # any needed criteria is refreshed: subdir/file.1, and file.4 are\n        # not migrated\n        assert_no_action_on \"subdir/file.1\" rh_migr.log\n        assert_no_action_on \"file.4\" rh_migr.log\n        assert_action_on \"file.3\" rh_migr.log\n        grep -q \"Updating POSIX info\" rh_migr.log ||\n            error \"Attr update expected\"\n        grep \"Updating path info\" rh_migr.log &&\n            error \"No path update expected\"\n        ;;\n\n    # no big difference between these 2, as the used policy needs to update\n    # everything\n    auto_update_all|force_update)\n        # any needed criteria is refreshed: subdir/file.1 file2, and file.4 are\n        # not migrated\n        assert_no_action_on \"subdir/file.1\" rh_migr.log\n        assert_no_action_on \"file.2\" rh_migr.log\n        assert_no_action_on \"file.4\" rh_migr.log\n        assert_action_on \"file.3\" rh_migr.log\n        grep -q \"Updating POSIX info\" rh_migr.log ||\n            error \"Attr update expected\"\n        grep -q \"Updating path info\" rh_migr.log ||\n            error \"Path update expected\"\n        ;;\n    esac\n\n    $RH -f $RBH_CFG_DIR/$config_file --run=cleanup --once -l FULL \\\n        -L rh_purge.log 2>/dev/null\n    case \"$check_mode\" in\n    none|cache_only)\n        # no update expected\n        grep \"Updating info\" rh_purge.log && error \"No attr update expected\"\n        ;;\n    auto_update_attrs|auto_update_all)\n        # POSIX attr update expected, but no path\n        grep -q \"Updating POSIX info\" rh_purge.log ||\n            error \"Attr update expected\"\n        grep \"Updating path info\" rh_purge.log &&\n            error \"No path update expected\"\n        ;;\n    force_update)\n        # all updates expected\n        grep -q \"Updating POSIX info\" rh_purge.log ||\n            error \"Attr update expected\"\n       
 grep -q \"Updating path info\" rh_purge.log ||\n            error \"Path update expected\"\n        ;;\n    esac\n\n    return 0\n}\n\nfunction grep_matched_rule\n{\n    log_file=$1\n    policy_name=$2\n    rule_name=$3\n\n    grep \"$policy_name \\|\" $log_file | grep \" matches the condition for policy rule '$rule_name'\"\n}\n\nfunction test_checker\n{\n    config_file=$1\n    nb_files=4\n\n    clean_logs\n\n    # default dataversion is mtime+size\n    # use data_version with lustre 2.4+\n    if [ $FS_TYPE = \"lustre\" ]; then\n        $LFS --list-commands | grep -q data_version && export RBH_CKSUM_DV_CMD=\"lfs data_version\"\n    fi\n\n    # create initial version of files\n    for i in `seq 1 $nb_files`; do\n        dd if=/dev/urandom of=$RH_ROOT/file.$i bs=16k count=$i >/dev/null 2>/dev/null || error \"writing file.$i\"\n    done\n    $RH -f $RBH_CFG_DIR/$config_file --scan --once -l DEBUG -L rh_scan.log ||\n        error \"scan error\"\n    check_db_error rh_scan.log\n\n     # if robinhood tree is available, use rbh_cksum.sh from script directory\n    if [ -d \"../../src/robinhood\" ]; then\n        export PATH=\"../../scripts/:$PATH\"\n    fi\n    # else use the installed one\n\n    # run before 5s (no checksumming: last_mod < 5)\n    echo \"No sum (last_mod criteria for new entries)\"\n    $RH -f $RBH_CFG_DIR/$config_file --run=checksum --target=all -l DEBUG -L rh_migr.log ||\n        error \"running checksum\"\n    init=$(grep_matched_rule rh_migr.log checksum initial_check | wc -l)\n    rematch=$(grep_matched_rule rh_migr.log checksum default | wc -l)\n    [[ $init == 0 && $rematch == 0 ]] || error \"No matching rule expected ($init, $rematch)\"\n\n    :> rh_migr.log\n    # initial checksum after 5s\n    sleep 5\n    echo \"Initial sum\"\n    $RH -f $RBH_CFG_DIR/$config_file --run=checksum --target=all -l DEBUG -L rh_migr.log ||\n        error \"running checksum\"\n    init=$(grep_matched_rule rh_migr.log checksum initial_check | wc -l)\n    
rematch=$(grep_matched_rule rh_migr.log checksum default | wc -l)\n    [[ $init == 4 && $rematch == 0 ]] || error \"4 initial_check rule expected ($init, $rematch)\"\n\n    for i in `seq 1 $nb_files`; do\n        [ \"$DEBUG\" = \"1\" ] && $REPORT  -f $RBH_CFG_DIR/$config_file -e $RH_ROOT/file.$i | grep checksum\n        status=$($REPORT  -f $RBH_CFG_DIR/$config_file -e $RH_ROOT/file.$i | grep checksum\\.status | awk '{print $(NF)}')\n        [ \"$status\" = \"ok\" ] || error \"Unexpected status '$status' for $RH_ROOT/file.$i (ok expected)\"\n    done\n\n    :> rh_migr.log\n    # re-run (no checksumming: last_check < 5)\n    echo \"No sum (last_check criteria)\"\n    $RH -f $RBH_CFG_DIR/$config_file --run=checksum --target=all -l DEBUG -L rh_migr.log ||\n        error \"running checksum\"\n    init=$(grep_matched_rule rh_migr.log checksum initial_check | wc -l)\n    rematch=$(grep_matched_rule rh_migr.log checksum default | wc -l)\n    [[ $init == 0 && $rematch == 0 ]] || error \"No matching rule expected ($init, $rematch)\"\n\n    rm -f $RH_ROOT/file.1\n    echo \"sqdkqlsdk\" >> $RH_ROOT/file.2\n    touch $RH_ROOT/file.3\n\n    :> rh_migr.log\n    # rerun (changes occurred!)\n    sleep 5\n    echo \"New sum (last_check OK)\"\n    $RH -f $RBH_CFG_DIR/$config_file --run=checksum --target=all -l DEBUG -L rh_migr.log ||\n        error \"running checksum\"\n    init=$(grep_matched_rule rh_migr.log checksum initial_check | wc -l)\n    rematch=$(grep_matched_rule rh_migr.log checksum default | wc -l)\n    [[ $init == 0 && $rematch == 3 ]] || error \"3 default rule expected ($init, $rematch)\"\n\n    for i in `seq 2 $nb_files`; do # was removed\n        [ \"$DEBUG\" = \"1\" ] && $REPORT  -f $RBH_CFG_DIR/$config_file -e $RH_ROOT/file.$i | grep checksum\n        status=$($REPORT  -f $RBH_CFG_DIR/$config_file -e $RH_ROOT/file.$i | grep checksum\\.status | awk '{print $(NF)}')\n        [ \"$status\" = \"ok\" ] || error \"Unexpected status '$status' for $RH_ROOT/file.$i (ok 
expected)\"\n    done\n}\n\nfunction test_action_check\n{\n    # Test the check of outstanding actions\n    local config_file=$1\n    local FCOUNT=50\n    local ACT_TIMEO=4\n\n    if (( $is_lhsm == 0 )); then\n        echo \"No asynchronous archive for this purpose: skipped\"\n        set_skipped\n        return 1\n    fi\n\n    clean_logs\n\n    echo \"Create Files ...\"\n    for i in `seq 1 $FCOUNT` ; do\n        dd if=/dev/zero of=$RH_ROOT/file.$i bs=1M count=1 >/dev/null 2>/dev/null || error \"writing file.$i\"\n    done\n\n    echo \"Start asynchonous actions...\"\n    local t0=$(date +%s)\n    $RH -f $RBH_CFG_DIR/$config_file --scan --run=migration --target=all -I -l DEBUG -L rh_migr.log\n\n    # check status of files in DB:\n    $REPORT -f $RBH_CFG_DIR/$config_file --status-info lhsm --csv -q | tee report.out\n\n    find_valueInCSVreport report.out archiving $FCOUNT 3 || error \"Invalid count of entries with status 'archiving'\"\n\n    :>rh_migr.log\n    $RH -f $RBH_CFG_DIR/$config_file --run=migration -l VERB -L rh_migr.log &\n    local pid=$!\n\n    sleep 1\n\n    # is the check done?\n    grep -q \"Checking status of outstanding actions\" rh_migr.log ||\n        error \"No check of outstanding actions was performed\"\n\n    # early check of entries status (hopefully, no timeout reached yet? 
(3s))\n    local t1=$(date +%s)\n    local elapsed=$(($t1-$t0))\n    local nb_check=$(grep \"Updating status of\" rh_migr.log | wc -l)\n    if (( $elapsed < $ACT_TIMEO && $nb_check > 0 )); then\n        error \"No action check should be done after $elapsed sec < $ACT_TIMEO\"\n    else\n        echo \"Elapsed: $elapsed, nb_check=$nb_check\"\n    fi\n\n    # next check is after 10 sec\n    sleep 10\n\n    local run_check=$(grep \"Checking status of outstanding actions\" rh_migr.log | wc -l)\n    (( $run_check == 2 )) || error \"No 2nd check was done after 10 sec\"\n\n    t1=$(date +%s)\n    elapsed=$(($t1-$t0))\n    nb_check=$(grep \"Updating status of\" rh_migr.log | wc -l)\n    local nb_sync=$(grep \"changed: now 'synchro'\" rh_migr.log | wc -l)\n    if (( $nb_check != $FCOUNT )); then\n        error \"All actions should have been checked now (elapsed: $elapsed, nb_check=$nb_check)\"\n    else\n        echo \"Elapsed: $elapsed, nb_check=$nb_check, $nb_sync changed to 'synchro'\"\n    fi\n\n    # wait for all files to be synchro\n    if (( $nb_sync < $FCOUNT )); then\n        # once all actions are finished, check entry status changed accordingly\n        (( $is_lhsm != 0 )) && wait_done 30\n\n        # wait for next status check\n        local t2=$(date +%s)\n        (( $t2-$t1 < 10)) && sleep $((10 - ($t2-$t1)))\n        nb_sync=$(grep \"changed: now 'synchro'\" rh_migr.log | wc -l)\n        echo \"$nb_sync entries/$FCOUNT changed to status 'synchro'\"\n        (( $nb_sync != $FCOUNT )) && error \"All entries status should have been set to 'synchro'\"\n    fi\n\n    # double-check in report\n    $REPORT -f $RBH_CFG_DIR/$config_file --status-info lhsm --csv -q | tee report.out\n    find_valueInCSVreport report.out synchro $FCOUNT 3 || error \"Invalid count of entries with status 'synchro'\"\n\n    kill -9 $pid\n}\n\n\nfunction test_cnt_trigger\n{\n\tconfig_file=$1\n\tfile_count=$2\n\texpected_purge_count=$3\n\tpolicy_str=\"$4\"\n\n\tclean_logs\n    
wait_stable_df\n\n\tif (( $is_hsmlite != 0 )); then\n        # this mode may create an extra inode in filesystem: initial scan\n        # to take it into account\n\t\t$RH -f $RBH_CFG_DIR/$config_file --scan --once -l MAJOR -L rh_scan.log \\\n            2>/dev/null || error \"executing $CMD --scan\"\n\t\tcheck_db_error rh_scan.log\n    fi\n\n\t# initial inode count\n\tempty_count=$(inode_usage)\n    export high_cnt=$(($file_count + $empty_count))\n    export low_cnt=$(($high_cnt - $expected_purge_count))\n\n    [ \"$DEBUG\" = \"1\" ] && echo \"Initial inode count $empty_count, creating additional $file_count files\"\n\n\t#create test tree of archived files (1M each)\n\tfor i in `seq 1 $file_count`; do\n\t\tdd if=/dev/zero of=$RH_ROOT/file.$i bs=1M count=1 >/dev/null \\\n            2>/dev/null || error \"writing $RH_ROOT/file.$i\"\n\n\t\tif (( $is_lhsm != 0 )); then\n\t\t\t$LFS hsm_archive $RH_ROOT/file.$i\n\t\tfi\n\tdone\n\n\tif (( $is_lhsm != 0 )); then\n\t\twait_done 60 || error \"Copy timeout\"\n\tfi\n\n\t# wait for df to reflect all created files\n    wait_high_inodes $high_cnt\n    wait_stable_df\n\n\tif (( $is_hsmlite != 0 )); then\n        # scan and sync\n\t\t$RH -f $RBH_CFG_DIR/$config_file --scan $SYNC_OPT -l DEBUG \\\n            -L rh_migr.log 2>/dev/null || error \"executing $CMD --sync\"\n\t\tcheck_db_error rh_migr.log\n    else\n       \t# scan\n\t    \t$RH -f $RBH_CFG_DIR/$config_file --scan --once -l DEBUG \\\n            -L rh_chglogs.log 2>/dev/null || error \"executing $CMD --scan\"\n\t\tcheck_db_error rh_chglogs.log\n    fi\n\n    df -i $RH_ROOT\n\n\t# apply purge trigger\n\t$RH -f $RBH_CFG_DIR/$config_file --run=purge --once -l FULL -L rh_purge.log\n\n\tnb_release=`grep \"$REL_STR\" rh_purge.log | wc -l`\n\n\tif (($nb_release == $expected_purge_count)); then\n\t\techo \"OK: $nb_release files released\"\n\telse\n\t\terror \": $nb_release files released, $expected_purge_count expected\"\n\tfi\n}\n\nfunction test_cntpct_trigger\n{\n    
config_file=$1\n    file_count=$2\n    dummy=$3\n    policy_str=\"$4\"\n\n    clean_logs\n\n    total_count=`df -i $RH_ROOT/ | grep \"$RH_ROOT\" | xargs |\n            awk '{print $(NF-4)}'`\n    init_pct=`df -i $RH_ROOT/ | grep \"$RH_ROOT\" | xargs |\n            awk '{print $(NF-1)}'| sed -e 's/%//'`\n    one_pct=$(($total_count/100))\n\n    [ \"$DEBUG\" = \"1\" ] &&\n        echo \"Initial percentage $init_pct, creating additional $one_pct files\"\n\n    # create additional 2% of inodes\n    #create test files\n    for i in `seq 1 $((2*$one_pct))`; do\n        touch $RH_ROOT/file.$i || error \"creating $RH_ROOT/file.$i\"\n    done\n\n    # wait df to report at least the number of created files\n    wait_high_inodes $((2*$one_pct))\n\n    # new pct should be at least +1\n    new_pct=`df -i $RH_ROOT/ | grep \"$RH_ROOT\" | xargs | awk '{print $(NF-1)}' |\n            sed -e 's/%//'`\n\n    (($new_pct > $init_pct)) ||\n        error \"New inode percentage $new_pct should be > $init_pct\"\n\n    # export high/low threshold count to config file\n    export high_pct=\"$init_pct%\"\n    export low_pct=\"$init_pct%\"\n    export trig_type=global_usage\n\n    # scan\n    $RH -f $RBH_CFG_DIR/$config_file --scan --once -l DEBUG -L rh_chglogs.log \\\n        2>/dev/null || error \"executing $CMD --scan\"\n    check_db_error rh_chglogs.log\n\n    # apply purge trigger\n    $RH -f $RBH_CFG_DIR/$config_file --run=cleanup --once -l FULL \\\n        -L rh_purge.log\n\n    nb_clean=`grep \"cleanup success for\" rh_purge.log | wc -l`\n\n    echo \"$nb_clean files deleted\"\n\n    # clean file must be > 1%\n    (( $nb_clean > $one_pct )) || error \"At least $one_pct should have been cleaned\"\n\n    # cleaned files must be <= 2%\n    (( $nb_clean <= 2*$one_pct )) || error \"Max $((2*$one_pct)) should have been cleaned\"\n}\n\n\nfunction test_cntpct_ost_trigger\n{\n    config_file=$1\n    file_count=$2\n    dummy=$3\n    policy_str=\"$4\"\n\n    clean_logs\n\n    if [ -n 
\"$POSIX_MODE\" ]; then\n      echo \"Lustre-only test\"\n      set_skipped\n      return 1\n    fi\n\n    wait_stable_df\n    total_count_ost=`lfs df -i $RH_ROOT/ | grep \"OST:\" | head -n 1 | xargs |\n            awk '{print $(NF-4)}'`\n    init_pct=`lfs df -i $RH_ROOT/ | grep \"OST:\" | head -n 1 | xargs |\n            awk '{print $(NF-1)}'| sed -e 's/%//'`\n    one_pct=$(($total_count_ost/100))\n\n    echo \"Initial OST usage percentage $init_pct\"\n\n    ost_count=$(lfs df -i $RH_ROOT/ | grep \"OST:\" | wc -l)\n\n    created=0\n    for i in $(seq 1 $ost_count); do\n      ost=$(($i -1))\n      echo \"Creating additional $(($i*$one_pct)) files on OST$ost\"\n\n      # create additional 1% of inodes * ost rank\n      # create test files\n      for i in `seq 1 $(($i*$one_pct))`; do\n          lfs setstripe -c 1 -i $ost $RH_ROOT/file.$ost.$i ||\n          error \"creating $RH_ROOT/file.$i\"\n          ((created++))\n      done\n    done\n\n    # wait df to report at least the number of newly created files\n    echo \"Waiting for df to report $created inodes...\"\n    wait_high_inodes $created\n\n    for i in $(seq 1 $ost_count); do\n        ost=$(($i -1))\n\n        # new pct should be at least +1*ost_rank\n        new_pct=`lfs df -i $RH_ROOT/ | grep \"OST:$ost\" | head -n 1 | xargs |\n            awk '{print $(NF-1)}' | sed -e 's/%//'`\n\n        (($new_pct > $init_pct+$ost)) ||\n            error \"New inode pourcentage $new_pct should be > $(($init_pct+$ost))\"\n    done\n\n    # export high/low threshold count to config file\n    export high_pct=\"$init_pct%\"\n    export low_pct=\"$init_pct%\"\n    export trig_type=ost_usage\n\n    # scan\n    $RH -f $RBH_CFG_DIR/$config_file --scan --once -l DEBUG -L rh_chglogs.log \\\n        2>/dev/null || error \"executing $CMD --scan\"\n    check_db_error rh_chglogs.log\n\n    # apply purge trigger\n    $RH -f $RBH_CFG_DIR/$config_file --run=cleanup --once -l FULL \\\n        -L rh_purge.log\n\n    # checking triggers:\n   
 # - fullest OST first\n    # - OST0: 1 <= purged < one_pct\n    # - OST1: one_pct <= purged < 2*one_pct\n    # - OST2: 2*one_pct <= purged < 3*one_pct\n    # - OST3: 3*one_pct <= purged < 4*one_pct\n\n    sequence=$(grep \"Policy run summary\" rh_purge.log | cut -d '|' -f 2 |\n        awk '{print $5}' | xargs | sed -e 's/target=OST#//g' | tr -d ';')\n\n    if [ \"$sequence\" = \"3 2 1 0\" ]; then\n        echo \"OK: right purge sequence $sequence\"\n    else\n        error \"Invalid purge sequence: should be fullest first\"\n    fi\n\n    for i in $(seq 1 $ost_count); do\n        ost=$(($i -1))\n\n        nb_clean=$(grep \"Policy run summary\" rh_purge.log | grep \"target=OST#$ost;\" |\n            cut -d '|' -f 2 | awk '{print $6}')\n\n        [ \"$DEBUG\" = \"1\" ] && echo \"OST $ost: $nb_clean files cleaned\"\n\n        (( $nb_clean > $ost*$one_pct )) || error \"At least $(($ost*$one_pct+1)) should have been cleaned\"\n        (( $nb_clean <= $i*$one_pct )) || error \"Max $(($i*$one_pct)) should have been cleaned\"\n    done\n}\n\n\n\nfunction test_ost_trigger\n{\n\tconfig_file=$1\n\tmb_h_threshold=$2\n\tmb_l_threshold=$3\n\tpolicy_str=\"$4\"\n\n    export ost_high_vol=\"${mb_h_threshold}MB\"\n    export ost_low_vol=\"${mb_l_threshold}MB\"\n\n    if [ -n \"$POSIX_MODE\" ]; then\n        echo \"No OST support for POSIX mode\"\n        set_skipped\n        return 1\n    fi\n\n    clean_logs\n    wait_low_OST_usage \"OST0000\" 10\n\n\tempty_vol=`$LFS df $RH_ROOT | grep OST0000 | awk '{print $3}'`\n\tempty_vol=$(($empty_vol/1024))\n\n    if (($empty_vol >= $mb_h_threshold)); then\n        error \"OST0000 IS ALREADY OVER HIGH THRESHOLD $empty_vol vs. 
$mb_h_threshold : cannot run test\"\n        return 1\n    fi\n\n    [ \"$DEBUG\" = \"1\" ] && echo \"empty_vol OST0000: $empty_vol MB, HW: $mb_h_threshold MB\"\n\n\t$LFS setstripe -c 2 -i 0 $LFS_SS_SZ_OPT 1m $RH_ROOT || echo \"error setting stripe_count=2\"\n\n\t#create test tree of archived files (2M each=1MB/ost) until we reach high threshold\n    vol=$empty_vol\n    i=0\n    while (( vol < mb_h_threshold )); do\n\t\tdd if=/dev/zero of=$RH_ROOT/file.$i bs=1M count=2 >/dev/null 2>&1 ||\n            error \"writing $RH_ROOT/file.$i\"\n\n\t\tif (( $is_lhsm != 0 )); then\n\t\t\tflush_data\n\t\t\t$LFS hsm_archive $RH_ROOT/file.$i\n\t\tfi\n        vol=$($LFS df $RH_ROOT | grep OST0000 | awk '{print $3}')\n        vol=$(( vol / 1024 ))\n        i=$(( i + 1 ))\n\tdone\n\tcount=$i\n\tif (( $is_lhsm != 0 )); then\n\t\twait_done 60 || error \"Copy timeout\"\n\tfi\n    # df and lfs df have a latency\n    # wait the filesystem to report at least the number of created files\n    wait_high_inodes $count\n\n\tif (( $is_hsmlite != 0 )); then\n\t\t$RH -f $RBH_CFG_DIR/$config_file --scan $SYNC_OPT -l DEBUG  -L rh_migr.log || error \"executing $CMD --sync\"\n    fi\n\n\tif (( $is_lhsm != 0 )); then\n\t\tarch_count=`$LFS hsm_state $RH_ROOT/file.* | grep \"exists archived\" | wc -l`\n\t\t(( $arch_count == $count )) || error \"File count $count != archived count $arch_count\"\n\tfi\n\n    wait_stable_df\n   \tfull_vol=$($LFS df $RH_ROOT | grep OST0000 | awk '{print $3}')\n\tfull_vol=$(($full_vol/1024))\n\tdelta=$(($full_vol-$empty_vol))\n\techo \"OST#0 usage increased of $delta MB (total usage = $full_vol MB)\"\n\t((need_purge=$full_vol-$mb_l_threshold))\n\techo \"Need to purge $need_purge MB on OST#0\"\n\n\t# scan\n\t$RH -f $RBH_CFG_DIR/$config_file --scan --once -l DEBUG -L rh_chglogs.log\n\tcheck_db_error rh_chglogs.log\n\n\t$REPORT -f $RBH_CFG_DIR/$config_file -i\n\n\t# apply purge trigger\n\t$RH -f $RBH_CFG_DIR/$config_file --run=purge --once -l DEBUG -L rh_purge.log || error 
\"applying purge policy\"\n\n\tgrep summary rh_purge.log || error \"No purge was done\"\n    [ \"$DEBUG\" = \"1\" ] && cat rh_purge.log\n\n    # Retrieve the size purged\n    # \"2015/02/18 12:09:03 [5536/4] purge | Policy run summary: time=01s; target=OST#0; 42 successful actions (42.00/sec); volume: 84.00 MB (84.00 MB/sec); 0 entries skipped; 0 errors.\"\n    purged_total=`grep summary rh_purge.log | grep \"OST#0;\" | awk '{print $(NF-8)}' | sed -e \"s/\\.[0-9]\\+//g\"`\n\n    [ \"$DEBUG\" = \"1\" ] && echo \"total_purged=$purged_total\"\n\n\t# checks\n    (( $purged_total > $need_purge )) ||\n        error \": invalid amount of data purged ($purged_total <= $need_purge)\"\n    (( $purged_total <= 2*($need_purge + 1) )) ||\n        error \": invalid amount of data purged ($purged_total > 2*($need_purge + 1)\"\n\n    # Check that RH knows all OST are now below the high threshold.\n    grep \"Top OSTs are all under high threshold\" rh_purge.log || error \"An OST is still above high threshold\"\n\n    # sync df values before checking df return\n    wait_stable_df\n\n\tfull_vol1=`$LFS df $RH_ROOT | grep OST0001 | awk '{print $3}'`\n\tfull_vol1=$(($full_vol1/1024))\n\tpurge_ost1=`grep summary rh_purge.log | grep \"OST#1\" | wc -l`\n\n\tif (($full_vol1 > $mb_h_threshold )); then\n\t\terror \": OST#1 is not expected to exceed high threshold!\"\n\telif (($purge_ost1 != 0)); then\n\t\terror \": no purge expected on OST#1\"\n\telse\n\t\techo \"OK: no purge on OST#1 (usage=$full_vol1 MB)\"\n\tfi\n\n\t# restore default striping\n\t$LFS setstripe -c 2 -i -1 $RH_ROOT\n}\n\nfunction test_ost_order\n{\n\tconfig_file=$1\n\tpolicy_str=\"$2\"\n\tclean_logs\n\n    if [ -n \"$POSIX_MODE\" ]; then\n\t\techo \"No OST support for POSIX mode\"\n        set_skipped\n        return 1\n    fi\n\n    # reset df values\n    wait_stable_df\n\n    # nb OSTs?\n    nbost=`$LFS df $RH_ROOT | grep OST | wc -l`\n    maxidx=$((nbost -1))\n\n    # get low watermark = max current OST usage\n    local 
min_kb=0\n    for i in $(seq 0 $maxidx); do\n    \tempty_vol=`$LFS df $RH_ROOT | grep OST000$i | awk '{print $3}'`\n        (( $empty_vol > $min_kb )) && min_kb=$empty_vol\n    done\n\n    export ost_low_vol=\"${min_kb}KB\"\n    local trig_kb=$(($min_kb + 1024 )) # low thresh. +1MB\n    export ost_high_vol=\"${trig_kb}KB\"\n\n    [ \"$DEBUG\" = \"1\" ] && $LFS df $RH_ROOT\n    echo \"setting low threshold = $ost_low_vol, high_threshold = $ost_high_vol\"\n\n    # create nothing on OST0000 (should not be purged)\n    # ensure OST1 usage is trig_kb + 1M\n    # ensure OST2 usage is trig_kb + 2M\n    # etc...\n    for i in $(seq 1 $maxidx); do\n        vol=`$LFS df $RH_ROOT | grep OST000$i | awk '{print $3}'`\n        nbkb=$(($trig_kb + 1024*$i - $vol))\n        nbmb=$(($nbkb/1024+1))\n        for f in $(seq 1 $nbmb); do\n            $LFS setstripe -c 1 -i $i $RH_ROOT/test_ost_order.ost_$i.$f || error \"lfs setstripe\"\n            dd if=/dev/zero of=$RH_ROOT/test_ost_order.ost_$i.$f bs=1M count=$nbmb || error \"dd\"\n        done\n    done\n\n    wait_stable_df\n\n    # check thresholds only, then purge\n    for opt in \"--check-thresholds=purge\" \"--run=purge\"; do\n        :> rh_purge.log\n        $RH -f $RBH_CFG_DIR/$config_file $opt --once -l DEBUG -L rh_purge.log || error \"command $opt error\"\n        [ \"$DEBUG\" = \"1\" ] && cat rh_purge.log\n\n        # OSTs != 0 should be stated from the higher index to the lower\n        lastline=0\n        for i in $(seq 1 $maxidx); do\n            grep \"High threshold reached on OST #$i\" rh_purge.log || error \"OST #$i should be reported over high threshold\"\n            line=$(grep -n \"High threshold reached on OST #$i\" rh_purge.log | cut -d ':' -f 1)\n            if (( $lastline > 0 && $line > $lastline )); then\n                error \"OST #$i: a lower OST idx has been reported in a previous line $lastline\"\n            else\n                last_line=$line\n            fi\n        done\n\n        # OST0 should 
not be reported\n        grep \"High threshold reached on OST #0\" rh_purge.log && error \"OST #0 should not be reported over threshold\"\n    done\n\n}\n\n# check output of rbh-find -printf \"%y %p\\t%RP\\n\"\n# arg1: log file\n# arg2: path\n# arg3: expected projid\nfunction check_projid\n{\n    local log=\"$1\"\n    local path=\"$2\"\n    local projid=\"$3\"\n\n    grep \"\\s$path\\s\" $log | grep -qe \"\\s$projid$\" || error \"$path should have projid=$projid\"\n}\n\nfunction projectid_test_find\n{\n    config_file=$1\n    policy_str=\"$2\"\n\n    if [ -n \"$POSIX_MODE\" ]; then\n        echo \"No project id support for POSIX mode\"\n        set_skipped\n        return 1\n    fi\n\n    clean_logs\n\n    $LFS project -p 0 -d $RH_ROOT\n    $LFS project -C -r $RH_ROOT\n\n    # create the test tree:\n    #    3 P /mnt/lustre/B\n    #    2 - /mnt/lustre/A\n    #    3 P /mnt/lustre/B/2\n    #    3 P /mnt/lustre/B/1\n    #    3 P /mnt/lustre/B/3\n    #    0 - /mnt/lustre/A/2\n    #    0 - /mnt/lustre/A/1\n    #    0 - /mnt/lustre/A/3\n\n    mkdir $RH_ROOT/A\n    mkdir $RH_ROOT/B\n    lfs project -p 1 -d $RH_ROOT/.\n    lfs project -p 2 -r $RH_ROOT/A\n    touch $RH_ROOT/A/{1..3}\n    lfs project -p 3 -rs $RH_ROOT/B\n    touch $RH_ROOT/B/{1..3}\n\n    [ \"$DEBUG\" = \"1\" ] && lfs project -r \"$RH_ROOT\"\n\n    # scan\n    $RH -f $RBH_CFG_DIR/$config_file --scan -l DEBUG -L rh_chglogs.log  --once || error \"running initial scan\"\n    check_db_error rh_chglogs.log\n\n    # test find (no filtering)\n    $FIND -f $RBH_CFG_DIR/$config_file -printf \"%y %p\\t%RP\\n\" | sort > tmp1\n    $FIND -f $RBH_CFG_DIR/$config_file -printf \"%y %p\\t%RP\\n\" -nobulk | sort > tmp2\n    [ \"$DEBUG\" = \"1\" ] && cat tmp1 && echo \"---\"\n    diff tmp1 tmp2 || error \"output with nobulk should not differ\"\n    check_projid tmp1 /mnt/lustre     1\n    check_projid tmp1 /mnt/lustre/A   2\n    check_projid tmp1 /mnt/lustre/A/1 0\n    check_projid tmp1 /mnt/lustre/A/2 0\n    check_projid tmp1 
/mnt/lustre/A/3 0\n    check_projid tmp1 /mnt/lustre/B   3\n    check_projid tmp1 /mnt/lustre/B/1 3\n    check_projid tmp1 /mnt/lustre/B/2 3\n    check_projid tmp1 /mnt/lustre/B/3 3\n\n    # test find (with filtering)\n    $FIND -f $RBH_CFG_DIR/$config_file -printf \"%y %p\\t%RP\\n\" -projid 1 | sort > tmp1\n    $FIND -f $RBH_CFG_DIR/$config_file -printf \"%y %p\\t%RP\\n\" -projid 1 -nobulk | sort > tmp2\n    [ \"$DEBUG\" = \"1\" ] && cat tmp1 && echo \"---\"\n    diff tmp1 tmp2 || error \"output with nobulk should not differ\"\n    nb1=$(cat tmp1 | wc -l )\n    (($nb1 == 1)) || error \"1 element expected matching -projid 1\"\n\n    $FIND -f $RBH_CFG_DIR/$config_file -printf \"%y %p\\t%RP\\n\" -projid 3 | sort > tmp1\n    $FIND -f $RBH_CFG_DIR/$config_file -printf \"%y %p\\t%RP\\n\" -projid 3 -nobulk | sort > tmp2\n    [ \"$DEBUG\" = \"1\" ] && cat tmp1 && echo \"---\"\n    diff tmp1 tmp2 || error \"output with nobulk should not differ\"\n    nb3=$(cat tmp1 | wc -l )\n    (($nb3 == 4)) || error \"4 elements expected matching -projid 3\"\n\n    # test find (filtering with no display)\n    $FIND -f $RBH_CFG_DIR/$config_file -projid 2 -ls > tmp1\n    [ \"$DEBUG\" = \"1\" ] && cat tmp1 && echo \"---\"\n    nb2=$(cat tmp1 | wc -l )\n    (($nb2 == 1)) || error \"1 element expected matching -projid 2\"\n\n    # clean files\n    rm -f tmp1 tmp2\n}\n\nfunction projectid_test_report\n{\n    config_file=$1\n    policy_str=\"$2\"\n\n    if [ -n \"$POSIX_MODE\" ]; then\n        echo \"No project id support for POSIX mode\"\n        set_skipped\n        return 1\n    fi\n\n    clean_logs\n\nif [[ $RBH_NUM_UIDGID = \"yes\" ]]; then\n    testuser_str=$(getent passwd testuser | cut -d: -f3)\n    testgroup_str=$(getent group testgroup | cut -d: -f3)\n    root_str=0\nelse\n    testuser_str=testuser\n    testgroup_str=testgroup\n    root_str=root\nfi\n\n    # create the test tree\n    #    1 P /mnt/lustre/A\t0:0\n    #    1 P /mnt/lustre/A/1\t0:tg\n    #    1 P /mnt/lustre/A/2\ttu:0\n 
   #    1 P /mnt/lustre/A/3\ttu:tg\n    #    2 P /mnt/lustre/B\t0:tg\n    #    2 P /mnt/lustre/B/1\ttu:0\n    #    2 P /mnt/lustre/B/2\ttu:tg\n    #    2 P /mnt/lustre/B/3\t0:0\n\n    $LFS project -p 0 -d $RH_ROOT\n    $LFS project -C -r $RH_ROOT\n\n    mkdir $RH_ROOT/A\n    mkdir $RH_ROOT/B\n    lfs project -p 1 -rs $RH_ROOT/A\n    touch $RH_ROOT/A/{1..3}\n    dd if=/dev/zero of=$RH_ROOT/A/1 bs=1M count=1\n    lfs project -p 2 -rs $RH_ROOT/B\n    touch $RH_ROOT/B/{1..3}\n    dd if=/dev/zero of=$RH_ROOT/B/1 bs=1M count=2\n\n    chown root:root \t $RH_ROOT/A\n    chown root:testgroup $RH_ROOT/A/1\n    chown testuser:root  $RH_ROOT/A/2\n    chown testuser:testgroup  $RH_ROOT/A/3\n    chown root:testgroup $RH_ROOT/B\n    chown testuser:root  $RH_ROOT/B/1\n    chown testuser:testgroup  $RH_ROOT/B/2\n    chown root:root \t $RH_ROOT/B/3\n\n    [ \"$DEBUG\" = \"1\" ] && lfs project -r \"$RH_ROOT\"\n\n    # scan\n    $RH -f $RBH_CFG_DIR/$config_file --scan -l DEBUG -L rh_chglogs.log  --once || error \"running initial scan\"\n    check_db_error rh_chglogs.log\n\n    # check report with filter\n    $REPORT -f $RBH_CFG_DIR/$config_file -q --dump --filter-project 1 | awk '{print $NF}' > tmp1\n    [ \"$DEBUG\" = \"1\" ] && cat tmp1 && echo \"---\"\n    nbA=$(grep \"$RH_ROOT/A\" tmp1 | wc -l)\n    nbB=$(grep \"$RH_ROOT/B\" tmp1 | wc -l)\n    (($nbA == 4)) || error \"Invalid number of entries in $RH_ROOT/A matching projid 1: $nbA\"\n    (($nbB == 0)) || error \"Invalid number of entries in $RH_ROOT/B matching projid 1: $nbB\"\n\n    $REPORT -f $RBH_CFG_DIR/$config_file -q --dump --filter-project 2 | awk '{print $NF}' > tmp1\n    [ \"$DEBUG\" = \"1\" ] && cat tmp1 && echo \"---\"\n    nbA=$(grep \"$RH_ROOT/A\" tmp1 | wc -l)\n    nbB=$(grep \"$RH_ROOT/B\" tmp1 | wc -l)\n    (($nbA == 0)) || error \"Invalid number of entries in $RH_ROOT/A matching projid 2: $nbA\"\n    (($nbB == 4)) || error \"Invalid number of entries in $RH_ROOT/B matching projid 2: $nbB\"\n\n    # checking report 
split\n    # 1) no split, just filter\n    $REPORT -f $RBH_CFG_DIR/$config_file -q --csv -u root --filter-project 2 > tmp1\n    [ \"$DEBUG\" = \"1\" ] && cat tmp1 && echo \"---\"\n    # -> B and B/3 = 1 dir, 1 file\n    find_valueInCSVreport tmp1 dir  1 3 || error \"invalid count for dir\"\n    find_valueInCSVreport tmp1 file 1 3 || error \"invalid count for file\"\n\n    # 2) split per projid\n    $REPORT -f $RBH_CFG_DIR/$config_file --csv -q -u root -J > tmp1\n    # -> p=1: A A/1 ; p=2 B B/3\n    [ \"$DEBUG\" = \"1\" ] && cat tmp1 && echo \"---\"\n    nb=$(grep \"1,\\s*dir,\" tmp1 | cut -d ',' -f 4 | tr -d ' ')\n    (($nb == 1)) || error \"invalid count for dir of project 1\"\n    nb=$(grep \"1,\\s*file\" tmp1 | cut -d ',' -f 4 | tr -d ' ')\n    (($nb == 1)) || error \"invalid count for file of project 1\"\n    nb=$(grep \"2,\\s*dir,\" tmp1 | cut -d ',' -f 4 | tr -d ' ')\n    (($nb == 1)) || error \"invalid count for dir of project 2\"\n    nb=$(grep \"2,\\s*file,\" tmp1 | cut -d ',' -f 4 | tr -d ' ')\n    (($nb == 1)) || error \"invalid count for file of project 2\"\n\n    # 2) split per group+projid\n    #    1 P /mnt/lustre/A\t0:0\n    #    1 P /mnt/lustre/A/1\t0:tg\n    #    2 P /mnt/lustre/B\t0:tg\n    #    2 P /mnt/lustre/B/3\t0:0\n    #\n    # => 1 testgroup in each project (p1: 1 file, p2: 1 dir)\n    # => 1 root group in each project (p1: 1 dir, p2: 1 file)\n#      user,      group,     projid,     type,      count,...\n#      root,       root,          1,      dir,          1,...\n#      root,       root,          2,     file,          1,...\n#      root,  testgroup,          1,     file,          1,...\n#      root,  testgroup,          2,      dir,          1,...\n\n\n    $REPORT -f $RBH_CFG_DIR/$config_file --csv -q -u root -SJ > tmp1\n    [ \"$DEBUG\" = \"1\" ] && cat tmp1 && echo \"---\"\n    nb=$(grep \"root,\\s*1,\\s*dir,\" tmp1 | cut -d ',' -f 5 | tr -d ' ')\n    (($nb == 1)) || error \"invalid count for dir of project 1 and group root: $nb\"\n  
  nb=$(grep \"root,\\s*1,\\s*file\" tmp1 | cut -d ',' -f 5 | tr -d ' ')\n    [[ -z \"$nb\" ]] || error \"invalid count for file of project 1 and group root\"\n    nb=$(grep \"root,\\s*2,\\s*dir,\" tmp1 | cut -d ',' -f 5 | tr -d ' ')\n    [[ -z \"$nb\" ]] || error \"invalid count for dir of project 2 and group root\"\n    nb=$(grep \"root,\\s*2,\\s*file\" tmp1 | cut -d ',' -f 5 | tr -d ' ')\n    (($nb == 1)) || error \"invalid count for file of project 2 and group root\"\n\n    nb=$(grep \"testgroup,\\s*1,\\s*dir,\" tmp1 | cut -d ',' -f 5 | tr -d ' ')\n    [[ -z \"$nb\" ]] || error \"invalid count for dir of project 1 and group testgroup\"\n    nb=$(grep \"testgroup,\\s*1,\\s*file\" tmp1 | cut -d ',' -f 5 | tr -d ' ')\n    (($nb == 1)) || error \"invalid count for file of project 1 and group testgroup\"\n    nb=$(grep \"testgroup,\\s*2,\\s*dir,\" tmp1 | cut -d ',' -f 5 | tr -d ' ')\n    (($nb == 1)) || error \"invalid count for dir of project 2 and group testgroup\"\n    nb=$(grep \"testgroup,\\s*2,\\s*file\" tmp1 | cut -d ',' -f 5 | tr -d ' ')\n    [[ -z \"$nb\" ]] || error \"invalid count for file of project 2 and group testgroup\"\n\n    # group by projid\n    $REPORT -f $RBH_CFG_DIR/$config_file --csv -q --project-info > tmp1\n    [ \"$DEBUG\" = \"1\" ] && cat tmp1 && echo \"---\"\n    # proj1=1M, proj2=2M\n    nb=$(grep \"^\\s*1,\\s*\" tmp1 | cut -d ',' -f 3 | tr -d ' ')\n    (( $nb > 1000000 && $nb < 1200000 )) || error \"1 MB expected in proj 1: got $nb\"\n    nb=$(grep \"^\\s*2,\\s*\" tmp1 | cut -d ',' -f 3 | tr -d ' ')\n    (( $nb > 2000000 && $nb < 2200000 )) || error \"2 MB expected in proj 2: got $nb\"\n\n    rm -f tmp1\n}\n\nfunction projectid_test_chglog\n{\n    config_file=$1\n    policy_str=\"$2\"\n\n    if [ -n \"$POSIX_MODE\" ]; then\n        echo \"No project id support for POSIX mode\"\n        set_skipped\n        return 1\n    fi\n\n    clean_logs\n\n    # create the test tree:\n    $LFS project -p 0 -d $RH_ROOT\n    $LFS project -C -r 
$RH_ROOT\n\n    mkdir $RH_ROOT/A\n    touch $RH_ROOT/A/{1..3}\n    mkdir $RH_ROOT/B\n    touch $RH_ROOT/B/{1..3}\n\n    [ \"$DEBUG\" = \"1\" ] && lfs project -r \"$RH_ROOT\"\n\n    # scan\n    $RH -f $RBH_CFG_DIR/$config_file --scan -l DEBUG -L rh_chglogs.log  --once || error \"running initial scan\"\n    check_db_error rh_chglogs.log\n\n    $LFS changelog_clear lustre-MDT0000 cl1 0\n\n    # initial check\n    nb=$($REPORT -f $RBH_CFG_DIR/$config_file -q --dump --filter-project=0 | wc -l) # <= all in here\n    (( $nb == 8 )) || error \"8 entries expected with projid=0\"\n    nb=$($REPORT -f $RBH_CFG_DIR/$config_file -q --dump --filter-project=1 | wc -l) # none expected\n    (( $nb == 0 )) || error \"no entry expected with projid=1\"\n\n    # modify project of A\n    $LFS project -p 1 -d $RH_ROOT/A\n    [ \"$DEBUG\" = \"1\" ] && $LFS changelog lustre-MDT0000\n\n    # read the changelog\n    $RH -f $RBH_CFG_DIR/$config_file --readlog --once -l DEBUG -L rh_chglogs.log || error \"reading changelog\"\n    check_db_error rh_chglogs.log\n\n    # now they should be 1 dir in project 1\n    $REPORT -f $RBH_CFG_DIR/$config_file -q -i --filter-project=1 > tmp1\n    find_valueInCSVreport tmp1 dir  1 2 || error \"invalid count for dir\"\n\n    # modify project B recursively\n    $LFS project -p 2 -rs $RH_ROOT/B\n    [ \"$DEBUG\" = \"1\" ] && $LFS changelog lustre-MDT0000\n\n    # read the changelog\n    $RH -f $RBH_CFG_DIR/$config_file --readlog --once -l DEBUG -L rh_chglogs.log || error \"reading changelog\"\n    check_db_error rh_chglogs.log\n\n    # now they should be 1 dir and 3 files in project 2\n    $REPORT -f $RBH_CFG_DIR/$config_file -q -i --filter-project=2 > tmp1\n    find_valueInCSVreport tmp1 dir  1 2 || error \"invalid count for dir\"\n    find_valueInCSVreport tmp1 file 3 2 || error \"invalid count for file\"\n\n    # create new file in A: no change expected\n    touch $RH_ROOT/A/new\n    $RH -f $RBH_CFG_DIR/$config_file --readlog --once -l DEBUG -L rh_chglogs.log 
|| error \"reading changelog\"\n    check_db_error rh_chglogs.log\n    nb=$($REPORT -f $RBH_CFG_DIR/$config_file -q -i --filter-project=1 | grep file)\n    [ -z \"$nb\" ] || error \"No file expected in project 1\"\n\n    # create new file and dir in B: new object expected\n    mkdir $RH_ROOT/B/new1\n    touch $RH_ROOT/B/new1/new2\n    touch $RH_ROOT/B/new2\n    $RH -f $RBH_CFG_DIR/$config_file --readlog --once -l DEBUG -L rh_chglogs.log || error \"reading changelog\"\n    check_db_error rh_chglogs.log\n    # now they should be 2 dir and 5 files in project 2\n    $REPORT -f $RBH_CFG_DIR/$config_file -q -i --filter-project=2 > tmp1\n    find_valueInCSVreport tmp1 dir  2 2 || error \"invalid count for dir\"\n    find_valueInCSVreport tmp1 file 5 2 || error \"invalid count for file\"\n\n    rm -f tmp1\n}\n\nfunction projectid_test_run\n{\n    config_file=$1\n    policy_str=\"$2\"\n\n    if [ -n \"$POSIX_MODE\" ]; then\n        echo \"No project id support for POSIX mode\"\n        set_skipped\n        return 1\n    fi\n\n    clean_logs\n\n    # initialize root projectid to 0\n    $LFS project -p 0 -d $RH_ROOT\n    $LFS project -C -r $RH_ROOT\n\n    # create 6 files\n    for i in {1..6}; do\n        dd if=/dev/zero of=$RH_ROOT/file.$i bs=1M count=1 >/dev/null 2>/dev/null || error \"writing file.$i\"\n    done\n\n    $LFS project -p 1 $RH_ROOT/file.1\n    $LFS project -p 2 $RH_ROOT/file.2\n    $LFS project -p 2 $RH_ROOT/file.3\n    $LFS project -p 3 $RH_ROOT/file.4\n    $LFS project -p 3 $RH_ROOT/file.5\n    $LFS project -p 3 $RH_ROOT/file.6\n\n    # initial scan\n    $RH -f $RBH_CFG_DIR/$config_file --scan -l DEBUG -L rh_chglogs.log  --once || error \"\"\n\n    # flush data for HSM flavors\n    if (( ($is_hsmlite != 0) || ($is_lhsm != 0) )); then\n        echo \"Archiving files\"\n        $RH -f $RBH_CFG_DIR/$config_file $SYNC_OPT -l DEBUG  -L rh_migr.log || error \"archiving files\"\n\n        if (( $is_lhsm != 0 )); then\n            echo \"Waiting for end of data 
migration...\"\n            wait_done 60\n\n            # archive is asynchronous: read changelog to get the archive status\n            $RH -f $RBH_CFG_DIR/$config_file --readlog -l DEBUG -L rh_chglogs.log  --once || error \"\"\n        fi\n    fi\n\n    # md_update for purge must be > previous md updates\n    sleep 1\n\n    [ \"$DEBUG\" = \"1\" ] && $FIND -f $RBH_CFG_DIR/$config_file -type f -printf \"%y %p\\t%RP\\n\" | sort\n\n    :>rh_purge.log\n    $RH -f $RBH_CFG_DIR/$config_file --run=purge -I --target=projid:1 --once -l DEBUG -L rh_purge.log || error \"purging files\"\n    nb=$(grep \"$REL_STR\" rh_purge.log | wc -l)\n    (($nb == 1)) || error \"Unexpected number of purged files for projid 1: $nb\"\n\n    :>rh_purge.log\n    $RH -f $RBH_CFG_DIR/$config_file --run=purge -I --target=projid:2 --once -l DEBUG -L rh_purge.log || error \"purging files\"\n    nb=$(grep \"$REL_STR\" rh_purge.log | wc -l)\n    (($nb == 2)) || error \"Unexpected number of purged files for projid 2: $nb\"\n\n    :>rh_purge.log\n    $RH -f $RBH_CFG_DIR/$config_file --run=purge -I --target=projid:3 --once -l DEBUG -L rh_purge.log || error \"purging files\"\n    nb=$(grep \"$REL_STR\" rh_purge.log | wc -l)\n    (($nb == 3)) || error \"Unexpected number of purged files for projid 3: $nb\"\n}\n\n\nfunction test_trigger_check\n{\n    config_file=$1\n    max_count=$2\n    max_vol_mb=$3\n    policy_str=\"$4\"\n    target_count=$5\n    target_fs_vol=$6\n    target_user_vol=$7\n    max_user_vol=$8\n    target_user_count=$9\n\n    clean_logs\n    wait_stable_df\n\n    if (( $is_hsmlite != 0 )); then\n        # this mode may create an extra inode in filesystem: initial scan\n        # to take it into account\n        $RH -f $RBH_CFG_DIR/$config_file --scan --once -l MAJOR -L rh_scan.log || error \"executing $CMD --scan\"\n        check_db_error rh_scan.log\n    fi\n\n    # triggers to be checked\n    # - inode count > max_count\n    # - fs volume    > max_vol\n    # - root quota  > user_quota\n\n 
   # initial inode count\n    empty_count=`df -i $RH_ROOT/ | xargs | awk '{print $(NF-3)}'`\n    empty_count_user=0\n\n#    ((file_count=$max_count-$empty_count))\n    file_count=$max_count\n\n    # compute file size to exceed max vol and user quota\n    empty_vol=`df -k $RH_ROOT  | xargs | awk '{print $(NF-3)}'`\n    ((empty_vol=$empty_vol/1024))\n\n    if (( $empty_vol < $max_vol_mb )); then\n        ((missing_mb=$max_vol_mb-$empty_vol))\n    else\n        missing_mb=0\n    fi\n\n    if (($missing_mb < $max_user_vol )); then\n        missing_mb=$max_user_vol\n    fi\n\n    # file_size = missing_mb/file_count + 1\n    ((file_size=$missing_mb/$file_count + 1 ))\n\n    echo \"$file_count files missing, $file_size MB each\"\n\n    #create test tree of archived files (file_size MB each)\n    for i in `seq 1 $file_count`; do\n        dd if=/dev/zero of=$RH_ROOT/file.$i bs=1M count=$file_size  >/dev/null 2>/dev/null || error \"writing $RH_ROOT/file.$i\"\n\n        if (( $is_lhsm != 0 )); then\n            flush_data\n            $LFS hsm_archive $RH_ROOT/file.$i\n        fi\n    done\n\n    if (( $is_lhsm != 0 )); then\n        wait_done 60 || error \"Copy timeout\"\n    fi\n\n    # wait at least for created files to be reported by df\n    wait_high_inodes $file_count\n    wait_stable_df\n\n    if (( $is_hsmlite != 0 )); then\n        # scan and sync\n        $RH -f $RBH_CFG_DIR/$config_file --scan $SYNC_OPT -l DEBUG  -L rh_migr.log || error \"executing $CMD --sync\"\n        check_db_error rh_migr.log\n    else\n      # scan\n        $RH -f $RBH_CFG_DIR/$config_file --scan --once -l DEBUG -L rh_chglogs.log\n          check_db_error rh_chglogs.log\n    fi\n\n    # check purge triggers\n    $RH -f $RBH_CFG_DIR/$config_file --check-thresholds=purge --once -l FULL -L rh_purge.log\n\n    ((expect_count=$empty_count+$file_count-$target_count+$extra_dir))\n    ((expect_vol_fs=$empty_vol+$file_count*$file_size-$target_fs_vol))\n    
((expect_vol_user=$file_count*$file_size-$target_user_vol))\n    ((expect_count_user=$file_count+$empty_count_user-$target_user_count+$extra_dir))\n\n    echo \"over trigger limits: $expect_count entries, $expect_vol_fs MB, $expect_vol_user MB for user root, $expect_count_user entries for user root\"\n\n    nb_release=`grep \"$REL_STR\" rh_purge.log | wc -l`\n\n    count_trig=`grep \" entries must be processed in Filesystem\" rh_purge.log | cut -d '|' -f 2 | awk '{print $1}'`\n    [ -n \"$count_trig\" ] || count_trig=0\n\n    vol_fs_trig=`grep \" blocks (x512) must be processed on Filesystem\" rh_purge.log | cut -d '|' -f 2 | awk '{print $1}'`\n    [ -n \"$vol_fs_trig\" ] || vol_fs_trig=0\n    ((vol_fs_trig_mb=$vol_fs_trig/2048)) # /2048 == *512/1024/1024\n\n    vol_user_trig=`grep \" blocks (x512) must be processed for user\" rh_purge.log | cut -d '|' -f 2 | awk '{print $1}'`\n    [ -n \"$vol_user_trig_mb\" ] || vol_user_trig_mb=0\n    ((vol_user_trig_mb=$vol_user_trig/2048)) # /2048 == *512/1024/1024\n\n    cnt_user_trig=`grep \" files to be processed for user\" rh_purge.log | cut -d '|' -f 2 | awk '{print $1}'`\n    [ -n \"$cnt_user_trig\" ] || cnt_user_trig=0\n\n    echo \"triggers reported: $count_trig entries (global), $cnt_user_trig entries (user), $vol_fs_trig_mb MB (global), $vol_user_trig_mb MB (user)\"\n\n    # check then was no actual purge\n    if (($nb_release > 0)); then\n        error \": $nb_release files released, no purge expected\"\n    elif (( $count_trig != $expect_count )); then\n        error \": trigger reported $count_trig files over threshold, $expect_count expected\"\n    elif (( $vol_fs_trig_mb != $expect_vol_fs )); then\n        error \": trigger reported $vol_fs_trig_mb MB over threshold, $expect_vol_fs expected\"\n    elif (( $vol_user_trig_mb != $expect_vol_user )); then\n        error \": trigger reported $vol_user_trig_mb MB over threshold, $expect_vol_user expected\"\n        elif ((  $cnt_user_trig != $expect_count_user )); 
then\n                error \": trigger reported $cnt_user_trig files over threshold, $expect_count_user expected\"\n\n    else\n        echo \"OK: all checks successful\"\n    fi\n}\n\nfunction check_released\n{\n    if (($is_lhsm != 0)); then\n        $LFS hsm_state $1 | grep released || return 1\n    elif (($shook != 0 )); then\n        # check that nb blocks is 0\n        bl=`stat -c \"%b\" $1`\n        [ \"$DEBUG\" = \"1\" ] && echo \"$1: $bl blocks\"\n        [[ -n $bl ]] && (( $bl == 0 )) || return 1\n        # check that shook_state is \"released\"\n        st=`getfattr -n security.shook_state $1 --only-values 2>/dev/null`\n        [ \"$DEBUG\" = \"1\" ] && echo \"$1: status $st\"\n        [[ \"x$st\" = \"xreleased\" ]] || return 1\n    else\n        [ -f $1 ] && return 1\n    fi\n    return 0\n}\n\nfunction wait_run_count\n{\n    local log=$1\n    local cnt=$2\n    local timeo=$3\n\n    # wait for end of run\n    while (( $(grep \"End of current pass\" $log | wc -l) < $cnt )); do\n        echo \"waiting end of pass...\"\n        sleep 1\n        ((timeo=$timeo - 1))\n        (($timeo == 0)) && return 1\n        continue\n    done\n    return 0\n}\n\nfunction test_periodic_trigger\n{\n    config_file=$1\n    sleep_time=$2\n    policy_str=$3\n\n    clean_logs\n\n    echo \"1-Populating filesystem...\"\n    # create 3 files of each type\n    # (*.1, *.2, *.3, *.4)\n    for i in `seq 1 4`; do\n        dd if=/dev/zero of=$RH_ROOT/file.$i bs=1M count=1 >/dev/null 2>/dev/null || error \"$? writing $RH_ROOT/file.$i\"\n        dd if=/dev/zero of=$RH_ROOT/foo.$i bs=1M count=1 >/dev/null 2>/dev/null || error \"$? writing $RH_ROOT/foo.$i\"\n        dd if=/dev/zero of=$RH_ROOT/bar.$i bs=1M count=1 >/dev/null 2>/dev/null || error \"$? 
writing $RH_ROOT/bar.$i\"\n\n        flush_data\n        if (( $is_lhsm != 0 )); then\n            $LFS hsm_archive $RH_ROOT/file.$i $RH_ROOT/foo.$i $RH_ROOT/bar.$i\n        fi\n    done\n\n    if (( $is_lhsm != 0 )); then\n        wait_done 20 || error \"Copy timeout\"\n    fi\n\n    # ensure their access time is all the same\n    touch -a $RH_ROOT/file.{1..4}\n    touch -a $RH_ROOT/foo.{1..4}\n    touch -a $RH_ROOT/bar.{1..4}\n    t0=`date +%s`\n\n    # scan\n    echo \"2-Populating robinhood database (scan)...\"\n    if (( $is_hsmlite != 0 )); then\n        # scan and sync\n        $RH -f $RBH_CFG_DIR/$config_file --scan $SYNC_OPT -l DEBUG \\\n            -L rh_migr.log 2>/dev/null || error \"executing $CMD --sync\"\n          check_db_error rh_migr.log\n    else\n        $RH -f $RBH_CFG_DIR/$config_file --scan --once -l DEBUG -L rh_scan.log \\\n            2>/dev/null || error \"executing $CMD --scan\"\n          check_db_error rh_scan.log\n    fi\n\n    # make sure files *.1 are old enough\n    sleep 1\n\n    # start periodic trigger in background\n    echo \"3.1-checking trigger for first policy run...\"\n    $RH -f $RBH_CFG_DIR/$config_file --run=purge -l DEBUG -L rh_purge.log \\\n        2>/dev/null &\n    sleep 1\n\n    t1=`date +%s`\n    ((delta=$t1 - $t0))\n\n    clean_caches # blocks is cached\n    # it first must have purged *.1 files (not others)\n\n    wait_run_count rh_purge.log 1 60 || error \"pass timeout\"\n\n    # make sure the policy delay is not elapsed\n    check_released \"$RH_ROOT/file.1\" || error \"$RH_ROOT/file.1 should have been released after $delta s\"\n    check_released \"$RH_ROOT/foo.1\"  || error \"$RH_ROOT/foo.1 should have been released after $delta s\"\n    check_released \"$RH_ROOT/bar.1\"  || error \"$RH_ROOT/bar.1 should have been released after $delta s\"\n\n    if (( $delta <= 5 )); then\n        check_released \"$RH_ROOT/file.2\" && error \"$RH_ROOT/file.2 shouldn't have been released after $delta s\"\n        
check_released \"$RH_ROOT/foo.2\"  && error \"$RH_ROOT/foo.2 shouldn't have been released after $delta s\"\n        check_released \"$RH_ROOT/bar.2\"  && error \"$RH_ROOT/bar.2 shouldn't have been released after $delta s\"\n    else\n        echo \"WARNING: more than 5s elapsed, check skipped\"\n    fi\n\n    ((sleep_time=$sleep_time-$delta))\n    sleep $(( $sleep_time + 2 ))\n    # now, *.2 must have been purged\n    echo \"3.2-checking trigger for second policy run...\"\n\n    wait_run_count rh_purge.log 2 60 || error \"pass timeout\"\n\n    t2=`date +%s`\n    ((delta=$t2 - $t0))\n\n    clean_caches # blocks is cached\n    check_released \"$RH_ROOT/file.2\" || error \"$RH_ROOT/file.2 should have been released after $delta s ($(date))\"\n    check_released \"$RH_ROOT/foo.2\" || error \"$RH_ROOT/foo.2 should have been released after $delta s ($(date))\"\n    check_released \"$RH_ROOT/bar.2\" || error \"$RH_ROOT/bar.2 should have been released after $delta s ($(date))\"\n\n    if (( $delta <= 10 )); then\n        check_released \"$RH_ROOT/file.3\" && error \"$RH_ROOT/file.3 shouldn't have been released after $delta s\"\n        check_released \"$RH_ROOT/foo.3\"  && error \"$RH_ROOT/foo.3 shouldn't have been released after $delta s\"\n        check_released \"$RH_ROOT/bar.3\" && error \"$RH_ROOT/bar.3 shouldn't have been released after $delta s\"\n    else\n        echo \"WARNING: more than 10s elapsed, check skipped\"\n    fi\n\n    # wait 4 more secs (so another purge policy is applied)\n    sleep 4\n    # now, it's *.3\n    # *.4 must be preserved\n    echo \"3.3-checking trigger for third policy...\"\n\n    wait_run_count rh_purge.log 3 60 || error \"pass timeout\"\n    t3=`date +%s`\n    ((delta=$t3 - $t0))\n\n    clean_caches # blocks is cached\n    check_released \"$RH_ROOT/file.3\" || error \"$RH_ROOT/file.3 should have been released after $delta s\"\n    check_released \"$RH_ROOT/foo.3\"  || error \"$RH_ROOT/foo.3 should have been released after $delta s\"\n 
   check_released \"$RH_ROOT/bar.3\"  || error \"$RH_ROOT/bar.3 should have been released after $delta s\"\n    check_released \"$RH_ROOT/file.4\" && error \"$RH_ROOT/file.4 shouldn't have been released after $delta s\"\n    check_released \"$RH_ROOT/foo.4\"  && error \"$RH_ROOT/foo.4 shouldn't have been released after $delta s\"\n    check_released \"$RH_ROOT/bar.4\"  && error \"$RH_ROOT/bar.4 shouldn't have been released after $delta s\"\n\n    # final check: 3x \"Policy run summary: [...] 3 successful actions\"\n    nb_pass=$(grep -c \"Checking trigger \" rh_purge.log)\n    # trig count should be (elapsed/period) +/- 1\n    min_trig=$(($delta/5 - 1))\n    max_trig=$(($delta/5 + 1))\n    if (( $nb_pass < $min_trig )) || (( $nb_pass > $max_trig )); then\n        error \"unexpected trigger count $nb_pass (in $delta sec)\"\n    else\n        echo \"OK: triggered $nb_pass times in $delta sec\"\n    fi\n\n    # terminate\n    pkill -9 $PROC\n}\n\nfunction fileclass_test\n{\n\tconfig_file=$1\n\tsleep_time=$2\n\tpolicy_str=\"$3\"\n\n\tif (( $is_lhsm + $is_hsmlite == 0 )); then\n\t\techo \"HSM test only: skipped\"\n\t\tset_skipped\n\t\treturn 1\n\tfi\n\n\tclean_logs\n\n    # initial scan\n    $RH -f $RBH_CFG_DIR/$config_file --scan -l DEBUG -L rh_chglogs.log  --once || error \"running initial scan\"\n\tcheck_db_error rh_chglogs.log\n\n\t# create test tree\n\n\tmkdir -p $RH_ROOT/dir_A # odd or A\n\tmkdir -p $RH_ROOT/dir_B # none\n\tmkdir -p $RH_ROOT/dir_C # none\n\n\t# classes are:\n\t# 1) even_and_B\n\t# 2) even_and_not_B\n\t# 3) odd_or_A\n\t# 4) none\n\n\techo \"data\" > $RH_ROOT/dir_A/file.0 #2+3\n\techo \"data\" > $RH_ROOT/dir_A/file.1 #3\n\techo \"data\" > $RH_ROOT/dir_A/file.2 #2+3\n\techo \"data\" > $RH_ROOT/dir_A/file.3 #3\n\techo \"data\" > $RH_ROOT/dir_A/file.x #3\n\techo \"data\" > $RH_ROOT/dir_A/file.y #3\n\n\techo \"data\" > $RH_ROOT/dir_B/file.0 #1\n\techo \"data\" > $RH_ROOT/dir_B/file.1 #3\n\techo \"data\" > $RH_ROOT/dir_B/file.2 #1\n\techo \"data\" > 
$RH_ROOT/dir_B/file.3 #3\n\n\techo \"data\" > $RH_ROOT/dir_C/file.0 #2\n\techo \"data\" > $RH_ROOT/dir_C/file.1 #3\n\techo \"data\" > $RH_ROOT/dir_C/file.2 #2\n\techo \"data\" > $RH_ROOT/dir_C/file.3 #3\n\techo \"data\" > $RH_ROOT/dir_C/file.x #4\n\techo \"data\" > $RH_ROOT/dir_C/file.y #4\n\n\t# policies => 2x 1), 4x 2), 8x 3), 2x 4)\n\t# matching => 2x 1), 2x 2) 2x 2+3) 9x3) 4x 4)\n\n\techo \"1bis-Sleeping $sleep_time seconds...\"\n\tsleep $sleep_time\n\n\t# read changelogs\n\tif (( $no_log )); then\n\t\techo \"2-Scanning...\"\n\t\t$RH -f $RBH_CFG_DIR/$config_file --scan -l DEBUG -L rh_chglogs.log  --once || error \"\"\n\telse\n\t\techo \"2-Reading changelogs...\"\n\t\t$RH -f $RBH_CFG_DIR/$config_file --readlog -l DEBUG -L rh_chglogs.log  --once || error \"\"\n\tfi\n\tcheck_db_error rh_chglogs.log\n\n    # check classinfo report\n    $REPORT -f $RBH_CFG_DIR/$config_file --class-info -q > rh_report.log || error \"report error\"\n    [ \"$DEBUG\" = \"1\" ] && cat rh_report.log\n\n    # fileclasses with 'report = no' are not expected in the report\n    for f in  even_files odd_files in_dir_A in_dir_B; do\n        egrep \"[ +]$f[,+]\" rh_report.log && error \"non matchable fileclass '$f' should not be in report\"\n    done\n\n    # check other fileclasses\n    # find_valueInCSVreport $logFile $typeValues $countValues $colSearch\n\t# matching => 2x 1), 2x 2) 8x 3) 2x 2+3) 4x 4)\n    expect=( 2 2 9 2 4 )\n    i=0\n    for f in  even_and_B even_and_not_B odd_or_A 'odd_or_A\\+even_and_not_B' none; do\n        val=$(egrep \"[^+]$f[^+]\" rh_report.log | cut -d ',' -f 2 | tr -d ' ')\n        echo \"$f: $val\"\n        [ \"$val\" = \"${expect[$i]}\" ] || error \"$f: ${expect[$i]} expected, got $val\"\n        ((i++))\n    done\n\n\techo \"3-Applying migration policy ($policy_str)...\"\n\t# start a migration files should not be migrated this time\n\t$RH -f $RBH_CFG_DIR/$config_file --run=migration --target=all -l FULL -L rh_migr.log   || error \"\"\n\n    [ \"$DEBUG\" = \"1\" 
] && grep action_params rh_migr.log\n\n\t# count the number of file for each policy\n\tnb_pol1=`grep action_params rh_migr.log | grep class=even_and_B | wc -l`\n\tnb_pol2=`grep action_params rh_migr.log | grep class=even_and_not_B | wc -l`\n\tnb_pol3=`grep action_params rh_migr.log | grep class=odd_or_A | wc -l`\n\tnb_pol4=`grep action_params rh_migr.log | grep class=unmatched | wc -l`\n\n\t(( $nb_pol1 == 2 )) || error \"********** TEST FAILED: wrong count of matching files for fileclass 'even_and_B': $nb_pol1\"\n\t(( $nb_pol2 == 4 )) || error \"********** TEST FAILED: wrong count of matching files for fileclass 'even_and_not_B': $nb_pol2\"\n\t(( $nb_pol3 == 8 )) || error \"********** TEST FAILED: wrong count of matching files for fileclass 'odd_or_A': $nb_pol3\"\n\t(( $nb_pol4 == 2 )) || error \"********** TEST FAILED: wrong count of matching files for fileclass 'unmatched': $nb_pol4\"\n\n    # test rbh-find -class option\n    cfg=$RBH_CFG_DIR/$config_file\n    check_find \"\" \"-f $cfg -class even_and_B -lsclass\" 2\n    check_find \"\" \"-f $cfg -b -class even_and_B -lsclass\" 2\n    check_find $RH_ROOT \"-f $cfg -class even_and_B -lsclass\" 2\n    check_find $RH_ROOT \"-f $cfg -b -class even_and_B -lsclass\" 2\n    check_find $RH_ROOT \"-f $cfg -class even* -lsclass\" 6\n    check_find $RH_ROOT \"-f $cfg -b -class even* -lsclass\" 6\n    check_find $RH_ROOT \"-f $cfg -not -class even* -lsclass\" 14\n    check_find $RH_ROOT \"-f $cfg -b -not -class even* -lsclass\" 14\n}\n\nfunction test_info_collect\n{\n\tconfig_file=$1\n\tsleep_time1=$2\n\tsleep_time2=$3\n\tpolicy_str=\"$4\"\n\n\tclean_logs\n\n\t# test reading changelogs or scanning with strange names, etc...\n\tmkdir $RH_ROOT'/dir with blanks'\n\tmkdir $RH_ROOT'/dir with \"quotes\"'\n\tmkdir \"$RH_ROOT/dir with 'quotes'\"\n\n\ttouch $RH_ROOT'/dir with blanks/file 1'\n\ttouch $RH_ROOT'/dir with blanks/file with \"double\" quotes'\n\ttouch $RH_ROOT'/dir with \"quotes\"/file with blanks'\n\ttouch \"$RH_ROOT/dir 
with 'quotes'/file with 1 quote: '\"\n\n\tsleep $sleep_time1\n\n\t# read changelogs\n\tif (( $no_log )); then\n\t\techo \"1-Scanning...\"\n\t\t$RH -f $RBH_CFG_DIR/$config_file --scan -l DEBUG -L rh_chglogs.log \\\n            --once 2>/dev/null || error \"scan\"\n\t\tnb_cr=0\n\telse\n        [ \"$DEBUG\" = \"1\" ] && $LFS changelog lustre-MDT0000\n\t\techo \"1-Reading changelogs...\"\n\t\t$RH -f $RBH_CFG_DIR/$config_file --readlog -l DEBUG -L rh_chglogs.log  \\\n            --once 2>/dev/null || error \"readlog\"\n\t\tnb_cr=4\n\tfi\n\tcheck_db_error rh_chglogs.log\n\n\tsleep $sleep_time2\n\n\tgrep \"DB query failed\" rh_chglogs.log && error \": a DB query failed when reading changelogs\"\n\n\tnb_create=`grep ChangeLog rh_chglogs.log | grep 01CREAT | wc -l`\n\tnb_close=`grep ChangeLog rh_chglogs.log | grep 11CLOSE | wc -l`\n\tnb_db_apply=`grep ': DB_APPLY' rh_chglogs.log | tail -1 | cut -d '|' -f 6 | cut -d ':' -f 2 |\n                 cut -d ',' -f 1 | tr -d ' '`\n\n    # (directories are always inserted since robinhood 2.4)\n    # 4 file + 3 dirs -> 7 changelogs\n    # (all close are suppressed)\n    ((db_expect=7))\n    close_expect=4\n\n   # special directories inserted to the database\n   ((db_expect+=$extra_dir))\n\n\tif (( $no_log == 0 )); then\n        if (( $nb_close != $close_expect )); then\n            if [[ $LVERSION = 2.[01]* ]] ; then\n                # CLOSE record is only expected since Lustre 2.2\n                # for previous versions, just display a warning\n                echo \"warning: $nb_close close record (lustre version $LVERSION), $close_expect expected\"\n            elif [[ $LVERSION = 2.[234]* ]] ; then\n                # CLOSE is expected from 2.2 to 2.4\n                error \": unexpected number of close: $nb_close / $close_expect\"\n            else\n                echo \"warning: $nb_close close record (lustre version $LVERSION), $close_expect expected\"\n            fi\n            return 1\n        fi\n    fi\n\n\tif (( 
$nb_create == $nb_cr && $nb_db_apply == $db_expect )); then\n\t\techo \"OK: $nb_cr files created, $db_expect database operations\"\n\telse\n\t\terror \": unexpected number of operations: $nb_create files created/$nb_cr, $nb_db_apply database operations/$db_expect\"\n\t\treturn 1\n\tfi\n\n\tclean_logs\n\n\techo \"2-Scanning...\"\n\t$RH -f $RBH_CFG_DIR/$config_file --scan -l DEBUG -L rh_chglogs.log \\\n        --once 2>/dev/null || error \"scan\"\n\tcheck_db_error rh_chglogs.log\n\n\tgrep \"DB query failed\" rh_chglogs.log && error \": a DB query failed when scanning\"\n\tnb_db_apply=`grep ': DB_APPLY' rh_chglogs.log | tail -1 | cut -d '|' -f 6 | cut -d ':' -f 2 |\n                 cut -d ',' -f 1 | tr -d ' '`\n\n\t# 7 db operations expected (1 for each file and dir)\n\tif (( $nb_db_apply == $db_expect )); then\n\t\techo \"OK: $db_expect database operations\"\n\telse\n#\t\tgrep ENTRIES rh_chglogs.log\n\t\terror \": unexpected number of operations: $nb_db_apply database operations/$db_expect\"\n\tfi\n}\n\n\nfunction readlog_chk\n{\n\tlocal config_file=$1\n\n\techo \"Reading changelogs...\"\n\t$RH -f $RBH_CFG_DIR/$config_file --readlog -l FULL -L rh_chglogs.log \\\n        --once 2>/dev/null || error \"reading logs\"\n\tgrep \"DB query failed\" rh_chglogs.log &&\n        error \": a DB query failed:\" \\\n              \"`grep 'DB query failed' rh_chglogs.log | tail -1`\"\n\tclean_logs\n}\n\nfunction scan_chk\n{\n\tlocal config_file=$1\n\n\techo \"Scanning...\"\n        $RH -f $RBH_CFG_DIR/$config_file --scan -l DEBUG -L rh_chglogs.log \\\n            --once 2>/dev/null || error \"scanning filesystem\"\n\tgrep \"DB query failed\" rh_chglogs.log &&\n        error \": a DB query failed:\" \\\n            \"`grep 'DB query failed' rh_chglogs.log | tail -1`\"\n\tclean_logs\n}\n\nfunction diff_chk\n{\n    local config_file=$1\n\n    echo \"Scanning with rbh-diff...\"\n    $DIFF -f $RBH_CFG_DIR/$config_file --apply=db -l DEBUG > rh_chglogs.log  2>&1 || error \"scanning 
filesystem\"\n    grep \"DB query failed\" rh_chglogs.log && error \": a DB query failed: `grep 'DB query failed' rh_chglogs.log | tail -1`\"\n    clean_logs\n}\n\nfunction check_fcount\n{\n    local nb=$1\n\n    nbfile=$($REPORT -f $RBH_CFG_DIR/$config_file -icq | grep file | awk -F ',' '{print $2}' | tr -d ' ')\n    [[ -z $nbfile ]] && nbfile=0\n    [ \"$DEBUG\" = \"1\" ] && echo \"nb_files=$nbfile\"\n\n    [[ $nbfile != $nb ]] && error \"Unexpected file count: $nbfile/$nb\"\n}\n\nfunction empty_fs\n{\n    if [[ -n \"$RH_ROOT\" ]]; then\n        find \"$RH_ROOT\" -mindepth 1 -delete 2>/dev/null\n    fi\n}\n\nfunction test_info_collect2\n{\n\tlocal config_file=$1\n\tlocal flavor=$2\n\tlocal policy_str=\"$3\"\n\n    local fcount=2000\n\n\tclean_logs\n\n\tif (($no_log != 0 && $flavor != 1 )); then\n\t\techo \"Changelogs not supported on this config: skipped\"\n\t\tset_skipped\n\t\treturn 1\n\tfi\n\n\t# create 5k entries\n    echo \"Creating $fcount files...\"\n\t$RBH_TESTS_DIR/fill_fs.sh $RH_ROOT $fcount >/dev/null\n\n\t# flavor 1: scan only x3\n\t# flavor 2: mixed (readlog/scan/readlog/scan)\n\t# flavor 3: mixed (readlog/readlog/scan/scan)\n\t# flavor 4: mixed (scan/scan/readlog/readlog)\n\t# flavor 5: diff --apply=db x2\n\n\tif (( $flavor == 1 )); then\n\t\tscan_chk $config_file\n        check_fcount $fcount\n\t\tscan_chk $config_file\n        check_fcount $fcount\n        empty_fs\n        # sleep 1 to ensure md_update >= 1s\n        sleep 1\n\t\tscan_chk $config_file\n        check_fcount 0\n\telif (( $flavor == 2 )); then\n\t\treadlog_chk $config_file\n        check_fcount $fcount\n\t\tscan_chk    $config_file\n        check_fcount $fcount\n\t\t# touch entries before reading log\n\t\t$RBH_TESTS_DIR/fill_fs.sh $RH_ROOT $fcount >/dev/null\n\t\treadlog_chk $config_file\n        check_fcount $fcount\n        empty_fs\n        # sleep 1 to ensure md_update >= 1s\n        sleep 1\n\t\tscan_chk    $config_file\n        check_fcount 0\n\telif (( $flavor == 3 )); 
then\n\t\treadlog_chk $config_file\n        check_fcount $fcount\n\t\t# touch entries before reading log again\n\t\t$RBH_TESTS_DIR/fill_fs.sh $RH_ROOT $fcount >/dev/null\n\t\treadlog_chk $config_file\n        check_fcount $fcount\n\t\tscan_chk    $config_file\n        check_fcount $fcount\n        empty_fs\n        # sleep 1 to ensure md_update >= 1s\n        sleep 1\n\t\tscan_chk    $config_file\n        check_fcount 0\n\telif (( $flavor == 4 )); then\n\t\tscan_chk    $config_file\n        check_fcount $fcount\n\t\tscan_chk    $config_file\n        check_fcount $fcount\n\t\treadlog_chk $config_file\n        check_fcount $fcount\n        empty_fs\n\t\treadlog_chk $config_file\n        check_fcount 0\n\telif (( $flavor == 5 )); then\n        diff_chk $config_file\n        check_fcount $fcount\n        empty_fs\n        # sleep 1 to ensure md_update >= 1s\n        sleep 1\n        diff_chk $config_file\n        check_fcount 0\n\telse\n\t\terror \"Unexpected test flavor '$flavor'\"\n\tfi\n\n}\n\nfunction get_db_info\n{\n    local config_file=$1\n    local field=$2\n    local entry=$3\n\n    $REPORT -f $RBH_CFG_DIR/$config_file -e $entry -c | egrep -E \"^$field,\" | cut -d ',' -f 2 | sed -e 's/^ //g'\n}\n\nfunction test_root_changelog\n{\n\tconfig_file=$1\n\tclean_logs\n\n\tif (( $no_log )); then\n    \techo \"Changelogs not supported on this config: skipped\"\n\t\tset_skipped\n\t\treturn 1\n    fi\n\n    # create a directory and a file\n    local d=$RH_ROOT/subdir\n    local f=$RH_ROOT/subdir/file\n    mkdir $d || error \"creating directory $d\"\n    id1=$(get_id $d)\n    touch $f || error \"creating file $f\"\n    id2=$(get_id $f)\n    idr=$(get_id $RH_ROOT/.)\n\n    [ \"$DEBUG\" = \"1\" ] && echo -e \"$RH_ROOT: $idr\\n$d: $id1\\n$f: $id2\"\n\n    # read the changelog\n    readlog_chk $config_file\n\n    # check the id, path and parent for $RH_ROOT, $d and $f\n    idrb=$(get_db_info $config_file id $idr | tr -d '[]')\n    [ \"$idr\" = \"$idrb\" ] || error \"id 
doesn't match: $idr != $idrb\"\n    pathr=$(get_db_info $config_file path $idr)\n    # path must be empty or match $RH_ROOT\n    [ \"$pathr\" = \"\" ] || [ \"$pathr\" = \"$RH_ROOT\" ] || error \"path doesn't match: $RH_ROOT != $pathr\"\n\n    # name and parent are supposed to be empty for ROOT\n    nr=$(get_db_info $config_file name $idr)\n    [ \"$nr\" = \"\" ] || error \"name for $RH_ROOT is not empty: '$nr'\"\n    pr=$(get_db_info $config_file parent_id $idr)\n    [ \"$pr\" = \"\" ] || error \"parent_id for $RH_ROOT is not empty: '$pr'\"\n\n    id1b=$(get_db_info $config_file id $id1 | tr -d '[]')\n    [ \"$id1\" = \"$id1b\" ] || error \"id doesn't match: '$id1' != '$id1b'\"\n    path1=$(get_db_info $config_file path $id1)\n    [ \"$DEBUG\" = \"1\" ] && echo \"$d: path=$path1\"\n    [ \"$path1\" = \"$d\" ] || error \"path doesn't match: $d != $path1\"\n    parent1=$(get_db_info $config_file parent_id $id1 | tr -d '[]')\n    [ \"$DEBUG\" = \"1\" ] && echo \"$d: parent=$parent1\"\n    [ \"$parent1\" = \"$idr\" ] || error \"parent doesn't match: $idr != $parent1\"\n\n    id2b=$(get_db_info $config_file id $id2 | tr -d '[]')\n    [ \"$id2\" = \"$id2b\" ] || error \"id doesn't match: '$id2' != '$id2b'\"\n    path2=$(get_db_info $config_file path $id2)\n    [ \"$DEBUG\" = \"1\" ] && echo \"$f: path=$path2\"\n    [ \"$path2\" = \"$f\" ] || error \"path doesn't match: $f != $path2\"\n    parent2=$(get_db_info $config_file parent_id $id2 | tr -d '[]')\n    [ \"$DEBUG\" = \"1\" ] && echo \"$f: parent=$parent2\"\n    [ \"$parent2\" = \"$id1\" ] || error \"parent doesn't match: $id1 != $parent2\"\n\n    # generate an event on $RH_ROOT and do the checks again\n    touch $RH_ROOT/.\n    sleep 1\n    # read the changelog\n    readlog_chk $config_file\n\n    # check the id, path and parent for $RH_ROOT, $d and $f\n    idrb=$(get_db_info $config_file id $idr | tr -d '[]')\n    [ \"$idr\" = \"$idrb\" ] || error \"id doesn't match: $idr != $idrb\"\n    pathr=$(get_db_info 
$config_file path $idr)\n    # path must be empty or match $RH_ROOT\n    [ \"$pathr\" = \"\" ] || [ \"$pathr\" = \"$RH_ROOT\" ] || error \"path doesn't match: $RH_ROOT != $pathr\"\n\n    # name and parent are supposed to be empty for ROOT\n    nr=$(get_db_info $config_file name $idr)\n    [ \"$nr\" = \"\" ] || error \"name for $RH_ROOT is not empty: '$nr'\"\n    pr=$(get_db_info $config_file parent_id $idr)\n    [ \"$pr\" = \"\" ] || error \"parent_id for $RH_ROOT is not empty: '$pr'\"\n\n    id1b=$(get_db_info $config_file id $id1 | tr -d '[]')\n    [ \"$id1\" = \"$id1b\" ] || error \"id doesn't match: '$id1' != '$id1b'\"\n    path1=$(get_db_info $config_file path $id1)\n    [ \"$DEBUG\" = \"1\" ] && echo \"$d: path=$path1\"\n    [ \"$path1\" = \"$d\" ] || error \"path doesn't match: $d != $path1\"\n    parent1=$(get_db_info $config_file parent_id $id1 | tr -d '[]')\n    [ \"$DEBUG\" = \"1\" ] && echo \"$d: parent=$parent1\"\n    [ \"$parent1\" = \"$idr\" ] || error \"parent doesn't match: $idr != $parent1\"\n\n    id2b=$(get_db_info $config_file id $id2 | tr -d '[]')\n    [ \"$id2\" = \"$id2b\" ] || error \"id doesn't match: '$id2' != '$id2b'\"\n    path2=$(get_db_info $config_file path $id2)\n    [ \"$DEBUG\" = \"1\" ] && echo \"$f: path=$path2\"\n    [ \"$path2\" = \"$f\" ] || error \"path doesn't match: $f != $path2\"\n    parent2=$(get_db_info $config_file parent_id $id2 | tr -d '[]')\n    [ \"$DEBUG\" = \"1\" ] && echo \"$f: parent=$parent2\"\n    [ \"$parent2\" = \"$id1\" ] || error \"parent doesn't match: $id1 != $parent2\"\n}\n\nfunction partial_paths\n{\n\tconfig_file=$1\n\tclean_logs\n\n    # create a tree\n    mkdir -p $RH_ROOT/dir1/dir2\n    mkdir -p $RH_ROOT/dir3\n    touch $RH_ROOT/file1\n    touch $RH_ROOT/dir1/file2\n    touch $RH_ROOT/dir1/dir2/file3\n    touch $RH_ROOT/dir3/file4\n\n    # initial scan\n    $RH -f $RBH_CFG_DIR/$config_file --scan --once -l EVENT -L rh_scan.log  || error \"performing initial scan\"\n    check_db_error 
rh_scan.log\n\n    # remove a path component from the DB\n    id=$(get_id $RH_ROOT/dir1/dir2)\n    [ -z $id ] && error \"could not get id\"\n    # FIXME only for Lustre 2.x\n    mysql $RH_DB -e \"DELETE FROM NAMES WHERE id='$id'\" || error \"DELETE request\"\n\n\tif (( $is_hsmlite + $is_lhsm > 0 )); then\n        # check how a child entry is archived\n        $RH -f $RBH_CFG_DIR/$config_file $SYNC_OPT -l DEBUG -L rh_migr.log\n        check_db_error rh_migr.log\n        if (( $is_hsmlite > 0 )); then\n            name=$(find $BKROOT -type f -name \"file3__*\")\n            cnt=$(echo $name | wc -w)\n            (( $cnt == 1 )) || error \"1 file expected to match file 3 in backend, $cnt found\"\n            echo \"file3 archived as $name\"\n\t    else\n\t\t    wait_done 60\n        fi\n    fi\n\n    # check what --dump reports\n    f3=$($REPORT -f $RBH_CFG_DIR/$config_file --dump --csv -q | grep \"file3\" | awk '{print $(NF)}')\n    echo \"file3 reported with path $f3\"\n    [[ $f3 = /* ]] && [[ $f3 != $RH_ROOT/dir1/dir2/file3 ]] && error \"$f3 : invalid fullpath\"\n\n    # check filter path behavior\n    # should report at least file2 (and optionally file3 : must check its path is valid)\n    f2=$($REPORT -f $RBH_CFG_DIR/$config_file --dump --csv -q -P \"$RH_ROOT/dir1\" | grep file2 | awk '{print $(NF)}')\n    [[ -n $f2 ]] && echo \"file2 reported with path $f2\"\n    [[ $f2 != $RH_ROOT/dir1/file2 ]] && error \"wrong path reported for file2: $f2\"\n\n    f3=$($REPORT -f $RBH_CFG_DIR/$config_file --dump --csv -q -P \"$RH_ROOT/dir1\" | grep file3 | awk '{print $(NF)}')\n    [[ -n $f3 ]] && echo \"file3 reported with path $f3\"\n    [[ $f3 = /* ]] && [[ $f3 != $RH_ROOT/dir1/dir2/file3 ]] && error \"$f3 : invalid fullpath\"\n\n    f3=$($REPORT -f $RBH_CFG_DIR/$config_file --dump --csv -q -P \"$RH_ROOT/dir1/dir2\" | grep file)\n    [[ -n $f3 ]] && echo \"file3 reported with path $f3\"\n    [[ $f3 = /* ]] && [[ $f3 != $RH_ROOT/dir1/dir2/file3 ]] && error \"$f3 : invalid 
fullpath\"\n\n    # check find behavior\n    # find cannot go into dir2\n    $FIND -f $RBH_CFG_DIR/$config_file $RH_ROOT/dir1 | grep dir2 && echo \"$RH_ROOT/dir1/dir2 reported?!\"\n    # starting from dir2 fid, it can list file3 in it\n    f3=$($FIND -f $RBH_CFG_DIR/$config_file $RH_ROOT/dir1/dir2 | grep file3)\n    echo \"find: $f3\"\n    [[ $f3 = $RH_ROOT/dir1/dir2/file3 ]] || error \"$f3 : invalid fullpath\"\n\n    # like find, should count file3\n    fc=$($DU -d -f $RBH_CFG_DIR/$config_file $RH_ROOT/dir1/dir2 | grep \"file count\" | cut -d ':' -f 2 | cut -d ',' -f 1)\n    [[ $fc = 1 ]] || error \"expected filecount in $RH_ROOT/dir1/dir2: 1 (got $fc)\"\n\n    # check -e report\n    # dir2 should be in DB, even with no path\n    $REPORT -f $RBH_CFG_DIR/$config_file --csv -e \"$RH_ROOT/dir1/dir2\" | grep \"md updt\" || error \"$RH_ROOT/dir1/dir2 should have a DB entry\"\n\n    $REPORT -f $RBH_CFG_DIR/$config_file --csv -e \"$RH_ROOT/dir1/dir2/file3\"  > report.log || error \"report error for $RH_ROOT/dir1/dir2/file3\"\n    grep \"md updt\" report.log || error \"$RH_ROOT/dir1/dir2/file3 should have a DB entry\"\n    f3=$(egrep \"^path,\" report.log)\n    [[ $f3 = /* ]] && [[ $f3 != $RH_ROOT/dir1/dir2/file3 ]] && error \"$f3 : invalid fullpath\"\n\tif (( $is_hsmlite > 0 )); then\n        b3=$(grep \"backend_path,\" report.log | cut -d ',' -f 2)\n        # b3 should be in 'dir2' or in '__unknown_path'\n        echo $b3 | egrep \"dir1/dir2|unknown_path\" || error \"unexpected backend path $b3\"\n    fi\n\n    # check what rm does (+undelete)\n    if (( $no_log==0 )); then\n        # This test was introduced by: 00c3cfd4263679df54bfffbc875419de503e8dcf\n        # It seems to be related to the backup module. This test fails for the\n        # LUSTRE_HSM flavor. 
rbh-undelete will fail if we remove the changelogs\n        # here because the parent has been deleted from the DB.\n        if (( $is_hsmlite )); then\n            $LFS changelog_clear lustre-MDT0000 cl1 0\n        fi\n\n        rm -f $RH_ROOT/dir1/dir2/file3\n        readlog_chk $config_file\n\n        if (( $is_lhsm + $is_hsmlite > 0 )); then\n            $REPORT -f $RBH_CFG_DIR/$config_file -Rcq > report.log\n            [ \"$DEBUG\" = \"1\" ] && cat report.log\n            nb=$(cat report.log | grep file3 | wc -l)\n            (($nb == 1)) || error \"file3 not reported in remove-pending list\"\n            f3=$(cat report.log | grep file3 | awk '{print $(NF)}')\n            [[ $f3 = /* ]] && [[ $f3 != $RH_ROOT/dir1/dir2/file3 ]] && error \"$f3 : invalid fullpath\"\n        fi\n\n        if (( $is_hsmlite + $is_lhsm > 0 )); then\n            $UNDELETE -f $RBH_CFG_DIR/$config_file -R '*/file3' -l DEBUG || error \"undeleting file3\"\n            find $RH_ROOT -name \"file3\" -ls | tee report.log\n            (( $(wc -l report.log | awk '{print $1}') == 1 )) || error \"file3 not restored\"\n        fi\n    fi\n\n    rm -f report.log\n}\n\nfunction test_mnt_point\n{\n\tconfig_file=$1\n\tclean_logs\n\n    export fs_path=$RH_ROOT/subdir # retrieved from env when parsing config file\n\n    local dir_rel=\"dir1 dir2\"\n    local file_rel=\"dir1/file.1 dir1/file.2 dir2/file.3 file.4\"\n\n    for d in $dir_rel; do\n        mkdir -p $fs_path/$d || error mkdir\n    done\n    for f in $file_rel; do\n        touch $fs_path/$f || error touch\n    done\n\n    # scan the filesystem\n    $RH -f $RBH_CFG_DIR/$config_file --scan --once -l EVENT -L rh_scan.log  || error \"performing initial scan\"\n    check_db_error rh_scan.log\n\n    # check that rbh-find output is correct (2 methods)\n    for opt in \"-nobulk $fs_path\" \"$fs_path\" \"-nobulk\" \"\"; do\n        echo \"checking output for rbh-find $opt...\"\n        $FIND -f $RBH_CFG_DIR/$config_file $opt > rh_report.log\n    
    for e in $dir_rel $file_rel; do\n            egrep -E \"^$fs_path/$e$\" rh_report.log || error \"$e not found in rbh-find output\"\n        done\n    done\n\n    # check that rbh-report output is correct\n    $REPORT -f $RBH_CFG_DIR/$config_file -q --dump | awk '{print $(NF)}'> rh_report.log\n    [ \"$DEBUG\" = \"1\" ] && cat rh_report.log\n    for e in $dir_rel $file_rel; do\n        egrep -E \"^$fs_path/$e$\" rh_report.log || error \"$e not found in report output\"\n    done\n\n    # backup: check that backend path is correct\n    if (( $is_hsmlite > 0 )); then\n        # wait atime > 1s\n        sleep 1\n        $RH -f $RBH_CFG_DIR/$config_file $SYNC_OPT -l DEBUG -L rh_migr.log\n        check_db_error rh_migr.log\n\n        for e in $file_rel; do\n            ls -d $BKROOT/${e}__* || error \"$BKROOT/$e* not found in backend\"\n        done\n    fi\n}\n\nfunction uid_gid_as_numbers\n{\n\tconfig_file=$1\n\n\tclean_logs\n    $CFG_SCRIPT empty_db $RH_DB > /dev/null\n\n    # create the following files with different owners/groups:\n    #\n    #   -rw-r--r-- 1       0      0     10 Jun  7 13:40 file1\n    #   -rw-r--r-- 1      12     16    100 Jun  7 13:40 file2\n    #   -rw-r--r-- 1 7856568 345654   1000 Jun  7 13:40 file3\n    #   -rw-r--r-- 1       0 645767  10000 Jun  7 13:40 file4\n    #   -rw-r--r-- 1 3476576      0 100000 Jun  7 13:40 file5\n\n    echo \"1-Creating files...\"\n    rm -f $RH_ROOT/file[1-4]\n    dd if=/dev/zero of=$RH_ROOT/file1 bs=10 count=1 >/dev/null 2>/dev/null || error \"writing file\"\n    dd if=/dev/zero of=$RH_ROOT/file2 bs=100 count=1 >/dev/null 2>/dev/null || error \"writing file\"\n    dd if=/dev/zero of=$RH_ROOT/file3 bs=1000 count=1 >/dev/null 2>/dev/null || error \"writing file\"\n    dd if=/dev/zero of=$RH_ROOT/file4 bs=10000 count=1 >/dev/null 2>/dev/null || error \"writing file\"\n    dd if=/dev/zero of=$RH_ROOT/file5 bs=100000 count=1 >/dev/null 2>/dev/null || error \"writing file\"\n\n    chown 0:0 $RH_ROOT/file1\n    chown 
12:16 $RH_ROOT/file2\n    chown 7856568:345654 $RH_ROOT/file3\n    chown 0:645767 $RH_ROOT/file4\n    chown 3476576:0 $RH_ROOT/file5\n\n    echo \"2-Initial scan of empty filesystem\"\n    $RH -f $RBH_CFG_DIR/$config_file --scan -l FULL -L rh_scan.log  --once || error \"\"\n\n    echo \"3-Check report\"\n\t$REPORT -f $RBH_CFG_DIR/$config_file -D --csv > rh_report.log\n    egrep --quiet \"\\s0,\\s+0,.*/file1\" rh_report.log || error \"bad for file1\"\n    egrep --quiet \"\\s12,\\s+16,.*/file2\" rh_report.log || error \"bad for file2\"\n    egrep --quiet \"\\s7856568,\\s+345654,.*/file3\" rh_report.log || error \"bad for file3\"\n    egrep --quiet \"\\s0,\\s+645767,.*/file4\" rh_report.log || error \"bad for file4\"\n    egrep --quiet \"\\s3476576,\\s+0,.*/file5\" rh_report.log || error \"bad for file5\"\n\n    $REPORT -f $RBH_CFG_DIR/$config_file --top-user > rh_report.log\n    # spc used (4th field) depends on filesystem preallocation algorithm: don't rely on it.\n    grep --quiet -e \"1,    3476576,   97.66 KB, [^,]*,[ ]* 1,   97.66 KB\" rh_report.log || error \"bad top user1\"\n    grep --quiet -e \"2,          0,    9.78 KB, [^,]*,[ ]* 2,    4.89 KB\" rh_report.log || error \"bad top user3\"\n    grep --quiet -e \"3,    7856568,       1000, [^,]*,[ ]* 1,       1000\" rh_report.log || error \"bad top user3\"\n    grep --quiet -e \"4,         12,        100, [^,]*,[ ]* 1,        100\" rh_report.log || error \"bad top user3\"\n\n    $REPORT -f $RBH_CFG_DIR/$config_file --top-size > rh_report.log\n    grep --quiet -e \"1, [ ]* $RH_ROOT/file5,   97.66 KB,    3476576,\" rh_report.log || error \"bad top size1\"\n    grep --quiet -e \"5, [ ]* $RH_ROOT/file1,         10,          0,\" rh_report.log || error \"bad top size2\"\n\n    $REPORT -f $RBH_CFG_DIR/$config_file --dump > rh_report.log\n    grep --quiet \"file,       1000,    7856568,     345654,.*/file3\" rh_report.log || error \"bad dump1\"\n    grep --quiet \"Total: 5 entries, 111110 bytes\" rh_report.log || 
error \"bad dump2\"\n\n    $REPORT -f $RBH_CFG_DIR/$config_file --dump-user=root > rh_report.log\n    grep --quiet \"Total: 2 entries, 10010 bytes (9.78 KB)\" rh_report.log || error \"bad dump user root\"\n\n    $REPORT -f $RBH_CFG_DIR/$config_file --dump-user=0 > rh_report.log\n    grep --quiet \"Total: 2 entries, 10010 bytes (9.78 KB)\" rh_report.log || error \"bad dump user 0\"\n\n    $REPORT -f $RBH_CFG_DIR/$config_file --dump-user=7856568 > rh_report.log\n    grep --quiet \"Total: 1 entries, 1000 bytes\" rh_report.log || error \"bad dump user 7856568\"\n\n    $REPORT -f $RBH_CFG_DIR/$config_file --dump-group=root > rh_report.log\n    grep --quiet \"Total: 2 entries, 100010 bytes (97.67 KB)\" rh_report.log || error \"bad dump group root\"\n\n    $REPORT -f $RBH_CFG_DIR/$config_file --dump-group=0 > rh_report.log\n    grep --quiet \"Total: 2 entries, 100010 bytes (97.67 KB)\" rh_report.log || error \"bad dump group root\"\n\n    $REPORT -f $RBH_CFG_DIR/$config_file --dump-group=645767 > rh_report.log\n    grep --quiet \"Total: 1 entries, 10000 bytes\" rh_report.log || error \"bad dump group 645767\"\n\n    $REPORT -f $RBH_CFG_DIR/$config_file --fs-info > rh_report.log\n    grep --quiet \"Total: 5 entries, volume: 111110 bytes\" rh_report.log || error \"bad fs info\"\n\n    $REPORT -f $RBH_CFG_DIR/$config_file --user-info=root > rh_report.log\n    grep --quiet \"Total: 2 entries, volume: 10010 bytes\" rh_report.log || error \"bad info for user root\"\n\n    $REPORT -f $RBH_CFG_DIR/$config_file --user-info=0 > rh_report.log\n    grep --quiet \"Total: 2 entries, volume: 10010 bytes\" rh_report.log || error \"bad info for user 0\"\n\n    $REPORT -f $RBH_CFG_DIR/$config_file --user-info=7856568 > rh_report.log\n    grep --quiet \"Total: 1 entries, volume: 1000 bytes\" rh_report.log || error \"bad info for user 7856568\"\n\n    $REPORT -f $RBH_CFG_DIR/$config_file --group-info=0 > rh_report.log\n    grep --quiet \"Total: 2 entries, volume: 100010 bytes\" rh_report.log 
|| error \"bad info for group 0\"\n\n    $REPORT -f $RBH_CFG_DIR/$config_file --group-info=root > rh_report.log\n    grep --quiet \"Total: 2 entries, volume: 100010 bytes\" rh_report.log || error \"bad info for group root\"\n\n    $REPORT -f $RBH_CFG_DIR/$config_file --group-info=645767 > rh_report.log\n    grep --quiet \"Total: 1 entries, volume: 10000 bytes\" rh_report.log || error \"bad info for group 645767\"\n\n    $REPORT -f $RBH_CFG_DIR/$config_file --entry-info=$RH_ROOT/file1 > rh_report.log\n    grep --quiet \"user           : \t0$\" rh_report.log || error \"bad user for entry file1\"\n    grep --quiet \"group          : \t0$\" rh_report.log || error \"bad group for entry file1\"\n\n    $REPORT -f $RBH_CFG_DIR/$config_file --entry-info=$RH_ROOT/file3 > rh_report.log\n    grep --quiet \"user           : \t7856568$\" rh_report.log || error \"bad user for entry file3\"\n    grep --quiet \"group          : \t345654$\" rh_report.log || error \"bad group for entry file3\"\n\n    $REPORT -f $RBH_CFG_DIR/$config_file --classinfo > rh_report.log\n    wc -l < rh_report.log | grep --quiet \"^8$\" || error \"bad classinfo report\"\n\n    $REPORT -f $RBH_CFG_DIR/$config_file --classinfo=uroot1 > rh_report.log\n    grep --quiet \"Total: 2 entries, volume: 10010 bytes\" rh_report.log || error \"bad info for class root1\"\n\n    $REPORT -f $RBH_CFG_DIR/$config_file --classinfo=uroot2 > rh_report.log\n    grep --quiet \"Total: 2 entries, volume: 10010 bytes\" rh_report.log || error \"bad info for class root2\"\n\n    $REPORT -f $RBH_CFG_DIR/$config_file --classinfo=u7856568 > rh_report.log\n    grep --quiet \"Total: 1 entries, volume: 1000 bytes\" rh_report.log || error \"bad info for class u7856568\"\n\n    $REPORT -f $RBH_CFG_DIR/$config_file --classinfo=g645767 > rh_report.log\n    grep --quiet \"Total: 1 entries, volume: 10000 bytes\" rh_report.log || error \"bad info for class g645767\"\n\n    $REPORT -f $RBH_CFG_DIR/$config_file --classinfo=groot > rh_report.log\n    
grep --quiet \"Total: 2 entries, volume: 100010 bytes\" rh_report.log || error \"bad info for class groot\"\n\n    $REPORT -f $RBH_CFG_DIR/$config_file --classinfo=mix > rh_report.log\n    grep --quiet \"Total: 3 entries, volume: 101010 bytes\" rh_report.log || error \"bad info for class mix\"\n\n    echo \"4-Check rbh-find\"\n    # rbh-find will also find the mount point which belong to root.\n    $FIND -f $RBH_CFG_DIR/$config_file $RH_ROOT -ls > find.out\n    egrep --quiet \"\\s0\\s+0\\s.*/file1\" find.out || error \"bad for file1\"\n    egrep --quiet \"\\s12\\s+16\\s.*/file2\" find.out || error \"bad for file2\"\n    egrep --quiet \"\\s7856568\\s+345654\\s.*/file3\" find.out || error \"bad for file3\"\n    egrep --quiet \"\\s0\\s+645767\\s.*/file4\" find.out || error \"bad for file4\"\n    egrep --quiet \"\\s3476576\\s+0\\s.*/file5\" find.out || error \"bad for file5\"\n\n    $FIND -f $RBH_CFG_DIR/$config_file $RH_ROOT -ls -user 12 > find.out\n    wc -l < find.out | grep --quiet \"^1$\" || error \"incorrect number of files found1\"\n\n    $FIND -f $RBH_CFG_DIR/$config_file $RH_ROOT -ls -user 0 > find.out\n    wc -l < find.out | grep --quiet \"^3$\" || error \"incorrect number of files found2\"\n\n    $FIND -f $RBH_CFG_DIR/$config_file $RH_ROOT -ls -not -user 7856568 > find.out\n    wc -l < find.out | grep --quiet \"^5$\" || error \"incorrect number of files found3\"\n\n    $FIND -f $RBH_CFG_DIR/$config_file $RH_ROOT -ls -group 0 > find.out\n    wc -l < find.out | grep --quiet \"^3$\" || error \"incorrect number of files found4\"\n\n    $FIND -f $RBH_CFG_DIR/$config_file $RH_ROOT -ls -group 645767 > find.out\n    wc -l < find.out | grep --quiet \"^1$\" || error \"incorrect number of files found5\"\n\n    $FIND -f $RBH_CFG_DIR/$config_file $RH_ROOT -ls -not -group 345654 > find.out\n    wc -l < find.out | grep --quiet \"^5$\" || error \"incorrect number of files found6\"\n\n    echo \"5-Check rbh-find with printf\"\n    $FIND -f $RBH_CFG_DIR/$config_file $RH_ROOT 
-printf \"%p:%g:%u\\n\" > find.out\n    grep --quiet \"$RH_ROOT:0:0\" find.out\n    grep --quiet \"$RH_ROOT/file1:0:0\" find.out || error \"bad for file1\"\n    grep --quiet \"$RH_ROOT/file2:16:12\" find.out || error \"bad for file2\"\n    grep --quiet \"$RH_ROOT/file3:345654:7856568\" find.out || error \"bad for file3\"\n    grep --quiet \"$RH_ROOT/file4:645767:0\" find.out || error \"bad for file4\"\n    grep --quiet \"$RH_ROOT/file5:0:3476576\" find.out || error \"bad for file5\"\n\n    echo \"6-Check rbh-du\"\n    # Each file has a precise size, so we know what the result in\n    # bytes will be for any combination\n    $DU -f $RBH_CFG_DIR/$config_file -t f -b $RH_ROOT | egrep --quiet \"^111110\\s\" || error \"bad sum1\"\n    $DU -f $RBH_CFG_DIR/$config_file -t f -b -u 0 $RH_ROOT | egrep  --quiet \"^10010\\s\" || error \"bad sum2\"\n    $DU -f $RBH_CFG_DIR/$config_file -t f -b -u 12 $RH_ROOT | egrep  --quiet \"^100\\s\" || error \"bad sum3\"\n    $DU -f $RBH_CFG_DIR/$config_file -t f -b -u 1234 $RH_ROOT | egrep  --quiet \"^0\\s\" || error \"bad sum4\"\n    $DU -f $RBH_CFG_DIR/$config_file -t f -b -g 0 $RH_ROOT | egrep  --quiet \"^100010\\s\" || error \"bad sum5\"\n    $DU -f $RBH_CFG_DIR/$config_file -t f -b -g 645767 $RH_ROOT | egrep  --quiet \"^10000\\s\" || error \"bad sum6\"\n    $DU -f $RBH_CFG_DIR/$config_file -t f -b -g 1234 $RH_ROOT | egrep  --quiet \"^0\\s\" || error \"bad sum7\"\n    $DU -f $RBH_CFG_DIR/$config_file -t f -b -u 0 -g 16 $RH_ROOT | egrep --quiet \"^0\\s\" || error \"bad sum8\"\n    $DU -f $RBH_CFG_DIR/$config_file -t f -b -u 0 -g 0 $RH_ROOT | egrep --quiet \"^10\\s\" || error \"bad sum9\"\n}\n\n# Create a file and touch it to set atime/mtime. Check that crtime and\n# ctime are properly set, using the search criteria of rbh-find, and\n# display of rbh-report. 
Check that creation_time never changes while\n# ctime does.\nfunction posix_acmtime\n{\n    config_file=$1\n    local cfg=$RBH_CFG_DIR/$config_file\n    clean_logs\n\n    local org_RBH_TEST_LAST_ACCESS_ONLY_ATIME=${RBH_TEST_LAST_ACCESS_ONLY_ATIME}\n    export RBH_TEST_LAST_ACCESS_ONLY_ATIME=yes\n\n    # create file\n    echo \"1-Creating file...\"\n    rm -f $RH_ROOT/file\n    dd if=/dev/zero of=$RH_ROOT/file bs=10 count=1 >/dev/null 2>/dev/null || error \"writing file\"\n\n    # Set a given atime and mtime. touch can't change ctime.\n    touch -m -t 201004171230 $RH_ROOT/file\n    touch -a -t 201004171300 $RH_ROOT/file\n    stat $RH_ROOT/file | grep --quiet \"Modify: 2010-04-17 12:30:00\" || error \"bad mtime\"\n    stat $RH_ROOT/file | grep --quiet \"Access: 2010-04-17 13:00:00\" || error \"bad atime\"\n\n    echo \"2-Initial scan of filesystem\"\n    $RH -f $cfg --scan -l FULL -L rh_scan.log  --once || error \"\"\n\n    $REPORT -f $RBH_CFG_DIR/$config_file -e $RH_ROOT/file > report.out\n\n    # Check that the DB has the correct atime and mtime\n    egrep --quiet \"last_mod\\s+:\\s+2010/04/17 12:30:00\" report.out || error \"bad mtime\"\n    egrep --quiet \"last_access\\s+:\\s+2010/04/17 13:00:00\" report.out || error \"bad atime\"\n\n    # Ensure that the DB and FS agree on atime/mtime and ctime, this\n    # time using rbh-find.\n    local real_acmtime=$(stat -c '%X %Y %Z' $RH_ROOT/file)\n    local db_acmtime=$($FIND $RH_ROOT/ -type f -f $RBH_CFG_DIR/$config_file -printf \"%As %Ts %Cs\")\n    [[ $real_acmtime == $db_acmtime ]] || error \"FS and DB times don't match1\"\n\n    local crtime=$(egrep \"creation\\s+:\" report.out)\n    local ctime=$(egrep \"last_mdchange\\s+:\" report.out)\n\n    # Check crtime and ctime in a small time interval\n    [[ $($FIND $RH_ROOT/ -type f -f $RBH_CFG_DIR/$config_file -crtime -30s) ]] || error \"file not found1\"\n    [[ $($FIND $RH_ROOT/ -type f -f $RBH_CFG_DIR/$config_file -crtime +1s) ]] && error \"file found1\"\n\n    [[ 
$($FIND $RH_ROOT/ -type f -f $RBH_CFG_DIR/$config_file -ctime -30s) ]] || error \"file not found2\"\n    [[ $($FIND $RH_ROOT/ -type f -f $RBH_CFG_DIR/$config_file -ctime +1s) ]] && error \"file found2\"\n\n    echo \"3-Change mtime\"\n\n    # Sleep long enough for the time to change by at least one second, so ctime will be\n    # updated when the file is touched.\n    sleep 5\n\n    # Again, check crtime and ctime. Both must fail as the file is at\n    # least 5s old now.\n    [[ $($FIND $RH_ROOT/ -type f -f $RBH_CFG_DIR/$config_file -crtime -1s) ]] && error \"file found3\"\n    [[ $($FIND $RH_ROOT/ -type f -f $RBH_CFG_DIR/$config_file -ctime -1s) ]] && error \"file found4\"\n\n    # Make mtime > atime. Normally last_access would take the value of\n    # the most recent of atime and mtime, but with the\n    # last_access_only_atime option, it should stay at atime.\n    touch -m -t 201004171400 $RH_ROOT/file\n    stat $RH_ROOT/file | grep --quiet \"Modify: 2010-04-17 14:00:00\" || error \"bad mtime\"\n    stat $RH_ROOT/file | grep --quiet \"Access: 2010-04-17 13:00:00\" || error \"bad atime\"\n    $RH -f $cfg --scan -l FULL -L rh_scan.log  --once || error \"\"\n    $REPORT -f $RBH_CFG_DIR/$config_file -e $RH_ROOT/file > report.out\n\n    # Again, check crtime and ctime. 
Hopefully less than 5 seconds\n    # elapsed between touch and this command.\n    [[ $($FIND $RH_ROOT/ -type f -f $RBH_CFG_DIR/$config_file -crtime -4s) ]] && error \"file found1\"\n    [[ $($FIND $RH_ROOT/ -type f -f $RBH_CFG_DIR/$config_file -ctime -4s) ]] || error \"file not found2\"\n\n    egrep --quiet \"last_mod\\s+:\\s+2010/04/17 14:00:00\" report.out || error \"bad mtime\"\n    egrep --quiet \"last_access\\s+:\\s+2010/04/17 13:00:00\" report.out || error \"bad atime\"\n\n    # Check that FS and DB agree, using rbh-find\n    local real_acmtime=$(stat -c '%X %Y %Z' $RH_ROOT/file)\n    local db_acmtime=$($FIND $RH_ROOT/ -type f -f $RBH_CFG_DIR/$config_file -printf \"%As %Ts %Cs\")\n    [[ $real_acmtime == $db_acmtime ]] || error \"FS and DB times don't match2\"\n\n    # Check that FS and DB agree, using rbh-report\n    local newcrtime=$(egrep \"creation\\s+:\" report.out)\n    local newctime=$(egrep \"last_mdchange\\s+:\" report.out)\n\n    [[ $ctime != $newctime ]] || error \"ctime hasn't changed\"\n    [[ $crtime = $newcrtime ]] || error \"creation time has changed\"\n\n    ctime=$newctime\n\n    echo \"4-Change atime\"\n    touch -a -t 201004171600 $RH_ROOT/file\n\n    stat $RH_ROOT/file | grep --quiet \"Modify: 2010-04-17 14:00:00\" || error \"bad mtime\"\n    stat $RH_ROOT/file | grep --quiet \"Access: 2010-04-17 16:00:00\" || error \"bad atime\"\n\n    $RH -f $cfg --scan -l FULL -L rh_scan.log  --once || error \"\"\n\n    # Check that FS and DB agree, using rbh-find\n    local real_acmtime=$(stat -c '%X %Y %Z' $RH_ROOT/file)\n    local db_acmtime=$($FIND $RH_ROOT/ -type f -f $RBH_CFG_DIR/$config_file -printf \"%As %Ts %Cs\")\n    [[ $real_acmtime == $db_acmtime ]] || error \"FS and DB times don't match3\"\n\n    # Check that FS and DB agree, using rbh-report\n    local newcrtime=$(egrep \"creation\\s+:\" report.out)\n    local newctime=$(egrep \"last_mdchange\\s+:\" report.out)\n\n    [[ $ctime = $newctime ]] || error \"ctime has changed\"\n    [[ 
$crtime = $newcrtime ]] || error \"creation time has changed\"\n\n    export RBH_TEST_LAST_ACCESS_ONLY_ATIME=${org_RBH_TEST_LAST_ACCESS_ONLY_ATIME}\n}\n\n# check that changing ACCT schema updates triggers code\nfunction check_acct_update_triggers\n{\n    local log=$1\n\n    grep \"dropping and repopulating table ACCT_STAT\" $log || return 0\n\n    # there was an ACCT_STAT change, triggers should have been updated\n    grep \"trigger ACCT_ENTRY\" $log || error \"Triggers should have been updated\"\n}\n\n# test various scenarios of DB schema changes\nfunction db_schema_convert\n{\n    local schema25=$RBH_CFG_DIR/rbh25.sql\n    local cfg1=$RBH_CFG_DIR/test1.conf\n    local cfg2=$RBH_CFG_DIR/test_checker.conf\n    local cfg3=$RBH_CFG_DIR/test_checker_invert.conf\n\n    # import Robinhood 2.5 DB schema\n    mysql $RH_DB < $schema25 || error \"importing DB schema\"\n    # set the right FS path to allow running robinhood commands\n    mysql $RH_DB -e \"UPDATE VARS SET value='$RH_ROOT' WHERE varname='FS_Path'\"\n\n    local nbent=100\n    populate $nbent\n\n    :> rh.log\n    echo \"rbh-report\"\n    # run rbhv3 report\n    $REPORT -f $cfg1 -i -l FULL 2>&1 > rh.log\n    [ \"$DEBUG\" = \"1\" ] && cat rh.log\n\n    # robinhood should have suggested to run '--alter-db'\n    grep \"Run 'robinhood --alter-db'\" rh.log || error \"robinhood should have reported DB schema changes\"\n\n    # no ALTER TABLE expected\n    grep \"ALTER TABLE\" rh.log && error \"no ALTER TABLE expected\"\n\n    :> rh.log\n    echo \"robinhood (no alter-db)\"\n    # run a simple rbhv3 over this initial schema\n    $RH -f $cfg1 --scan --once -l FULL -L rh.log\n\n    # robinhood should have suggested to run '--alter-db'\n    grep \"Run 'robinhood --alter-db'\" rh.log || error \"robinhood should have reported DB schema changes\"\n    # it should change the default size\n    grep \"Changing default value of 'ENTRIES.size'\" rh.log || error \"default value should have been changed\"\n    grep \"ALTER COLUMN 
size SET DEFAULT 0\" rh.log || error \"change of default size expected\"\n\n    # no other ALTER TABLE expected\n    grep -v \"SET DEFAULT\" rh.log | grep \"ALTER TABLE\" && error \"no ALTER TABLE expected\"\n\n    :> rh.log\n    echo \"robinhood --alter-db\"\n    # run alter DB on initial schema\n    $RH -f $cfg1 --alter-db -l FULL -L rh.log\n\n    [ \"$DEBUG\" = \"1\" ] && cat rh.log\n    grep \"ALTER TABLE\" rh.log || error \"ALTER TABLE expected\"\n    check_acct_update_triggers rh.log\n\n    # after alter, no more DB change should be reported\n    :> rh.log\n\n    echo \"robinhood on converted DB\"\n    $RH -f $cfg1 --scan --once -l VERB -L rh.log\n    grep \"DB schema change detected\" rh.log && error \"DB should be right\"\n    check_db_error rh.log\n    [ \"$DEBUG\" = \"1\" ] && cat rh.log\n    :> rh.log\n    $REPORT -f $cfg1 -i > rh.log || error \"Report should succeed\"\n    grep \"Run 'robinhood --alter-db'\" rh.log && error \"DB should be right\"\n    [ \"$DEBUG\" = \"1\" ] && cat rh.log\n    config_file=$(basename $cfg1) check_fcount $nbent\n\n    # now use cfg2\n    :> rh.log\n    echo \"cfg2: robinhood (no alter-db)\"\n    # run a simple rbhv3 over this initial schema\n    $RH -f $cfg2 --scan --once -l FULL -L rh.log\n\n    # robinhood should have suggested to run '--alter-db'\n    grep \"Run 'robinhood --alter-db'\" rh.log || error \"robinhood should have reported DB schema changes\"\n\n    # no ALTER TABLE expected\n    grep \"ALTER TABLE\" rh.log && error \"no ALTER TABLE expected\"\n\n    :> rh.log\n    echo \"cfg2: robinhood --alter-db\"\n    # run alter DB on initial schema\n    $RH -f $cfg2 --alter-db -l FULL -L rh.log\n\n    [ \"$DEBUG\" = \"1\" ] && cat rh.log\n    grep \"ALTER TABLE\" rh.log || error \"ALTER TABLE expected\"\n    check_acct_update_triggers rh.log\n\n    # after alter, no more DB change should be reported\n    :> rh.log\n    echo \"cfg2: robinhood on converted DB\"\n    $RH -f $cfg2 --scan --once -l VERB -L rh.log\n    grep 
\"DB schema change detected\" rh.log && error \"DB should be right\"\n    check_db_error rh.log\n    [ \"$DEBUG\" = \"1\" ] && cat rh.log\n    :> rh.log\n    $REPORT -f $cfg2 -i > rh.log || error \"Report should succeed\"\n    grep \"Run 'robinhood --alter-db'\" rh.log && error \"DB should be right\"\n    [ \"$DEBUG\" = \"1\" ] && cat rh.log\n    config_file=$(basename $cfg2) check_fcount $nbent\n\n    # test inversion only if the tested mode has a status manager\n    if [[ \"$STATUS_MGR\" != \"none\" ]]; then\n\t    # now test inversion with cfg3\n\t    :> rh.log\n\t    echo \"cfg3: robinhood (no alter-db)\"\n\t    # run a simple rbhv3 over this initial schema\n\t    $RH -f $cfg3 --scan --once -l FULL -L rh.log\n\n\t    # robinhood must report field shuffling\n\t    grep \"Shuffled DB fields\" rh.log || error \"lismgr should report shuffled fields\"\n\t    # alter is only required for the ACCT_STAT table\n\t    grep \"Run 'robinhood --alter-db'\" rh.log | grep -v \"modification in ACCT_STAT\" && error \"lismgr should deal with field shuffling\"\n\t    # no ALTER TABLE expected\n\t    grep \"ALTER TABLE\" rh.log && error \"no ALTER TABLE expected\"\n\n\t    :> rh.log\n\t    # scan successful?\n\t    echo \"cfg3: scan\"\n\t    $RH -f $cfg3 --scan --once --alter-db -l MAJOR -L rh.log || error \"scan failed\"\n\t    grep \"FS Scan finished\" rh.log || error \"Scan failed?\"\n\t    # DB errors reported during scan?\n\t    check_db_error rh.log\n\n\t    # report should work\n\t    echo \"cfg3: report\"\n\t    $REPORT -f $cfg3 -i -l FULL 2>&1 > rh.log || error \"Report should work\"\n\t    grep \"Run 'robinhood --alter-db'\" rh.log && error \"DB should be right\"\n\t    [ \"$DEBUG\" = \"1\" ] && cat rh.log\n\t    config_file=$(basename $cfg3) check_fcount $nbent\n    fi\n\n    # back to cfg1\n    echo \"cfg1: report\"\n    $REPORT -f $cfg1 -i -l FULL 2>&1 > rh.log || error \"Report should work\"\n    [ \"$DEBUG\" = \"1\" ] && cat rh.log\n    grep \"Run 'robinhood 
--alter-db'\" rh.log | grep -v \"modification in ACCT_STAT\" && error \"report should deal with policy removal\"\n    config_file=$(basename $cfg1) check_fcount $nbent\n\n    :> rh.log\n    echo \"cfg1: robinhood (no alter-db)\"\n    # run a simple rbhv3 over this initial schema\n    $RH -f $cfg1 --scan --once -l FULL -L rh.log\n\n    # robinhood should have suggested to run '--alter-db'\n    grep \"Run 'robinhood --alter-db'\" rh.log || error \"robinhood should have reported DB schema changes\"\n\n    # no ALTER TABLE expected\n    grep \"ALTER TABLE\" rh.log && error \"no ALTER TABLE expected\"\n\n    :> rh.log\n    echo \"cfg1: robinhood --alter-db\"\n    # run alter DB on initial schema\n    $RH -f $cfg1 --alter-db -l FULL -L rh.log\n\n    [ \"$DEBUG\" = \"1\" ] && cat rh.log\n    grep \"ALTER TABLE\" rh.log || error \"ALTER TABLE expected\"\n    grep \"dropping and repopulating table ACCT_STAT\" rh.log\n    grep \"trigger ACCT_ENTRY\" rh.log\n\n    # after alter, no more DB change should be reported\n    :> rh.log\n    echo \"cfg1: robinhood on converted DB\"\n    $RH -f $cfg1 --scan --once -l FULL -L rh.log\n    grep \"DB schema change detected\" rh.log && error \"DB should be right\"\n    check_db_error rh.log\n    [ \"$DEBUG\" = \"1\" ] && cat rh.log\n    :> rh.log\n    $REPORT -f $cfg1 -i > rh.log || error \"Report should succeed\"\n    grep \"Run 'robinhood --alter-db'\" rh.log && error \"DB should be right\"\n    [ \"$DEBUG\" = \"1\" ] && cat rh.log\n    config_file=$(basename $cfg1) check_fcount $nbent\n\n    # Test conversion from numeric to text uids.\n    # This is not supposed to be an upgrade path, but it is convenient\n    # to test type conversion routines.\n    if [[ $RBH_NUM_UIDGID = \"yes\" ]]; then\n        :> rh.log\n        echo \"Numeric to text conversion (no alter)...\"\n        RBH_NUM_UIDGID=no $RH -f $cfg1 --scan --once -l FULL -L rh.log\n        # robinhood should have suggested to run '--alter-db'\n        grep \"Run 'robinhood 
--alter-db'\" rh.log || error \"robinhood should have reported DB schema changes\"\n\n        :> rh.log\n        echo \"Numeric to text conversion (alter)...\"\n        RBH_NUM_UIDGID=no $RH -f $cfg1 --alter-db -l FULL -L rh.log\n        [ \"$DEBUG\" = \"1\" ] && cat rh.log\n        grep \"ALTER TABLE\" rh.log || error \"ALTER TABLE expected\"\n        grep \"dropping and repopulating table ACCT_STAT\" rh.log\n        grep \"trigger ACCT_ENTRY\" rh.log\n    fi\n}\n\n# Create files with random names, and use rbh-find on them\nfunction random_names\n{\n    config_file=$1\n\n    local num_files=500\n\n    clean_logs\n    $CFG_SCRIPT empty_db $RH_DB > /dev/null\n\n    echo \"1-Creating files...\"\n    rm -rf $RH_ROOT/random/\n    mkdir $RH_ROOT/random/\n\n    echo Creating $num_files files with random names\n    $(dirname $0)/create-random $num_files 200 $RH_ROOT/random || error \"creating files failed\"\n    echo Done creating files\n\n    echo \"2-Scan of filesystem\"\n    $RH -f $RBH_CFG_DIR/$config_file --scan -l FULL -L rh_scan.log --once || error \"\"\n\n    echo \"3-Find tests\"\n    $FIND -f $RBH_CFG_DIR/$config_file -type f $RH_ROOT/random/ > find.out || error \"find failed1\"\n    $FIND -f $RBH_CFG_DIR/$config_file -type f -printf \"file=%p\\n\" $RH_ROOT/random/ > find.out || error \"find failed2\"\n\n    # When the names are escaped, we will get 1 line per file\n    $FIND -f $RBH_CFG_DIR/$config_file -type f -printf \"file=%p\\n\" --escaped $RH_ROOT/random/ > find.out || error \"find failed3\"\n    wc -l < find.out | grep --quiet \"^${num_files}$\" || error \"should have found ${num_files} files\"\n\n    echo \"4-Cleanup\"\n    rm -rf $RH_ROOT/random/\n}\n\nfunction check_status_count\n{\n    report=$1\n    type=$2\n    status=$3\n    count=$4\n\n    nst=$(grep -E \"^([ ]*)$status\" $report | grep $type | cut -d ',' -f 3 | tr -d ' ')\n    [ -z \"$nst\" ] && nst=0\n\n    [ \"$DEBUG\" = \"1\" ] && echo \"$status: $nst\"\n    [ \"$nst\" = \"$count\" ] || error 
\"Expected $count $status, got $nst\"\n}\n\nfunction test_compress\n{\n\tconfig_file=$1\n\tclean_logs\n\n\tif (( $is_hsmlite == 0 )); then\n    \techo \"compression is only available with backup mode\"\n\t\tset_skipped\n\t\treturn 1\n    fi\n\n    local dir_rel=\"dir1 dir2\"\n    local file_rel=\"dir1/file.1 dir1/file.2 dir2/file.3 file.4\"\n    local file_rel_mod=\"dir1/file.1 file.4\"\n    local file_rel_new=\"dir1/file.5 dir1/file.6 dir2/file.7\"\n\n    src_file=\"/etc/hosts\"\n\n    # populate the filesystem\n    for d in $dir_rel; do\n        mkdir -p $RH_ROOT/$d || error mkdir\n    done\n    for f in $file_rel; do\n        /bin/cp $src_file $RH_ROOT/$f || error cp\n    done\n\n    # scan the filesystem (compress=no)\n    export compress=no\n    $RH -f $RBH_CFG_DIR/$config_file --scan --once -l EVENT -L rh_scan.log  || error \"performing initial scan\"\n    check_db_error rh_scan.log\n\n    # check file status\n    $REPORT -f $RBH_CFG_DIR/$config_file --status-info $STATUS_MGR -q > report.out\n    [ \"$DEBUG\" = \"1\" ] && cat report.out\n    check_status_count report.out file new 4\n\n    # check how a child entries is archived\n    $RH -f $RBH_CFG_DIR/$config_file $SYNC_OPT -l DEBUG -L rh_migr.log\n    check_db_error rh_migr.log\n\n    # check file status\n    $REPORT -f $RBH_CFG_DIR/$config_file --status-info $STATUS_MGR -q  > report.out\n    [ \"$DEBUG\" = \"1\" ] && cat report.out\n    check_status_count report.out file synchro 4\n\n    # no compressed file names expected xxxx__<fid>z\n    name_comp=$(find $BKROOT -type f -name \"*z\" | wc -l)\n    name_ncomp=$(find $BKROOT -type f -name \"*[0-9]\" | wc -l)\n    type_comp=$(find $BKROOT -type f -exec file {} \\; | grep \"gzip compressed data\" | wc -l)\n    type_ncomp=$(find $BKROOT -type f -exec file {} \\; | grep \"ASCII\" | wc -l)\n\n    (( $name_comp == 0 )) || error \"No compressed file name expected in backend: found $name_comp\"\n    (( $type_comp == 0 )) || error \"No compressed file data expected 
in backend: found $type_comp\"\n    (( $name_ncomp == 4 )) || error \"4 non-compressed file names expected in backend: found $name_ncomp\"\n    (( $type_ncomp == 4 )) || error \"4 ASCII file data expected in backend: found $type_ncomp\"\n\n    # turn compression on\n    export compress=yes\n\n    # modify some files, create new files\n    for f in $file_rel_mod; do\n        cat $src_file >> $RH_ROOT/$f || error \"appending $f\"\n    done\n    for f in $file_rel_new; do\n        /bin/cp $src_file $RH_ROOT/$f || error \"creating $f\"\n    done\n\n    # scan the file system and check file status\n    $RH -f $RBH_CFG_DIR/$config_file --scan --once -l EVENT -L rh_scan.log  || error \"performing 2nd scan\"\n    check_db_error rh_scan.log\n\n    # check file status\n    $REPORT -f $RBH_CFG_DIR/$config_file --status-info $STATUS_MGR -q  > report.out\n    [ \"$DEBUG\" = \"1\" ] && cat report.out\n    check_status_count report.out file synchro 2\n    check_status_count report.out file modified 2\n    check_status_count report.out file new 3\n\n    # archive all dirty data and check status\n    $RH -f $RBH_CFG_DIR/$config_file $SYNC_OPT -l DEBUG -L rh_migr.log\n    check_db_error rh_migr.log\n\n    # check file status\n    $REPORT -f $RBH_CFG_DIR/$config_file --status-info $STATUS_MGR -q  > report.out\n    [ \"$DEBUG\" = \"1\" ] && cat report.out\n    check_status_count report.out file synchro 7\n\n    # check backend files\n    name_comp=$(find $BKROOT -type f -name \"*z\" | wc -l)\n    name_ncomp=$(find $BKROOT -type f -name \"*[0-9]\" | wc -l)\n    type_comp=$(find $BKROOT -type f -exec file {} \\; | grep \"gzip compressed data\" | wc -l)\n    type_ncomp=$(find $BKROOT -type f -exec file {} \\; | grep \"ASCII\" | wc -l)\n\n    # 2 already archived: uncompressed\n    # 2 modified: compressed\n    # 3 new: compressed\n    (( $name_comp == 5 )) || error \"5 compressed file names expected in backend: found $name_comp\"\n    (( $type_comp == 5 )) || error \"5 compressed file 
data expected in backend: found $type_comp\"\n    (( $name_ncomp == 2 )) || error \"2 non-compressed file names expected in backend: found $name_ncomp\"\n    (( $type_ncomp == 2 )) || error \"2 ASCII file data expected in backend: found $type_ncomp\"\n\n    # turn compression off compression, make some changes and check status again\n    for f in $file_rel_mod; do\n        cat $src_file >> $RH_ROOT/$f || error \"appending $f\"\n    done\n\n    export compress=no\n    $RH -f $RBH_CFG_DIR/$config_file --scan --once -l EVENT -L rh_scan.log  || error \"performing initial scan\"\n    check_db_error rh_scan.log\n\n    # check file status\n    $REPORT -f $RBH_CFG_DIR/$config_file --status-info $STATUS_MGR -q  > report.out\n    [ \"$DEBUG\" = \"1\" ] && cat report.out\n    check_status_count report.out file synchro 5\n    check_status_count report.out file modified 2\n\n    $RH -f $RBH_CFG_DIR/$config_file $SYNC_OPT -l DEBUG -L rh_migr.log\n    check_db_error rh_migr.log\n\n    # check backend files\n    name_comp=$(find $BKROOT -type f -name \"*z\" | wc -l)\n    name_ncomp=$(find $BKROOT -type f -name \"*[0-9]\" | wc -l)\n    type_comp=$(find $BKROOT -type f -exec file {} \\; | grep \"gzip compressed data\" | wc -l)\n    type_ncomp=$(find $BKROOT -type f -exec file {} \\; | grep \"ASCII\" | wc -l)\n\n    # 2 archived at first: uncompresssed\n    # 2 modified: uncompressed\n    # 3 archived at step 2: compressed\n    (( $name_comp == 3 )) || error \"3 compressed file names expected in backend: found $name_comp\"\n    (( $type_comp == 3 )) || error \"3 compressed file data expected in backend: found $type_comp\"\n    (( $name_ncomp == 4 )) || error \"4 non-compressed file names expected in backend: found $name_ncomp\"\n    (( $type_ncomp == 4 )) || error \"4 ASCII file data expected in backend: found $type_ncomp\"\n\n    # check file status\n    $REPORT -f $RBH_CFG_DIR/$config_file --status-info $STATUS_MGR -q  > report.out\n    [ \"$DEBUG\" = \"1\" ] && cat report.out\n    
check_status_count report.out file synchro 7\n\n    # test disaster recovery with compressed files\n    local before=/tmp/before.$$\n    local after=/tmp/after.$$\n    local diff=/tmp/diff.$$\n    # shots before disaster (time is only significant for files)\n    find $RH_ROOT -type f -printf \"%n %m %T@ %g %u %s %p %l\\n\" > $before\n    find $RH_ROOT -type d -printf \"%n %m %g %u %s %p %l\\n\" >> $before\n    find $RH_ROOT -type l -printf \"%n %m %g %u %s %p %l\\n\" >> $before\n\n    # perform 2 disaster recovery with compress=yes and compress=no\n    for c in yes no; do\n        export compress=$c\n        # FS disaster\n        if [[ -n \"$RH_ROOT\" ]]; then\n            echo \"Disaster: all FS content is lost\"\n            rm  -rf $RH_ROOT/*\n        fi\n\n        # update SOFTRM table\n        $RH -f $RBH_CFG_DIR/$config_file --scan --once -l EVENT -L rh_scan.log  || error \"performing update scan\"\n        check_db_error rh_scan.log\n\n        # perform the recovery\n        echo \"Performing recovery (compress=$c)...\"\n        cp /dev/null recov.log\n        $UNDELETE -f $RBH_CFG_DIR/$config_file -R || error \"Error performing recovery\"\n\n        find $RH_ROOT -type f -printf \"%n %m %T@ %g %u %s %p %l\\n\" > $after\n        find $RH_ROOT -type d -printf \"%n %m %g %u %s %p %l\\n\" >> $after\n        find $RH_ROOT -type l -printf \"%n %m %g %u %s %p %l\\n\" >> $after\n\n        diff  $before $after > /tmp/diff.$$ || error \"unexpected differences between initial and final state\"\n        [ \"$DEBUG\" = \"1\" ] && cat /tmp/diff.$$\n\n        # check that no file in Lustre is restored as compressed file\n        lucomp=$(find $RH_ROOT -type f -exec file {} \\; | grep \"gzip compressed data\" | wc -l)\n        (( $lucomp == 0 )) || error \"No compressed file expected in Lustre\"\n\n        # check backend files\n        # check all *z files are compressed\n        type_comp=$(find $BKROOT -type f -name \"*z\" -exec file {} \\; | grep -v \"gzip compressed 
data\" | wc -l)\n        (( $type_comp == 0 )) || error \"Some __<fid>z files are not compressed data\"\n        # check all *0x0 files are uncompressed\n        type_ncomp=$(find $BKROOT -type f -name \"*[0-9]\" -exec file {} \\; | grep \"gzip compressed data\" | wc -l)\n        (( $type_ncomp == 0 )) || error \"Some __<fid> files are actually compressed data\"\n        # check counts\n        name_comp=$(find $BKROOT -type f -name \"*z\" | wc -l)\n        name_ncomp=$(find $BKROOT -type f -name \"*[0-9]\" | wc -l)\n        (( $name_comp == 3 )) || error \"3 compressed file names expected in backend: found $name_comp\"\n        (( $name_ncomp == 4 )) || error \"4 non-compressed file names expected in backend: found $name_ncomp\"\n    done\n\n    rm -f report.out $before $after $diff\n}\n\nfunction test_enoent\n{\n\tconfig_file=$1\n\n\tif [[ $RBH_NUM_UIDGID = \"yes\" ]]; then\n\t\techo \"Incompatible configuration for numerical UID/GID: skipped\"\n\t\tset_skipped\n\t\treturn 1\n\tfi\n\n\tclean_logs\n\n\tif (($no_log != 0)); then\n\t\techo \"Changelogs not supported on this config: skipped\"\n\t\tset_skipped\n\t\treturn 1\n\tfi\n\n\techo \"1-Start reading changelogs in background...\"\n\t# read changelogs\n\t$RH -f $RBH_CFG_DIR/$config_file --readlog -l FULL -L rh_chglogs.log  --detach --pid-file=rh.pid || error \"could not start cl reader\"\n\n\techo \"2-create/unlink sequence\"\n    for i in $(seq 1 1000); do\n        touch $RH_ROOT/file.$i\n        rm -f $RH_ROOT/file.$i\n        touch $RH_ROOT/file.$i\n        rm -f $RH_ROOT/file.$i\n    done\n\n    # wait for consumer to read all records\n    sleep 2\n\tcheck_db_error rh_chglogs.log\n\n    # TODO add addl checks here\n\n\t$REPORT -f $RBH_CFG_DIR/$config_file --dump-all -cq | \\\n        grep -v \"\\.shook\" > report.out\n    lines=$(cat report.out | wc -l)\n    [ \"$DEBUG\" = \"1\" ] && cat report.out\n    (($lines == 0)) || error \"no entries expected after create/rm\"\n    rm -f report.out\n\n\t# kill event 
handler\n\tpkill -9 $PROC\n}\n\nfunction test_diff\n{\n\tconfig_file=$1\n\tflavor=$2\n\tpolicy_str=\"$3\"\n\n\tclean_logs\n\n    # diff: diff (various), no apply\n    # diffapply: diff (various) + apply to DB\n    # scan: scan with diff option (various)\n\n    # populate filesystem\n    mkdir $RH_ROOT/dir.1 || error \"mkdir\"\n    chmod 0750 $RH_ROOT/dir.1 || error \"chmod\"\n    mkdir $RH_ROOT/dir.2 || error \"mkdir\"\n    mkdir $RH_ROOT/dir.3 || error \"mkdir\"\n    touch $RH_ROOT/dir.1/a $RH_ROOT/dir.1/b $RH_ROOT/dir.1/c || error \"touch\"\n    touch $RH_ROOT/dir.2/d $RH_ROOT/dir.2/e $RH_ROOT/dir.2/f || error \"touch\"\n    touch $RH_ROOT/file || error \"touch\"\n\n    # initial scan\n    echo \"1-Initial scan...\"\n    $RH -f $RBH_CFG_DIR/$config_file --scan --once -l EVENT -L rh_scan.log  || error \"performing initial scan\"\n\n    # new entry (file & dir)\n    touch $RH_ROOT/dir.1/file.new || error \"touch\"\n    mkdir $RH_ROOT/dir.new\t       || error \"mkdir\"\n\n    # rm'd entry (file & dir)\n    rm -f $RH_ROOT/dir.1/b\t|| error \"rm\"\n    rmdir $RH_ROOT/dir.3\t|| error \"rmdir\"\n\n    # apply various changes\n    chmod 0700 $RH_ROOT/dir.1 \t\t|| error \"chmod\"\n    chown testuser $RH_ROOT/dir.2\t\t|| error \"chown\"\n    chgrp testgroup $RH_ROOT/dir.1/a\t|| error \"chgrp\"\n    echo \"zqhjkqshdjkqshdjh\" >>  $RH_ROOT/dir.1/c || error \"append\"\n    mv $RH_ROOT/dir.2/d  $RH_ROOT/dir.1/d     || error \"mv\"\n    mv $RH_ROOT/file $RH_ROOT/fname           || error \"rename\"\n\n    # is swap layout feature available?\n    has_swap=0\n    if [ -z \"$POSIX_MODE\" ]; then\n        $LFS --list-commands | grep swap_layout > /dev/null && has_swap=1\n        # if so invert stripe for e and f\n        if [ $has_swap -eq 1 ]; then\n            $LFS swap_layouts $RH_ROOT/dir.2/e  $RH_ROOT/dir.2/f || error \"lfs swap_layouts\"\n        fi\n    fi\n\n    # need 1s difference for md and name GC\n    sleep 1\n\n    echo \"2-diff ($policy_str)...\"\n    if [ \"$flavor\" 
= \"diff\" ]; then\n        $DIFF -f $RBH_CFG_DIR/$config_file -l FULL > report.out \\\n            2> rh_report.log || error \"performing diff\"\n    elif [ \"$flavor\" = \"partdiff\" ]; then\n        # the triggered bug returns a retryable error\n        # use a timeout to make this test finish\n        timeout 10 $DIFF -f $RBH_CFG_DIR/$config_file -l FULL \\\n            --scan=$RH_ROOT/dir.1 > report.out 2> rh_report.log ||\n                error \"performing partial diff\"\n    elif [ \"$flavor\" = \"diffapply\" ]; then\n        $DIFF --apply=db -f $RBH_CFG_DIR/$config_file -l FULL > report.out \\\n            2> rh_report.log || error \"performing diff\"\n    elif [ \"$flavor\" = \"scan\" ]; then\n        $RH -f $RBH_CFG_DIR/$config_file -l FULL --scan --once --diff=all \\\n            -L rh_report.log > report.out || error \"performing scan+diff\"\n    fi\n\n    [ \"$DEBUG\" = \"1\" ] && cat report.out\n\n    # must get:\n    # new entries dir.1/file.new and dir.new\n    egrep '^++' report.out | grep -v '+++' | grep -E \"name='file.new'|path='$RH_ROOT/dir.1/file.new'\" | grep type=file || error \"missing create dir.1/file.new\"\n    if [ \"$flavor\" != \"partdiff\" ]; then\n        egrep '^++' report.out | grep -v '+++' | grep -E \"name='dir.new'|path='$RH_ROOT/dir.new'\" | grep type=dir || error \"missing create dir.new\"\n    fi\n    # rmd entries dir.1/b and dir.3\n    if [ \"$flavor\" = \"partdiff\" ]; then\n        rm_expect=1\n    else\n        rm_expect=2\n    fi\n    nbrm=$(egrep -e '^--' report.out | grep -v -- '---' | wc -l)\n    [ $nbrm  -eq $rm_expect ] || error \"$nbrm/$rm_expect removal\"\n    # changes\n    grep \"^+[^ ]*\"$(get_id \"$RH_ROOT/dir.1\") report.out  | grep mode= || error \"missing chmod $RH_ROOT/dir.1\"\n    if [ \"$flavor\" != \"partdiff\" ]; then\n        grep \"^+[^ ]*\"$(get_id \"$RH_ROOT/dir.2\") report.out | grep owner=$testuser_str || error \"missing chown $RH_ROOT/dir.2\"\n    fi\n    grep \"^+[^ ]*\"$(get_id 
\"$RH_ROOT/dir.1/a\") report.out | grep group=$testgroup_str || error \"missing chgrp $RH_ROOT/dir.1/a\"\n    grep \"^+[^ ]*\"$(get_id \"$RH_ROOT/dir.1/c\") report.out | grep size= || error \"missing size change $RH_ROOT/dir.1/c\"\n\n    # dir2/d -> dir1/d\n    old_parent=$(grep \"^-[^ ]*\"$(get_id \"$RH_ROOT/dir.1/d\") report.out | sed -e \"s/.*parent=\\[\\([^]]*\\).*/\\1/\" )\n    new_parent=$(grep \"^+[^ ]*\"$(get_id \"$RH_ROOT/dir.1/d\") report.out | sed -e \"s/.*parent=\\[\\([^]]*\\).*/\\1/\" )\n    [ -z \"$old_parent\" ] && error \"cannot get old parent of $RH_ROOT/dir.1/d\"\n    [ -z \"$new_parent\" ] && error \"cannot get new parent of $RH_ROOT/dir.1/d\"\n    [ $old_parent = $new_parent ] && error \"$RH_ROOT/dir.1/d still has the same parent\"\n\n    # file -> fname\n    file_fid=$(get_id \"$RH_ROOT/fname\")\n    old_file=$(grep \"^-[^ ]*${file_fid}.*name='file'\" report.out)\n    new_file=$(grep \"^+[^ ]*${file_fid}.*name='fname'\" report.out)\n    [ -z old_file ] && error \"missing path change $RH_ROOT/fname\"\n    [ -z new_file ] && error \"missing path change $RH_ROOT/fname\"\n\n    if [ \"$flavor\" != \"partdiff\" ] && [ $has_swap -eq 1 ]; then\n        grep \"^+[^ ]*\"$(get_id \"$RH_ROOT/dir.2/e\") report.out | grep stripe || error \"missing stripe change $RH_ROOT/dir.2/e\"\n        grep \"^+[^ ]*\"$(get_id \"$RH_ROOT/dir.2/f\") report.out | grep stripe || error \"missing stripe change $RH_ROOT/dir.2/f\"\n    fi\n\n    # TODO check the content of the DB for scan and diff --apply\n}\n\nfunction test_diff_apply_fs # test diff --apply=fs in particular for entry recovery\n{\n    config_file=$1\n    flavor=$2\n    policy_str=\"$3\"\n\n    clean_logs\n    # clean any previous files used for this test\n    rm -f diff.out diff.log find.out find2.out lovea fid_remap\n\n    # copy 2 instances /bin in the filesystem\n    echo \"Populating filesystem...\"\n    $LFS setstripe -c 2 $RH_ROOT/.\n    cp -ar . $RH_ROOT/bin.1 || error \"copy failed\"\n    cp -ar . 
$RH_ROOT/bin.2 || error \"copy failed\"\n\n    # run initial scan\n    echo \"Initial scan...\"\n    $RH -f $RBH_CFG_DIR/$config_file --scan --once -l EVENT -L rh_scan.log  || error \"performing initial scan\"\n\n    # save contents of bin.1\n    find $RH_ROOT/bin.1 -printf \"%n %y %m %T@ %g %u %p %l\\n\" | sort -k 7 > find.out || error \"find error\"\n\n    # remove it\n    echo \"removing objects\"\n    rm -rf \"$RH_ROOT/bin.1\"\n\n    # cause 1 sec bw initial creation and recovery\n    # to check robinhood restore the original date\n    sleep 1\n\n    echo \"running recovery...\"\n    # clear umask for recovery\n    old_umask=$(umask)\n    umask 0000\n    strace -e open,mkdir -f $DIFF -f $RBH_CFG_DIR/$config_file --apply=fs > diff.out 2> diff.log || error \"rbh-diff error\"\n    umask \"$old_umask\"\n\n    cr1=$(grep -E '^\\+\\+[^+]' diff.out | wc -l)\n    # recursive directory creation for files returns EEXIST, don't count it\n    cr2=$(grep -v EEXIST diff.log | grep -E \"O_CREAT|mkdir\" | wc -l)\n    cr3=$(wc -l find.out | awk '{print $1}')\n    echo \"diff would create $cr1 entries, $cr2 entries created, $cr3 entries initially in directory\"\n    rmhl=0\n    if (($cr1 != $cr2)) || (($cr1 != $cr3)); then\n        miss=0\n        for h in $(grep \"type=file\" diff.out | grep -E \"nlink=[^1]\"| sed -e \"s/.*nlink=\\([0-9]*\\),.*/\\1/\"); do\n            ((miss=$h-1+$miss))\n        done\n        (( $miss > 0 )) && echo \"detected $miss missing hardlinks\"\n        rmhl=1\n        if (($cr3 == $cr1 + $miss)); then\n            (( $miss > 0 )) && echo \"WARNING: $miss hardlinks not restored\"\n        else\n            error \"Unexpected number of objects created: rbh-diff displayed $cr1, rbh-diff log indicates $cr2, expected $cr3 according to find\"\n        fi\n    else\n        echo \"OK: $cr1 objects created\"\n    fi\n\n    find $RH_ROOT/bin.1 -printf \"%n %y %m %T@ %g %u %p %l\\n\" | sort -k 7 > find2.out || error \"find error\"\n\n    if (($rmhl == 1)); 
then\n        # remove file hardlinks from diff as their are erroneous\n        for f in $(grep -E \"^[^1]* f\" find.out | awk '{print $(NF)}'); do\n            grep -Ev \" $f \" find.out > find.out.new\n            grep -Ev \" $f \" find2.out > find2.out.new\n            /bin/mv find.out.new find.out\n            /bin/mv find2.out.new find2.out\n        done\n    fi\n\n    # diff non-files: don't compare time as it can't be set\n    sed -e \"s/\\([0-9]* [^f] [0-7]* \\)[0-9.]* /\\1/\" find.out | sort > find.out.new\n    sed -e \"s/\\([0-9]* [^f] [0-7]* \\)[0-9.]* /\\1/\" find2.out | sort > find2.out.new\n    /bin/mv find.out.new find.out\n    /bin/mv find2.out.new find2.out\n\n    diff find.out find2.out || error \"unexpected differences between initial and final state\"\n\n\n    lvers=$(lustre_version | cut -d '.' -f 1,2)\n    if [[ \"$lvers\" == \"2.1\" ]]; then\n        # lovea and fid_remap must have been generated for newly created files\n        [[ -f lovea ]] || error \"lovea not generated\"\n        [[ -f fid_remap ]] || error \"fid_remap not generated\"\n    elif [[ \"$lustre_major\" == \"2\" ]]; then\n        # not tested for those versions: display a warning for reminder\n        [[ -f lovea ]] || echo  \"WARNING: lovea not generated\"\n        [[ -f fid_remap ]] || echo \"WARNING: fid_remap not generated\"\n    fi\n    if [[ -f lovea ]] && [[ -f fid_remap ]]; then\n        nbf=$(grep -E '^\\+\\+[^+]' diff.out | grep \"type=file\" | wc -l)\n        nbso=$(grep -E '^\\+\\+[^+]' diff.out | grep \"type=file\" | sed -e \"s/.*stripe_count=\\([0-9]*\\),.*/\\1/\" | xargs | tr \" \" \"+\" | bc)\n        # check their contents\n        nbl=$(wc -l lovea | awk '{print $1}')\n        nbo=$(wc -l fid_remap | awk '{print $1}')\n\n        echo \"$nbl items in lovea, $nbo items in fid_remap\"\n        [[ \"$nbf\" == \"$nbl\" ]] || error \"unexpected number of items in lovea $nbl: $nbf expected\"\n        [[ \"$nbso\" == \"$nbo\" ]] || error \"unexpected number of items 
in fid_remap $nbo: $nbso expected\"\n    fi\n\n    rm -f  diff.out diff.log find.out find2.out lovea fid_remap\n}\n\nfunction test_completion\n{\n\tconfig_file=$1\n\tflavor=$2\n\tpolicy_str=\"$3\"\n\n\tclean_logs\n    # clean existing \"out.*\" files\n    rm -f out.1 out.2\n\n    done_str=\"Executing scan completion command\"\n    fail_str=\"Invalid scan completion command\"\n\n    # flavors:\n    case \"$flavor\" in\n        OK)\n            export TEST_CMD=\"$RBH_TESTS_DIR/completion.sh {cfg} {fspath} out\"\n            ;;\n        unmatched)\n            export TEST_CMD=\"$RBH_TESTS_DIR/completion.sh {cfg\"\n            err=\"ERROR: unmatched '{' in scan completion command\"\n            ;;\n        invalid_ctx_id)\n            export TEST_CMD=\"$RBH_TESTS_DIR/completion.sh {fid}\"\n            err=\"fid is not available in this context\"\n            ;;\n        invalid_ctx_attr)\n            export TEST_CMD=\"$RBH_TESTS_DIR/completion.sh {fullpath}\"\n            err=\"entry attributes are not available in this context\"\n            ;;\n        invalid_attr)\n            export TEST_CMD=\"$RBH_TESTS_DIR/completion.sh {foo}\"\n            err=\"unexpected variable 'foo' in scan completion command\"\n            ;;\n    esac\n\n    # populate filesystem\n    for i in `seq 1 10`; do\n        touch $RH_ROOT/file.$i || error \"creating entry\"\n    done\n\n    # do the scan\n    echo \"scan...\"\n    $RH -f $RBH_CFG_DIR/$config_file --scan --once -l EVENT -L rh_scan.log  || error \"performing initial scan\"\n\n    # if flavor is OK: completion command must have been called\n    if [ \"$flavor\" = \"OK\" ]; then\n        grep \"$done_str\" rh_scan.log || error \"Completion command not executed\"\n\n        [ -f out.1 ] || error \"file out.1 not found\"\n        [ -f out.2 ] || error \"file out.2 not found\"\n        # out.1 contains cfg\n        grep $config_file out.1 || error \"out.1 has unexpected content: $(cat out.1)\"\n        # out.2 contains fspath\n        
grep $RH_ROOT out.2 || error \"out.2 has unexpected content: $(cat out.2)\"\n    else\n        grep \"$fail_str\" rh_scan.log || error \"Completion command should fail\"\n        grep \"$err\" rh_scan.log || error \"unreported cmd error\"\n    fi\n\n    rm -f out.1 out.2\n}\n\n\nfunction test_rename\n{\n    config_file=$1\n    flavor=$2\n\n    clean_logs\n\n\tif (( $no_log )) && [ \"$flavor\" = \"readlog\" ]; then\n            echo \"Changelogs not supported on this config: skipped\"\n            set_skipped\n            return 1\n    fi\n\n    dirs=\"$RH_ROOT/dir.1 $RH_ROOT/dir.2 $RH_ROOT/dir.3 $RH_ROOT/dir.3/subdir\"\n    files=\"$RH_ROOT/dir.1/file.1  $RH_ROOT/dir.1/file.2  $RH_ROOT/dir.2/file.1 $RH_ROOT/dir.2/file.2 $RH_ROOT/dir.2/file.4 $RH_ROOT/dir.3/subdir/file.1\"\n    hlink_ref=\"$RH_ROOT/dir.2/file.3\"\n    hlink=\"$RH_ROOT/dir.2/link_file\" # initially points to file.3, then file.4\n\n    dirs_tgt=\"$RH_ROOT/dir.1 $RH_ROOT/dir.2 $RH_ROOT/dir.3 $RH_ROOT/dir.3/subdir.rnm\"\n    files_tgt=\"$RH_ROOT/dir.1/file.1.rnm  $RH_ROOT/dir.2/file.2.rnm  $RH_ROOT/dir.2/file.2  $RH_ROOT/dir.2/file.3  $RH_ROOT/dir.2/link_file $RH_ROOT/dir.3/subdir.rnm/file.1\"\n    deleted=\"$RH_ROOT/dir.2/file.2\"\n\n    # create several files/dirs\n    echo \"1. Creating initial objects...\"\n    mkdir $dirs || error \"mkdir $dirs\"\n    touch $files $hlink_ref || error \"touch $files $hlink_ref\"\n    ln $hlink_ref $hlink || error \"hardlink $hlink_ref $hlink\"\n\n    # get fid of deleted entries\n    rmid=`get_id \"$deleted\"`\n\n    # readlog or scan\n    if [ \"$flavor\" = \"readlog\" ]; then\n        echo \"2. Reading changelogs...\"\n    \t$RH -f $RBH_CFG_DIR/$config_file --readlog --once -l DEBUG -L rh_scan.log || error \"reading changelog\"\n    elif [ \"$flavor\" = \"diff\" ]; then\n        echo \"2. Diff...\"\n    \t$DIFF -f $RBH_CFG_DIR/$config_file --apply=db -l DEBUG > rh_scan.log 2>&1 || error \"scanning\"\n    else\n        echo \"2. 
Scanning initial state...\"\n    \t$RH -f $RBH_CFG_DIR/$config_file --scan --once -l DEBUG -L rh_scan.log || error \"scanning\"\n    fi\n\n\tif (( $is_lhsm != 0 )); then\n\t\techo \"  -archiving all data\"\n\t\tflush_data\n\t\t$LFS hsm_archive $files || error \"executing lfs hsm_archive\"\n\t\techo \"  -Waiting for end of data migration...\"\n\t\twait_done 60 || error \"Migration timeout\"\n\telif (( $is_hsmlite != 0 )); then\n\t\techo \"  -archiving all data\"\n\t\t$RH -f $RBH_CFG_DIR/$config_file $SYNC_OPT -l DEBUG -L rh_migr.log || error \"executing $CMD --sync\"\n        [ \"$DEBUG\" = \"1\" ] && find $BKROOT -type f -ls\n\tfi\n\n    $REPORT -f $RBH_CFG_DIR/$config_file --dump-all -q > report.out || error \"$REPORT\"\n    [ \"$DEBUG\" = \"1\" ] && cat report.out\n\n    $FIND -f $RBH_CFG_DIR/$config_file $RH_ROOT -ls -nobulk > find.out || error \"$FIND\"\n    [ \"$DEBUG\" = \"1\" ] && cat find.out\n\n    # checking all objects in reports\n    for o in $dirs $files; do\n        grep -E \" $o$\" report.out > /dev/null || error \"$o not found in report\"\n        grep -E \" $o$\" find.out > /dev/null || error \"$o not found in find\"\n    done\n\n    # hlink_ref hlink must be in report.out\n    grep -E \" $hlink$\" report.out > /dev/null ||  grep -E \" $hlink_ref$\" report.out > /dev/null || error \"$hlink or $hlink_ref must be in report output\"\n    # both hlink_ref hlink must be in find.out\n    grep -E \" $hlink$\" find.out > /dev/null || error \"$hlink must be in rbh-find output\"\n    grep -E \" $hlink_ref$\" find.out > /dev/null || error \"$hlink must be in rbh-find output\"\n\n    count_nb_init=$(wc -l report.out | awk '{print $1}')\n    count_path_init=$(wc -l find.out | awk '{print $1}')\n\n    # get entry fid before they are unlinked, moved...\n    name_from=(dir.1/file.1 dir.1/file.2 dir.2/file.1 dir.3/subdir dir.2/file.4)\n    id_from=()\n    for f in ${name_from[*]}; do\n        id_from+=( \"$(get_id $RH_ROOT/$f)\" )\n    done\n\n    
name_unlnk=(dir.2/file.2 dir.2/link_file)\n    id_unlnk=()\n    for f in ${name_unlnk[*]}; do\n        id_unlnk+=( \"$(get_id $RH_ROOT/$f)\" )\n    done\n\n    # rename entries\n    echo \"3. Renaming objects...\"\n    # 1) simple file rename\n    mv $RH_ROOT/dir.1/file.1 $RH_ROOT/dir.1/file.1.rnm\n    # 2) cross directory file rename\n    mv $RH_ROOT/dir.1/file.2 $RH_ROOT/dir.2/file.2.rnm\n    # 3) rename that deletes the target\n    mv -f $RH_ROOT/dir.2/file.1 $RH_ROOT/dir.2/file.2\n    # 4) upper level directory rename\n    mv $RH_ROOT/dir.3/subdir $RH_ROOT/dir.3/subdir.rnm\n    # 5) overwriting a hardlink\n    mv -f $RH_ROOT/dir.2/file.4 $hlink\n\n    # get target fids\n    name_to=(dir.1/file.1.rnm dir.2/file.2.rnm dir.2/file.2 dir.3/subdir.rnm dir.3/subdir.rnm dir.2/link_file)\n    id_to=()\n    for f in ${name_to[*]}; do\n        id_to+=( \"$(get_id $RH_ROOT/$f)\" )\n    done\n\n    # namespace GC needs 1s difference\n    sleep 1\n\n    # readlog or re-scan\n    if [ \"$flavor\" = \"readlog\" ]; then\n        echo \"4. 
Reading changelogs...\"\n    \t$RH -f $RBH_CFG_DIR/$config_file --readlog --once -l DEBUG -L rh_scan.log || error \"reading changelog\"\n\n        ## check the \"fake\" records are correctly built\n\n        # \"rename from\"\n        for i in $(seq 1 ${#name_from[@]}); do\n            n=${name_from[$((i-1))]}\n            id=${id_from[$((i-1))]}\n            grep \"RECORD:\" rh_scan.log | egrep \"RENME|RNMFM\" | grep $(basename $n) | grep $id || error \"Missing RENME $n\"\n        done\n\n        # \"rename to\"\n        for i in $(seq 1 ${#name_to[@]}); do\n            n=${name_to[$((i-1))]}\n            id=${id_to[$((i-1))]}\n            grep \"RECORD:\" rh_scan.log | grep RNMTO | grep $(basename $n) | grep $id || error \"Missing RNMTO $n\"\n        done\n\n        # unlinked targets\n        for i in $(seq 1 ${#name_unlnk[@]}); do\n            n=${name_unlnk[$((i-1))]}\n            id=${id_unlnk[$((i-1))]}\n            grep \"RECORD:\" rh_scan.log | grep UNLNK | grep $(basename $n) | grep $id || error \"Missing UNLNK $n\"\n        done\n\n    elif [ \"$flavor\" = \"scan\" ]; then\n        echo \"4. Scanning again...\"\n    \t$RH -f $RBH_CFG_DIR/$config_file --scan --once -l DEBUG -L rh_scan.log || error \"scanning\"\n    elif [ \"$flavor\" = \"diff\" ]; then\n        echo \"4. 
Diffing again...\"\n    \t$DIFF -f $RBH_CFG_DIR/$config_file --apply=db -l DEBUG > rh_scan.log 2>&1 || error \"scanning\"\n    elif [ \"$flavor\" = \"partial\" ]; then\n        i=0\n        for d in $dirs_tgt; do\n            # namespace GC needs 1s difference\n            sleep 1\n            ((i++))\n            echo \"4.$i Partial scan ($d)...\"\n        \t$RH -f $RBH_CFG_DIR/$config_file --scan=$d --once -l DEBUG -L rh_scan.log || error \"scanning $d\"\n        done\n    elif [ \"$flavor\" = \"partdiff\" ]; then\n        i=0\n        for d in $dirs_tgt; do\n            # namespace GC needs 1s difference\n            sleep 1\n            ((i++))\n            echo \"4.$i Partial diff+apply ($d)...\"\n        \t$DIFF -f $RBH_CFG_DIR/$config_file --scan=$d --apply=db -l DEBUG  > rh_scan.log 2>&1 || error \"scanning $d\"\n        done\n    fi\n\n    $REPORT -f $RBH_CFG_DIR/$config_file --dump-all -q > report.out || error \"$REPORT\"\n    [ \"$DEBUG\" = \"1\" ] && cat report.out\n\n    $FIND -f $RBH_CFG_DIR/$config_file $RH_ROOT -nobulk -ls > find.out || error \"$FIND\"\n    [ \"$DEBUG\" = \"1\" ] && cat find.out\n\n    # checking all objects in reports\n    for o in $dirs_tgt $files_tgt; do\n        grep -E \" $o$\" report.out > /dev/null || error \"$o not found in report\"\n        grep -E \" $o$\" find.out > /dev/null || error \"$o not found in report\"\n    done\n\n    grep \"\\[$rmid\\]\" find.out && error \"id of deleted file ($rmid) found in rbh-find output\"\n    unset count_nb_final\n\tif (( $is_lhsm + $is_hsmlite == 1 )); then\n\t\t# additionally check that the entry is scheduled for deferred rm (and only this one)\n\t    $REPORT -f $RBH_CFG_DIR/$config_file --deferred-rm --csv -q > rh_report.log\n\n        # The following test is not critical for partial scanning\n        # In the worst case, the deleted entry remains in the archive.\n        if [ \"$flavor\" = \"partial\" ] || [ \"$flavor\" = \"partdiff\" ]; then\n            grep \"\\[$rmid\\]\" 
rh_report.log > /dev/null || echo \"WARNING: $rmid should be in HSM rm list\"\n            # Conservative behavior for partial scans in front of an archive: allow n/a in paths\n            count_nb_final=$(awk '{print $(NF)}' report.out | grep -v 'n/a' | wc -l)\n        else\n            # in the other cases, raise an error\n            grep \"\\[$rmid\\]\" rh_report.log > /dev/null || error \"$rmid should be in HSM rm list\"\n        fi\n\n        # the following is the most CRITICAL, as this would result in removing archived entries\n        # for existing file!\n        grep -v \"\\[$rmid\\]\" rh_report.log && error \"Existing entries are in HSM rm list!!!\"\n\tfi\n\n    [ -z \"$count_nb_final\" ] && count_nb_final=$(wc -l report.out | awk '{print $1}')\n    count_path_final=$(wc -l find.out | awk '{print $1}')\n\n    (( $count_nb_final == $count_nb_init - 1)) || error \"1 entry should have been removed (rename target), got $(($count_nb_init - $count_nb_final))\"\n    (( $count_path_final == $count_path_init - 2)) || error \"2 paths should have been removed (rename target), got $(( $count_path_init - $count_path_final ))\"\n\n    rm -f report.out find.out\n}\n\nfunction test_unlink\n{\n    config_file=$1\n    flavor=$2\n\n    clean_logs\n\n\tif (( $no_log )); then\n            echo \"Changelogs not supported on this config: skipped\"\n            set_skipped\n            return 1\n    fi\n\n\t# Create one file and a hardlink\n    touch \"$RH_ROOT/foo1\"\n\tln \"$RH_ROOT/foo1\" \"$RH_ROOT/foo2\"\n\n\t# Check nlink == 2\n    $RH -f $RBH_CFG_DIR/$config_file --readlog --once -l DEBUG -L rh_scan.log || error \"reading changelog\"\n\t$FIND -f $RBH_CFG_DIR/$config_file $RH_ROOT/foo1 -nobulk -ls > report.out || error \"$REPORT\"\n\tnlink=$( cat report.out | awk '{ print $4; }' )\n\t(( $nlink == 2 )) || error \"nlink should be 2 instead of $nlink\"\n\n\t# Remove one file and check nlink == 1\n\trm \"$RH_ROOT/foo2\"\n    $RH -f $RBH_CFG_DIR/$config_file --readlog --once 
-l DEBUG -L rh_scan.log || error \"reading changelog\"\n\t$FIND -f $RBH_CFG_DIR/$config_file $RH_ROOT/foo1 -nobulk -ls > report.out || error \"$REPORT\"\n\tnlink=$( cat report.out | awk '{ print $4; }' )\n\t(( $nlink == 1 )) || error \"nlink should be 1 instead of $nlink\"\n\n\t# Add a new hard link and check nlink == 2\n\tln \"$RH_ROOT/foo1\" \"$RH_ROOT/foo3\"\n    $RH -f $RBH_CFG_DIR/$config_file --readlog --once -l DEBUG -L rh_scan.log || error \"reading changelog\"\n\t$FIND -f $RBH_CFG_DIR/$config_file $RH_ROOT/foo1 -nobulk -ls > report.out || error \"$REPORT\"\n\tnlink=$( cat report.out | awk '{ print $4; }' )\n\t(( $nlink == 2 )) || error \"nlink should be 1 instead of $nlink\"\n\n\t# Remove one file and check nlink == 1\n\trm \"$RH_ROOT/foo3\"\n    $RH -f $RBH_CFG_DIR/$config_file --readlog --once -l DEBUG -L rh_scan.log || error \"reading changelog\"\n\t$FIND -f $RBH_CFG_DIR/$config_file $RH_ROOT/foo1 -nobulk -ls > report.out || error \"$REPORT\"\n\tnlink=$( cat report.out | awk '{ print $4; }' )\n\t(( $nlink == 1 )) || error \"nlink should be 1 instead of $nlink\"\n\n    # Now create one hardlink, then remove it, but do not run RH in between.\n\tln \"$RH_ROOT/foo1\" \"$RH_ROOT/foo2\"\n\trm \"$RH_ROOT/foo2\"\n\t# check nlink == 1\n\t$RH -f $RBH_CFG_DIR/$config_file --readlog --once -l DEBUG -L rh_scan.log || error \"reading changelog\"\n\t$FIND -f $RBH_CFG_DIR/$config_file $RH_ROOT/foo1 -nobulk -ls > report.out || error \"$REPORT\"\n\tnlink=$( cat report.out | awk '{ print $4; }' )\n\t(( $nlink == 1 )) || error \"nlink should be 1 instead of $nlink\"\n\n    rm -f report.out find.out\n}\n\nfunction test_layout\n{\n    config_file=$1\n    flavor=$2\n\n    has_swap=0\n    $LFS --list-commands | grep swap_layout > /dev/null && has_swap=1\n\n    if (( $has_swap == 0 )); then\n        echo \"Layout change no supported on this config: skipped\"\n        set_skipped\n        return 1\n    fi\n\n    clean_logs\n\n\tif (( $no_log )); then\n        echo \"Changelogs 
not supported on this config: skipped\"\n        set_skipped\n        return 1\n    fi\n\n    # Create a file and change its layout.\n    DSTFILE=\"$RH_ROOT/foo1\"\n    $LFS setstripe -c 1 $DSTFILE\n    dd if=/dev/zero of=$DSTFILE bs=1M count=10\n    $LFS migrate -c 2 $DSTFILE\n\n\t# Check if a CL_LAYOUT record was emitted and triggered a getstripe().\n    $RH -f $RBH_CFG_DIR/$config_file --readlog --once -l DEBUG -L rh_scan.log || error \"reading changelog\"\n    ngetstripe_zero=$(grep LYOUT rh_scan.log | grep -c \"getstripe=0\")\n    ngetstripe=$(grep LYOUT rh_scan.log | grep -c \"getstripe=1\")\n    (( $ngetstripe_zero == 0 && $ngetstripe > 0 )) || error \"CL_LAYOUT should trigger a getstripe() operation.\"\n\n    $LFS migrate -c 1 $DSTFILE\n    fsdiff=$($RH -f $RBH_CFG_DIR/$config_file --readlog --diff=stripe --once -l DEBUG -L rh_scan.log)\n    (( $? == 0 )) || error \"reading changelog (diff)\"\n\n    [ \"$DEBUG\" = \"1\" ] && echo \"$fsdiff\"\n    echo $fsdiff | egrep \"\\-.*,stripe_count=2,.* \\+.*,stripe_count=1,.*\" > /dev/null || error \"missed layout change\"\n\n    rm -f $DSTFILE\n}\n\nfunction flavor2rbh_cmd\n{\n    case \"$1\" in\n        scan)\n            echo \"$RH --scan --once -L stderr\"\n            ;;\n        scandiff1) # default\n            echo \"$RH --scan --once --diff=all -L stderr\"\n            ;;\n        scandiff2) # explicit nostripe\n            echo \"$RH --scan --once --diff=posix -L stderr\"\n            ;;\n        scandiff3) # explicit stripe\n            echo \"$RH --scan --once --diff=stripe -L stderr\"\n            ;;\n        diffna1) # default\n            echo \"$DIFF --diff=all\"\n            ;;\n        diffna2) # explicit nostripe\n            echo \"$DIFF --diff=posix\"\n            ;;\n        diffna3) # explicit stripe\n            echo \"$DIFF --diff=stripe\"\n            ;;\n        diff1) # default\n            echo \"$DIFF --apply=db\"\n            ;;\n        diff2) # explicit nostripe\n            echo 
\"$DIFF --diff=posix --apply=db\"\n            ;;\n        diff3) # explicit stripe\n            echo \"$DIFF --diff=stripe --apply=db\"\n            ;;\n        cl)\n            echo \"$RH --readlog --once -L stderr\"\n            ;;\n        cldiff1) # default\n            echo \"$RH --readlog --once --diff=all -L stderr\"\n            ;;\n        cldiff2) # explicit nostripe\n            echo \"$RH --readlog --once --diff=posix -L stderr\"\n            ;;\n        cldiff3) # explicit stripe\n            echo \"$RH --readlog --once --diff=stripe -L stderr\"\n            ;;\n    esac\n}\n\nfunction run_scan_cmd\n{\n    local cfg=$1\n    local mode=$2\n\n    local cmd=$(flavor2rbh_cmd $mode)\n\n    :> rh.out\n    $cmd -f $RBH_CFG_DIR/$cfg -l FULL > rh.out 2>> rh.log || error \"running $cmd\"\n\tcheck_db_error rh.log\n    grep -E \"Warning\" rh.log && grep -E \"doesn't match stripe count\" rh.log > /dev/null && error \"Stripe count mismatch detected\"\n}\n\nfunction scan_check_no_update\n{\n    cfg=$1\n    mode=$2\n\n    # no stripe update expected for 2nd run\n    :> rh.log\n    :> rh.out\n    run_scan_cmd $cfg $mode\n    grep STRIPE_I rh.log | egrep -i \"INSERT|DELETE|UPDATE\" && error \"No stripe update expected during second run\"\n}\n\n# check diff output (rh.out) when [[ $flavor = *\"diff\"* ]]\nfunction check_stripe_diff\n{\n    old=\"$1\"\n    new=\"$2\"\n    expect=$3\n    if [ $expect = 1 ]; then\n        if [ -n \"$old\" ]; then\n            egrep \"^\\-\" rh.out | egrep \"$old\"  || error \"pattern '- ... $old' not found in diff output\"\n        fi\n        if [ -n \"$new\" ]; then\n            egrep \"^\\+\" rh.out | egrep \"$new\"  || error \"pattern '+ ... $new' not found in diff output\"\n        fi\n    else\n        if [ -n \"$old\" ]; then\n            egrep \"^\\-\" rh.out | egrep \"$old\"  && error \"pattern '- ... 
$old' not expected in diff output\"\n        fi\n        if [ -n \"$new\" ]; then\n            egrep \"^\\+\" rh.out | egrep \"$new\"  && error \"pattern '+ ... $new' not expected in diff output\"\n        fi\n    fi\n}\n\nfunction check_stripe\n{\n    local cfg=$1\n    local f=$2\n    local pattern=$3\n\n    :> rh.out\n    $REPORT -f $RBH_CFG_DIR/$cfg -c -e $f > rh.out 2>> rh.log || error \"$f not in RBH DB\"\n\tcheck_db_error rh.log\n    egrep \"^stripes,\" rh.out | egrep \"$pattern\" || error \"pattern \\\"$pattern\\\" not found in report output: $(cat rh.out)\"\n}\n\nfunction stripe_update\n{\n    config_file=$1\n    flavor=$2 # way to update stripe info (scan, scan diff <mask1>, scan diff <mask2>,\n              # diff (no apply, apply=db)x(mask1, ..maskN), changelog...)\n              # see function flavor2rbh_opt().\n\n    :> rh.out\n    :> rh.log\n\n\tif [ -n \"$POSIX_MODE\" ]; then\n        echo \"No stripe information in POSIX mode\"\n        set_skipped\n        return 1\n    fi\n\n    has_swap=0\n    $LFS --list-commands | grep swap_layout > /dev/null && has_swap=1\n    getstripe=1 # allow getstripe\n    [ $has_swap = 1 ] && getstripe=0 # no getstripe expected\n    diff=0\n\n\tif [[ $flavor = \"cl\"* ]] && (( $no_log )); then\n        echo \"Changelogs not supported on this config: skipped\"\n        set_skipped\n        return 1\n    fi\n\n    [[ $flavor = \"cl\"* ]] && clean_logs\n    [[ $flavor = \"cl\"* ]] && getstripe=1 # getstripe allowed\n\n    # only diff1 and 3 should display stripe changes\n    [[ $flavor = *\"diff\"* ]] && [[ $flavor != *\"2\" ]] && diff=1\n    rm -f $RH_ROOT/file.*\n\n    echo \"test setup: checking diff=$diff, getstripe allowed=$getstripe, has_swap=$has_swap\"\n\n    echo \"- non-striped file\"\n    # case 1 (all Lustre versions): create an unstriped file, then stripe it\n    create_nostripe $RH_ROOT/file.1 || error \"creating unstriped file\"\n    run_scan_cmd $config_file $flavor\n    # FIXME: there are getstripe for 
shook locks...\n    fid=$(get_id \"$RH_ROOT/file.1\")\n    [ $getstripe = 0 ] && egrep \"$fid.*Getstripe=1\" rh.log && error \"No getstripe operation expected\"\n    [ $diff = 1 ] && check_stripe_diff \"\" \"stripe_count=0\" 1\n    check_stripe $config_file $RH_ROOT/file.1 \"none\"\n\n    # no update expected for second run\n    scan_check_no_update $config_file $flavor\n    [ $getstripe = 0 ] && egrep \"$fid.*Getstripe=1\" rh.log && error \"No getstripe operation expected\"\n    [ $diff = 1 ] && check_stripe_diff \"stripe\" \"stripe\" 0 # no stripe change expected\n    check_stripe $config_file $RH_ROOT/file.1 \"none\"\n\n    # check if \"getstripe -g\" exists\n    has_gen=0\n    $LFS getstripe -g $RH_ROOT/ 2>/dev/null && has_gen=1\n\n    # stripe it\n    echo \"- stripe file\"\n    $LFS setstripe -c 1 $RH_ROOT/file.1 || error \"setting file stripe\"\n    idx=$($LFS getstripe -i $RH_ROOT/file.1)\n    [ \"$DEBUG\" = \"1\" ] && echo \"$RH_ROOT/file.1: ost$idx\"\n    [ \"$DEBUG\" = \"1\" ] && [ \"$has_gen\" = \"1\" ] && echo \"$RH_ROOT/file.1: gen $($LFS getstripe -g $RH_ROOT/file.1)\"\n    run_scan_cmd $config_file $flavor\n    [ $getstripe = 0 ] && egrep \"$fid.*Getstripe=1\" rh.log && error \"No getstripe operation expected\"\n    [ $diff = 1 ] && check_stripe_diff \"stripe_count=0\" \"stripe_count=1\" 1\n    check_stripe $config_file $RH_ROOT/file.1 \"ost#$idx\"\n\n    # no update expected for second run\n    [ \"$DEBUG\" = \"1\" ] &&  [ \"$has_gen\" = \"1\" ] && echo \"$RH_ROOT/file.1: gen $($LFS getstripe -g $RH_ROOT/file.1)\"\n    scan_check_no_update $config_file $flavor\n    [ $getstripe = 0 ] && egrep \"$fid.*Getstripe=1\" rh.log && error \"No getstripe operation expected\"\n    [ $diff = 1 ] && check_stripe_diff \"stripe\" \"stripe\" 0 # no stripe change expected\n    check_stripe $config_file $RH_ROOT/file.1 \"ost#$idx\"\n\n    # other cases: play with layout_swap (skip for Lustre < 2.4)\n    if (( $has_swap == 0 )); then\n        echo \"No layout swap: 
skipping the end of the test\"\n        return 0\n    fi\n\n    # swap with another striped file\n    $LFS setstripe -c 1 $RH_ROOT/file.2 || error \"creating striped file\"\n    idx2=$($LFS getstripe -i $RH_ROOT/file.2)\n    [ \"$DEBUG\" = \"1\" ] && echo \"$RH_ROOT/file.2: ost$idx2\"\n    echo \"- swap it with striped file\"\n    $LFS swap_layouts $RH_ROOT/file.1 $RH_ROOT/file.2 || error \"swapping file layouts\"\n    [ \"$DEBUG\" = \"1\" ] &&  [ \"$has_gen\" = \"1\" ] && echo \"$RH_ROOT/file.1: gen $($LFS getstripe -g $RH_ROOT/file.1)\"\n    run_scan_cmd $config_file $flavor\n# FIXME getfid\n    [ $getstripe = 0 ] && egrep \"$fid.*Getstripe=1\" rh.log && error \"No getstripe operation expected\"\n    [ $diff = 1 ] && check_stripe_diff \"stripes={ost#$idx\" \"stripes={ost#$idx2\" 1\n    check_stripe $config_file $RH_ROOT/file.1 \"ost#$idx2\"\n\n    # no update expected for second run\n    [ \"$DEBUG\" = \"1\" ] &&  [ \"$has_gen\" = \"1\" ] && echo \"$RH_ROOT/file.1: gen $($LFS getstripe -g $RH_ROOT/file.1)\"\n    scan_check_no_update $config_file $flavor\n    [ $getstripe = 0 ] && egrep \"$fid.*Getstripe=1\" rh.log && error \"No getstripe operation expected\"\n    [ $diff = 1 ] && check_stripe_diff \"stripe\" \"stripe\" 0 # no stripe change expected\n    check_stripe $config_file $RH_ROOT/file.1 \"ost#$idx2\"\n\n    # swap with non-striped file\n    create_nostripe $RH_ROOT/file.3 || error \"creating unstriped file\"\n    echo \"- swap it with non-striped file\"\n    $LFS swap_layouts $RH_ROOT/file.1 $RH_ROOT/file.3 || error \"swapping file layouts\"\n    [ \"$DEBUG\" = \"1\" ] &&  [ \"$has_gen\" = \"1\" ] && echo \"$RH_ROOT/file.1: gen $($LFS getstripe -g $RH_ROOT/file.1)\"\n    run_scan_cmd $config_file $flavor\n    [ $getstripe = 0 ] && egrep \"$fid.*Getstripe=1\" rh.log && error \"No getstripe operation expected\"\n    [ $diff = 1 ] && check_stripe_diff \"stripe_count=1\" \"stripe_count=0\" 1\n    check_stripe $config_file $RH_ROOT/file.1 \"none\"\n\n    [ 
\"$DEBUG\" = \"1\" ] &&  [ \"$has_gen\" = \"1\" ] && echo \"$RH_ROOT/file.1: gen $($LFS getstripe -g $RH_ROOT/file.1)\"\n    scan_check_no_update $config_file $flavor\n    [ $getstripe = 0 ] && egrep \"$fid.*Getstripe=1\" rh.log && error \"No getstripe operation expected\"\n    [ $diff = 1 ] && check_stripe_diff \"stripe\" \"stripe\" 0 # no stripe change expected\n    check_stripe $config_file $RH_ROOT/file.1 \"none\"\n\n    return 0\n}\n\nfunction stripe_no_update\n{\n    config_file=$1\n    flavor=$2 # way to update stripe info (scan, scan diff <mask1>, scan diff <mask2>,\n              # diff (no apply, apply=db)x(mask1, ..maskN), changelog...)\n              # see function flavor2rbh_opt().\n\n    :> rh.out\n    :> rh.log\n\n\tif [ -n \"$POSIX_MODE\" ]; then\n        echo \"No stripe information in POSIX mode\"\n        set_skipped\n        return 1\n    fi\n\n    has_swap=0\n    $LFS --list-commands | grep swap_layout > /dev/null && has_swap=1\n    getstripe=1 # allow getstripe\n    [ $has_swap = 1 ] && getstripe=0 # no getstripe expected\n    # only diff1 and 3 should display stripe changes\n    diff=0\n    [[ $flavor = *\"diff\"* ]] && [[ $flavor != *\"2\" ]] && diff=1\n\n\tif [[ $flavor = \"cl\"* ]] && (( $no_log )); then\n        echo \"Changelogs not supported on this config: skipped\"\n        set_skipped\n        return 1\n    fi\n\n    rm -f $RH_ROOT/file.*\n\n    echo \"test setup: checking diff=$diff, getstripe allowed=$getstripe, has_swap=$has_swap\"\n\n    # initial scan\n    run_scan_cmd $config_file \"scan\"\n\n    echo \"- non-striped file\"\n    # case 1 (all Lustre versions): create an unstriped file, then stripe it\n    create_nostripe $RH_ROOT/file.1 || error \"creating unstriped file\"\n    # no update expected for the given specified run\n    scan_check_no_update $config_file $flavor\n    [ $getstripe = 0 ] && egrep \"Getstripe=1\" rh.log && error \"No getstripe operation expected\"\n    [ $diff = 1 ] && check_stripe_diff \"\" 
\"stripe_count=0\" 1\n    # update db contents\n    run_scan_cmd $config_file \"scan\"\n    check_stripe $config_file $RH_ROOT/file.1 \"none\"\n\n    # check if \"getstripe -g\" exists\n    has_gen=0\n    $LFS getstripe -g $RH_ROOT/ 2>/dev/null && has_gen=1\n\n    # stripe it\n    echo \"- stripe file\"\n    $LFS setstripe -c 1 $RH_ROOT/file.1 || error \"setting file stripe\"\n    idx=$($LFS getstripe -i $RH_ROOT/file.1)\n    [ \"$DEBUG\" = \"1\" ] && echo \"$RH_ROOT/file.1: ost$idx\"\n    [ \"$DEBUG\" = \"1\" ] && [ \"$has_gen\" = \"1\" ] && echo \"$RH_ROOT/file.1: gen $($LFS getstripe -g $RH_ROOT/file.1)\"\n    scan_check_no_update $config_file $flavor\n    [ $getstripe = 0 ] && egrep \"Getstripe=1\" rh.log && error \"No getstripe operation expected\"\n    [ $diff = 1 ] && check_stripe_diff \"stripe_count=0\" \"stripe_count=1\" 1\n    run_scan_cmd $config_file \"scan\"\n    check_stripe $config_file $RH_ROOT/file.1 \"ost#$idx\"\n\n    # other cases: play with layout_swap (skip for Lustre < 2.4)\n\n    if (( $has_swap == 0 )); then\n        echo \"No layout swap: skipping the end of the test\"\n        return 0\n    fi\n\n    # swap with another striped file\n    $LFS setstripe -c 1 $RH_ROOT/file.2 || error \"creating striped file\"\n    idx2=$($LFS getstripe -i $RH_ROOT/file.2)\n    [ \"$DEBUG\" = \"1\" ] && echo \"$RH_ROOT/file.2: ost$idx2\"\n    echo \"- swap it with striped file\"\n    $LFS swap_layouts $RH_ROOT/file.1 $RH_ROOT/file.2 || error \"swapping file layouts\"\n    [ \"$DEBUG\" = \"1\" ] && [ \"$has_gen\" = \"1\" ] && echo \"$RH_ROOT/file.1: gen $($LFS getstripe -g $RH_ROOT/file.1)\"\n    scan_check_no_update $config_file $flavor\n    [ $getstripe = 0 ] && egrep \"Getstripe=1\" rh.log && error \"No getstripe operation expected\"\n    [ $diff = 1 ] && check_stripe_diff \"stripes={ost#$idx\" \"stripes={ost#$idx2\" 1\n    run_scan_cmd $config_file \"scan\"\n    check_stripe $config_file $RH_ROOT/file.1 \"ost#$idx2\"\n\n    # swap with non-striped file\n  
  create_nostripe $RH_ROOT/file.3 || error \"creating unstriped file\"\n    echo \"- swap it with non-striped file\"\n    $LFS swap_layouts $RH_ROOT/file.1 $RH_ROOT/file.3 || error \"swapping file layouts\"\n    [ \"$DEBUG\" = \"1\" ] && [ \"$has_gen\" = \"1\" ] && echo \"$RH_ROOT/file.1: gen $($LFS getstripe -g $RH_ROOT/file.1)\"\n    scan_check_no_update $config_file $flavor\n    [ $getstripe = 0 ] && egrep \"Getstripe=1\" rh.log && error \"No getstripe operation expected\"\n    [ $diff = 1 ] && check_stripe_diff \"stripe_count=1\" \"stripe_count=0\" 1\n    run_scan_cmd $config_file \"scan\"\n    check_stripe $config_file $RH_ROOT/file.1 \"none\"\n\n    return 0\n}\n\n\n# test link/unlink/rename\n# flavors=readlog, scan, partial scan\nfunction test_hardlinks\n{\n    config_file=$1\n    flavor=$2\n\n    clean_logs\n\n\tif (( $no_log )) && [ \"$flavor\" = \"readlog\" ]; then\n            echo \"Changelogs not supported on this config: skipped\"\n            set_skipped\n            return 1\n    fi\n\n    dirs=\"$RH_ROOT/dir.1 $RH_ROOT/dir.2 $RH_ROOT/dir.3 $RH_ROOT/dir.3/subdir $RH_ROOT/dir.4\"\n    files=\"$RH_ROOT/dir.1/file.1  $RH_ROOT/dir.1/file.2  $RH_ROOT/dir.2/file.1 $RH_ROOT/dir.2/file.2 $RH_ROOT/dir.2/file.4 $RH_ROOT/dir.3/subdir/file.1 $RH_ROOT/dir.4/file.3\"\n    hlink_refs=(\"$RH_ROOT/dir.2/file.3\" \"$RH_ROOT/dir.4/file.1\" \"$RH_ROOT/dir.4/file.2\")\n    hlinks=(\"$RH_ROOT/dir.2/link_file\" \"$RH_ROOT/dir.1/link.1 $RH_ROOT/dir.2/link.1\" \"$RH_ROOT/dir.2/link.2\")\n    #[0] file.4 will over write it, [1] one more link will be created, [2]previous path ($RH_ROOT/dir.4/file.2) will be removed\n\n    dirs_tgt=\"$RH_ROOT/dir.1 $RH_ROOT/dir.2 $RH_ROOT/dir.3 $RH_ROOT/dir.3/subdir.rnm $RH_ROOT/dir.4\"\n    files_tgt=\"$RH_ROOT/dir.1/file.1.rnm  $RH_ROOT/dir.2/file.2.rnm  $RH_ROOT/dir.2/file.2  $RH_ROOT/dir.2/file.3  $RH_ROOT/dir.2/link_file $RH_ROOT/dir.3/subdir.rnm/file.1 $RH_ROOT/dir.2/link.2 $RH_ROOT/dir.1/new\"\n    
hlink_refs_tgt=(\"$RH_ROOT/dir.4/file.1\" \"$RH_ROOT/dir.2/new\")\n    hlinks_tgt=(\"$RH_ROOT/dir.1/link.1 $RH_ROOT/dir.2/link.1 $RH_ROOT/dir.4/link.1\" \"$RH_ROOT/dir.4/link.new\")\n        # only previous [1] remaining as [0], [1] is a new link\n\n    deleted=\"$RH_ROOT/dir.2/file.2 $RH_ROOT/dir.4/file.3\"\n\n    # create several files/dirs\n    echo \"1. Creating initial objects...\"\n    mkdir $dirs || error \"mkdir $dirs\"\n    touch $files ${hlink_refs[*]} || error \"touch $files ${hlink_refs[*]}\"\n    i=0\n    nb_ln=0\n    while [ -n \"${hlink_refs[$i]}\" ]; do\n        for l in ${hlinks[$i]}; do\n            ln ${hlink_refs[$i]} $l || error \"hardlink ${hlink_refs[$i]} $l\"\n            ((nb_ln++))\n        done\n        ((i++))\n    done\n\n    # get id of deleted entries\n    rmids=\"\"\n    for f in $deleted; do\n        rmids=\"$rmids `get_id $f`\"\n    done\n    [ \"$DEBUG\" = \"1\" ] && echo \"ids to be deleted: $rmids\"\n\n    # readlog or scan\n    if [ \"$flavor\" = \"readlog\" ]; then\n        echo \"2. Reading changelogs...\"\n    \t$RH -f $RBH_CFG_DIR/$config_file --readlog --once -l DEBUG -L rh_scan.log || error \"reading changelog\"\n    elif [ \"$flavor\" = \"diff\" ]; then\n        echo \"2. Diff...\"\n    \t$DIFF -f $RBH_CFG_DIR/$config_file --apply=db -l DEBUG > rh_scan.log 2>&1 || error \"scanning\"\n    else\n        echo \"2. 
Scanning initial state...\"\n    \t$RH -f $RBH_CFG_DIR/$config_file --scan --once -l DEBUG -L rh_scan.log || error \"scanning\"\n    fi\n\n\tif (( $is_lhsm != 0 )); then\n\t\techo \"  -archiving all data\"\n\t\tflush_data\n\t\t$LFS hsm_archive $files || error \"executing lfs hsm_archive\"\n\t\techo \"  -Waiting for end of data migration...\"\n\t\twait_done 60 || error \"Migration timeout\"\n\telif (( $is_hsmlite != 0 )); then\n\t\techo \"  -archiving all data\"\n\t\t$RH -f $RBH_CFG_DIR/$config_file $SYNC_OPT -l DEBUG -L rh_migr.log || error \"executing $CMD --sync\"\n        [ \"$DEBUG\" = \"1\" ] && find $BKROOT -type f -ls\n\tfi\n\n    $REPORT -f $RBH_CFG_DIR/$config_file --dump-all -q > report.out || error \"$REPORT\"\n    [ \"$DEBUG\" = \"1\" ] && cat report.out\n\n    $FIND -f $RBH_CFG_DIR/$config_file $RH_ROOT -nobulk -ls > find.out || error \"$FIND\"\n    [ \"$DEBUG\" = \"1\" ] && cat find.out\n\n    # checking all objects in reports\n    for o in $dirs $files; do\n        grep -E \" $o$\" report.out > /dev/null || error \"$o not found in report\"\n        grep -E \" $o$\" find.out > /dev/null || error \"$o not found in find\"\n    done\n\n    i=0\n    while [ -n \"${hlink_refs[$i]}\" ]; do\n        file=\"${hlink_refs[$i]}\"\n        ok=0\n        grep -E \" $file$\" find.out > /dev/null || error \"$file must be in rbh-find output\"\n        grep -E \" $file$\" report.out > /dev/null && ok=1\n        for l in ${hlinks[$i]}; do\n            grep -E \" $l$\" find.out > /dev/null || error \"$l must be in rbh-find output\"\n            grep -E \" $l$\" report.out  > /dev/null && ok=1\n        done\n        [ \"$ok\" = \"0\" ] && error \"$file or its hardlinks (${hlinks[$i]}) must be in report output\"\n        ((i++))\n    done\n\n    count_nb_init=$(wc -l report.out | awk '{print $1}')\n    count_path_init=$(grep -v \"$RH_ROOT$\" find.out | wc -l)\n    echo \"nbr_inodes=$count_nb_init, nb_paths=$count_path_init, nb_ln=$nb_ln\"\n    (( $count_path_init == 
$count_nb_init + $nb_ln )) || error \"nb path != nb_inode + nb_ln\"\n\n    # rename entries\n    echo \"3. Linking/unlinking/renaming objects...\"\n    # 1) simple file rename\n    mv $RH_ROOT/dir.1/file.1 $RH_ROOT/dir.1/file.1.rnm\n    # 2) cross directory file rename\n    mv $RH_ROOT/dir.1/file.2 $RH_ROOT/dir.2/file.2.rnm\n    # 3) rename that deletes the target\n    mv -f $RH_ROOT/dir.2/file.1 $RH_ROOT/dir.2/file.2\n    # 4) upper level directory rename\n    mv $RH_ROOT/dir.3/subdir $RH_ROOT/dir.3/subdir.rnm\n    # 5) overwriting a hardlink\n    mv -f $RH_ROOT/dir.2/file.4 ${hlinks[0]}\n    ((nb_ln--))\n    # 6) creating new link to \"dir.4/file.1\"\n    ln \"$RH_ROOT/dir.4/file.1\" \"$RH_ROOT/dir.4/link.1\"\n    ((nb_ln++))\n    # 7) removing 1 link (dir.2/link.2 remains)\n    rm \"$RH_ROOT/dir.4/file.2\"\n    ((nb_ln--))\n    # 8) removing 1 file\n    rm \"$RH_ROOT/dir.4/file.3\"\n    # 9) creating 1 file\n    touch \"$RH_ROOT/dir.1/new\"\n    # 10) creating 1 file with hardlink\n    touch \"$RH_ROOT/dir.2/new\"\n    ln \"$RH_ROOT/dir.2/new\" \"$RH_ROOT/dir.4/link.new\"\n    ((nb_ln++))\n\n    # namespace GC needs 1s difference\n    sleep 1\n\n    # readlog or re-scan\n    if [ \"$flavor\" = \"readlog\" ]; then\n        echo \"4. Reading changelogs...\"\n    \t$RH -f $RBH_CFG_DIR/$config_file --readlog --once -l DEBUG -L rh_scan.log || error \"reading changelog\"\n    elif [ \"$flavor\" = \"scan\" ]; then\n        echo \"4. Scanning again...\"\n    \t$RH -f $RBH_CFG_DIR/$config_file --scan --once -l DEBUG -L rh_scan.log || error \"scanning\"\n    elif [ \"$flavor\" = \"diff\" ]; then\n        echo \"4. 
Diffing again...\"\n    \t$DIFF -f $RBH_CFG_DIR/$config_file --apply=db -l DEBUG > rh_scan.log 2>&1 || error \"scanning\"\n    elif [ \"$flavor\" = \"partial\" ]; then\n        i=0\n        for d in $dirs_tgt; do\n            # namespace GC needs 1s difference\n            sleep 1\n            ((i++))\n            echo \"4.$i Partial scan ($d)...\"\n        \t$RH -f $RBH_CFG_DIR/$config_file --scan=$d --once -l DEBUG -L rh_scan.log || error \"scanning $d\"\n        done\n    elif [ \"$flavor\" = \"partdiff\" ]; then\n        i=0\n        for d in $dirs_tgt; do\n            # namespace GC needs 1s difference\n            sleep 1\n            ((i++))\n            echo \"4.$i Partial diff+apply ($d)...\"\n        \t$DIFF -f $RBH_CFG_DIR/$config_file --scan=$d --apply=db -l DEBUG  > rh_scan.log 2>&1 || error \"scanning $d\"\n        done\n    fi\n\n    $REPORT -f $RBH_CFG_DIR/$config_file --dump-all -q > report.out || error \"$REPORT\"\n    [ \"$DEBUG\" = \"1\" ] && cat report.out\n\n    $FIND -f $RBH_CFG_DIR/$config_file $RH_ROOT -nobulk -ls > find.out || error \"$FIND\"\n    [ \"$DEBUG\" = \"1\" ] && cat find.out\n\n\n\n    # checking all objects in reports\n    for o in $dirs_tgt $files_tgt; do\n        grep -E \" $o$\" report.out > /dev/null || error \"$o not found in report\"\n        grep -E \" $o$\" find.out > /dev/null || error \"$o not found in find\"\n    done\n\n    for f in $rmids; do\n        grep \"\\[$f\\]\" find.out && error \"deleted id ($f) found in find output\"\n    done\n\n    unset count_nb_final\n    if (( $is_lhsm + $is_hsmlite == 1 )); then\n        # check that removed entries are scheduled for HSM rm\n        $REPORT -f $RBH_CFG_DIR/$config_file --deferred-rm --csv -q > rh_report.log\n        for f in $rmids; do\n\n            # The following test is not critical for partial scanning\n            # In the worst case, the deleted entry remains in the archive.\n            if [ \"$flavor\" = \"partial\" ] || [ \"$flavor\" = \"partdiff\" ]; 
then\n                grep \"\\[$f\\]\" rh_report.log > /dev/null || echo \"WARNING: $f should be in HSM rm list\"\n                # Conservative behavior for partial scans in front of an archive: allow n/a in paths\n                count_nb_final=$(awk '{print $(NF)}' report.out | grep -v 'n/a' | wc -l)\n            else\n                # in the other cases, raise an error\n                grep \"\\[$f\\]\" rh_report.log > /dev/null || error \"$f should be in HSM rm list\"\n            fi\n\n            grep -v \"\\[$f\\]\" rh_report.log > rh_report.log.1\n            mv rh_report.log.1 rh_report.log\n        done\n        left=$(wc -l rh_report.log | awk '{print $1}')\n        if (($left > 0)); then\n            error \"Some existing entries are scheduled for HSM rm!!!\"\n            cat rh_report.log\n        fi\n    fi\n\n\n    i=0\n    while [ -n \"${hlink_refs_tgt[$i]}\" ]; do\n        file=\"${hlink_refs_tgt[$i]}\"\n        ok=0\n        grep -E \" $file$\" find.out > /dev/null || error \"$file must be in rbh-find output\"\n        grep -E \" $file$\" report.out > /dev/null && ok=1\n        for l in ${hlinks_tgt[$i]}; do\n            grep -E \" $l$\" find.out > /dev/null || error \"$l must be in rbh-find output\"\n            grep -E \" $l$\" report.out  > /dev/null && ok=1\n        done\n        [ \"$ok\" = \"0\" ] && error \"$file or its hardlinks (${hlinks_tgt[$i]}) must be in report output\"\n        ((i++))\n    done\n    [ -z \"$count_nb_final\" ] && count_nb_final=$(wc -l report.out | awk '{print $1}')\n    count_path_final=$(grep -v \"$RH_ROOT$\" find.out | wc -l)\n\n    echo \"nbr_inodes=$count_nb_final, nb_paths=$count_path_final, nb_ln=$nb_ln\"\n    (( $count_nb_final == $count_nb_init)) || error \"same entry count ($count_nb_init) expected (2 deleted, 2 created)\"\n    (( $count_path_final == $count_nb_final + $nb_ln )) || error \"nb path != nb_inode + nb_ln\"\n\n    rm -f report.out find.out\n}\n\nfunction test_hl_count\n{\n\tlocal 
config_file=$1\n    local dcount=3\n    local fcount=2\n\n    clean_logs\n    # populate file system with simple files\n\n    for d in $(seq 1 $dcount); do\n        mkdir $RH_ROOT/dir.$d || error \"cannot create $RH_ROOT/dir.$d\"\n    for f in $(seq 1 $fcount); do\n        touch $RH_ROOT/dir.$d/file.$f || error \"cannot create $RH_ROOT/dir.$d/file.$f\"\n    done\n    done\n\n    # scan\n   \t$RH -f $RBH_CFG_DIR/$config_file --scan --once -l DEBUG -L rh_scan.log || error \"scanning $RH_ROOT\"\n\n    ino=$(( $dcount * $fcount + $dcount ))\n    ino_subdir=$(($fcount + 1))\n\n    # reports to be checked:\n    #   dump report (9 entries, no root)\n    (($($REPORT -f $RBH_CFG_DIR/$config_file -D -q | wc -l) == ($ino+$extra_dir) )) || error \"wrong count in 'rbh-report -D' output\"\n    #   dump report with path filter (3 entries)\n    (($($REPORT -f $RBH_CFG_DIR/$config_file -D -q -P $RH_ROOT/dir.1 | wc -l) == $ino_subdir )) || error \"wrong count in 'rbh-report -D -P <path>' output\"\n    #   dump find output (whole FS) (10 entries, incl. 
root)\n    (($($FIND -f $RBH_CFG_DIR/$config_file -nobulk | wc -l) == $ino + 1 + $extra_dir))  || error \"wrong count in 'rbh-find' output\"\n    #   dump find output (subdir: 3 entries)\n    (($($FIND -f $RBH_CFG_DIR/$config_file $RH_ROOT/dir.1 -nobulk | wc -l) == $ino_subdir )) || error \"wrong count in 'rbh-find <path>' output\"\n\n    #   dump summary (9 entries)\n    $REPORT -f $RBH_CFG_DIR/$config_file -icq > report.out\n    [ \"$DEBUG\" = \"1\" ] && cat report.out\n    typeValues=\"dir;file\"\n  \tcountValues=\"$(($dcount+$extra_dir));$(($dcount * $fcount))\"\n    # type counts are in 2nd column\n   \tcolSearch=2\n\tfind_allValuesinCSVreport report.out $typeValues $countValues $colSearch || error \"wrong count in 'rbh-report -i' output\"\n\n    #   dump summary with path filter (3 entries)\n    $REPORT -f $RBH_CFG_DIR/$config_file -iq -P $RH_ROOT/dir.1 > report.out\n    [ \"$DEBUG\" = \"1\" ] && cat report.out\n  \tcountValues=\"1;$fcount\"\n\tfind_allValuesinCSVreport report.out $typeValues $countValues $colSearch || error \"wrong count in 'rbh-report -i -P <path>' output\"\n\n    # create 1 hardlink per file and recheck\n    for d in $(seq 1 $dcount); do\n    for f in $(seq 1 $fcount); do\n        ln $RH_ROOT/dir.$d/file.$f $RH_ROOT/dir.$d/link.$f || error \"cannot create hardlink $RH_ROOT/dir.$d/link.$f -> $RH_ROOT/dir.$d/file.$f\"\n    done\n    done\n\n    # rescan\n   \t$RH -f $RBH_CFG_DIR/$config_file --scan --once -l DEBUG -L rh_scan.log || error \"scanning $RH_ROOT\"\n\n    paths=$(( $dcount * $fcount * 2 + $dcount + $extra_dir ))\n    paths_subdir=$(($fcount * 2 + 1))\n\n    #   dump report (still 9 entries, no root)\n    (($($REPORT -f $RBH_CFG_DIR/$config_file -D -q | wc -l) == $ino + $extra_dir )) || error \"wrong count in 'rbh-report -D' output\"\n    #   dump report with path filter (still 3 entries)\n    (($($REPORT -f $RBH_CFG_DIR/$config_file -D -q -P $RH_ROOT/dir.1 | wc -l) == $ino_subdir )) || error \"wrong count in 'rbh-report -D -P 
<path>' output\"\n    #   dump find output (whole FS) (\n    (($($FIND -f $RBH_CFG_DIR/$config_file -nobulk | wc -l) == $paths + 1 ))  || error \"wrong count in 'rbh-find' output\"\n    #   dump find output (subdir: 3 entries)\n    (($($FIND -f $RBH_CFG_DIR/$config_file $RH_ROOT/dir.1 -nobulk | wc -l) == $paths_subdir )) || error \"wrong count in 'rbh-find <path>' output\"\n\n    #   dump summary (9 entries)\n    $REPORT -f $RBH_CFG_DIR/$config_file -icq > report.out\n    [ \"$DEBUG\" = \"1\" ] && cat report.out\n  \tcountValues=\"$(($dcount + $extra_dir));$(($dcount * $fcount))\"\n\tfind_allValuesinCSVreport report.out $typeValues $countValues $colSearch || error \"wrong count in 'rbh-report -i' output\"\n\n    #   dump summary with path filter (3 entries)\n    $REPORT -f $RBH_CFG_DIR/$config_file -iq -P $RH_ROOT/dir.1 > report.out\n    [ \"$DEBUG\" = \"1\" ] && cat report.out\n  \tcountValues=\"1;$fcount\"\n\tfind_allValuesinCSVreport report.out $typeValues $countValues $colSearch || error \"wrong count in 'rbh-report -i -P <path>' output\"\n\n\n    rm -f report.out\n}\n\nfunction test_pools\n{\n\tconfig_file=$1\n\tsleep_time=$2\n\tpolicy_str=\"$3\"\n\n    if [ -n \"$POSIX_MODE\" ]; then\n        echo \"No pools support in POSIX mode\"\n        set_skipped\n        return 1\n    fi\n\n\tcreate_pools\n\n\tclean_logs\n\n\t# create files in different pools (or not)\n\ttouch $RH_ROOT/no_pool.1 || error \"creating file\"\n\ttouch $RH_ROOT/no_pool.2 || error \"creating file\"\n\t$LFS setstripe -p lustre.$POOL1 $RH_ROOT/in_pool_1.a || error \"creating file in $POOL1\"\n\t$LFS setstripe -p lustre.$POOL1 $RH_ROOT/in_pool_1.b || error \"creating file in $POOL1\"\n\t$LFS setstripe -p lustre.$POOL2 $RH_ROOT/in_pool_2.a || error \"creating file in $POOL2\"\n\t$LFS setstripe -p lustre.$POOL2 $RH_ROOT/in_pool_2.b || error \"creating file in $POOL2\"\n\n\tsleep $sleep_time\n\n\t# read changelogs\n\tif (( $no_log )); then\n\t\techo \"1.1-scan and match...\"\n\t\t$RH -f 
$RBH_CFG_DIR/$config_file --scan -l VERB -L rh_chglogs.log  --once || error \"\"\n\telse\n\t\techo \"1.1-read changelog and match...\"\n\t\t$RH -f $RBH_CFG_DIR/$config_file --readlog -l VERB -L rh_chglogs.log  --once || error \"\"\n\tfi\n\n\n\techo \"1.2-checking report output...\"\n\t# check classes in report output\n\t$REPORT -f $RBH_CFG_DIR/$config_file --dump-all -c > report.out || error \"\"\n\tcat report.out\n\n\techo \"1.3-checking robinhood log...\"\n\tgrep \"Missing attribute\" rh_chglogs.log && error \"missing attribute when matching classes\"\n\n\t# fileclass field index\n    pf=5\n\n\tfor i in 1 2; do\n        ( [ \"`grep \"$RH_ROOT/no_pool.$i\" report.out | cut -d ',' -f $pf | tr -d ' '`\" = \"\" ] || error \"bad fileclass for no_pool.$i\" )\n\tdone\n\n\tfor i in a b; do\n\t    ( [ \"`grep \"$RH_ROOT/in_pool_1.$i\" report.out | cut -d ',' -f $pf  | tr -d ' '`\" = \"pool_1\" ] || error \"bad fileclass for in_pool_1.$i\" )\n\n\t\t( [ \"`grep \"$RH_ROOT/in_pool_2.$i\" report.out  | cut -d ',' -f $pf | tr -d ' '`\" = \"pool_2\" ] || error \"bad fileclass for in_pool_2.$i\" )\n\tdone\n\n\t# rematch and recheck\n\techo \"2.1-scan and match...\"\n\t# read changelogs\n\t$RH -f $RBH_CFG_DIR/$config_file --scan -l VERB -L rh_chglogs.log  --once || error \"\"\n\n\techo \"2.2-checking report output...\"\n\t# check classes in report output\n\t$REPORT -f $RBH_CFG_DIR/$config_file --dump-all -c  > report.out || error \"\"\n\tcat report.out\n\n\tfor i in 1 2; do\n        ( [ \"`grep \"$RH_ROOT/no_pool.$i\" report.out | cut -d ',' -f $pf | tr -d ' '`\" = \"\" ] || error \"bad fileclass for no_pool.$i\" )\n\tdone\n\n\tfor i in a b; do\n\t    ( [ \"`grep \"$RH_ROOT/in_pool_1.$i\" report.out | cut -d ',' -f $pf  | tr -d ' '`\" = \"pool_1\" ] || error \"bad fileclass for in_pool_1.$i\" )\n\n\t\t( [ \"`grep \"$RH_ROOT/in_pool_2.$i\" report.out  | cut -d ',' -f $pf | tr -d ' '`\" = \"pool_2\" ] || error \"bad fileclass for in_pool_2.$i\" )\n\tdone\n\n\n\techo \"2.3-checking 
robinhood log...\"\n\tgrep \"Missing attribute\" rh_chglogs.log && error \"missing attribute when matching classes\"\n\n}\n\nfunction test_logs\n{\n\tconfig_file=$1\n\tflavor=$2\n\tpolicy_str=\"$3\"\n\n\tsleep_time=430 # log rotation time (300) + scan interval (100) + scan duration (30)\n\n\tclean_logs\n\trm -f /tmp/test_log.1 /tmp/test_report.1 /tmp/test_alert.1 /tmp/extract_all /tmp/extract_log /tmp/extract_report /tmp/extract_alert\n\n\t# test flavors (x=supported):\n\t# x\tfile_nobatch\n\t# x \tfile_batch\n\t# x\tsyslog_nobatch\n\t# x\tsyslog_batch\n\t# x\tstdio_nobatch\n\t# x\tstdio_batch\n\t# \tmix\n\tfiles=0\n\tsyslog=0\n\tbatch=0\n\tstdio=0\n\techo $flavor | grep nobatch > /dev/null || batch=1\n\techo $flavor | grep syslog_ > /dev/null && syslog=1\n\techo $flavor | grep file_ > /dev/null && files=1\n\techo $flavor | grep stdio_ > /dev/null && stdio=1\n\techo \"Test parameters: files=$files, syslog=$syslog, stdio=$stdio, batch=$batch\"\n\n\t# create files\n\ttouch $RH_ROOT/file.1 || error \"creating file\"\n\ttouch $RH_ROOT/file.2 || error \"creating file\"\n\ttouch $RH_ROOT/file.3 || error \"creating file\"\n\ttouch $RH_ROOT/file.4 || error \"creating file\"\n\n\tif (( $is_lhsm != 0 )); then\n\t\tflush_data\n\t\t$LFS hsm_archive $RH_ROOT/file.*\n\t\twait_done 60 || error \"Copy timeout\"\n\tfi\n\n\tif (( $syslog )); then\n\t\tinit_msg_idx=`wc -l /var/log/messages | awk '{print $1}'`\n\tfi\n\n    if (( $is_hsmlite != 0 )); then\n        extra_action=\"$SYNC_OPT\"\n    else\n        extra_action=\"\"\n    fi\n\n\t# run a scan + alert check\n\tif (( $stdio )); then\n\t\t$RH -f $RBH_CFG_DIR/$config_file --scan $extra_action --run=alert -I -l DEBUG --once >/tmp/rbh.stdout 2>/tmp/rbh.stderr || error \"scan error $(cat /tmp/rbh.stderr)\"\n\telse\n        # detach and wait, else it will log to stderr by default\n\t\t$RH -f $RBH_CFG_DIR/$config_file --scan --run=alert -I -l DEBUG --once -d -p pidfile|| error \"scan error\"\n        sleep 2\n        [ -f pidfile ] && 
wait $(cat pidfile)\n        ps -edf | grep $RH | grep -v grep\n\tfi\n\n\tif (( $files )); then\n\t\tlog=\"/tmp/test_log.1\"\n\t\talert=\"/tmp/test_alert.1\"\n\t\treport=\"/tmp/test_report.1\"\n\telif (( $stdio )); then\n                log=\"/tmp/rbh.stderr\"\n\n\t\tif (( $batch )); then\n\t\t\t# batch output to file has no ALERT header on each line\n\t\t\t# we must extract between \"ALERT REPORT\" and \"END OF ALERT REPORT\"\n        \t\tlocal old_ifs=\"$IFS\"\n        \t\tIFS=$'\\t\\n :'\n\t\t\talert_lines=(`grep -n ALERT /tmp/rbh.stdout | cut -d ':' -f 1 | xargs`)\n\t\t\tIFS=\"$old_ifs\"\n\t\t#\techo ${alert_lines[0]}\n\t\t#\techo ${alert_lines[1]}\n\t\t\t((nbl=${alert_lines[1]}-${alert_lines[0]}+1))\n\t\t\t# extract nbl lines stating from line alert_lines[0]:\n\t\t\ttail -n +${alert_lines[0]} /tmp/rbh.stdout | head -n $nbl > /tmp/extract_alert\n\t\telse\n\t\t\tgrep ALERT /tmp/rbh.stdout > /tmp/extract_alert\n\t\tfi\n\t\t# grep 'robinhood\\[' => don't select lines with no headers\n\t\tgrep -v ALERT /tmp/rbh.stdout | egrep -e \"($CMD|shook)[^ ]*\\[\" > /tmp/extract_report\n\t\talert=\"/tmp/extract_alert\"\n\t\treport=\"/tmp/extract_report\"\n\telif (( $syslog )); then\n        # wait for syslog to flush logs to disk\n        sync; sleep 2\n\n\t\ttail -n +\"$init_msg_idx\" /var/log/messages | egrep -e \"($CMD|shook)[^ ]*\\[\" > /tmp/extract_all\n\t\tegrep -v 'ALERT' /tmp/extract_all | grep  ': [A-Za-z0-9_ ]* \\|' > /tmp/extract_log\n\t\tegrep -v 'ALERT|: [A-Za-z0-9_ ]* \\|' /tmp/extract_all > /tmp/extract_report\n\t\tgrep 'ALERT' /tmp/extract_all > /tmp/extract_alert\n\n\t\tlog=\"/tmp/extract_log\"\n\t\talert=\"/tmp/extract_alert\"\n\t\treport=\"/tmp/extract_report\"\n\telse\n\t\terror \": unsupported test option\"\n\t\treturn 1\n\tfi\n\n\t# check if there is something written in the log\n\tif [[ -s $log ]]; then\n\t\techo \"OK: log file is not empty\"\n\telse\n\t\terror \": empty log file\"\n\tfi\n\n\tif (( $batch )); then\n\t\t#check summary\n\t\tsum=`grep 
\"alert summary\" $alert | wc -l`\n\t\t(($sum==1)) || (error \": no summary found\" ; cat $alert)\n\t\t# check alerts about file.1 and file.2\n\t\t# search for line ' * 1 alert_file1', ' * 1 alert_file2'\n\t\ta1=`egrep -e \"[0-9]* entry matches 'file1'\" $alert | sed -e 's/.* \\([0-9]*\\) entry.*/\\1/' | xargs`\n\t\ta2=`egrep -e \"[0-9]* entry matches 'file2'\" $alert | sed -e 's/.* \\([0-9]*\\) entry.*/\\1/' | xargs`\n\t\te1=`grep ${RH_ROOT}'/file\\.1' $alert | wc -l`\n\t\te2=`grep ${RH_ROOT}'/file\\.2' $alert | wc -l`\n\t\t# search for alert count: \"2 alerts:\"\n\t\tif (($syslog)); then\n\t\t\tall=`egrep -e \"\\| [0-9]* alerts:\" $alert | sed -e 's/.*| \\([0-9]*\\) alerts:/\\1/' | xargs`\n\t\telse\n\t\t\tall=`egrep -e \"^[0-9]* alerts:\" $alert | sed -e 's/^\\([0-9]*\\) alerts:/\\1/' | xargs`\n\t\tfi\n\t\tif (( $a1 == 1 && $a2 == 1 && $e1 == 1 && $e2 == 1 && $all == 2)); then\n\t\t\techo \"OK: 2 alerts\"\n\t\telse\n\t\t\terror \": invalid alert counts: $a1,$a2,$e1,$e2,$all\"\n\t\t\tcat $alert\n\t\tfi\n\telse\n\t\t# check alerts about file.1 and file.2\n\t\ta1=`grep file1 $alert | wc -l`\n\t\ta2=`grep file2 $alert | wc -l`\n\t\te1=`grep 'Entry: '${RH_ROOT}'/file\\.1' $alert | wc -l`\n\t\te2=`grep 'Entry: '${RH_ROOT}'/file\\.2' $alert | wc -l`\n\t\tall=`grep \"Robinhood alert\" $alert | wc -l`\n\t\tif (( $a1 == 1 && $a2 == 1 && $e1 == 1 && $e2 == 1 && $all == 2)); then\n\t\t\techo \"OK: 2 alerts\"\n\t\telse\n\t\t\terror \": invalid alert counts: $a1,$a2,$e1,$e2,$all\"\n\t\t\tcat $alert\n\t\tfi\n\tfi\n\n\t# no purge for now\n\tif (( `wc -l $report | awk '{print $1}'` == 0 )); then\n                echo \"OK: no action reported\"\n        else\n                error \": there are reported actions after a scan\"\n  \t            cat $report\n        fi\n\n\tif (( $is_hsmlite == 0 )); then\n\n\t\t# reinit msg idx\n\t\tif (( $syslog )); then\n\t\t\tinit_msg_idx=`wc -l /var/log/messages | awk '{print $1}'`\n\t\tfi\n\n\t\t# run a purge\n\t\trm -f $log $report 
$alert\n\n\t\tif (( $stdio )); then\n\t\t\t$RH -f $RBH_CFG_DIR/$config_file $PURGE_OPT -l DEBUG --once --dry-run >/tmp/rbh.stdout 2>/tmp/rbh.stderr || error \"run failed\"\n\t\telse\n\t\t\t$RH -f $RBH_CFG_DIR/$config_file $PURGE_OPT -l DEBUG --once --dry-run -d -p pidfile || error \"run failed\"\n            sleep 2\n            [ -f pidfile ] && wait $(cat pidfile)\n            ps -edf | grep $RH | grep -v grep\n        fi\n\n\t\t# extract new syslog messages\n\t\tif (( $syslog )); then\n            # wait for syslog to flush logs to disk\n            sync; sleep 2\n\t\t\ttail -n +\"$init_msg_idx\" /var/log/messages | grep $CMD > /tmp/extract_all\n\n\t\t\tegrep -v 'ALERT' /tmp/extract_all | grep  ': [A-Za-Z0-9_ ]* \\|' > /tmp/extract_log\n\t\t\tegrep -v 'ALERT|: [A-Za-Z0-9_ ]* \\|' /tmp/extract_all > /tmp/extract_report\n\t\t\tgrep 'ALERT' /tmp/extract_all > /tmp/extract_alert\n\n            if [ \"$DEBUG\" = \"1\" ]; then\n                echo \"----- syslog alerts:\" ; cat /tmp/extract_alert\n                echo \"----- syslog actions:\" ; cat /tmp/extract_report\n                echo \"----- syslog traces:\" ; cat /tmp/extract_log\n            fi\n\t\telif (( $stdio )); then\n\t\t\tgrep ALERT /tmp/rbh.stdout > /tmp/extract_alert\n\t\t\t# grep [22909/8] => don't select lines with no headers\n\t\t\tgrep -v ALERT /tmp/rbh.stdout | grep \"\\[[0-9]*/[0-9]*\\]\" > /tmp/extract_report\n            if [ \"$DEBUG\" = \"1\" ]; then\n                echo \"----- stdio alerts:\" ; cat /tmp/extract_alert\n                echo \"----- stdio actions:\" ; cat /tmp/extract_report\n                echo \"----- stdio (all):\" ; cat /tmp/rbh.stdout\n            fi\n\t\tfi\n\n\t\t# check that there is something written in the log\n\t    if [[ -s $log ]]; then\n\t\t\techo \"OK: log file is not empty\"\n\t\telse\n\t\t\terror \": empty log file\"\n\t\tfi\n\n        egrep \"summary|Warning\" $log\n\n\t\tgrep \"could not reach the specified\" $log > /dev/null\n\t\tif (($?)); 
then\n\t\t\terror \": a warning should have been issued for impossible purge\"\n\t\telse\n\t\t\techo \"OK: warning issued\"\n\t\tfi\n\n\t\t# all files must have been purged\n\t\tif (( `wc -l $report | awk '{print $1}'` == 4 )); then\n\t\t\techo \"OK: 4 actions reported\"\n\t\telse\n\t\t\terror \": unexpected count of actions\"\n\t\t\tcat $report\n\t\tfi\n\n\tfi\n\t(($files==1)) || return 0\n\n\tif [[ \"x$SLOW\" != \"x1\" ]]; then\n\t\techo \"Quick tests only: skipping log rotation test (use SLOW=1 to enable this test)\"\n\t\treturn 1\n\tfi\n\n\t# start a FS scanner with FS_Scan period = 100\n\t$RH -f $RBH_CFG_DIR/$config_file --scan -l DEBUG -d -p pidfile\n\n\t# rotate the logs\n\tfor l in /tmp/test_log.1 /tmp/test_report.1 /tmp/test_alert.1; do\n\t\tmv $l $l.old\n\tdone\n\n\tsleep $sleep_time\n\n\t# check that there is something written in the log\n    if [[ -s $log ]]; then\n\t\techo \"OK: log file is not empty\"\n\telse\n\t\terror \": empty log file\"\n\tfi\n\n\t# check alerts about file.1 and file.2\n\ta1=`grep alert_file1 /tmp/test_alert.1 | wc -l`\n\ta2=`grep alert_file2 /tmp/test_alert.1 | wc -l`\n\te1=`grep 'Entry: '${RH_ROOT}'/file\\.1' /tmp/test_alert.1 | wc -l`\n\te2=`grep 'Entry: '${RH_ROOT}'/file\\.2' /tmp/test_alert.1 | wc -l`\n\tall=`grep \"Robinhood alert\" /tmp/test_alert.1 | wc -l`\n\tif (( $a1 > 0 && $a2 > 0 && $e1 > 0 && $e2 > 0 && $all >= 2)); then\n\t\techo \"OK: $all alerts\"\n\telse\n\t\terror \": invalid alert counts: $a1,$a2,$e1,$e2,$all\"\n\t\tcat /tmp/test_alert.1\n\tfi\n\n\t# no purge during scan\n\tif (( `wc -l /tmp/test_report.1 | awk '{print $1}'` == 0 )); then\n                echo \"OK: no action reported\"\n        else\n                error \": there are reported actions after a scan\"\n\t\tcat /tmp/test_report.1\n        fi\n\n\t[ -f pidfile ] && kill -9 $(cat pidfile)\n\trm -f /tmp/test_log.1 /tmp/test_report.1 /tmp/test_alert.1 pidfile\n\trm -f /tmp/test_log.1.old /tmp/test_report.1.old /tmp/test_alert.1.old\n}\n\nfunction 
test_cfg_parsing\n{\n    flavor=$1\n\n    clean_logs\n\n    # needed for reading password file\n    if [[ ! -f /etc/robinhood.d/.dbpassword ]]; then\n        if [[ ! -d /etc/robinhood.d ]]; then\n            mkdir /etc/robinhood.d\n        fi\n        echo robinhood > /etc/robinhood.d/.dbpassword\n    fi\n\n    GEN_TEMPLATE=\"/tmp/template.$CMD\"\n    if [[ $flavor == \"basic\" ]]; then\n        cp -f \"$RBH_TEMPLATE_DIR\"/basic.conf \"$GEN_TEMPLATE\"\n        sed -i \"s/fs_type = .*;/fs_type = $FS_TYPE;/\" $GEN_TEMPLATE\n        sed -ie \"s#rbh_test#$RH_DB#\" $GEN_TEMPLATE\n    elif [[ $flavor == \"example\"* ]]; then\n        if (( $is_lhsm == 0 )) && [[ $flavor == *\"lhsm\"* ]]; then\n            echo \"Example uses Lustre/HSM\"\n            set_skipped\n            return 1\n        fi\n        cp -f \"$RBH_TEMPLATE_DIR\"/$flavor.conf \"$GEN_TEMPLATE\"\n        sed -i \"s/fs_type = .*;/fs_type = $FS_TYPE;/\" $GEN_TEMPLATE\n        sed -ie \"s#robinhood_lustre#$RH_DB#\" $GEN_TEMPLATE\n    elif [[ $flavor == \"generated\" ]]; then\n        $RH --template=$GEN_TEMPLATE || error \"generating config template\"\n        sed -i \"s/fs_type = .*;/fs_type = $FS_TYPE;/\" $GEN_TEMPLATE\n        sed -ie \"s#robinhood_db#$RH_DB#\" $GEN_TEMPLATE\n    else\n        error \"invalid test flavor\"\n        return 1\n    fi\n    # link to needed files for %includes\n    rm -f \"/tmp/includes\"\n    ln -s \"$(readlink -m $RBH_TEMPLATE_DIR)\"/includes /tmp/includes\n\n    # test parsing\n    $RH --test-syntax -f \"$GEN_TEMPLATE\" 2>rh_syntax.log >rh_syntax.log ||\n        error \" reading config file \\\"$GEN_TEMPLATE\\\"\"\n\n    cat rh_syntax.log\n    grep \"unknown parameter\" rh_syntax.log > /dev/null && error \"unexpected parameter\"\n    grep \"read successfully\" rh_syntax.log > /dev/null && echo \"OK: parsing succeeded\"\n\n    # test effective run\n    sed -i \"s#/var/log/robinhood/#/tmp/#\" $GEN_TEMPLATE\n    sed -ie \"s#fs_path = .*#fs_path = $RH_ROOT;#\" $GEN_TEMPLATE\n 
   $RH -f \"$GEN_TEMPLATE\" --scan --run=all --target=all --once -L rh_migr.log ||\n        error \"run of example policy\"\n\n    rm -f \"$GEN_TEMPLATE\"\n    rm -f \"/tmp/includes\"\n}\n\nfunction check_recov_status\n{\n    local log=\"$1\"\n    local p=\"$2\"\n    local exp=\"$3\"\n\n    grep \"Restoring '$p'\" $log | egrep -qe \"$exp\" || error \"Bad status for $p (expected: <$exp> in <$(grep \"Restoring '$p'\" $log)>)\"\n    return $?\n}\n\nfunction recovery_test\n{\n    config_file=$1\n    flavor=$2\n    arch_slink=$3\n    policy_str=\"$4\"\n\n    if (( $is_hsmlite == 0 )); then\n        echo \"Backup test only: skipped\"\n        set_skipped\n        return 1\n    fi\n\n    clean_logs\n\n    # flavors:\n    # full: all entries fully recovered\n    # delta: all entries recovered but some with deltas\n    # rename: some entries have been renamed since they have been saved\n    # partial: some entries can't be recovered\n    # mixed: all of them\n    if [[ $flavor == \"full\" ]]; then\n        nb_full=20\n        nb_empty=2\n        nb_rename=0\n        nb_empty_rename=0\n        nb_delta=0\n        nb_nobkp=0\n    elif [[ $flavor == \"delta\" ]]; then\n        nb_full=10\n        nb_empty=2\n        nb_rename=0\n        nb_empty_rename=2\n        nb_delta=10\n        nb_nobkp=0\n    elif [[ $flavor == \"rename\" ]]; then\n        nb_full=10\n        nb_empty=2\n        nb_rename=10\n        nb_empty_rename=2\n        nb_delta=0\n        nb_nobkp=0\n    elif [[ $flavor == \"partial\" ]]; then\n        nb_full=10\n        nb_empty=2\n        nb_rename=0\n        nb_empty_rename=0\n        nb_delta=0\n        nb_nobkp=10\n    elif [[ $flavor == \"mixed\" ]]; then\n        nb_full=5\n        nb_empty=2\n        nb_rename=5\n        nb_empty_rename=2\n        nb_delta=5\n        nb_nobkp=5\n    else\n        error \"Invalid arg in recovery_test\"\n        return 1\n    fi\n    # read logs\n\n\n    # create files\n    ((total=$nb_full + $nb_rename + $nb_delta + 
$nb_nobkp + $nb_empty + $nb_empty_rename))\n    ((total_empty=$nb_empty + $nb_empty_rename))\n    echo \"1.1-creating files...\"\n\n    for i in `seq 1 $total`; do\n        mkdir \"$RH_ROOT/dir.$i\" || error \"$? creating directory $RH_ROOT/dir.$i\"\n        if (( $i % 3 == 0 )); then\n            chmod 755 \"$RH_ROOT/dir.$i\" || error \"$? setting mode of $RH_ROOT/dir.$i\"\n        elif (( $i % 3 == 1 )); then\n            chmod 750 \"$RH_ROOT/dir.$i\" || error \"$? setting mode of $RH_ROOT/dir.$i\"\n        elif (( $i % 3 == 2 )); then\n            chmod 700 \"$RH_ROOT/dir.$i\" || error \"$? setting mode of $RH_ROOT/dir.$i\"\n        fi\n\n        if (($i > $total - $total_empty)); then\n            # last total_empty are empty...\n            touch $RH_ROOT/dir.$i/file.$i || error \"$? creating $RH_ROOT/file.$i\"\n        else\n            dd if=/dev/zero of=$RH_ROOT/dir.$i/file.$i bs=1M count=1 >/dev/null 2>/dev/null || error \"$? writing $RH_ROOT/file.$i\"\n        fi\n    done\n\n    echo \"1.2-creating symlinks...\"\n    for i in `seq 1 $(( $total - $total_empty))`; do\n        ln -s \"symlink_$i\" $RH_ROOT/dir.$i/link.$i  >/dev/null 2>/dev/null || error \"$? 
creating symlink $RH_ROOT/dir.$i/link.$\"\n    done\n\n    # read changelogs\n    if (( $no_log )); then\n        echo \"1.3-scan...\"\n        $RH -f $RBH_CFG_DIR/$config_file --scan -l DEBUG -L rh_chglogs.log  --once 2>/dev/null || error \"scanning\"\n    else\n        echo \"1.3-read changelog...\"\n        $RH -f $RBH_CFG_DIR/$config_file --readlog -l DEBUG -L rh_chglogs.log  --once 2>/dev/null || error \"reading log\"\n    fi\n\n    sleep 2\n\n    $REPORT -f $RBH_CFG_DIR/$config_file -l MAJOR --csv --status-info $STATUS_MGR > rh_report.log || error \"report error\"\n    [ \"$DEBUG\" = \"1\" ] && cat rh_report.log\n    # all files are new\n    new_cnt=$(grep file rh_report.log | grep new | cut -d ',' -f 3 | tr -d ' ')\n    na_link=$(grep symlink rh_report.log | grep \"n/a\" | cut -d ',' -f 3 | tr -d ' ')\n    new_link=$(grep symlink rh_report.log | grep new | cut -d ',' -f 3 | tr -d ' ')\n    [[ -z $new_cnt ]] && new_cnt=0\n    [[ -z $new_link ]] && new_link=0\n    [[ -z $na_link ]] && na_link=0\n\n    echo \"$new_cnt files are new\"\n    echo \"$new_link symlinks are new, $na_link are n/a\"\n\n    (( $new_cnt == $total )) || error \"20 new files expected\"\n\n    if (( $arch_slink == 0 )); then\n        (( $na_link == $total - $total_empty )) || error \"$total n/a symlinks expected\"\n    else\n        (( $new_link == $total - $total_empty )) || error \"$total new symlinks expected\"\n    fi\n\n    echo \"2.1-archiving objects...\"\n    # archive and modify files\n    for i in `seq 1 $total`; do\n        if (( $i <= $nb_full )); then\n            $RH -f $RBH_CFG_DIR/$config_file --run=migration --target=file:\"$RH_ROOT/dir.$i/file.$i\" --ignore-conditions -l DEBUG -L rh_migr.log 2>/dev/null \\\n                || error \"archiving $RH_ROOT/dir.$i/file.$i\"\n            $RH -f $RBH_CFG_DIR/$config_file --run=migration --target=file:\"$RH_ROOT/dir.$i/link.$i\" --ignore-conditions -l DEBUG -L rh_migr.log 2>/dev/null \\\n                || error \"archiving 
$RH_ROOT/dir.$i/link.$i\"\n            if (( $arch_slink == 0 )); then\n                grep \"$RH_ROOT/dir.$i/link.$i\" rh_migr.log | grep \"bad type for migration\" > /dev/null 2> /dev/null \\\n                    || error \"$RH_ROOT/dir.$i/link.$i should not have been migrated\"\n            fi\n        elif (( $i <= $(($nb_full+$nb_rename)) )); then\n            $RH -f $RBH_CFG_DIR/$config_file --run=migration --target=file:\"$RH_ROOT/dir.$i/file.$i\" --ignore-conditions -l DEBUG -L rh_migr.log 2>/dev/null \\\n                || error \"archiving $RH_ROOT/dir.$i/file.$i\"\n            $RH -f $RBH_CFG_DIR/$config_file --run=migration --target=file:\"$RH_ROOT/dir.$i/link.$i\" --ignore-conditions -l DEBUG -L rh_migr.log 2>/dev/null \\\n                || error \"archiving $RH_ROOT/dir.$i/link.$i\"\n            if (( $arch_slink == 0 )); then\n                grep \"$RH_ROOT/dir.$i/link.$i\" rh_migr.log | grep \"bad type for migration\" > /dev/null 2> /dev/null \\\n                    || error \"$RH_ROOT/dir.$i/link.$i should not have been migrated\"\n            fi\n            mv \"$RH_ROOT/dir.$i/file.$i\" \"$RH_ROOT/dir.$i/file_new.$i\" || error \"renaming file\"\n            mv \"$RH_ROOT/dir.$i/link.$i\" \"$RH_ROOT/dir.$i/link_new.$i\" || error \"renaming link\"\n            mv \"$RH_ROOT/dir.$i\" \"$RH_ROOT/dir.new_$i\" || error \"renaming dir\"\n        elif (( $i <= $(($nb_full+$nb_rename+$nb_delta)) )); then\n            $RH -f $RBH_CFG_DIR/$config_file --run=migration --target=file:\"$RH_ROOT/dir.$i/file.$i\" --ignore-conditions -l DEBUG -L rh_migr.log 2>/dev/null \\\n                || error \"archiving $RH_ROOT/dir.$i/file.$i\"\n            touch \"$RH_ROOT/dir.$i/file.$i\"\n        elif (( $i <= $(($nb_full+$nb_rename+$nb_delta+$nb_nobkp)) )); then\n            # no backup\n            :\n        elif (( $i <= $(($nb_full+$nb_rename+$nb_delta+$nb_nobkp+$nb_empty)) )); then\n            # no backup\n            :\n        elif (( $i <= 
$(($nb_full+$nb_rename+$nb_delta+$nb_nobkp+$nb_empty+$nb_empty_rename)) )); then\n            # no backup, just rename\n            mv \"$RH_ROOT/dir.$i/file.$i\" \"$RH_ROOT/dir.$i/file_new.$i\" || error \"renaming file\"\n            mv \"$RH_ROOT/dir.$i\" \"$RH_ROOT/dir.new_$i\" || error \"renaming dir\"\n        fi\n    done\n\n    # update DB contents\n    if (( $no_log )); then\n        echo \"2.2-scan...\"\n        $RH -f $RBH_CFG_DIR/$config_file --scan -l DEBUG -L rh_chglogs.log  --once 2>/dev/null || error \"scanning\"\n    else\n        echo \"2.2-read changelog...\"\n        $RH -f $RBH_CFG_DIR/$config_file --readlog -l DEBUG -L rh_chglogs.log  --once 2>/dev/null || error \"reading log\"\n    fi\n\n    $REPORT -f $RBH_CFG_DIR/$config_file -l MAJOR --csv --status-info $STATUS_MGR > rh_report.log || error \"report error\"\n    [ \"$DEBUG\" = \"1\" ] && cat rh_report.log\n    # all files are new\n    new_cnt=$(grep file rh_report.log | grep new | cut -d ',' -f 3 | tr -d ' ')\n    mod_cnt=$(grep file rh_report.log | grep modified | cut -d ',' -f 3 | tr -d ' ')\n    sync_cnt=$(grep file rh_report.log | grep synchro | cut -d ',' -f 3 | tr -d ' ')\n    [[ -z $new_cnt ]] && new_cnt=0\n    [[ -z $mod_cnt ]] && mod_cnt=0\n    [[ -z $sync_cnt ]] && sync_cnt=0\n\n    echo \"files: new: $new_cnt, modified: $mod_cnt, synchro: $sync_cnt\"\n    (( $sync_cnt == $nb_full+$nb_rename )) || error \"Nbr of synchro files doesn't match: $sync_cnt != $nb_full + $nb_rename\"\n    (( $mod_cnt == $nb_delta )) || error \"Nbr of modified files doesn't match: $mod_cnt != $nb_delta\"\n    (( $new_cnt == $nb_nobkp + $nb_empty + $nb_empty_rename )) || error \"Nbr of new files doesn't match: $new_cnt != $nb_nobkp + $nb_empty + $nb_empty_rename\"\n\n    new_cnt=$(grep symlink rh_report.log | grep new | cut -d ',' -f 3 | tr -d ' ')\n    na_cnt=$(grep symlink rh_report.log | grep \"n/a\" | cut -d ',' -f 3 | tr -d ' ')\n    sync_cnt=$(grep symlink rh_report.log | grep synchro | cut -d ',' -f 
3 | tr -d ' ')\n    [[ -z $new_cnt ]] && new_cnt=0\n    [[ -z $na_cnt ]] && na_cnt=0\n    [[ -z $sync_cnt ]] && sync_cnt=0\n\n    echo \"symlink: new: $new_cnt, synchro: $sync_cnt, n/a: $na_cnt\"\n    if (( $arch_slink == 0 )); then\n        (( $na_cnt == $total - $total_empty )) || error \"Nbr of links with no status doesn't match: $na_cnt != $total - $total_empty\"\n    else\n        (( $sync_cnt == $nb_full+$nb_rename )) || error \"Nbr of synchro links doesn't match: $sync_cnt != $nb_full + $nb_rename\"\n        (( $new_cnt == $nb_nobkp+$nb_delta )) || error \"Nbr of new links doesn't match: $new_cnt != $(($nb_nobkp+$nb_delta))\"\n    fi\n\n    # shots before disaster (time is only significant for files)\n    find $RH_ROOT -type f -printf \"%n %m %T@ %g %u %s %p %l\\n\" > /tmp/before.$$\n    find $RH_ROOT -type d -printf \"%n %m %g %u %s %p %l\\n\" >> /tmp/before.$$\n    find $RH_ROOT -type l -printf \"%n %m %g %u %s %p %l\\n\" >> /tmp/before.$$\n\n    # FS disaster\n    if [[ -n \"$RH_ROOT\" ]]; then\n        echo \"3-Disaster: all FS content is lost\"\n        rm  -rf $RH_ROOT/*\n    fi\n    sleep 1\n\n    $RH -f $RBH_CFG_DIR/$config_file --scan --once -l DEBUG -L rh_scan.log  || error \"performing update scan\"\n    check_db_error rh_scan.log\n\n    # perform the recovery\n    echo \"4-Performing recovery...\"\n    $UNDELETE -f $RBH_CFG_DIR/$config_file -R -l FULL > recov.log 2> recov.err || error \"Error performing recovery\"\n\n    find $RH_ROOT -type f -printf \"%n %m %T@ %g %u %s %p %l\\n\" > /tmp/after.$$\n    find $RH_ROOT -type d -printf \"%n %m %g %u %s %p %l\\n\" >> /tmp/after.$$\n    find $RH_ROOT -type l -printf \"%n %m %g %u %s %p %l\\n\" >> /tmp/after.$$\n\n    diff  /tmp/before.$$ /tmp/after.$$ > /tmp/diff.$$\n    [ \"$DEBUG\" = \"1\" ] && cat /tmp/diff.$$\n\n    # checking status and diff result\n    for i in `seq 1 $total`; do\n        if (( $i <= $nb_full )); then\n            check_recov_status recov.log \"$RH_ROOT/dir.$i/file.$i\" \"OK 
\\(file\\)\"\n            grep \"$RH_ROOT/dir.$i/file.$i\" /tmp/diff.$$ && error \"$RH_ROOT/dir.$i/file.$i NOT expected to differ\"\n            check_recov_status recov.log \"$RH_ROOT/dir.$i/link.$i\" \"OK \\(symlink\\)\"\n        elif (( $i <= $(($nb_full+$nb_rename)) )); then\n            check_recov_status recov.log \"$RH_ROOT/dir.new_$i/file_new.$i\" \"OK\\$\"\n            grep \"$RH_ROOT/dir.new$i/link_new.$i\" /tmp/diff.$$ && error \"$RH_ROOT/dir_new.$i/link_new.$i NOT expected to differ\"\n            check_recov_status recov.log \"$RH_ROOT/dir.new_$i/link_new.$i\" \"OK \\(symlink\\)\"\n        elif (( $i <= $(($nb_full+$nb_rename+$nb_delta)) )); then\n            check_recov_status recov.log \"$RH_ROOT/dir.$i/file.$i\" \"OK \\(old version\\)\"\n            grep \"$RH_ROOT/dir.$i/file.$i\" /tmp/diff.$$ >/dev/null || error \"$RH_ROOT/dir.$i/file.$i is expected to differ\"\n            # links are never expected to differ as they are stored in the database\n            grep \"$RH_ROOT/dir.$i/link.$i\" /tmp/diff.$$ >/dev/null && error \"$RH_ROOT/dir.$i/link.$i NOT expected to differ\"\n            check_recov_status recov.log \"$RH_ROOT/dir.$i/link.$i\" \"OK \\(symlink\\)\"\n        elif (( $i <= $(($nb_full+$nb_rename+$nb_delta+$nb_nobkp)) )); then\n            check_recov_status recov.log \"$RH_ROOT/dir.$i/file.$i\" \"No backup\"\n            grep \"$RH_ROOT/dir.$i/file.$i\" /tmp/diff.$$ >/dev/null || error \"$RH_ROOT/dir.$i/file.$i is expected to differ\"\n            # links are never expected to differ as they are stored in the database\n            grep \"$RH_ROOT/dir.$i/link.$i\" /tmp/diff.$$ >/dev/null && error \"$RH_ROOT/dir.$i/link.$i NOT expected to differ\"\n            check_recov_status recov.log \"$RH_ROOT/dir.$i/link.$i\" \"OK \\(symlink\\)\"\n        elif (( $i <= $(($nb_full+$nb_rename+$nb_delta+$nb_nobkp+$nb_empty)) )); then\n            check_recov_status recov.log \"$RH_ROOT/dir.$i/file.$i\" \"OK \\(empty file\\)\"\n            grep 
\"$RH_ROOT/dir.$i/file.$i\" /tmp/diff.$$ >/dev/null && error \"$RH_ROOT/dir.$i/file.$i is NOT expected to differ\"\n        elif (( $i <= $(($nb_full+$nb_rename+$nb_delta+$nb_nobkp+$nb_empty+$nb_empty_rename)) )); then\n            check_recov_status recov.log \"$RH_ROOT/dir.new_$i/file_new.$i\" \"OK \\(empty file\\)\"\n            grep \"$RH_ROOT/dir.new_$i/file_new.$i\" /tmp/diff.$$ >/dev/null && error \"$RH_ROOT/dir.$i/file.$i is NOT expected to differ\"\n        fi\n    done\n\n    rm -f /tmp/before.$$ /tmp/after.$$ /tmp/diff.$$ recov.err\n}\n\nfunction recov_filters\n{\n    config_file=$1\n    flavor=$2\n\n    if [[ $flavor == since ]] && [[ $nolog == 1 ]]; then\n        echo \"'since' can only be used with changelogs\"\n        set_skipped\n        return 1\n    fi\n    if (( $is_hsmlite == 0 )); then\n        echo \"Backup test only: skipped\"\n        set_skipped\n        return 1\n    fi\n\n    # start filters: --ost and --since\n    # resume filters: --dir\n\n    echo \"populating filesystem\"\n    # create one of each recov status matching or not matching the filter\n    # (full, delta, empty, rename, empty_new, empty_rename, nobkp, slink, slink_new)\n    mkdir $RH_ROOT/dir.match $RH_ROOT/dir.nomatch || error \"mkdir failed\"\n\n    for f in full delta rename empty empty_rnm; do\n        if [[ $flavor != since ]]; then\n            $LFS setstripe -c 1 -i 0 $RH_ROOT/dir.match/$f || error \"setstripe failed\"\n        fi\n        $LFS setstripe -c 1 -i 1 $RH_ROOT/dir.nomatch/$f || error \"setstripe failed\"\n    done\n    # write data to full and delta\n    for f in full delta rename; do\n        if [[ $flavor != since ]]; then\n            dd if=/dev/zero of=$RH_ROOT/dir.match/$f bs=1M count=5  || error \"writing data to $f\"\n        fi\n        dd if=/dev/zero of=$RH_ROOT/dir.nomatch/$f bs=1M count=5  || error \"writing data to $f\"\n    done\n    ln -s \"this is an initial symlink\" $RH_ROOT/dir.nomatch/slink || error \"creating symlink\"\n    if [[ 
$flavor != ost ]] && [[ $flavor != since ]]; then\n        ln -s \"this is an initial symlink\" $RH_ROOT/dir.match/slink || error \"creating symlink slink_new\"\n    fi\n\n    echo \"scan and archive\"\n    # scan and archive\n    $RH -f $RBH_CFG_DIR/$config_file --scan $SYNC_OPT -l DEBUG -L rh_scan.log  --once 2>/dev/null || error \"scanning or migrating\"\n\n    if [[ $flavor == since ]]; then\n\t    $LFS changelog_clear lustre-MDT0000 cl1 0\n        sleep 1\n        # only consider entries modifed from now\n        since=$(date +'%Y%m%d%H%M%S')\n\n        for f in full delta rename empty empty_rnm; do\n            $LFS setstripe -c 1 -i 0 $RH_ROOT/dir.match/$f || error \"setstripe failed\"\n        done\n        # write data to full and delta\n        for f in full delta rename; do\n            dd if=/dev/zero of=$RH_ROOT/dir.match/$f bs=1M count=5  || error \"writing data to $f\"\n        done\n        ln -s \"this is an initial symlink\" $RH_ROOT/dir.match/slink || error \"creating symlink slink_new\"\n\n        # don't update non-modified objects, migrate other candidates\n        $RH -f $RBH_CFG_DIR/$config_file --readlog $SYNC_OPT -l DEBUG -L rh_scan.log  --once 2>/dev/null || error \"reading changelogs\"\n    fi\n\n    echo \"making deltas\"\n    for f in empty_new nobkp; do\n        $LFS setstripe -c 1 -i 0 $RH_ROOT/dir.match/$f\n        [[ $flavor != since ]] && $LFS setstripe -c 1 -i 1 $RH_ROOT/dir.nomatch/$f\n    done\n    for d in match nomatch ; do\n        # skip no match if flavor is 'since'\n        [[ $flavor == since ]] && [[ $d == nomatch ]] && continue\n        echo \"sqdlqsldsqmdl\" >> $RH_ROOT/dir.$d/delta || error \"appending dir.$d/delta\"\n        # force modification (in case Lustre don't report small data changes)\n        touch $RH_ROOT/dir.$d/delta || error \"touching dir.$d/delta\"\n        echo \"qsldjkqlsdkqs\" >> $RH_ROOT/dir.$d/nobkp || error \"writing to dir.$d/nobkp\"\n        mv $RH_ROOT/dir.$d/rename $RH_ROOT/dir.$d/rename.mv 
|| error \"renaming 'rename'\"\n        mv $RH_ROOT/dir.$d/empty_rnm $RH_ROOT/dir.$d/empty_rnm.mv || error \"renaming 'empty_rnm'\"\n    done\n    if [[ $flavor != since ]]; then\n        ln -s \"this is a new symlink\" $RH_ROOT/dir.nomatch/slink_new || error \"creating symlink\"\n    fi\n    if [[ $flavor != ost ]]; then\n        ln -s \"this is a new symlink\" $RH_ROOT/dir.match/slink_new || error \"creating symlink\"\n    fi\n\n    if [[ $flavor == since ]]; then\n        [ \"$DEBUG\" = \"1\" ] && $LFS changelog lustre-MDT0000\n        # don't update non-modified objects\n\t\t$RH -f $RBH_CFG_DIR/$config_file --readlog -l DEBUG -L rh_scan.log  --once 2>/dev/null || error \"reading changelogs\"\n    else\n        echo \"rescan (no archive)\"\n        $RH -f $RBH_CFG_DIR/$config_file --scan -l DEBUG -L rh_scan.log  --once 2>/dev/null || error \"scanning\"\n    fi\n\n    $REPORT -f $RBH_CFG_DIR/$config_file -l MAJOR --csv -i > report.out\n    [ \"$DEBUG\" = \"1\" ] && cat report.out\n\n    new_cnt=`grep \"new\" report.out | grep file | cut -d ',' -f 3`\n    mod_cnt=`grep \"modified\" report.out | grep file | cut -d ',' -f 3`\n    sync_cnt=`grep \"synchro\" report.out | grep file | cut -d ',' -f 3`\n    [[ -z $new_cnt ]] && new_cnt=0\n    [[ -z $mod_cnt ]] && mod_cnt=0\n    [[ -z $sync_cnt ]] && sync_cnt=0\n\n    #          full, delta, empty, rename, empty_new, empty_rename, nobkp\n    # synchro:    2             2       2                        2\n    # modified:          2\n    # new:                                         2                    2\n    echo \"files: new: $new_cnt, modified: $mod_cnt, synchro: $sync_cnt\"\n    if [[ $flavor != since ]]; then\n        (( $sync_cnt == 8 )) || error \"Nbr of synchro files doesn't match: $sync_cnt != 8\"\n        (( $mod_cnt  == 2 )) || error \"Nbr of modified files doesn't match: $mod_cnt != 2\"\n        (( $new_cnt  == 4 )) || error \"Nbr of new files doesn't match: $new_cnt != 4\"\n    else\n        (( $sync_cnt == 9 
)) || error \"Nbr of synchro files doesn't match: $sync_cnt != 9\"\n        (( $mod_cnt  == 1 )) || error \"Nbr of modified files doesn't match: $mod_cnt != 1\"\n        (( $new_cnt  == 2 )) || error \"Nbr of new files doesn't match: $new_cnt != 2\"\n    fi\n    # FS disaster\n    if [[ -n \"$RH_ROOT\" ]]; then\n        echo \"3-Disaster: all FS content is lost\"\n        rm  -rf $RH_ROOT/*\n    fi\n\n    # perform the recovery\n    echo \"4-Performing recovery...\"\n    cp /dev/null recov.log\n\n    case \"$flavor\" in\n        ost)\n            start_option=\"--ost 0\"\n            resume_option=\"\"\n            matching=(full delta empty empty_rnm.mv empty_new rename.mv nobkp)\n            status=(\"OK\" \"OK \\(old version\\)\" \"OK\" \"OK\" \"OK \\(empty file\\)\" \"OK\" \"No backup\")\n            ;;\n        since)\n            start_option=\"--since=$since\"\n            resume_option=\"\"\n            matching=(full delta empty empty_rnm.mv empty_new rename.mv nobkp slink slink_new)\n            status=(\"OK\" \"OK \\(old version\\)\" \"OK\" \"OK\" \"OK \\(empty file\\)\" \"OK\" \"No backup\")\n            ;;\n        dir)\n            start_option=\"\"\n            resume_option=\"--dir=$RH_ROOT/dir.match\"\n            matching=(full delta empty empty_rnm.mv empty_new rename.mv nobkp slink slink_new)\n            status=(\"OK\" \"OK \\(old version\\)\" \"OK\" \"OK\" \"OK \\(empty file\\)\" \"OK\" \"No backup\" \"OK \\(non-file \\)\" \"OK \\(non-file \\)\")\n            ;;\n    esac\n\n    $RECOV -f $RBH_CFG_DIR/$config_file --start $start_option -l FULL >> recov.log 2>&1 || error \"Error starting recovery\"\n    $RECOV -f $RBH_CFG_DIR/$config_file --resume $resume_option -l DEBUG >> recov.log 2>&1 || error \"Error performing recovery\"\n    if [[ $flavor != dir ]]; then # for dirs, cannot complete as long as it is only for parallelizing the recovery\n        $RECOV -f $RBH_CFG_DIR/$config_file --complete -l DEBUG >> recov.log 2>&1 || error \"Error 
completing recovery\"\n    fi\n\n    # check that all matching entries are recovered with the appropriate status\n    [ \"$DEBUG\" = \"1\" ] && grep Restoring recov.log\n    for i in $(seq 1 ${#matching[@]}); do\n        f=${matching[$i]}\n        s=${status[$i]}\n        check_recov_status recov.log $RH_ROOT/dir.match/$f $s\n    done\n    (( $(grep Restoring recov.log | wc -l) == ${#matching[@]} )) || error \"Too many files restored\"\n\n    (( $NB_ERROR == 0 )) && echo OK\n}\n\nfunction test_tokudb\n{\n    # Check we are using MariaDB\n    rpm -qi MariaDB-common || {\n        echo \"MariaDB not installed: skipped\"\n        set_skipped\n        return 1\n    }\n\n    # TokuDB must be available too\n    mysql $RH_DB -e \"show engines\" | grep TokuDB || {\n        echo \"TokuDB not enabled: skipped\"\n        set_skipped\n        return 1\n    }\n\n    clean_logs\n\n    # Create the tables with various compression schemes.\n\n    echo \"Test without default compression, i.e. none\"\n    $CFG_SCRIPT empty_db $RH_DB > /dev/null\n    $RH -f $RBH_CFG_DIR/tokudb1.conf --scan -l DEBUG -L rh_scan.log --once || error \"\"\n    mysql $RH_DB -e \"show create table ENTRIES;\" |\n        grep \"ENGINE=TokuDB .*\\`COMPRESSION\\`=tokudb_uncompressed\" ||\n        error \"invalid engine/compression\"\n\n    echo \"Tests with valid compression names\"\n    for COMPRESS in tokudb_uncompressed tokudb_zlib tokudb_lzma ; do\n        $CFG_SCRIPT empty_db $RH_DB > /dev/null\n        RBH_TOKU_COMPRESS=$COMPRESS $RH -f $RBH_CFG_DIR/tokudb2.conf --scan -l DEBUG -L rh_scan.log --once || error \"\"\n        mysql $RH_DB -e \"show create table ENTRIES;\" |\n            grep \"ENGINE=TokuDB .*\\`COMPRESSION\\`=${COMPRESS}\" ||\n            error \"invalid engine/compression\"\n    done\n\n    echo \"Test with invalid compression name\"\n    $CFG_SCRIPT empty_db $RH_DB > /dev/null\n    RBH_TOKU_COMPRESS=some_non_existent_compression $RH -f $RBH_CFG_DIR/tokudb2.conf --scan -l DEBUG -L 
rh_scan.log --once &&\n        error \"should have failed\"\n    grep \"Error: Incorrect value 'some_non_existent_compression' for option 'compression'\" rh_scan.log ||\n        error \"expected error not found\"\n}\n\nfunction test_cfg_overflow\n{\n    clean_logs\n\n    # fs_key is harcoded as 128 bytes max. Try various lengths.\n\n    echo \"Test with a valid key\"\n    FS_KEY=\"fsname\" $RH -f $RBH_CFG_DIR/overflow.conf --test-syntax |\n        grep \"has been read successfully\" || error \"valid config failed\"\n\n    echo \"Test with an invalid key, at the size limit\"\n    FS_KEY=\"ghfkfkjghsdfklhgjklsdfhgkdjfhgkljfhgkljdfghlkfjghkjfhgjklhkljdfhsglkjfhlkgjhflkjghdflkjhgldfksjhglkdfjhglkjdfhglkjdfhglkjfdhglk\" $RH -f $RBH_CFG_DIR/overflow.conf --test-syntax |&\n        grep \"Invalid type for fs_key\" || error \"unexpected result for invalid key\"\n\n    echo \"Test with a key 1 character too long\"\n    FS_KEY=\"ghfkfkjghsdfklhgjklsdfhgkdjfhgkljfhgkljdfghlkfjghkjfhgjklhkljdfhsglkjfhlkgjhflkjghdflkjhgldfksjhglkdfjhglkjdfhglkjdfhglkjfdhglkq\" $RH -f $RBH_CFG_DIR/overflow.conf --test-syntax |&\n        grep \"Option too long for parameter 'General::fs_key'\" || error \"unexpected result for 127 chars key\"\n\n    echo \"Test with a key several characters too long\"\n    FS_KEY=\"ghfkfkjghsdfklhgjklsdfhgkdjfhgkllkhfglkhjgyugfhlgfghfhfhhfkdhliutylkrhgkjdfshgskjjfhgkljdfghlkfjghkjfhgjklhkljdfhsglkjfhlkgjhflkjghdflkjhgldfksjhglkdfjhglkjdfhglkjdfhglkjfdhglkq\" $RH -f $RBH_CFG_DIR/overflow.conf --test-syntax |&\n        grep \"Option too long for parameter 'General::fs_key'\" || error \"unexpected result for too long key\"\n\n\n}\n\n# Test various aspects of rbh-find -printf\nfunction test_rbh_find_printf\n{\n    # Populate the database with a single file\n    config_file=$1\n\n    clean_logs\n\n     # if robinhood tree is available, use rbh_cksum.sh from script directory\n    if [ -d \"../../src/robinhood\" ]; then\n        export PATH=\"../../scripts/:$PATH\"\n    
fi\n    # else use the installed one\n\n\n    echo \"Initial scan of empty filesystem\"\n    $RH -f $RBH_CFG_DIR/$config_file --scan -l DEBUG -L rh_chglogs.log  --once || error \"\"\n\n    # create a file\n    echo \"1-Creating file...\"\n    local srcfile=$RH_ROOT/test_printf/testf\n    rm -f $srcfile\n    mkdir -p $RH_ROOT/test_printf/\n    dd if=/dev/zero of=$srcfile bs=1k count=1 >/dev/null 2>/dev/null || error \"writing file\"\n\n    local fid=$(get_id \"$srcfile\")\n\n    if (( $no_log )); then\n        echo \"2-Scanning...\"\n        $RH -f $RBH_CFG_DIR/$config_file --scan -l DEBUG -L rh_chglogs.log  --once || error \"\"\n    else\n        echo \"2-Reading changelogs...\"\n        $RH -f $RBH_CFG_DIR/$config_file --readlog -l DEBUG -L rh_chglogs.log  --once || error \"\"\n    fi\n    check_db_error rh_chglogs.log\n\n    if (( $is_lhsm != 0 )); then\n        echo \"3-Archiving the files\"\n        $LFS hsm_archive --archive 1 $srcfile || error \"executing lfs hsm_archive\"\n\n        wait_hsm_state $srcfile 0x00000009\n\n\techo \"4-Reading changelogs...\"\n\t$RH -f $RBH_CFG_DIR/$config_file --readlog -l DEBUG -L rh_chglogs.log  --once || error \"\"\n\tcheck_db_error rh_chglogs.log\n    fi\n\n    echo \"5-Run checksum policy\"\n    local before_run=$(date +%s)\n    $RH -f $RBH_CFG_DIR/$config_file --run=checksum --target=all -I -l DEBUG -L stdout | grep \"Policy run summary\"\n    local after_run=$(date +%s)\n\n    echo \"6-rbh-find checks\"\n\n    # Basic functionality\n    STR=$($FIND $RH_ROOT/ -type f -f $RBH_CFG_DIR/$config_file -printf \"\")\n    [[ $STR == \"\" ]] || error \"unexpected rbh-find result (001): $STR\"\n\n    STR=$($FIND $RH_ROOT/ -type f -f $RBH_CFG_DIR/$config_file -printf \"some string\")\n    [[ $STR == \"some string\" ]] || error \"unexpected rbh-find result (002): $STR\"\n\n    STR=$($FIND $RH_ROOT/ -type f -f $RBH_CFG_DIR/$config_file -printf \"some string %p\")\n    [[ $STR == \"some string $srcfile\" ]] || error \"unexpected 
rbh-find result (003): $STR\"\n\n    STR=$($FIND $RH_ROOT/ -type f -f $RBH_CFG_DIR/$config_file -printf \"some string %p after\")\n    [[ $STR == \"some string $srcfile after\" ]] || error \"unexpected rbh-find result (004): $STR\"\n\n    STR=$($FIND $RH_ROOT/ -type f -f $RBH_CFG_DIR/$config_file -printf \"X%%Y\")\n    [[ $STR == \"X%Y\" ]] || error \"unexpected rbh-find result (005): $STR\"\n\n    STR=$($FIND $RH_ROOT/ -type f -f $RBH_CFG_DIR/$config_file -printf \"some string %%%p after\")\n    [[ $STR == \"some string %$srcfile after\" ]] || error \"unexpected rbh-find result (006): $STR\"\n\n    # Test each directive\n    STR=$($FIND $RH_ROOT/ -type f -f $RBH_CFG_DIR/$config_file -printf \"blocks=%b\")\n    #[[ $STR == \"blocks=8\" ]] || error \"unexpected rbh-find result (100): $STR\"\n\n    STR=$($FIND $RH_ROOT/ -type f -f $RBH_CFG_DIR/$config_file -printf \"%f bar\")\n    [[ $STR == \"testf bar\" ]] || error \"unexpected rbh-find result (101): $STR\"\n\n    STR=$($FIND $RH_ROOT/ -type f -f $RBH_CFG_DIR/$config_file -printf \"group is %g\")\n    [[ $STR == \"group is $root_str\" ]] || error \"unexpected rbh-find result (102): $STR\"\n\n    STR=$($FIND $RH_ROOT/ -type f -f $RBH_CFG_DIR/$config_file -printf \"hi   %M is mask\")\n    [[ $STR == \"hi   rw-r--r-- is mask\" ]] || error \"unexpected rbh-find result (103): $STR\"\n\n    STR=$($FIND $RH_ROOT/ -type f -f $RBH_CFG_DIR/$config_file -printf \"octal mask twice : %m %m\")\n    [[ $STR == \"octal mask twice : 644 644\" ]] || error \"unexpected rbh-find result (104): $STR\"\n\n    STR=$($FIND $RH_ROOT/ -type f -f $RBH_CFG_DIR/$config_file -printf \"nlinks: %n\")\n    [[ $STR == \"nlinks: 1\" ]] || error \"unexpected rbh-find result (105): $STR\"\n\n    STR=$($FIND $RH_ROOT/ -type f -f $RBH_CFG_DIR/$config_file -printf \"%p\\n\")\n    [[ $STR == \"$srcfile\" ]] || error \"unexpected rbh-find result (106): $STR\"\n\n    STR=$($FIND $RH_ROOT/ -type f -f $RBH_CFG_DIR/$config_file -printf \"size %s\\n\")\n    [[ 
$STR == \"size 1024\" ]] || error \"unexpected rbh-find result (107): $STR\"\n\n    STR=$($FIND $RH_ROOT/ -type f -f $RBH_CFG_DIR/$config_file -printf \"owner %u\\n\")\n    [[ $STR == \"owner $root_str\" ]] || error \"unexpected rbh-find result (108): $STR\"\n\n    STR=$($FIND $RH_ROOT/ -type f -f $RBH_CFG_DIR/$config_file -printf \"type %Y\\n\")\n    [[ $STR == \"type file\" ]] || error \"unexpected rbh-find result (109): $STR\"\n\n    STR=$($FIND $RH_ROOT/ -type f -f $RBH_CFG_DIR/$config_file -printf \"short type %y\\n\")\n    [[ $STR == \"short type f\" ]] || error \"unexpected rbh-find result (110): $STR\"\n\n    STR=$($FIND $RH_ROOT/ -type f -f $RBH_CFG_DIR/$config_file -printf \"short type %y\\n\")\n    [[ $STR == \"short type f\" ]] || error \"unexpected rbh-find result (111): $STR\"\n\n    STR=$($FIND $RH_ROOT/ -type f -f $RBH_CFG_DIR/$config_file -printf \"%p\\n\")\n    [[ $STR == \"$srcfile\" ]] || error \"unexpected rbh-find result (112): $STR\"\n\n    STR=$($FIND $RH_ROOT/ -type f -f $RBH_CFG_DIR/$config_file -printf \"%f\\n\")\n    [[ $STR == \"testf\" ]] || error \"unexpected rbh-find result (113): $STR\"\n\n    STR=$($FIND $RH_ROOT/ -nobulk -type f -f $RBH_CFG_DIR/$config_file -printf \"%p\\n\")\n    [[ $STR == \"$srcfile\" ]] || error \"unexpected rbh-find result (114): $STR\"\n\n    STR=$($FIND $RH_ROOT/ -nobulk -type f -f $RBH_CFG_DIR/$config_file -printf \"%f\\n\")\n    [[ $STR == \"testf\" ]] || error \"unexpected rbh-find result (115): $STR\"\n\n    STR=$($FIND $RH_ROOT/test_printf -type f -f $RBH_CFG_DIR/$config_file -printf \"%p\\n\")\n    [[ $STR == \"$srcfile\" ]] || error \"unexpected rbh-find result (116): $STR\"\n\n    STR=$($FIND $RH_ROOT/test_printf -type f -f $RBH_CFG_DIR/$config_file -printf \"%f\\n\")\n    [[ $STR == \"testf\" ]] || error \"unexpected rbh-find result (117): $STR\"\n\n    # Test each Robinhood sub-directive\n    STR=$($FIND $RH_ROOT/ -type f -f $RBH_CFG_DIR/$config_file -printf \" %Rc rh class\\n\")\n    [[ $STR == 
\" [none] rh class\" ]] || error \"unexpected rbh-find result (200): $STR\"\n\n    if (( $lustre_major >= 2 )); then\n\t# exact match\n        STR=$($FIND $RH_ROOT/ -type f -f $RBH_CFG_DIR/$config_file -printf \" %Rf fid\\n\")\n        [[ $STR == \" $fid fid\" ]] || error \"unexpected rbh-find result (201): $STR ($fid expected)\"\n    else\n\t# get_id returns '/<inode>' so we must get <something>/<inode>\n        STR=$($FIND $RH_ROOT/ -type f -f $RBH_CFG_DIR/$config_file -printf \"%Rf fid\\n\")\n        [[ $STR == *\"$fid fid\" ]] || error \"unexpected rbh-find result (201): $STR ($fid expected)\"\n    fi\n\n    STR=$($FIND $RH_ROOT/ -type f -f $RBH_CFG_DIR/$config_file -printf \" %Ro osts\\n\")\n    #[[ $STR == \"ost#0:1044 osts\" ]] || error \"unexpected rbh-find result (202): $STR\"\n\n    STR=$($FIND $RH_ROOT/ -type f -f $RBH_CFG_DIR/$config_file -printf \"parent fid=%Rp\\n\")\n    #[[ $STR == \"parent fid=0x200000007:0x1:0x0\" ]] || error \"unexpected rbh-find result (203): $STR\"\n\n    STR=$($FIND $RH_ROOT/ -type f -f $RBH_CFG_DIR/$config_file -printf \"%RCF\")\n    [[ $STR == \"$(date +%F)\" ]] || error \"unexpected rbh-find result (204): $STR\"\n\n    STR=$($FIND $RH_ROOT/ -type f -f $RBH_CFG_DIR/$config_file -printf \"%AF\")\n    [[ $STR == \"$(date +%F)\" ]] || error \"unexpected rbh-find result (205): $STR\"\n\n    STR=$($FIND $RH_ROOT/ -type f -f $RBH_CFG_DIR/$config_file -printf \"%TF\")\n    [[ $STR == \"$(date +%F)\" ]] || error \"unexpected rbh-find result (206): $STR\"\n\n    STR=$($FIND $RH_ROOT/ -type f -f $RBH_CFG_DIR/$config_file -printf \"%CF\")\n    [[ $STR == \"$(date +%F)\" ]] || error \"unexpected rbh-find result (206b): $STR\"\n\n    STR=$($FIND $RH_ROOT/ -type f -f $RBH_CFG_DIR/$config_file -printf \"%A-\" 2>&1)\n    [[ $STR == *\"%-\"* ]] || error \"unexpected rbh-find result (207): $STR\" # NB: invalid format\n\n    STR=$($FIND $RH_ROOT/ -type f -f $RBH_CFG_DIR/$config_file -printf \"%TG\")\n    [[ $STR == \"$(date +%G)\" ]] || error 
\"unexpected rbh-find result (208): $STR\"\n\n    STR=$($FIND $RH_ROOT/ -type f -f $RBH_CFG_DIR/$config_file -printf \"%CG\")\n    [[ $STR == \"$(date +%G)\" ]] || error \"unexpected rbh-find result (209): $STR\"\n\n    STR=$($FIND $RH_ROOT/ -type f -f $RBH_CFG_DIR/$config_file -printf \"QWERTY %RCc %TA %CA %Ap %AT\" 2>&1)\n    [[ $STR == *\"QWERTY\"* ]] || error \"unexpected rbh-find result (210): $STR\"\n\n    STR=$($FIND $RH_ROOT/ -type f -f $RBH_CFG_DIR/$config_file -printf \"QWERTY %RCOe %TOS %COS %AEx %AEY\" 2>&1)\n    [[ $STR == *\"QWERTY\"* ]] || error \"unexpected rbh-find result (211): $STR\"\n\n    STR=$($FIND $RH_ROOT/ -type f -f $RBH_CFG_DIR/$config_file -printf \"QWERTY %RCOA\" 2>&1)\n    [[ $STR == *\"QWERTY %OA\"* ]] || error \"unexpected rbh-find result (212): $STR\" # NB: invalid format\n\n    STR=$($FIND $RH_ROOT/ -type f -f $RBH_CFG_DIR/$config_file -printf \"QWERTY %RCEB\" 2>&1)\n    [[ $STR == *\"QWERTY %EB\"* ]] || error \"unexpected rbh-find result (213): $STR\" # NB: invalid format\n\n    STR=$($FIND $RH_ROOT/ -type f -f $RBH_CFG_DIR/$config_file -printf \"%RC{%A, %B %dth, %Y %F}\" 2>&1)\n    [[ $STR == *\"Error:\"* ]] && error \"unexpected rbh-find result (214): $STR\"\n\n    STR=$($FIND $RH_ROOT/ -type f -f $RBH_CFG_DIR/$config_file -printf \"%RC{%A, \" 2>&1)\n    [[ $STR == *\"Error: invalid string format\"* ]] || error \"unexpected rbh-find result (215): $STR\"\n\n    # Test various combinations\n    STR=$($FIND $RH_ROOT/ -type f -f $RBH_CFG_DIR/$config_file -printf \"FILE %p %s %Y %y %Rc %u %n and stop\\n\")\n    [[ $STR == \"FILE $srcfile 1024 file f [none] $root_str 1 and stop\" ]] || error \"unexpected rbh-find result (300): $STR\"\n\n    STR=$($FIND $RH_ROOT/ -type f -f $RBH_CFG_DIR/$config_file -printf \"%s\\t%d\\t%y\")\n    [[ $STR == \"1024\t1\tf\" ]] || error \"unexpected rbh-find result (301): $STR\"\n\n    # bash will not let us store \\0 in a string, so test slightly differently and run again on error\n    
FMT=\"\\0\\011\\x0\\42\\x42\\x25\\x0a\"\n    cmp <($FIND $RH_ROOT/ -type f -f $RBH_CFG_DIR/$config_file -printf \"%p$FMT\") \\\n        <(printf \"%s$FMT\" \"$srcfile\")\n    if (( $? != 0 )); then\n        $FIND $RH_ROOT/ -type f -f $RBH_CFG_DIR/$config_file -printf \"%p$FMT\" | cat -vT\n        error \"unexpected rbh-find result (302): (see previous line)\"\n    fi\n\n    cmp <($FIND $RH_ROOT/ -type f -f $RBH_CFG_DIR/$config_file -print0) \\\n        <(printf \"%s\\0\" \"$srcfile\")\n    if (( $? != 0 )); then\n        $FIND $RH_ROOT/ -type f -f $RBH_CFG_DIR/$config_file -print0 | cat -vT\n        echo\n        error \"unexpected rbh-find result (303): (see previous line)\"\n    fi\n\n\n    # Test module attributes\n    STR=$($FIND $RH_ROOT/ -type f -f $RBH_CFG_DIR/$config_file -printf \"%Rm{}\" 2>&1)\n    [[ $STR == *\"Error: cannot extract module attribute name\"* ]] || error \"unexpected rbh-find result (400): $STR\"\n\n    STR=$($FIND $RH_ROOT/ -type f -f $RBH_CFG_DIR/$config_file -printf \"%Rm{nonexistentmod}\" 2>&1)\n    [[ $STR == *\"Error: cannot extract module attribute name\"* ]] || error \"unexpected rbh-find result (401): $STR\"\n\n    if (( $is_lhsm != 0 )); then\n        STR=$($FIND $RH_ROOT/ -type f -f $RBH_CFG_DIR/$config_file -printf \"%Rm{lhsm.no_such_sym}\" 2>&1)\n        [[ $STR == *\"Error: cannot extract module attribute name\"* ]] || error \"unexpected rbh-find result (402): $STR\"\n\n        STR=$($FIND $RH_ROOT/ -type f -f $RBH_CFG_DIR/$config_file -printf \"%Rm{lhsm.archive_id}\")\n        [[ $STR == \"1\" ]] || error \"unexpected rbh-find result (403): $STR\"\n\n        STR=$($FIND $RH_ROOT/ -type f -f $RBH_CFG_DIR/$config_file -printf \"%Rm{lhsm.no_release}\")\n        [[ $STR == \"0\" ]] || error \"unexpected rbh-find result (404): $STR\"\n\n        STR=$($FIND $RH_ROOT/ -type f -f $RBH_CFG_DIR/$config_file -printf \"%Rm{lhsm.no_archive}\")\n        [[ $STR == \"0\" ]] || error \"unexpected rbh-find result (405): $STR\"\n\n        
STR=$($FIND $RH_ROOT/ -type f -f $RBH_CFG_DIR/$config_file -printf \"%Rm{lhsm.status}\")\n        [[ $STR == \"synchro\" ]] || error \"unexpected rbh-find result (406): $STR\"\n    fi\n\n    STR=$($FIND $RH_ROOT/ -type f -f $RBH_CFG_DIR/$config_file -printf \"%Rm{checksum.last_check}\")\n    # last check must be between before_run and after_run\n    if [[ -z \"$STR\" ]] || (( $STR < $before_run )) || (( $STR > $after_run )); then\n        error \"Unexpected checksum timestamp (407): $STR\"\n    fi\n\n    STR=$($FIND $RH_ROOT/ -type f -f $RBH_CFG_DIR/$config_file -printf \"%Rm{checksum.status}\")\n    [[ $STR == \"ok\" ]] || error \"unexpected rbh-find result (408): $STR\"\n\n    # With some formatting options\n    STR=$($FIND $RH_ROOT/ -type f -f $RBH_CFG_DIR/$config_file -printf \"file='%15f' size=%09s\")\n    [[ $STR == \"file='          testf' size=000001024\" ]] || error \"unexpected rbh-find result (500): $STR\"\n\n    if (( $is_lhsm != 0 )); then\n        STR=$($FIND $RH_ROOT/ -type f -f $RBH_CFG_DIR/$config_file -printf \"%7Rm{lhsm.archive_id}\")\n        [[ $STR == \"      1\" ]] || error \"unexpected rbh-find result (501): $STR\"\n\n        STR=$($FIND $RH_ROOT/ -type f -f $RBH_CFG_DIR/$config_file -printf \"%07Rm{lhsm.archive_id}\")\n        [[ $STR == \"0000001\" ]] || error \"unexpected rbh-find result (502): $STR\"\n\n        STR=$($FIND $RH_ROOT/ -type f -f $RBH_CFG_DIR/$config_file -printf \"%-Rm{lhsm.archive_id}\")\n        [[ $STR == \"1\" ]] || error \"unexpected rbh-find result (503): $STR\"\n    fi\n\n    rm -f report.out\n}\n\nfunction import_test\n{\n\tconfig_file=$1\n\tflavor=$2\n\tpolicy_str=\"$3\"\n\n\tif (( $is_hsmlite == 0 )); then\n\t\techo \"Backup test only: skipped\"\n\t\tset_skipped\n\t\treturn 1\n\tfi\n\n\tclean_logs\n\n    ensure_init_backend || error \"Error initializing backend $BKROOT\"\n\n    # initial scan\n    echo \"0- initial scan...\"\n\t$RH -f $RBH_CFG_DIR/$config_file --scan --once -l DEBUG -L rh_chglogs.log 2>/dev/null 
|| error \"scanning\"\n\n\n    # create files in backend\n    echo \"1- populating backend (import dir)...\"\n\n    # empty dir1\n    mkdir -p $BKROOT/import/dir1\n    # dir2 with files and subdir\n    mkdir -p $BKROOT/import/dir2/sub1 #subdir with files\n    mkdir -p $BKROOT/import/dir2/sub2 #empty subdir\n    # files\n    dd if=/dev/zero of=$BKROOT/import/dir2/file1 bs=1k count=5 2>/dev/null || error \"creating file\"\n    dd if=/dev/zero of=$BKROOT/import/dir2/file2 bs=1k count=10 2>/dev/null || error \"creating file\"\n    dd if=/dev/zero of=$BKROOT/import/dir2/sub1/file3 bs=1k count=15 2>/dev/null || error \"creating file\"\n    dd if=/dev/zero of=$BKROOT/import/dir2/sub1/file4 bs=1k count=20 2>/dev/null || error \"creating file\"\n    ln -s \"dummy symlink content\" $BKROOT/import/dir2/sub1/link.1 || error \"creating symlink\"\n    ln -s \"file4\" $BKROOT/import/dir2/sub1/link.2 || error \"creating symlink\"\n\n    chown -hR testuser:testgroup $BKROOT/import || error \"setting user/group in $BKROOT/import\"\n    # different times\n    touch -t 201012011234 $BKROOT/import/dir2/file1\n    touch -t 201012021234 $BKROOT/import/dir2/file2\n    touch -t 201012031234 $BKROOT/import/dir2/sub1/file3\n    touch -t 201012041234 $BKROOT/import/dir2/sub1/file4\n    # different rights\n    chmod 755 $BKROOT/import/dir1\n    chmod 750 $BKROOT/import/dir2\n    chmod 700 $BKROOT/import/dir2/sub1\n    chmod 644 $BKROOT/import/dir2/file1\n    chmod 640 $BKROOT/import/dir2/file2\n    chmod 600 $BKROOT/import/dir2/sub1/file3\n    chmod 755 $BKROOT/import/dir2/sub1/file4\n\n    find $BKROOT -printf \"%i %M %u %g %s %T@ %p\\n\" | sort -k 11 > bk.1\n\n    # 5 directories (imported, dir1, dir2, sub1, sub2) , 4 files, 2 links\n    expect_cnt=11\n\n\t# perform the import\n    echo \"2- import to $RH_ROOT...\"\n    $IMPORT -l DEBUG -f $RBH_CFG_DIR/$config_file $BKROOT/import  $RH_ROOT/dest > recov.log 2>&1 || error \"importing data from backend\"\n\n    [ \"$DEBUG\" = \"1\" ] && cat 
recov.log\n\n    # \"Import summary: 9 entries imported, 0 errors\"\n    info=$(grep \"Import summary: \" recov.log | awk '{print $3\"-\"$6}')\n    [ \"$info\" = \"$expect_cnt-0\" ] || error \"unexpected count of imported entries or errors: expected $expect_cnt-0, got $info\"\n\n    rm -f recov.log\n\n    # check that every dir has been imported to Lustre\n    echo \"3.1-checking dirs...\"\n    while read i m u g s t p; do\n        newp=$(echo $p | sed -e \"s#$BKROOT/import#$RH_ROOT/dest#\")\n\t[[ -d $newp ]] || error \"Missing dir $newp\"\n        read pi pm pu pg ps pt < <(stat --format \"%i %A %U %G %s %Y\" $newp || error \"Missing dir $newp\")\n        [[ $pm == $m ]] || error \"$newp has bad rights $pm<>$m\"\n        [[ $pu == $u ]] || error \"$newp has bad user $pu<>$u\"\n        [[ $pg == $g ]] || error \"$newp has bad group $pg<>$g\"\n    done < <(egrep \"^[0-9]+ d\" bk.1 | grep import)\n\n    # check that every file has been imported to Lustre with the same size, owner, rights, time\n    # TODO and it has been moved in backed\n    echo \"3.2-checking files...\"\n    while read i m u g s t p; do\n        newp=$(echo $p | sed -e \"s#$BKROOT/import#$RH_ROOT/dest#\")\n\t[[ -f $newp ]] || error \"Missing file $newp\"\n        read pi pm pu pg ps pt < <(stat --format \"%i %A %U %G %s %Y\" $newp || error \"Missing file $newp\")\n        # /!\\ on some OS, mtime is retruned as \"<epoch>.0000000\"\n        t=$(echo \"$t\" | sed -e \"s/\\.0000000000//\")\n        [[ $ps == $s ]] || error \"$newp has bad size $ps<>$s\"\n        [[ $pm == $m ]] || error \"$newp has bad rights $pm<>$m\"\n        [[ $pu == $u ]] || error \"$newp has bad user $pu<>$u\"\n        [[ $pg == $g ]] || error \"$newp has bad group $pg<>$g\"\n        [[ $pt == $t ]] || error \"$newp has bad mtime $pt<>$t\"\n\n        newb=$(echo $p | sed -e \"s#$BKROOT/import#$BKROOT/dest#\")\n        ls -d ${newb}__* || error \"${newb}__* not found in backend\"\n\n    done < <(egrep \"^[0-9]+ -\" bk.1 | grep 
import)\n\n    # check that every link  has been imported to Lustre with the same content, owner, rights\n    # TODO and it has been moved in backed\n    echo \"3.3-checking symlinks...\"\n    while read i m u g s t p; do\n        newp=$(echo $p | sed -e \"s#$BKROOT/import#$RH_ROOT/dest#\")\n\t[[ -L $newp ]] || error \"Missing symlink $newp\"\n        read pi pm pu pg ps pt < <(stat --format \"%i %A %U %G %s %Y\" $newp || error \"Missing symlink $newp\")\n        t=$(echo \"$t\" | sed -e \"s/\\.0000000000//\")\n        [[ $ps == $s ]] || error \"$newp has bad size $ps<>$s\"\n        [[ $pm == $m ]] || error \"$newp has bad rights $pm<>$m\"\n        [[ $pu == $u ]] || error \"$newp has bad user $pu<>$u\"\n        [[ $pg == $g ]] || error \"$newp has bad group $pg<>$g\"\n\n        newb=$(echo $p | sed -e \"s#$BKROOT/import#$BKROOT/dest#\")\n        ls -d ${newb}__* || error \"${newb}__* not found in backend\"\n    done < <(egrep \"^[0-9]+ l\" bk.1 | grep import)\n\n\n    rm -f bk.1\n\n    return 0\n\n\n\t# read changelogs to check there is no side effect\n\tif (( $no_log )); then\n\t\techo \"1.2-scan...\"\n\t\t$RH -f $RBH_CFG_DIR/$config_file --scan -l DEBUG -L rh_chglogs.log  --once 2>/dev/null || error \"scanning\"\n\telse\n\t\techo \"1.2-read changelog...\"\n\t\t$RH -f $RBH_CFG_DIR/$config_file --readlog -l DEBUG -L rh_chglogs.log  --once 2>/dev/null || error \"reading log\"\n\tfi\n\n\tsleep 2\n\n\t# all files are new\n\tnew_cnt=`$REPORT -f $RBH_CFG_DIR/$config_file -l MAJOR --csv -i | grep new | cut -d ',' -f 3`\n\techo \"$new_cnt files are new\"\n\t(( $new_cnt == $total )) || error \"20 new files expected\"\n\n\techo \"2.1-archiving files...\"\n\t# archive and modify files\n\tfor i in `seq 1 $total`; do\n\t\tif (( $i <= $nb_full )); then\n\t\t\t$RH -f $RBH_CFG_DIR/$config_file --run=migration --target=file:\"$RH_ROOT/dir.$i/file.$i\" --ignore-conditions -l DEBUG -L rh_migr.log 2>/dev/null \\\n\t\t\t\t|| error \"archiving $RH_ROOT/dir.$i/file.$i\"\n\t\telif (( $i 
<= $(($nb_full+$nb_rename)) )); then\n\t\t\t$RH -f $RBH_CFG_DIR/$config_file --run=migration --target=file:\"$RH_ROOT/dir.$i/file.$i\" --ignore-conditions -l DEBUG -L rh_migr.log 2>/dev/null \\\n\t\t\t\t|| error \"archiving $RH_ROOT/dir.$i/file.$i\"\n\t\t\tmv \"$RH_ROOT/dir.$i/file.$i\" \"$RH_ROOT/dir.$i/file_new.$i\" || error \"renaming file\"\n\t\t\tmv \"$RH_ROOT/dir.$i\" \"$RH_ROOT/dir.new_$i\" || error \"renaming dir\"\n\t\telif (( $i <= $(($nb_full+$nb_rename+$nb_delta)) )); then\n\t\t\t$RH -f $RBH_CFG_DIR/$config_file --run=migration --target=file:\"$RH_ROOT/dir.$i/file.$i\" --ignore-conditions -l DEBUG -L rh_migr.log 2>/dev/null \\\n\t\t\t\t|| error \"archiving $RH_ROOT/dir.$i/file.$i\"\n\t\t\ttouch \"$RH_ROOT/dir.$i/file.$i\"\n\t\telif (( $i <= $(($nb_full+$nb_rename+$nb_delta+$nb_nobkp)) )); then\n\t\t\t# no backup\n\t\t\t:\n\t\tfi\n\tdone\n\n\tif (( $no_log )); then\n\t\techo \"2.2-scan...\"\n\t\t$RH -f $RBH_CFG_DIR/$config_file --scan -l DEBUG -L rh_chglogs.log  --once 2>/dev/null || error \"scanning\"\n\telse\n\t\techo \"2.2-read changelog...\"\n\t\t$RH -f $RBH_CFG_DIR/$config_file --readlog -l DEBUG -L rh_chglogs.log  --once 2>/dev/null || error \"reading log\"\n\tfi\n\n\t$REPORT -f $RBH_CFG_DIR/$config_file -l MAJOR --csv -i > /tmp/report.$$\n\tnew_cnt=`grep \"new\" /tmp/report.$$ | cut -d ',' -f 3`\n\tmod_cnt=`grep \"modified\" /tmp/report.$$ | cut -d ',' -f 3`\n\tsync_cnt=`grep \"synchro\" /tmp/report.$$ | cut -d ',' -f 3`\n\t[[ -z $new_cnt ]] && new_cnt=0\n\t[[ -z $mod_cnt ]] && mod_cnt=0\n\t[[ -z $sync_cnt ]] && sync_cnt=0\n\n\techo \"new: $new_cnt, modified: $mod_cnt, synchro: $sync_cnt\"\n\t(( $sync_cnt == $nb_full+$nb_rename )) || error \"Nbr of synchro files doesn't match: $sync_cnt != $nb_full + $nb_rename\"\n\t(( $mod_cnt == $nb_delta )) || error \"Nbr of modified files doesn't match: $mod_cnt != $nb_delta\"\n\t(( $new_cnt == $nb_nobkp )) || error \"Nbr of new files doesn't match: $new_cnt != $nb_nobkp\"\n\n\t# shots before disaster (time is 
only significant for files)\n\tfind $RH_ROOT -type f -printf \"%n %m %T@ %g %u %s %p %l\\n\" > /tmp/before.$$\n\tfind $RH_ROOT -type d -printf \"%n %m %g %u %s %p %l\\n\" >> /tmp/before.$$\n\tfind $RH_ROOT -type l -printf \"%n %m %g %u %s %p %l\\n\" >> /tmp/before.$$\n\n\t# FS disaster\n\tif [[ -n \"$RH_ROOT\" ]]; then\n\t\techo \"3-Disaster: all FS content is lost\"\n\t\trm  -rf $RH_ROOT/*\n\tfi\n\n\t# perform the recovery\n\techo \"4-Performing recovery...\"\n\tcp /dev/null recov.log\n    $UNDELETE -f $RBH_CFG_DIR/$config_file -R || error \"Error performing recovery\"\n\n\tfind $RH_ROOT -type f -printf \"%n %m %T@ %g %u %s %p %l\\n\" > /tmp/after.$$\n\tfind $RH_ROOT -type d -printf \"%n %m %g %u %s %p %l\\n\" >> /tmp/after.$$\n\tfind $RH_ROOT -type l -printf \"%n %m %g %u %s %p %l\\n\" >> /tmp/after.$$\n\n\tdiff  /tmp/before.$$ /tmp/after.$$ > /tmp/diff.$$\n\n\t# checking status and diff result\n\tfor i in `seq 1 $total`; do\n\t\tif (( $i <= $nb_full )); then\n\t\t\tgrep \"Restoring $RH_ROOT/dir.$i/file.$i\" recov.log | egrep -e \"OK\\$\" >/dev/null || error \"Bad status (OK expected)\"\n\t\t\tgrep \"$RH_ROOT/dir.$i/file.$i\" /tmp/diff.$$ && error \"$RH_ROOT/dir.$i/file.$i NOT expected to differ\"\n\t\telif (( $i <= $(($nb_full+$nb_rename)) )); then\n\t\t\tgrep \"Restoring $RH_ROOT/dir.new_$i/file_new.$i\" recov.log\t| egrep -e \"OK\\$\" >/dev/null || error \"Bad status (OK expected)\"\n\t\t\tgrep \"$RH_ROOT/dir.new_$i/file_new.$i\" /tmp/diff.$$ && error \"$RH_ROOT/dir.new_$i/file_new.$i NOT expected to differ\"\n\t\telif (( $i <= $(($nb_full+$nb_rename+$nb_delta)) )); then\n\t\t\tgrep \"Restoring $RH_ROOT/dir.$i/file.$i\" recov.log\t| grep \"OK (old version)\" >/dev/null || error \"Bad status (old version expected)\"\n\t\t\tgrep \"$RH_ROOT/dir.$i/file.$i\" /tmp/diff.$$ >/dev/null || error \"$RH_ROOT/dir.$i/file.$i is expected to differ\"\n\t\telif (( $i <= $(($nb_full+$nb_rename+$nb_delta+$nb_nobkp)) )); then\n\t\t\tgrep -A 1 \"Restoring 
$RH_ROOT/dir.$i/file.$i\" recov.log | grep \"No backup\" >/dev/null || error \"Bad status (no backup expected)\"\n\t\t\tgrep \"$RH_ROOT/dir.$i/file.$i\" /tmp/diff.$$ >/dev/null || error \"$RH_ROOT/dir.$i/file.$i is expected to differ\"\n\t\tfi\n\tdone\n\n\trm -f /tmp/before.$$ /tmp/after.$$ /tmp/diff.$$\n}\n\n\nfunction check_find\n{\n    dir=$1\n    args=$2\n    count=$3\n\n    [ \"$DEBUG\" = \"1\" ] && echo \"===================\"\n    [ \"$DEBUG\" = \"1\" ] && echo $FIND $args $dir\n    [ \"$DEBUG\" = \"1\" ] && $FIND -d VERB $args $dir -l | grep -v \"\\.shook\"\n\n    c=`$FIND $args $dir | grep -v \"\\.shook\" | wc -l`\n    (( $c == $count )) || error \"find: $count entries expected in $dir, got: $c\"\n\n    # same test with '-ls option'\n    c=`$FIND $args $dir -ls | grep -v \"\\.shook\" | wc -l`\n    (( $c == $count )) || error \"find -ls: $count entries expected in $dir, got: $c\"\n}\n\nfunction test_find\n{\n\tcfg=$RBH_CFG_DIR/$1\n\topt=$2\n\tpolicy_str=\"$3\"\n\n\tclean_logs\n\n    # by default stripe all files on 0 and 1\n    if [ -z \"$POSIX_MODE\" ]; then\n\t    $LFS setstripe -c 2 -i 0 $RH_ROOT || echo \"error setting stripe on root\"\n    fi\n    # 1) create a FS tree with several levels:\n    #   root\n    #       file.1\n    #       file.2\n    #       dir.1\n    #       dir.2\n    #           file.1\n    #           file.2\n    #           dir.1\n    #           dir.2\n    #               file.1\n    #               file.2\n    #               dir.1\n    touch $RH_ROOT/file.1 || error \"creating file\"\n    chown daemon:bin $RH_ROOT/file.1\n    touch $RH_ROOT/file.2 || error \"creating file\"\n    chown bin:wheel $RH_ROOT/file.2\n    mkdir $RH_ROOT/dir.1 || error \"creating dir\"\n    mkdir $RH_ROOT/dir.2 || error \"creating dir\"\n    dd if=/dev/zero of=$RH_ROOT/dir.2/file.1 bs=1k count=10 2>/dev/null || error \"creating file\"\n    if [ -z \"$POSIX_MODE\" ]; then\n\t    $LFS setstripe -c 1 -i 1 $RH_ROOT/dir.2/file.2 || error \"creating file with 
stripe\"\n    else\n        touch $RH_ROOT/dir.2/file.2 || error \"creating file\"\n    fi\n    mkdir $RH_ROOT/dir.2/dir.1 || error \"creating dir\"\n    mkdir $RH_ROOT/dir.2/dir.2 || error \"creating dir\"\n    dd if=/dev/zero of=$RH_ROOT/dir.2/dir.2/file.1 bs=1M count=1 2>/dev/null || error \"creating file\"\n    if [ -z \"$POSIX_MODE\" ]; then\n\t    $LFS setstripe -c 1 -i 0 $RH_ROOT/dir.2/dir.2/file.2 || error \"creating file with stripe\"\n    else\n        touch $RH_ROOT/dir.2/dir.2/file.2 || error \"creating file\"\n    fi\n    mkdir $RH_ROOT/dir.2/dir.2/dir.1 || error \"creating dir\"\n\n    # scan FS content\n    $RH -f $cfg --scan -l DEBUG -L rh_scan.log --once 2>/dev/null || error \"scanning\"\n\n    # 2) test find at several levels\n    echo \"checking find list at all levels...\"\n    check_find \"\" \"-f $cfg\" 12 # should return all (including root)\n    check_find \"\" \"-f $cfg -b\" 12 # should return all (including root)\n    check_find $RH_ROOT \"-f $cfg\" 12 # should return all (including root)\n    check_find $RH_ROOT/file.1 \"-f $cfg\" 1 # should return only the file\n    check_find $RH_ROOT/dir.1 \"-f $cfg\" 1  # should return dir.1\n    check_find $RH_ROOT/dir.2 \"-f $cfg\" 8  # should return dir.2 + its content\n    check_find $RH_ROOT/dir.2/file.2 \"-f $cfg\" 1  # should return dir.2/file.2\n    check_find $RH_ROOT/dir.2/dir.1 \"-f $cfg\" 1  # should return dir2/dir.1\n    check_find $RH_ROOT/dir.2/dir.2 \"-f $cfg\" 4  # should return dir.2/dir.2 + its content\n    check_find $RH_ROOT/dir.2/dir.2/file.1 \"-f $cfg\" 1  # should return dir.2/dir.2/file.1\n    check_find $RH_ROOT/dir.2/dir.2/dir.1 \"-f $cfg\" 1 # should return dir.2/dir.2/dir.1\n\n    # 3) test -td / -tf\n    echo \"testing type filter (-type d)...\"\n    check_find \"\" \"-f $cfg -type d\" 6 # should return all (including root)\n    check_find \"\" \"-f $cfg -type d -b\" 6 # should return all (including root)\n    check_find $RH_ROOT \"-f $cfg -type d\" 6 # 6 including 
root\n    check_find $RH_ROOT/dir.2 \"-f $cfg -type d\" 4 # 4 including dir.2\n    check_find $RH_ROOT/dir.2/dir.2 \"-f $cfg -type d\" 2 # 2 including dir.2/dir.2\n    check_find $RH_ROOT/dir.1 \"-f $cfg -type d\" 1\n    check_find $RH_ROOT/dir.2/dir.1 \"-f $cfg -type d\" 1\n    check_find $RH_ROOT/dir.2/dir.2/dir.1 \"-f $cfg -type d\" 1\n\n    echo \"testing type filter (-type f)...\"\n    check_find \"\" \"-f $cfg -type f\" 6\n    check_find \"\" \"-f $cfg -type f -b\" 6\n    check_find $RH_ROOT \"-f $cfg -type f\" 6\n    check_find $RH_ROOT/dir.2 \"-f $cfg -type f\" 4\n    check_find $RH_ROOT/dir.2/dir.2 \"-f $cfg -type f\" 2\n    check_find $RH_ROOT/dir.1 \"-f $cfg -type f\" 0\n    check_find $RH_ROOT/dir.2/dir.1 \"-f $cfg -type f\" 0\n    check_find $RH_ROOT/dir.2/dir.2/dir.1 \"-f $cfg -type f\" 0\n    check_find $RH_ROOT/file.1 \"-f $cfg -type f\" 1\n    check_find $RH_ROOT/dir.2/file.1 \"-f $cfg -type f\" 1\n\n    echo \"testing name filter...\"\n    check_find \"\" \"-f $cfg -name dir.*\" 5 # 5\n    check_find \"\" \"-f $cfg -name dir.* -b\" 5 # 5\n    check_find $RH_ROOT \"-f $cfg -name dir.*\" 5 # 5\n    check_find $RH_ROOT \"-f $cfg -not -name dir.*\" 7 # all except 5\n    check_find $RH_ROOT/dir.2 \"-f $cfg -name dir.*\" 4 # 4 including dir.2\n    check_find $RH_ROOT/dir.2/dir.2 \"-f $cfg -name dir.*\" 2 # 2 including dir.2/dir.2\n    check_find $RH_ROOT/dir.1 \"-f $cfg -name dir.*\" 1\n    check_find $RH_ROOT/dir.2/dir.1 \"-f $cfg -name dir.*\" 1\n    check_find $RH_ROOT/dir.2/dir.2/dir.1 \"-f $cfg -name dir.*\" 1\n\n    echo \"testing name filter (case insensitive)...\"\n    check_find \"\" \"-f $cfg -iname Dir.*\" 5 # match all \"dir.*\"\n    check_find \"\" \"-f $cfg -b -iname Dir.*\" 5\n    check_find \"\" \"-f $cfg -iname dir.*\" 5 # match all \"dir.*\"\n    check_find \"\" \"-f $cfg -b -iname dir.*\" 5\n    check_find $RH_ROOT \"-f $cfg -name Dir.*\" 0 # no match expected\n    check_find $RH_ROOT \"-f $cfg -b -name Dir.*\" 0\n    check_find 
$RH_ROOT \"-f $cfg -iname Dir.*\" 5 # match all \"dir.*\"\n    check_find $RH_ROOT \"-f $cfg -b -iname Dir.*\" 5\n    check_find $RH_ROOT \"-f $cfg -iname dir.*\" 5 # match all \"dir.*\"\n    check_find $RH_ROOT \"-f $cfg -b -iname dir.*\" 5\n    check_find $RH_ROOT \"-f $cfg -not -iname Dir.*\" 7 # all (12) except 5\n    check_find $RH_ROOT \"-f $cfg -b -not -iname Dir.*\" 7\n    check_find $RH_ROOT \"-f $cfg -not -iname dir.*\" 7\n    check_find $RH_ROOT \"-f $cfg -b -not -iname dir.*\" 7\n\n    echo \"testing size filter...\"\n    check_find \"\" \"-f $cfg -type f -size +2k\" 2\n    check_find \"\" \"-f $cfg -type f -size +2k -b\" 2\n    check_find $RH_ROOT \"-f $cfg -type f -size +2k\" 2\n    check_find $RH_ROOT \"-f $cfg -type f -size +11k\" 1\n    check_find $RH_ROOT \"-f $cfg -type f -size +1M\" 0\n    check_find $RH_ROOT \"-f $cfg -type f -size 1M\" 1\n    check_find $RH_ROOT \"-f $cfg -type f -size 10k\" 1\n    check_find $RH_ROOT \"-f $cfg -type f -size -1M\" 5\n    check_find $RH_ROOT \"-f $cfg -type f -size -10k\" 4\n    check_find $RH_ROOT \"-f $cfg -type f -! 
-size -10k\" 2\n\n    echo \"testing user/group filter...\"\n    check_find $RH_ROOT \"-f $cfg -user daemon\" 1\n    check_find $RH_ROOT \"-f $cfg -user bin\" 1\n    check_find $RH_ROOT \"-f $cfg -user adm\" 0\n    check_find $RH_ROOT \"-f $cfg -not -user adm\" 12\n    check_find $RH_ROOT \"-f $cfg -not -user daemon\" 11\n    check_find $RH_ROOT \"-f $cfg -not -user bin\" 11\n\n    check_find $RH_ROOT \"-f $cfg -group bin\" 1\n    check_find $RH_ROOT \"-f $cfg -group wheel\" 1\n    check_find $RH_ROOT \"-f $cfg -group sys\" 0\n    check_find $RH_ROOT \"-f $cfg -not -group sys\" 12\n    check_find $RH_ROOT \"-f $cfg -not -group bin\" 11\n    check_find $RH_ROOT \"-f $cfg -not -group wheel\" 11\n\n    check_find $RH_ROOT \"-f $cfg -user daemon -group bin\" 1\n    check_find $RH_ROOT \"-f $cfg -user daemon -group wheel\" 0\n    check_find $RH_ROOT \"-f $cfg -user daemon -not -group wheel\" 1\n    check_find $RH_ROOT \"-f $cfg -not -user daemon -not -group wheel\" 10\n    check_find $RH_ROOT \"-f $cfg -not -user daemon -not -group wheel -type f\" 4\n\n    if [ -z \"$POSIX_MODE\" ]; then\n        echo \"testing ost filter...\"\n        check_find \"\" \"-f $cfg -ost 0\" 5 # all files but 1\n        check_find \"\" \"-f $cfg -ost 0 -b\" 5 # all files but 1\n        check_find $RH_ROOT \"-f $cfg -ost 0\" 5 # all files but 1\n        check_find $RH_ROOT \"-f $cfg -ost 1\" 5 # all files but 1\n        check_find $RH_ROOT/dir.2/dir.2 \"-f $cfg -ost 1\" 1  # all files in dir.2 but 1\n        echo \"testing ost set filters...\"\n        check_find \"\" \"-f $cfg -ost 0-5,12\" 6 # all files, only have 2 ost but tests parsing\n        check_find $RH_ROOT \"-f $cfg -ost 0-5,12\" 6 # all files\n        check_find $RH_ROOT/dir.2/dir.2 \"-f $cfg -ost 0-5,12\" 2  # all files in dir.2/dir.2\n        check_find \"\" \"-f $cfg -type f -ost 0-5,12\" 6 # all files, only have 2 ost but tests parsing\n        check_find $RH_ROOT \"-f $cfg -type f -ost 0-5,12\" 6 # all files\n        
check_find $RH_ROOT/dir.2/dir.2 \"-f $cfg -type f -ost 0-5,12\" 2  # all files in dir.2/dir.2\n    fi\n\n    echo \"testing mtime filter...\"\n    check_find \"\" \"-f $cfg -mtime +1d\" 0  #none\n    check_find \"\" \"-f $cfg -mtime -1d\" 12 #all\n    check_find \"\" \"-f $cfg -mtime +1d -b\" 0  #none\n    check_find \"\" \"-f $cfg -mtime -1d -b\" 12 #all\n    # change last day\n    check_find $RH_ROOT \"-f $cfg -mtime +1d\" 0  #none\n    check_find $RH_ROOT \"-f $cfg -mtime -1d\" 12 #all\n    # the same with another syntax\n    check_find $RH_ROOT \"-f $cfg -mtime +1\" 0  #none\n    check_find $RH_ROOT \"-f $cfg -mtime -1\" 12 #all\n    # without 2 hour\n    check_find $RH_ROOT \"-f $cfg -mtime +2h\" 0  #none\n    check_find $RH_ROOT \"-f $cfg -mtime -2h\" 12 #all\n    # the same with another syntax\n    check_find $RH_ROOT \"-f $cfg -mtime +120m\" 0  #none\n    check_find $RH_ROOT \"-f $cfg -mtime -120m\" 12 #all\n    # the same with another syntax\n    check_find $RH_ROOT \"-f $cfg -mmin +120\" 0  #none\n    check_find $RH_ROOT \"-f $cfg -mmin -120\" 12 #all\n    check_find $RH_ROOT \"-f $cfg -not -mmin +120\" 12 #all\n\n    check_find $RH_ROOT \"-f $cfg -links 2\" 3 # directories with no child dir\n    check_find $RH_ROOT \"-f $cfg -links +2\" 3 # directories with child dir\n    check_find $RH_ROOT \"-f $cfg -links -2\" 6 # all files\n    check_find $RH_ROOT \"-f $cfg -! 
-links -2\" 6 # all directories (same as +1)\n\n    # restore default striping\n    if [ -z \"$POSIX_MODE\" ]; then\n        $LFS setstripe -c 2 -i -1 $RH_ROOT\n    fi\n}\n\nfunction test_du\n{\n\tcfg=$RBH_CFG_DIR/$1\n\topt=$2\n\tpolicy_str=\"$3\"\n\n\tclean_logs\n\n    # 1) create a FS tree with several levels and sizes:\n    #   root\n    #       file.1          1M\n    #       file.2          1k\n    #       dir.1\n    #           file.1      2k\n    #           file.2      10k\n    #           link.1\n    #       dir.2\n    #           file.1      1M\n    #           file.2      1\n    #           link.1\n    #           dir.1\n    #           dir.2\n    #               file.1 0\n    #               file.2 0\n    #               dir.1\n    dd if=/dev/zero of=$RH_ROOT/file.1 bs=1M count=1 2>/dev/null || error \"creating file\"\n    dd if=/dev/zero of=$RH_ROOT/file.2 bs=1k count=1 2>/dev/null || error \"creating file\"\n\n    mkdir $RH_ROOT/dir.1 || error \"creating dir\"\n    dd if=/dev/zero of=$RH_ROOT/dir.1/file.1 bs=1k count=2 2>/dev/null || error \"creating file\"\n    dd if=/dev/zero of=$RH_ROOT/dir.1/file.2 bs=10k count=1 2>/dev/null || error \"creating file\"\n    ln -s \"content1\" $RH_ROOT/dir.1/link.1 || error \"creating symlink\"\n\n    mkdir $RH_ROOT/dir.2 || error \"creating dir\"\n    dd if=/dev/zero of=$RH_ROOT/dir.2/file.1 bs=1M count=1 2>/dev/null || error \"creating file\"\n\tdd if=/dev/zero of=$RH_ROOT/dir.2/file.2 bs=1 count=1 2>/dev/null || error \"creating file\"\n    ln -s \"content2\" $RH_ROOT/dir.2/link.1 || error \"creating symlink\"\n    mkdir $RH_ROOT/dir.2/dir.1 || error \"creating dir\"\n    mkdir $RH_ROOT/dir.2/dir.2 || error \"creating dir\"\n    touch $RH_ROOT/dir.2/dir.2/file.1 || error \"creating file\"\n\ttouch $RH_ROOT/dir.2/dir.2/file.2 || error \"creating file\"\n    mkdir $RH_ROOT/dir.2/dir.2/dir.1 || error \"creating dir\"\n\n    # write blocks to disk\n    sync\n\n    # scan FS content\n    $RH -f $cfg --scan -l DEBUG -L 
rh_scan.log --once 2>/dev/null || error \"scanning\"\n\n    # test byte display on root\n    size=$($DU -f $cfg -t f -b $RH_ROOT | awk '{print $1}')\n    [ $size = \"2110465\" ] || error \"bad returned size $size: 2110465 expected\"\n\n    # test on subdirs\n    size=$($DU -f $cfg -t f -b $RH_ROOT/dir.1 | awk '{print $1}')\n    [ $size = \"12288\" ] || error \"bad returned size $size: 12288 expected\"\n\n    # block count is hard to predict (due to ext4 prealloc)\n    # only test 1st digit\n    kb=$($DU -f $cfg -t f -k $RH_ROOT | awk '{print $1}')\n    [[ $kb = 2??? ]] || error \"nb 1K block should be about 2k+smthg (got $kb)\"\n\n    # 2 (for 2MB) + 1 for small files\n    mb=$($DU -f $cfg -t f -m $RH_ROOT | awk '{print $1}')\n    [[ $mb = 3 ]] || error \"nb 1M block should be 3 (got $mb)\"\n\n    # count are real\n    nb_file=$($DU -f $cfg -t f -c $RH_ROOT | awk '{print $1}')\n    nb_link=$($DU -f $cfg -t l -c $RH_ROOT | awk '{print $1}')\n    nb_dir=$($DU -f $cfg -t d -c $RH_ROOT | awk '{print $1}')\n    [[ $nb_file = 8 ]] || error \"found $nb_file files/8\"\n    [[ $nb_dir = $((6+extra_dir)) ]] || error \"found $nb_dir dirs/$((6+extra_dir))\"\n    [[ $nb_link = 2 ]] || error \"found $nb_link links/2\"\n\n}\n\nfunction check_disabled\n{\n       config_file=$1\n       flavor=$2\n       policy_str=\"$3\"\n\n       clean_logs\n\n       case \"$flavor\" in\n               purge)\n                       cmd='--run=purge'\n                       match='Policy purge is disabled'\n                       ;;\n               migration)\n                       if (( $is_hsmlite + $is_lhsm == 0 )); then\n                               echo \"hsmlite or HSM test only: skipped\"\n                               set_skipped\n                               return 1\n                       fi\n                       cmd='--run=migration'\n                       match='Policy migration is disabled'\n                       ;;\n               hsm_remove)\n                       if (( 
$is_hsmlite + $is_lhsm == 0 )); then\n                               echo \"hsmlite or HSM test only: skipped\"\n                               set_skipped\n                               return 1\n                       fi\n                       cmd='--run=hsm_remove'\n                       match='Policy hsm_remove is disabled'\n                       ;;\n               rmdir)\n                       cmd='--run=rmdir_empty'\n                       match='Policy rmdir_empty is disabled'\n                       ;;\n               class)\n                       cmd='--scan'\n                       match='disabling fileclass matching'\n                       ;;\n               *)\n                       error \"unexpected flavor $flavor\"\n                       return 1 ;;\n       esac\n\n       echo \"1.1. Performing action $cmd (daemon mode)...\"\n       # run with --scan, to keep the daemon alive (else, it would have nothing to do)\n       $RH -f $RBH_CFG_DIR/$config_file --scan $cmd -l DEBUG -L rh_scan.log -p rh.pid &\n\n       sleep 2\n       echo \"1.2. Checking that kill -HUP does not terminate the process...\"\n       kill -HUP $(cat rh.pid)\n       sleep 2\n       [[ -f /proc/$(cat rh.pid)/status ]] || error \"process terminated on kill -HUP\"\n\n       kill $(cat rh.pid)\n       sleep 2\n       rm -f rh.pid\n\n       grep \"$match\" rh_scan.log || error \"log should contain \\\"$match\\\"\"\n\n       cp /dev/null rh_scan.log\n       echo \"2. 
Performing action $cmd (one shot)...\"\n        $RH -f $RBH_CFG_DIR/$config_file $cmd --once -l DEBUG -L rh_scan.log\n\n       grep \"$match\" rh_scan.log || error \"log should contain \\\"$match\\\"\"\n\n}\n\nfunction test_reload\n{\n    config_file=$1\n\n    clean_logs\n\n    # create test cases\n    $RBH_TESTS_DIR/fill_fs.sh $RH_ROOT 100 >/dev/null\n\n    # create a tmp copy of the config to modify it\n    cfg=$RBH_CFG_DIR/${config_file}.COPY.conf\n    cp -f $RBH_CFG_DIR/$config_file $cfg\n\n    # run regular scan + alerts\n    export ALERT_CLASS=size10k\n    $RH -f $cfg --scan --run=alert -L rh_scan.log -l DEBUG -p rh.pid &\n\n    # check the effect of reload\n    sleep 2 # wait for full rbh initialization\n    # change config file\n    echo \"EntryProcessor { nb_threads = 2; }\" >> $cfg\n    kill -HUP $(cat rh.pid)\n    sleep 2 # signal processing loop awakes every second\n\n    # check the signal is properly received\n    grep \"SIGHUP received\" rh_scan.log || error \"Signal not received\"\n    # check the reload operation is properly triggered\n    grep \"Reloading configuration\" rh_scan.log ||\n        error \"config reload not triggered\"\n\n    grep \"Failure reloading\" rh_scan.log && error \"failed to parse cfg\"\n\n    # check config parsing is triggered for submodules\n    grep \"Loading policies config\"  rh_scan.log ||\n        error \"Config parsing not triggered for submodules\"\n\n    # check the config change is taken into account (to be completed)\n    grep \"EntryProcessor::nb_threads changed\" rh_scan.log ||\n        error \"Parameter change not taken into account\"\n\n    kill $(cat rh.pid)\n    rm -f $cfg\n    return 0\n}\n\nfunction escape_chars\n{\n    config_file=$1\n    local special_charset='^$+(){}\\\\\\\\|.'\n    local target_command=\"$2\"\n    local command\n\n    # Create files with special characters\n    declare -a files\n    while read -n1 char; do\n        # Some of the special characters make up for a valid pattern\n        
# when they are alone and unescaped but not when they are doubled\n        files+=(\"$RH_ROOT/file-${char}${char}\")\n    done < <(printf \"$special_charset\")\n\n    touch \"${files[@]}\"\n\n    case \"$target_command\" in\n    undelete)\n        if (( $is_lhsm == 0 )); then\n            echo \"Lustre/HSM test only: skipped\"\n            set_skipped\n            return 1\n        fi\n\n        command=\"$UNDELETE -f $RBH_CFG_DIR/$config_file -R {}\"\n        # Archive -> scan -> remove -> scan\n        for file in \"${files[@]}\"; do\n            $LFS hsm_archive \"$file\"\n        done\n        for file in \"${files[@]}\"; do\n            wait_hsm_state \"$file\" 0x00000009\n        done\n        $RH -f $RBH_CFG_DIR/$config_file --scan --once 2>&1 > /dev/null\n        rm -f \"${files[@]}\"\n        sleep 1 # Garbage collection is based on scan epoch time\n        $RH -f $RBH_CFG_DIR/$config_file --scan --once 2>&1 > /dev/null\n        ;;\n    report)\n        command=\"$REPORT -f $RBH_CFG_DIR/$config_file -P {} --fs-info\"\n        $RH -f $RBH_CFG_DIR/$config_file --scan --once 2>&1 > /dev/null\n        ;;\n    *)\n        # Unknown command\n        error \"unknown command '$command'\"\n        ;;\n    esac\n\n    for file in \"${files[@]}\"; do\n        printf '%q' \"$file\" | xargs -I{} $command > out.log 2> err.log ||\n            error \"'$command' failed to run on '$file'\"\n\n        case \"$target_command\" in\n        undelete)\n            [ -e \"$file\" ] || error \"'$file' was not undeleted\"\n            ;;\n        report)\n            check_db_error out.log\n            ;;\n        *)\n            # Nothing to check\n            ;;\n        esac\n    done\n    rm -f out.log err.log\n}\n\nfunction test_lhsm_archive\n{\n    # test_lhsm1.conf \"check sql query string in case of multiple AND/OR\"\n\n    if (( $is_lhsm == 0 )); then\n        echo \"Lustre/HSM test only: skipped\"\n        set_skipped\n        return 1\n    fi\n\n    config_file=$1\n    
rm -f rh_archive.log\n\n    # run one pass lhsm_archive - need full scan first\n    $RH -f $RBH_CFG_DIR/$config_file --scan --once 2>&1 > /dev/null\n    $RH -f $RBH_CFG_DIR/$config_file --run=lhsm_archive -L rh_archive.log -l FULL -O\n\n    # check\n    grep \"AS id FROM ENTRIES\" rh_archive.log |\n      grep \"AND (((ENTRIES.lhsm_lstarc=0\" |\n      grep -q \"ENTRIES.last_mod IS NULL))) AND (ENTRIES.last_access\" ||\n      error \"lhsm_archive query begin blocks incorrect\"\n\n    grep \"Error 7 executing query\" rh_archive.log > /dev/null &&\n      error \"lhsm_archive DB query failure\"\n\n    return 0\n}\n\nfunction test_hsm_invalidate\n{\n    #  test_hsm_invalidate.conf \"HSM invalidate deleted files\"\n    if [[ $is_lhsm == 0 ]] ; then\n        echo \"Lustre/HSM test only: skipped\"\n        set_skipped\n        return 1\n    fi\n\n    config_file=$1\n    rm -f rh_archive.log\n\n    # Create test files\n    for i in `seq 1 5`; do\n        dd if=/dev/zero of=$RH_ROOT/file.$i bs=1M count=2 >/dev/null 2>/dev/null || error \"writing file.$i\"\n    done\n    # run one pass lhsm_archive - need full scan first\n    $RH -f $RBH_CFG_DIR/$config_file --scan --once 2>&1 > /dev/null\n\n    # delete 2 files, rename 1\n    for i in 4 5 ; do\n        rm -f $RH_ROOT/file.$i\n    done\n    mv $RH_ROOT/file.3 $RH_ROOT/file.a\n    sleep 2\n    # archive files\n    $RH -f $RBH_CFG_DIR/$config_file --run=lhsm_archive -O 2>&1 > /dev/null\n\n    # get number of entries in invalid state in DB\n    LC=$(mysql $RH_DB -B -e \"select count(*) from ENTRIES where invalid = 1\" | tail -1)\n    if [[ $LC != 2 ]] ; then\n        error \"Number of files in invalid entries incorrect - expected 2, found $LC\"\n    fi\n\n    return 0\n}\n\nfunction test_hsm_remove_order\n{\n    if [[ $is_lhsm == 0 ]] ; then\n        echo \"Lustre/HSM test only: skipped\"\n        set_skipped\n        return 1\n    fi\n\n    config_file=$1\n    my_log_file=rh_hsm_remove.log\n    rm -f $my_log_file\n\n    # 
test_hsm_remove_noorder.conf\n    # test_hsm_remove_order.conf\n\n    # need full scan first, even if FS is empty\n    $RH -f $RBH_CFG_DIR/$config_file --scan --once 2>&1 > /dev/null\n\n    # run hsm_remove\n    $RH -f $RBH_CFG_DIR/$config_file --run=lhsm_remove -O -l FULL -L $my_log_file 2>&1 > /dev/null\n\n    grep \"SELECT\" $my_log_file | grep \"SOFT_RM\" | grep \"ORDER BY\" > /dev/null\n    ORDERRC=$?\n    rm -f $my_log_file\n\n    echo \"$config_file\" | grep \"noorder\" > /dev/null\n    CFGRC=$?\n\n    if [[ ${CFGRC} -eq 0 ]] && [[ ${ORDERRC} -ne 0 ]] ; then\n        # OK\n        return 0\n    elif [[ ${CFGRC} -ne 0 ]] && [[ ${ORDERRC} -eq 0 ]] ; then\n        # OK\n        return 0\n    elif [[ ${CFGRC} -eq 0 ]] && [[ ${ORDERRC} -eq 0 ]] ; then\n      # error\n        error \"Unexpected ORDER BY found in SOFT_RM select\"\n    else\n      # error\n        error \"ORDER BY not found in SOFT_RM select\"\n    fi\n\n    return 0\n}\n\nfunction test_multirule_select\n{\n    # test doesnt work for POSIX as there are harcoded /mnt/lustre path in\n    # config\n    if [ -n \"$POSIX_MODE\" ]; then\n        echo \"Lustre test only: skipped\"\n        set_skipped\n        return 1\n    fi\n\n    # test_multirule.conf \"check sql query string in case of multiple rules\"\n\n    config_file=$1\n    policy=$2\n    logfile=rh_multirule.log\n    rm -f $logfile\n\n    if [ $policy = migration ] && (( $is_hsmlite + $is_lhsm == 0 )); then\n       echo \"hsmlite or HSM test only: skipped\"\n       set_skipped\n       return 1\n    fi\n\n    touch -d \"now-1day\" $RH_ROOT/file.foo\n    touch -d \"now-1day\" $RH_ROOT/file.bar\n    touch -d \"now-1day\" $RH_ROOT/root_owned\n    touch -d \"now-1day\" $RH_ROOT/noroot\n    chown testuser $RH_ROOT/noroot\n    mkdir -p $RH_ROOT/scratch/tmp\n    touch -d \"now-1day\" $RH_ROOT/scratch/file\n    touch -d \"now-1day\" $RH_ROOT/scratch/noroot\n    chown testuser $RH_ROOT/scratch/noroot\n    touch -d \"now-1day\" $RH_ROOT/scratch/tmp/file\n  
  touch -d \"now-1day\" $RH_ROOT/file.1\n    touch -d \"now-1day\" $RH_ROOT/file.2\n    touch -d \"now-1day\" $RH_ROOT/file.3\n\n    # scan test entries and run cleanup\n    $RH -f $RBH_CFG_DIR/$config_file --scan --once 2>&1 > /dev/null\n    # set fake creation date 8day ago\n    create_date=$(date -d \"now-8day\" +%s)\n    mysql $RH_DB -e \"UPDATE ENTRIES SET creation_time=$create_date WHERE type='file'\"\n\n    $RH -f $RBH_CFG_DIR/$config_file --run=$policy -L $logfile -l FULL -O\n\n    # ignored: foo, bar\n\n    check_rule_and_class $logfile $RH_ROOT/root_owned \"scratch_tmp_$policy\" \"root_files\"\n    check_rule_and_class $logfile $RH_ROOT/noroot \"default\" \"\"\n    check_rule_and_class $logfile $RH_ROOT/scratch/file \"scratch_$policy\" \"scratch_files\"\n    check_rule_and_class $logfile $RH_ROOT/scratch/noroot \"scratch_$policy\" \"scratch_files\"\n    check_rule_and_class $logfile $RH_ROOT/scratch/tmp/file \"scratch_tmp_$policy\" \"scratch_tmp_files\"\n    check_rule_and_class $logfile $RH_ROOT/file.1 \"nocond_${policy}1\" \"files1\"\n    check_rule_and_class $logfile $RH_ROOT/file.2 \"nocond_${policy}2\" \"files2\"\n    check_rule_and_class $logfile $RH_ROOT/file.3 \"nocond_${policy}2\" \"files3\"\n\nif [ $policy = cleanup ]; then\n    # check\n    grep \"AS id FROM ENTRIES\" $logfile |\n    grep \"OR ENTRIES.invalid IS NULL) AND NOT (ENTRIES.fileclass LIKE\" |\n    grep \"OR ENTRIES.last_mod IS NULL) AND ENTRIES.fileclass LIKE BINARY '%+scratch_files+%\" |\n    grep \"OR (ENTRIES.fileclass LIKE BINARY '%+files1+%') OR\" |\n    grep \"ENTRIES.fileclass LIKE BINARY '%+files2+%' OR ENTRIES.fileclass LIKE BINARY '%+files3+%'\" ||\n    error \"multirule_select query block incorrect\"\nelif [ $policy = migration ]; then\n    grep \"AS id FROM ENTRIES\" $logfile |\n    grep \"OR ENTRIES.invalid IS NULL) AND NOT (ENTRIES.fileclass LIKE\" |\n    grep -e \"OR ENTRIES.last_mod IS NULL)) OR (ENTRIES..*_lstarc=0\" |\n    grep \"OR (ENTRIES.fileclass LIKE BINARY 
'%+files1+%') OR\" |\n    grep \"ENTRIES.fileclass LIKE BINARY '%+files2+%' OR ENTRIES.fileclass LIKE BINARY '%+files3+%'\" ||\n    error \"multirule_select query block incorrect\"\nfi\n\n    grep \"Error 7 executing query\" $logfile > /dev/null &&\n    error \"multirule_select DB query failure\"\n\n    return 0\n}\n\n\nfunction test_rmdir_depth\n{\n    # test_rmdir_depth.conf \"check sql query for rmdir with depth condition\"\n\n    config_file=$1\n    logfile=rh_rmdir.log\n    rm -f $logfile\n\n    export MATCH_PATH=\"$RH_ROOT\"\n\n    # run one pass lhsm_archive - need full scan first\n    $RH -f $RBH_CFG_DIR/$config_file --scan --once 2>&1 > /dev/null\n    $RH -f $RBH_CFG_DIR/$config_file --run=rmdir_empty -L $logfile -l FULL -O \\\n        2>/dev/null\n\n    # make sure query succeeds\n\n    grep -q \"SELECT ENTRIES.id AS id FROM ENTRIES WHERE ENTRIES.type='dir'\" $logfile ||\n      error \"rmdir depth check DB query failure\"\n\n    grep \"Error 7 executing query\" $logfile > /dev/null &&\n      error \"rmdir depth check DB query failure\"\n\n    return 0\n}\n\nfunction test_prepost_cmd\n{\n    local config_file=$1\n    local output\n\n    clean_logs\n\n    # generate and export command line to the robinhood configuration\n    local testfile=$(mktemp)\n    export pre_command=\"./prepost_cmd.sh init $testfile ABC\"\n    export post_command=\"./prepost_cmd.sh append $testfile DEF\"\n\n    # scan and apply a purge policy\n    $RH -f $RBH_CFG_DIR/$config_file --scan --run=purge --target=all -l DEBUG \\\n        -L rh_purge.log --once  2>/dev/null\n\n    grep \"Executing pre_run_command\" rh_purge.log ||\n        error \"pre_run_command not executed\"\n    grep \"Executing post_run_command\" rh_purge.log ||\n        error \"post_run_command not executed\"\n\n    # check that pre/post run cmd have been run\n    output=\"$(head -n 1 $testfile)\"\n    [[ \"$output\" == \"ABC\" ]] ||\n        error \"Unexpected contents in test file: '$output' ('ABC' expected)\"\n    
output=\"$(tail -n 1 $testfile)\"\n    [[ \"$output\" == \"DEF\" ]] ||\n        error \"Unexpected contents in test file: '$output' ('DEF' expected)\"\n\n    # check that a faulty precommand aborts the run\n    export pre_command=\"FOO BAR\"\n\n    :> rh_purge.log\n    $RH -f $RBH_CFG_DIR/$config_file --run=purge --target=all -l DEBUG \\\n        -L rh_purge.log --once  2>/dev/null\n\n    grep \"Aborting policy run because pre_run_commmand failed\" rh_purge.log ||\n        error \"Policy run should have been aborted\"\n    grep \" 0 successful actions\" rh_purge.log ||\n        error \"No successful action expected\"\n\n    rm -f $testfile\n}\n\n\n#############################################################################\n\n\nonly_test=\"\"\nquiet=0\njunit=0\n\nwhile getopts qj o\ndo\tcase \"$o\" in\n\tq)\tquiet=1;;\n\tj)\tjunit=1;;\n\t[?])\tprint >&2 \"Usage: $0 [-q] [-j] test_nbr ...\"\n\t\texit 1;;\n\tesac\ndone\nshift $(($OPTIND-1))\n\nif [[ -n \"$1\" ]]; then\n\tonly_test=$1\n\n    # prepare only_test variable\n    # 1,2 => ,1,2,\n    only_test=\",$only_test,\"\nfi\n\n\n# initialize tmp files for XML report\nfunction junit_init\n{\n\tcp /dev/null $TMPXML_PREFIX.stderr\n\tcp /dev/null $TMPXML_PREFIX.stdout\n\tcp /dev/null $TMPXML_PREFIX.tc\n}\n\n# report a success for a test\nfunction junit_report_success # (class, test_name, time)\n{\n\tclass=\"$1\"\n\tname=\"$2\"\n\ttime=\"$3\"\n\n\t# remove quotes in name\n\tname=`echo \"$name\" | sed -e 's/\"//g'`\n\n\techo \"<testcase classname=\\\"$class\\\" name=\\\"$name\\\" time=\\\"$time\\\" />\" >> $TMPXML_PREFIX.tc\n}\n\n# report a failure for a test\nfunction junit_report_failure # (class, test_name, time, err_type)\n{\n\tclass=\"$1\"\n\tname=\"$2\"\n\ttime=\"$3\"\n\terr_type=\"$4\"\n\n\t# remove quotes in name\n\tname=`echo \"$name\" | sed -e 's/\"//g'`\n\n\techo \"<testcase classname=\\\"$class\\\" name=\\\"$name\\\" time=\\\"$time\\\">\" >> $TMPXML_PREFIX.tc\n\techo -n \"<failure 
type=\\\"$err_type\\\"><![CDATA[\" >> $TMPXML_PREFIX.tc\n\tcat $TMPERR_FILE\t>> $TMPXML_PREFIX.tc\n\techo \"]]></failure>\" \t>> $TMPXML_PREFIX.tc\n\techo \"</testcase>\" \t>> $TMPXML_PREFIX.tc\n}\n\nfunction junit_write_xml # (time, nb_failure, tests)\n{\n\ttime=$1\n\tfailure=$2\n\ttests=$3\n\n\tcp /dev/null $XML\n\n\techo \"<?xml version=\\\"1.0\\\" encoding=\\\"ISO8859-2\\\" ?>\" > $XML\n\techo \"<testsuite name=\\\"robinhood.LustreTests\\\" errors=\\\"0\\\" failures=\\\"$failure\\\" tests=\\\"$tests\\\" time=\\\"$time\\\">\" >> $XML\n\tsed 's/[^[:print:]]//g' $TMPXML_PREFIX.tc >> $XML\n\techo -n \"<system-out><![CDATA[\" >> $XML\n\tsed 's/[^[:print:]]//g' $TMPXML_PREFIX.stdout >> $XML\n\techo \"]]></system-out>\"\t\t>> $XML\n\techo -n \"<system-err><![CDATA[\" >> $XML\n\tsed 's/[^[:print:]]//g' $TMPXML_PREFIX.stderr >> $XML\n\techo \"]]></system-err>\" \t>> $XML\n\techo \"</testsuite>\"\t\t>> $XML\n}\n\n\nfunction cleanup\n{\n\techo \"Filesystem cleanup...\"\n    if (( $quiet == 1 )); then\n            clean_fs | tee \"rh_test.log\" | egrep -i -e \"OK|ERR|Fail|skip|pass\"\n    else\n            clean_fs\n    fi\n\n    wait_low_usage 10\n}\n\nfunction run_test\n{\n    export index=$1\n    # last argument\n    title=${!#}\n\n\tshift\n\n\tindex_clean=`echo $index | sed -e 's/[a-z]//'`\n\n    if [[ -z \"$only_test\" || $only_test = *\",$index_clean,\"* || $only_test = *\",$index,\"* ]]; then\n\t\tcleanup\n\t\techo\n\t\techo \"==== TEST #$index $2 ($title) ====\"\n\n\t\terror_reset\n\n\t\tt0=`date \"+%s.%N\"`\n        echo \"Test start: `date +'%F %H:%M:%S.%N'`\"\n\n\t\tif (($junit == 1)); then\n\t\t\t# markup in log\n\t\t\techo \"==== TEST #$index $2 ($title) ====\" >> $TMPXML_PREFIX.stdout\n\t\t\techo \"==== TEST #$index $2 ($title) ====\" >> $TMPXML_PREFIX.stderr\n\t\t\t\"$@\" 2>> $TMPXML_PREFIX.stderr >> $TMPXML_PREFIX.stdout\n\t\telif (( $quiet == 1 )); then\n\t\t\t\"$@\" 2>&1 > rh_test.log\n\t\t\tegrep -i -e \"OK|ERR|Fail|skip|pass\" 
rh_test.log\n\t\telse\n\t\t\t\"$@\"\n\t\tfi\n\n\t\tt1=`date \"+%s.%N\"`\n\t\tdur=`echo \"($t1-$t0)\" | bc -l`\n        echo \"Test end: `date +'%F %H:%M:%S.%N'`\"\n\t\techo \"duration: $dur sec\"\n\n\t\tif (( $DO_SKIP )); then\n\t\t\techo \"(TEST #$index : skipped)\" >> $SUMMARY\n\t\t\tSKIP=$(($SKIP+1))\n\t\telif (( $NB_ERROR > 0 )); then\n\t\t\tgrep \"Failed\" ${LOGS[*]} 2>/dev/null\n\t\t\techo \"TEST #$index : *FAILED*\" >> $SUMMARY\n\t\t\tRC=$(($RC+1))\n\t\t\tif (( $junit )); then\n\t\t\t\tjunit_report_failure \"robinhood.$PURPOSE.Lustre\" \"Test #$index: $title\" \"$dur\" \"ERROR\"\n\t\t\tfi\n\t\telse\n\t\t\tgrep \"Failed\" ${LOGS[*]} 2>/dev/null\n\t\t\techo \"TEST #$index : OK\" >> $SUMMARY\n\t\t\tSUCCESS=$(($SUCCESS+1))\n\t\t\tif (( $junit )); then\n\t\t\t\tjunit_report_success \"robinhood.$PURPOSE.Lustre\" \"Test #$index: $title\" \"$dur\"\n\t\t\tfi\n\t\tfi\n\tfi\n}\n\n\n\n###############################################\n############### Alert Functions ###############\n###############################################\n\nfunction test_alerts\n{\n\t# send an alert in accordance to the input file and configuration\n\t# \ttest_alerts config_file testKey sleepTime\n\t#=>\n\t# config_file == config file name\n\t# testKey == 'extAttributes' for testing extended attributes\n\t# \t     'lastAccess' for testing last access\n\t# \t     'lastModif' for testing last modification\n\t# sleepTime == expected time in second to sleep for the test, if=0 no sleep\n\n\t# get input parameters ....................\n\tconfig_file=$1\n\ttestKey=$2  #== key word for specific tests\n\tsleepTime=$3\n\n\tclean_logs\n\n\ttest -f \"/tmp/rh_alert.log\" || touch \"/tmp/rh_alert.log\"\n\n\techo \"1-Preparing Filesystem...\"\n\tif [ $testKey == \"extended_attribute\" ]; then\n\t\techo \" is for extended attributes\"\n\t\techo \"data\" > $RH_ROOT/file.1\n\t\techo \"data\" > $RH_ROOT/file.2\n\t\techo \"data\" > $RH_ROOT/file.3\n\t\techo \"data\" > $RH_ROOT/file.4\n\t\tsetfattr -n user.foo -v 
\"abc.1.log\" $RH_ROOT/file.1\n\t\tsetfattr -n user.foo -v \"abc.6.log\" $RH_ROOT/file.3\n\t\tsetfattr -n user.bar -v \"abc.3.log\" $RH_ROOT/file.4\n\telse\n\t\tmkdir -p $RH_ROOT/dir1\n\t\tdd if=/dev/zero of=$RH_ROOT/dir1/file.1 bs=1k count=11 >/dev/null 2>/dev/null || error \"writing file.1\"\n\n\t\tmkdir -p $RH_ROOT/dir2\n\t\tdd if=/dev/zero of=$RH_ROOT/dir2/file.2 bs=1k count=10 >/dev/null 2>/dev/null || error \"writing file.2\"\n  \t\tchown testuser $RH_ROOT/dir2/file.2 || error \"invalid chown on user 'testuser' for $RH_ROOT/dir2/file.2\"\n\t\tdd if=/dev/zero of=$RH_ROOT/dir2/file.3 bs=1k count=1 >/dev/null 2>/dev/null || error \"writing file.3\"\n\t\tln -s $RH_ROOT/dir1/file.1 $RH_ROOT/dir1/link.1 || error \"creating hardlink $RH_ROOT/dir1/link.1\"\n\n\t\tif  [ $testKey == \"nonempty_dir\" ]; then\n\t\t\t# add a folder with one file\n\t\t\tmkdir -p $RH_ROOT/dir3\n\t\t    dd if=/dev/zero of=$RH_ROOT/dir3/file.4 bs=1k count=1 >/dev/null 2>/dev/null || error \"writing file.4\"\n\t\tfi\n\tfi\n\t# optional sleep process ......................\n\tif [ $sleepTime != 0 ]; then\n\t\techo \"wait $sleepTime seconds ...\"\n\t\tsleep $sleepTime\n\tfi\n\t# specific optional action after sleep process ..........\n\tif [ $testKey == \"last_access_5s\" ]; then\n\t\thead $RH_ROOT/dir1/file.1 > /dev/null || error \"opening $RH_ROOT/dir1/file.1\"\n\telif [ $testKey == \"last_mod_5s\" ]; then\n\t\techo \"data\" > $RH_ROOT/dir1/file.1 || error \"writing in $RH_ROOT/dir1/file.1\"\n\tfi\n\n    export ALERT_CLASS=$testKey\n\n\techo \"2-Scanning filesystem...\"\n\t$RH -f $RBH_CFG_DIR/$config_file --scan --run=alert -l MAJOR --once || error \"scan+alert error\"\n\n\techo \"3-Checking results...\"\n\tlogFile=/tmp/rh_alert.log\n\tcase \"$testKey\" in\n\t\tfile1)\n\t\t\texpectedEntry=\"file.1 
\"\n\t\t\toccur=1\n\t\t\t;;\n\t\ttype_file)\n\t\t\texpectedEntry=\"file.1;file.2;file.3\"\n\t\t\toccur=3\n\t\t\t;;\n\t\troot_owner)\n\t\t\texpectedEntry=\"file.1;file.3\"\n\t\t\toccur=2\n\t\t\t;;\n\t\tsize10k)\n\t\t\texpectedEntry=\"file.1;file.2\"\n\t\t\toccur=2\n\t\t\t;;\n\t\tlast_access_5s)\n\t\t\texpectedEntry=\"file.1 \"\n\t\t\toccur=1\n\t\t\t;;\n\t\tlast_mod_5s)\n\t\t\texpectedEntry=\"file.1 \"\n\t\t\toccur=1\n\t\t\t;;\n\t\tnonempty_dir)\n\t\t\texpectedEntry=\"dir1;dir2\"\n\t\t\toccur=2\n\t\t\t;;\n\t\textended_attribute)\n\t\t\texpectedEntry=\"file.1\"\n\t\t\toccur=1\n\t\t\t;;\n\t\t*)\n\t\t\terror \"unexpected testKey $testKey\"\n\t\t\treturn 1 ;;\n\tesac\n\n\t# launch the validation for all alerts\n\tcheck_alert $testKey $expectedEntry $occur $logFile\n\tres=$?\n\n\tif (( $res == 1 )); then\n\t\terror \"Test for $testKey failed\"\n\tfi\n\n\techo \"end....\"\n}\n\nfunction test_alerts_OST\n{\n\t# send an alert in accordance to the input file and configuration\n\t# \ttest_alerts_OST config_file\n\t#=>\n\t# config_file == config file name\n\n\t# get input parameters ....................\n\tconfig_file=$1\n\ttestKey=$2  #== key word for specific tests\n\n    if [ -n \"$POSIX_MODE\" ]; then\n        echo \"No OST support for POSIX mode\"\n        set_skipped\n        return 1\n    fi\n\n\tclean_logs\n\n\techo \"1-Create Pools ...\"\n\tcreate_pools\n\n\techo \"2-Create Files ...\"\n    for i in `seq 1 2`; do\n\t\t$LFS setstripe  -p lustre.$POOL1 $RH_ROOT/file.$i -c 1 >/dev/null 2>/dev/null\n\tdone\n\n    for i in `seq 3 5`; do\n\t\t$LFS setstripe  -p lustre.$POOL2 $RH_ROOT/file.$i -c 1 >/dev/null 2>/dev/null\n\tdone\n\n    export ALERT_CLASS=$testKey\n\n\techo \"2-Scanning filesystem...\"\n\t$RH -f $RBH_CFG_DIR/$config_file --scan --run=alert -l MAJOR -I --once || error \"scan+alert error\"\n\n\techo \"3-Checking results...\"\n\tlogFile=/tmp/rh_alert.log\n\texpectedEntry=\"file.3;file.4;file.5\"\n\toccur=3\n\n\t# launch the validation for all alerts\n\tcheck_alert 
$testKey $expectedEntry $occur $logFile\n\tres=$?\n\n\tif (( $res == 1 )); then\n\t\terror \"Test for $testKey failed\"\n\tfi\n}\n\nfunction check_alert\n{\n# return 0 if the $alertKey is found $occur times in the log $logFile; and if each entry of\n# $expectedEntries is found at least one time\n# return 1 otherwise and print an error message\n#    check_alert $alertKey $expectedEntry $occur $logFile\n# =>\n#\talertKey = alert name which is the string to find $occur times\n#\texpectedEntries = list of word to find at least one time if alertKey is found\n#\t\tex: expectedEntry=\"file.1;file.2;file.3\", expectedEntry=\"file.1\" ...\n#\toccur = expected nb of occurrences for alertKey\n#\tlogFile = name of the file to scan\n\n\t# get input parameters ......................\n\talertKey=$1\n\texpectedEntries=$2\n\toccur=$3\n\tlogFile=$4\n\n\t# set default output value .................\n\tout=1\n\t# get all entries separated by ';' ..........\n\tsplitEntries=$(echo $expectedEntries | tr \";\" \"\\n\")\n\n\t# get the nb of alertKey found in log ........\n\tnbOccur=`grep -c $alertKey $logFile`\n\tif [ $nbOccur == $occur ]; then\n\t\t# search the appropriated filename ...\n\t\tfor entry in $splitEntries\n    \t\tdo\n\t\t\t#  get the nb of filename found in log\n       \t\t\tnbOccur=`grep -c $entry $logFile`\n\t\t\tif [ $nbOccur != 0 ]; then\n\t\t\t\tout=0\n\t\t\telse\n\t\t\t\t# the entry has been not found\n\t\t\t\techo \"ERROR in check_alert: Entry $entry not found\"\n\t\t\t\treturn 1\n\t\t\tfi\n    \t\tdone\n\n\telse\n\t\t# the alertKey has been not found as expected\n\t\techo \"ERROR in check_alert: Bad number of occurrences for $alertKey: expected=$occur & found=$nbOccur\"\n\t\treturn 1\n\tfi\n\n\treturn $out\n}\n\n###################################################\n############### End Alert Functions ###############\n###################################################\n\n###################################################\n############### Migration Functions 
###############\n###################################################\n\nfunction create_files_migration\n{\n\t# create all directory and files for migration tests\n\t#  create_files_migration\n\n\tmkdir $RH_ROOT/dir1\n\tmkdir $RH_ROOT/dir2\n\n\tfor i in `seq 1 5` ; do\n\t\tdd if=/dev/zero of=$RH_ROOT/dir1/file.$i bs=1K count=1 >/dev/null 2>/dev/null || error \"writing dir1/file.$i\"\n\tdone\n\n\tln -s $RH_ROOT/dir1/file.1 $RH_ROOT/dir1/link.1\n\tln -s $RH_ROOT/dir1/file.1 $RH_ROOT/dir1/link.2\n\n\tchown root:testgroup $RH_ROOT/dir1/file.2\n\tchown testuser:testgroup $RH_ROOT/dir1/file.3\n\n\tsetfattr -n user.foo -v 1 $RH_ROOT/dir1/file.4\n\tsetfattr -n user.bar -v 1 $RH_ROOT/dir1/file.5\n\n\tdd if=/dev/zero of=$RH_ROOT/dir2/file.6 bs=1K count=10 >/dev/null 2>/dev/null || error \"writing dir2/file.6\"\n\tdd if=/dev/zero of=$RH_ROOT/dir2/file.7 bs=1K count=11 >/dev/null 2>/dev/null || error \"writing dir2/file.7\"\n\tdd if=/dev/zero of=$RH_ROOT/dir2/file.8 bs=1K count=1 >/dev/null 2>/dev/null || error \"writing dir2/file.8\"\n}\n\nfunction update_files_migration\n{\n\t# Update several files for migration tests\n\t# \tupdate_files_migration\n\n    for i in `seq 1 500`; do\n\t\techo \"aaaaaaaaaaaaaaaaaaaa\" >> $RH_ROOT/dir2/file.8\n\tdone\n    dd if=/dev/zero of=$RH_ROOT/dir2/file.9 bs=1K count=1 >/dev/null 2>/dev/null || error \"writing dir2/file.9\"\n}\n\n# return an error count\nfunction check_migrate_arr\n{\n    errors=0\n\n    for n in $*\n    do\n        (( $is_lhsm > 0 )) && [[ $n = *\"link\"* ]] && continue\n\n        # lustre/HSM: search in backend by fid\n        if (( $is_lhsm > 0 )); then\n            x=$(find $RH_ROOT -name \"$n\" | xargs -n 1 -r $LFS path2fid | tr -d '[]')\n        else\n            x=\"$n\"\n        fi\n\n        [ \"$DEBUG\" = \"1\" ] && ls -R $BKROOT | grep $x\n\n        countMigrFile=`ls -R $BKROOT | grep $x | wc -l`\n        if (($countMigrFile == 0)); then\n            error \"********** TEST FAILED (File System): $x is not 
archived\"\n            ((errors++))\n\t    fi\n\n        [ \"$DEBUG\" = \"1\" ] && grep \"$ARCH_STR\" rh_migr.log | grep \"$n\"\n\n        countMigrLog=`grep \"$ARCH_STR\" rh_migr.log | grep \"$n\" | wc -l`\n        if (($countMigrLog == 0)); then\n            error \"********** TEST FAILED (Log): $n is not archived\"\n            ((errors++))\n\t    fi\n    done\n    return $errors\n}\n\n\nfunction test_migration\n{\n\t# Realise a unit test for migration functionalities\n\t# \ttest_migration config_file sleepTime countFinal migrate_list migrOpt\n\t#=>\n\t# config_file == config file name\n\t# sleepTime == expected time in second to sleep for the test, if=0 no sleep and no update\n\t# countFinal == number of files migrated at the end\n\t# migrate_list == list of migrated files at the end : \"file.1;file.2;link.2\"\n\t# migrOpt == an migrate option of robinhood : \"--migrate\" \"--migrate-ost=1\"\n\n    config_file=$1\n    sleep_time=$2\n    countFinal=$3\n\tmigrate_list=$4\n    migrate_arr=$(echo $migrate_list | tr \";\" \"\\n\")\n    migrOpt=$5\n\n    if (( ($is_hsmlite == 0) && ($is_lhsm == 0) )); then\n\t\techo \"No Migration for this purpose: skipped\"\n\t\tset_skipped\n\t\treturn 1\n\tfi\n\n\tclean_logs\n\n\techo \"Create Files ...\"\n\tcreate_files_migration\n\n\tif(($sleep_time != 0)); then\n\t    echo \"Sleep $sleep_time\"\n        sleep $sleep_time\n\n\t    echo \"update Files\"\n        update_files_migration\n    fi\n\n\techo \"Reading changelogs and Applying migration policy...\"\n\t$RH -f $RBH_CFG_DIR/$config_file --scan $migrOpt -l DEBUG -L rh_migr.log --once\n    (( $is_lhsm > 0 )) && wait_done 60\n\n    # no symlink archiving for Lustre/HSM\n    if (( $is_lhsm > 0 )); then\n        for x in $migrate_arr; do\n            if [[ $x = *\"link\"* ]]; then\n                ((countFinal=$countFinal-1))\n            fi\n        done\n    fi\n\n    countFile=`find $BKROOT -type f -not -name \"*.lov\" | wc -l`\n    countLink=`find $BKROOT -type l -not -name 
\"*.lov\" | wc -l`\n    count=$(($countFile+$countLink))\n    if (($count != $countFinal)); then\n        error \"********** TEST FAILED (File System): $count files migrated ($countFile files, $countLink symlinks), but $countFinal expected\"\n    fi\n\n    check_migrate_arr $migrate_arr\n    nbError=$?\n\n    if (($nbError == 0 )); then\n        echo \"OK: test successful\"\n    else\n        error \"********** TEST FAILED **********\"\n    fi\n}\n\nfunction migration_file_type\n{\n\t# Realise a unit test for migration functionalities based on file type\n\t# \tmigration_file_type config_file sleepTime countFinal migrate_list\n\t#=>\n\t# config_file == config file name\n\t# sleepTime == expected time in second to sleep for the test, if=0 no sleep and no update\n\t# countFinal == number of files migrated at the end\n\t# migrate_list == list of migrated files at the end : \"file.1;file.2;link.2\"\n\n    config_file=$1\n    sleep_time=$2\n    countFinal=$3\n\tmigrate_list=$4\n    migrate_arr=$(echo $migrate_list | tr \";\" \"\\n\")\n\n    if (( $is_hsmlite == 0 )); then\n\t\techo \"No symlink migration for this purpose: skipped\"\n\t\tset_skipped\n\t\treturn 1\n\tfi\n\n\tclean_logs\n\n\techo \"Create Files ...\"\n\tcreate_files_migration\n\n\tif(($sleep_time != 0)); then\n\t    echo \"Sleep $sleep_time\"\n        sleep $sleep_time\n\n\t    echo \"update Files\"\n        update_files_migration\n    fi\n\n\techo \"Reading changelogs and Applying migration policy...\"\n\t$RH -f $RBH_CFG_DIR/$config_file --scan --run=migration --target=file:$RH_ROOT/dir1/link.1 -l DEBUG -L rh_migr.log\n\n    nbError=0\n    countFile=`find $BKROOT -type f -not -name \"*.lov\" | wc -l`\n    countLink=`find $BKROOT -type l -not -name \"*.lov\" | wc -l`\n    count=$(($countFile+$countLink))\n    if (($count != $countFinal)); then\n        error \"********** TEST FAILED (File System): $count files migrated, but $countFinal expected\"\n            ((nbError++))\n    fi\n\n    check_migrate_arr 
$migrate_arr\n    ((nbError+=$?))\n\n\techo \"Applying migration policy...\"\n\t$RH -f $RBH_CFG_DIR/$config_file --run=migration --target=file:$RH_ROOT/dir1/file.1 -l DEBUG -L rh_migr.log\n    (( $is_lhsm > 0 )) && wait_done 10\n\n    countFile=`find $BKROOT -type f -not -name \"*.lov\" | wc -l`\n    countLink=`find $BKROOT -type l -not -name \"*.lov\" | wc -l`\n    count=$(($countFile+$countLink))\n    if (($count != $countFinal)); then\n        error \"********** TEST FAILED (File System): $count files migrated, but $countFinal expected\"\n            ((nbError++))\n    fi\n\n    check_migrate_arr $migrate_arr\n    ((nbError+=$?))\n\n    if (($nbError == 0 )); then\n        echo \"OK: test successful\"\n    else\n        error \"********** TEST FAILED **********\"\n    fi\n}\n\nfunction migration_file_owner\n{\n\t# Realise a unit test for migration functionalities based on file owner\n\t# \tmigration_file_owner config_file sleepTime countFinal migrate_list\n\t#=>\n\t# config_file == config file name\n\t# sleepTime == expected time in second to sleep for the test, if=0 no sleep and no update\n\t# countFinal == number of files migrated at the end\n\t# migrate_list == list of migrated files at the end : \"file.1;file.2;link.2\"\n\n    config_file=$1\n    sleep_time=$2\n    countFinal=$3\n\tmigrate_list=$4\n    migrate_arr=$(echo $migrate_list | tr \";\" \"\\n\")\n\n    if (( ($is_hsmlite == 0) && ($is_lhsm == 0) )); then\n\t\techo \"No Migration for this purpose: skipped\"\n\t\tset_skipped\n\t\treturn 1\n\tfi\n\n\tclean_logs\n\n\techo \"Create Files ...\"\n\tcreate_files_migration\n\n\tif(($sleep_time != 0)); then\n\t    echo \"Sleep $sleep_time\"\n        sleep $sleep_time\n\n\t    echo \"update Files\"\n        update_files_migration\n    fi\n\n\techo \"Reading changelogs and Applying migration policy...\"\n\t$RH -f $RBH_CFG_DIR/$config_file --scan --run=migration --target=file:$RH_ROOT/dir1/file.1 -l DEBUG -L rh_migr.log\n    (( $is_lhsm > 0 )) && wait_done 
10\n\n    nbError=0\n    countFile=`find $BKROOT -type f -not -name \"*.lov\" | wc -l`\n    countLink=`find $BKROOT -type l -not -name \"*.lov\" | wc -l`\n    count=$(($countFile+$countLink))\n    if (($count != 0)); then\n        error \"********** TEST FAILED (File System): $count files migrated, but 0 expected\"\n            ((nbError++))\n    fi\n\n\techo \"Applying migration policy...\"\n\t$RH -f $RBH_CFG_DIR/$config_file --run=migration --target=file:$RH_ROOT/dir1/file.3 -l DEBUG -L rh_migr.log\n    (( $is_lhsm > 0 )) && wait_done 10\n\n    countFile=`find $BKROOT -type f -not -name \"*.lov\" | wc -l`\n    countLink=`find $BKROOT -type l -not -name \"*.lov\" | wc -l`\n    count=$(($countFile+$countLink))\n    if (($count != $countFinal)); then\n        error \"********** TEST FAILED (File System): $count files migrated, but $countFinal expected\"\n            ((nbError++))\n    fi\n\n    check_migrate_arr $migrate_arr\n    ((nbError+=$?))\n\n    if (($nbError == 0 )); then\n        echo \"OK: test successful\"\n    else\n        error \"********** TEST FAILED **********\"\n    fi\n}\n\nfunction migration_file_Last\n{\n\t# Realise a unit test for migration functionalities based on file last acces/modification\n\t# \tmigration_file_Last config_file sleepTime countFinal migrate_list\n\t#=>\n\t# config_file == config file name\n\t# sleepTime == expected time in second to sleep for the test, if=0 no sleep and no update\n\t# countFinal == number of files migrated at the end\n\t# migrate_list == list of migrated files at the end : \"file.1;file.2;link.2\"\n\n    config_file=$1\n    sleep_time=$2\n    countFinal=$3\n\tmigrate_list=$4\n    migrate_arr=$(echo $migrate_list | tr \";\" \"\\n\")\n\n    if (( ($is_hsmlite == 0) && ($is_lhsm == 0) )); then\n\t\techo \"No Migration for this purpose: skipped\"\n\t\tset_skipped\n\t\treturn 1\n\tfi\n\n\tclean_logs\n\n\techo \"Create Files ...\"\n\tcreate_files_migration\n\n\techo \"Reading changelogs and Applying migration 
policy...\"\n\t$RH -f $RBH_CFG_DIR/$config_file --scan --run=migration --target=file:$RH_ROOT/dir1/file.1 -l DEBUG -L rh_migr.log\n    (( $is_lhsm > 0 )) && wait_done 10\n\n\tnbError=0\n    countFile=`find $BKROOT -type f -not -name \"*.lov\" | wc -l`\n    countLink=`find $BKROOT -type l -not -name \"*.lov\" | wc -l`\n    count=$(($countFile+$countLink))\n    if (($count != 0)); then\n        error \"********** TEST FAILED (File System): $count files migrated, but 0 expected\"\n            ((nbError++))\n    fi\n\n\tif(($sleep_time != 0)); then\n\t    echo \"Sleep $sleep_time\"\n        sleep $sleep_time\n\n\t    echo \"update Files\"\n        update_files_migration\n    fi\n\n\techo \"Applying migration policy...\"\n\t$RH -f $RBH_CFG_DIR/$config_file --run=migration --target=file:$RH_ROOT/dir1/file.1 -l DEBUG -L rh_migr.log\n    (( $is_lhsm > 0 )) && wait_done 10\n\n    countFile=`find $BKROOT -type f -not -name \"*.lov\" | wc -l`\n    countLink=`find $BKROOT -type l -not -name \"*.lov\" | wc -l`\n    count=$(($countFile+$countLink))\n    if (($count != $countFinal)); then\n        error \"********** TEST FAILED (File System): $count files migrated, but $countFinal expected\"\n            ((nbError++))\n    fi\n\n    check_migrate_arr $migrate_arr\n    ((nbError+=$?))\n\n    if (($nbError == 0 )); then\n        echo \"OK: test successful\"\n    else\n        error \"********** TEST FAILED **********\"\n    fi\n}\n\nfunction migration_file_ExtendedAttribut\n{\n\t# Realise a unit test for migration functionalities based on Extended Attribut\n\t# \tmigration_file_ExtendedAttribut config_file sleepTime countFinal migrate_list\n\t#=>\n\t# config_file == config file name\n\t# sleepTime == expected time in second to sleep for the test, if=0 no sleep and no update\n\t# countFinal == number of files migrated at the end\n\t# migrate_list == list of migrated files at the end : \"file.1;file.2;link.2\"\n\n    config_file=$1\n    sleep_time=$2\n    
countFinal=$3\n\tmigrate_list=$4\n    migrate_arr=$(echo $migrate_list | tr \";\" \"\\n\")\n\n    if (( ($is_hsmlite == 0) && ($is_lhsm == 0) )); then\n\t\techo \"No Migration for this purpose: skipped\"\n\t\tset_skipped\n\t\treturn 1\n\tfi\n\n\tclean_logs\n\n\techo \"Create Files ...\"\n\tcreate_files_migration\n\n\techo \"Reading changelogs and Applying migration policy...\"\n\t$RH -f $RBH_CFG_DIR/$config_file --scan --run=migration --target=file:$RH_ROOT/dir1/file.4 -l DEBUG -L rh_migr.log\n    (( $is_lhsm > 0 )) && wait_done 10\n\n\tnbError=0\n    countFile=`find $BKROOT -type f -not -name \"*.lov\" | wc -l`\n    countLink=`find $BKROOT -type l -not -name \"*.lov\" | wc -l`\n    count=$(($countFile+$countLink))\n    if (($count != $countFinal)); then\n        error \"********** TEST FAILED (File System): $count files migrated, but $countFinal expected\"\n       ((nbError++))\n    fi\n\n\techo \"Applying migration policy...\"\n\t$RH -f $RBH_CFG_DIR/$config_file --scan --run=migration --target=file:$RH_ROOT/dir1/file.5 -l DEBUG -L rh_migr.log\n    (( $is_lhsm > 0 )) && wait_done 10\n\n    countFile=`find $BKROOT -type f -not -name \"*.lov\" | wc -l`\n    countLink=`find $BKROOT -type l -not -name \"*.lov\" | wc -l`\n    count=$(($countFile+$countLink))\n    if (($count != $countFinal)); then\n        error \"********** TEST FAILED (File System): $count files migrated, but $countFinal expected\"\n       ((nbError++))\n    fi\n\n\techo \"Applying migration policy...\"\n\t$RH -f $RBH_CFG_DIR/$config_file --scan --run=migration --target=file:$RH_ROOT/dir1/file.1 -l DEBUG -L rh_migr.log\n    (( $is_lhsm > 0 )) && wait_done 10\n\n    countFile=`find $BKROOT -type f -not -name \"*.lov\" | wc -l`\n    countLink=`find $BKROOT -type l -not -name \"*.lov\" | wc -l`\n    count=$(($countFile+$countLink))\n    if (($count != $countFinal)); then\n        error \"********** TEST FAILED (File System): $count files migrated, but $countFinal expected\"\n       ((nbError++))\n    
fi\n\n    check_migrate_arr $migrate_arr\n    ((nbError+=$?))\n\n    if (($nbError == 0 )); then\n        echo \"OK: test successful\"\n    else\n        error \"********** TEST FAILED **********\"\n    fi\n}\n\nfunction migration_OST\n{\n\t# Realise a unit test for migration functionalities for OST filesystem (Lustre)\n\t# \tmigration_OST config_file countFinal migrate_list migrOpt\n\t#=>\n\t# config_file == config file name\n\t# countFinal == number of files migrated at the end\n\t# migrate_list == list of migrated files at the end : \"file.1;file.2;link.2\"\n\t# migrOpt == an migrate option of robinhood : \"--migrate\" \"--migrate-ost=1\"\n\n\tconfig_file=$1\n    countFinal=$2\n\tmigrate_list=$3\n    migrate_arr=$(echo $migrate_list | tr \";\" \"\\n\")\n    migrOpt=$4\n\n    if (( ($is_hsmlite == 0) && ($is_lhsm == 0) )); then\n\t\techo \"No Migration for this purpose: skipped\"\n\t\tset_skipped\n\t\treturn 1\n\tfi\n\n\tclean_logs\n\n\techo \"1-Create Pools ...\"\n\tcreate_pools\n\n\techo \"2-Create Files ...\"\n    for i in `seq 1 2`; do\n\t\t$LFS setstripe  -p lustre.$POOL1 $RH_ROOT/file.$i -c 1 >/dev/null 2>/dev/null\n\tdone\n\n    for i in `seq 3 4`; do\n\t\t$LFS setstripe  -p lustre.$POOL2 $RH_ROOT/file.$i -c 1 >/dev/null 2>/dev/null\n\tdone\n\n\techo \"Applying migration policy...\"\n\t$RH -f $RBH_CFG_DIR/$config_file --scan $migrOpt -l DEBUG -L rh_migr.log --once\n    (( $is_lhsm > 0 )) && wait_done 60\n    nbError=0\n    countFile=`find $BKROOT -type f -not -name \"*.lov\" | wc -l`\n    countLink=`find $BKROOT -type l -not -name \"*.lov\" | wc -l`\n    count=$(($countFile+$countLink))\n    if (($count != $countFinal)); then\n        error \"********** TEST FAILED (File System): $count files migrated, but $countFinal expected\"\n        ((nbError++))\n    fi\n\n    check_migrate_arr $migrate_arr\n    ((nbError+=$?))\n\n\techo $nbError\n    if (($nbError == 0 )); then\n        echo \"OK: test successful\"\n    else\n        error \"********** TEST FAILED 
**********\"\n    fi\n}\n\nfunction migration_file_OST\n{\n\t# Realise a unit test for migration functionalities for OST filesystem (Lustre) based on file policy\n\t# \tmigration_file_OST config_file countFinal migrate_list\n\t#=>\n\t# config_file == config file name\n\t# countFinal == number of files migrated at the end\n\t# migrate_list == list of migrated files at the end : \"file.1;file.2;link.2\"\n\n\tconfig_file=$1\n    countFinal=$2\n\tmigrate_list=$3\n    migrate_arr=$(echo $migrate_list | tr \";\" \"\\n\")\n\n    if (( ($is_hsmlite == 0) && ($is_lhsm == 0) )); then\n\t\techo \"No Migration for this purpose: skipped\"\n\t\tset_skipped\n\t\treturn 1\n\tfi\n\n\tclean_logs\n\n\techo \"1-Create Pools ...\"\n\tcreate_pools\n\n\techo \"2-Create Files ...\"\n    for i in `seq 1 2`; do\n\t\t$LFS setstripe  -p lustre.$POOL1 $RH_ROOT/file.$i -c 1 >/dev/null 2>/dev/null\n\tdone\n\n    for i in `seq 3 4`; do\n\t\t$LFS setstripe  -p lustre.$POOL2 $RH_ROOT/file.$i -c 1 >/dev/null 2>/dev/null\n\tdone\n\n\techo \"3-Reading changelogs and Applying migration policy...\"\n\t$RH -f $RBH_CFG_DIR/$config_file --scan --run=migration --target=file:$RH_ROOT/file.2 -l DEBUG -L rh_migr.log\n    (( $is_lhsm > 0 )) && wait_done 10\n\n    nbError=0\n    countFile=`find $BKROOT -type f -not -name \"*.lov\" | wc -l`\n    countLink=`find $BKROOT -type l -not -name \"*.lov\" | wc -l`\n    count=$(($countFile+$countLink))\n    if (($count != 0)); then\n        error \"********** TEST FAILED (File System): $count files migrated, but 0 expected\"\n        ((nbError++))\n    fi\n\n\techo \"Applying migration policy...\"\n\t$RH -f $RBH_CFG_DIR/$config_file --scan --run=migration --target=file:$RH_ROOT/file.3 -l DEBUG -L rh_migr.log\n    (( $is_lhsm > 0 )) && wait_done 10\n\n    countFile=`find $BKROOT -type f -not -name \"*.lov\" | wc -l`\n    countLink=`find $BKROOT -type l -not -name \"*.lov\" | wc -l`\n    count=$(($countFile+$countLink))\n    if (($count != $countFinal)); then\n        error 
\"********** TEST FAILED (File System): $count files migrated, but $countFinal expected\"\n        ((nbError++))\n    fi\n\n    check_migrate_arr $migrate_arr\n    ((nbError+=$?))\n\n    if (($nbError == 0 )); then\n        echo \"OK: test successful\"\n    else\n        error \"********** TEST FAILED **********\"\n    fi\n}\n\n###################################################\n############# End Migration Functions #############\n###################################################\n\nfunction fs_usage\n{\n    df -P \"$RH_ROOT\" | tail -n 1 | awk '{ print $(NF-1) }' | tr -d '%'\n}\n\nfunction inode_usage\n{\n\tdf -i $RH_ROOT/ | grep \"$RH_ROOT\" | xargs | awk '{print $(NF-3)}'\n}\n\n###########################################################\n############### Purge Trigger Functions ###################\n###########################################################\n\nfunction trigger_purge_QUOTA_EXCEEDED\n{\n\t# Function to test the trigger system when a quota is exceeded\n\t# \ttrigger_purge_QUOTA_EXCEEDED config_file\n\t#=>\n\t# config_file == config file name\n\n\tconfig_file=$1\n\n\tclean_logs\n\n    if [ -z \"$POSIX_MODE\" ]; then\n        $LFS setstripe -c 2 $RH_ROOT || echo \"error setting stripe count=2\"\n    fi\n    elem=$(fs_usage)\n\n\techo \"1-Create Files ...\"\n\tlimit=80\n    limit_init=$limit\n\tindice=1\n    while [ $elem -lt $limit ]\n    do\n        # write 2M to fulfill 2 stripes\n        dd if=/dev/zero of=$RH_ROOT/file.$indice bs=2M count=1 conv=sync\n        rc=$?\n        if (( $rc != 0 )); then\n            lfs df -h\n            df -h\n            echo \"WARNING: failed to write $RH_ROOT/file.$indice $rc\"\n            # give it a chance to end the loop\n            ((limit=$limit-1))\n        else\n            # reinitialize the limit on success\n            limit=$limit_init\n        fi\n\n        elem=$(fs_usage)\n        ((indice++))\n    done\n    lfs df -h\n    df -h\n\n    # wait df update\n    wait_high_usage 75\n\n    echo 
\"2-Reading changelogs and Applying purge trigger policy...\"\n\t$RH -f $RBH_CFG_DIR/$config_file --scan --check-thresholds=purge -l DEBUG -L rh_purge.log --once\n\n    cat rh_purge.log >> $TMPERR_FILE\n    countMigrLog=`grep \"High threshold reached on Filesystem\" rh_purge.log | wc -l`\n    if (($countMigrLog == 0)); then\n        error \"********** TEST FAILED **********\"\n    else\n        echo \"OK: test successful\"\n    fi\n}\n\nfunction trigger_purge_OST_QUOTA_EXCEEDED\n{\n\t# Function to test the trigger system when a quota is exceeded in OST filesytem (Lustre)\n\t# \ttrigger_purge_OST_QUOTA_EXCEEDED config_file\n\t#=>\n\t# config_file == config file name\n\n\tconfig_file=$1\n\n    if [ -n \"$POSIX_MODE\" ]; then\n        echo \"No OST support for POSIX mode\"\n        set_skipped\n        return 1\n    fi\n\n\tclean_logs\n    # make sure df is up to date\n    wait_stable_df\n\n\techo \"1-Fullfilling OST#0 up to 80%...\"\n\telem=`$LFS df $RH_ROOT | grep \"OST:0\" | awk '{ print $5 }' | sed 's/%//'`\n\tlimit=80\n\tindice=1\n    while [ $elem -lt $limit ]\n    do\n        $LFS setstripe -o 0 $RH_ROOT/file.$indice -c 1 >/dev/null 2>/dev/null\n        dd if=/dev/zero of=$RH_ROOT/file.$indice bs=10M count=1 \\\n            conv=sync >/dev/null 2>/dev/null\n        unset elem\n\t    elem=`$LFS df $RH_ROOT | grep \"OST:0\" | awk '{ print $5 }' | sed 's/%//'`\n        [ \"$DEBUG\" = \"1\" ] && echo \"used: $elem, target: $limit\"\n        ((indice++))\n    done\n\n    echo \"2-Applying purge trigger policy...\"\n\t$RH -f $RBH_CFG_DIR/$config_file --scan --check-thresholds=purge -l DEBUG \\\n        -L rh_purge.log --once 2>/dev/null\n\n    cat rh_purge.log >> $TMPERR_FILE\n    countMigrLog=`grep \"High threshold reached on OST #0\" rh_purge.log | wc -l`\n    if (($countMigrLog == 0)); then\n        error \"********** TEST FAILED **********\"\n    else\n        echo \"OK: test successful\"\n    fi\n}\n\nfunction trigger_purge_USER_GROUP_QUOTA_EXCEEDED\n{\n\t# 
Function to test the trigger system when a quota is exceeded for a group or an user\n\t# \ttrigger_purge_USER_GROUP_QUOTA_EXCEEDED config_file usage\n\t#=>\n\t# config_file == config file name\n\t# usage == \"User\" or \"Group\"\n\n\tconfig_file=$1\n\tusage=$2\n\n\tclean_logs\n\n\techo \"1-Create Files ...\"\n\tlimit=80\n    limit_init=$limit\n\tindice=1\n    last=1\n    dd_out=/tmp/dd.out.$$\n    one_error=\"\"\n    dd_err_count=0\n    elem=$(fs_usage)\n    while [ $elem -lt $limit ]\n    do\n        # write 2M to fulfill 2 stripes\n        dd if=/dev/zero of=$RH_ROOT/file.$indice bs=2M count=1 conv=sync >/dev/null 2>$dd_out\n        if (( $? != 0 )); then\n            [[ -z \"$one_error\" ]] && one_error=\"failed to write $RH_ROOT/file.$indice: $(cat $dd_out)\"\n            ((dd_err_count++))\n            ((limit=$limit-1))\n        else\n            # on success, reinitialize limit\n            limit=$limit_init\n        fi\n\n        if [[ -s $RH_ROOT/file.$indice ]]; then\n            ((last++))\n        fi\n\n    \t# force df update\n        clean_caches\n        elem=$(fs_usage)\n        ((indice++))\n    done\n    (($dd_err_count > 0)) && echo \"WARNING: $dd_err_count errors writing $RH_ROOT/file.*: first error: $one_error\"\n\n    rm -f $dd_out\n\n    # limit is 25% => leave half of files with owner root\n    ((limit=$last/2))\n    ((limit=$limit-1))\n    echo \"$last files created, changing $limit files to testuser:testgroup\"\n    df -h $RH_ROOT\n    ((indice=1))\n    while [ $indice -lt $limit ]\n    do\n        chown testuser:testgroup $RH_ROOT/file.$indice\n        ((indice++))\n    done\n\n\n    echo \"2-Reading changelogs and Applying purge trigger policy...\"\n\t$RH -f $RBH_CFG_DIR/$config_file --scan --check-thresholds=purge -l DEBUG -L rh_purge.log --once\n\n    countMigrLog=`grep \"$usage exceeds high threshold\" rh_purge.log | wc -l`\n    if (($countMigrLog == 0)); then\n        error \"********** TEST FAILED **********\"\n    else\n        
echo \"OK: test successful\"\n    fi\n}\n\n###########################################################\n############# End Purge Trigger Functions #################\n###########################################################\n\n###################################################\n############### Purge Functions ###################\n###################################################\n\nfunction create_files_Purge\n{\n\t# create all directory and files for purge tests\n\t#  create_files_Purge\n\n    mkdir $RH_ROOT/dir1\n    mkdir $RH_ROOT/dir2\n\n    for i in `seq 1 5` ; do\n    \tdd if=/dev/zero of=$RH_ROOT/dir1/file.$i bs=1K count=1 >/dev/null 2>/dev/null || error \"writing dir1/file.$i\"\n\tdone\n\n\tln -s $RH_ROOT/dir1/file.1 $RH_ROOT/dir1/link.1\n\tln -s $RH_ROOT/dir1/file.1 $RH_ROOT/dir1/link.2\n\n\tchown root:testgroup $RH_ROOT/dir1/file.2\n    chown testuser:testgroup $RH_ROOT/dir1/file.3\n\n\tsetfattr -n user.foo -v 1 $RH_ROOT/dir1/file.4\n\tsetfattr -n user.bar -v 1 $RH_ROOT/dir1/file.5\n\n    dd if=/dev/zero of=$RH_ROOT/dir2/file.6 bs=1K count=10 >/dev/null 2>/dev/null || error \"writing dir2/file.6\"\n    dd if=/dev/zero of=$RH_ROOT/dir2/file.7 bs=1K count=11 >/dev/null 2>/dev/null || error \"writing dir2/file.7\"\n    dd if=/dev/zero of=$RH_ROOT/dir2/file.8 bs=1K count=1 >/dev/null 2>/dev/null || error \"writing dir2/file.8\"\n}\n\nfunction update_files_Purge\n{\n\t# update files for Purge tests\n\t#  update_files_migration\n\n    for i in `seq 1 500`; do\n\t\techo \"aaaaaaaaaaaaaaaaaaaa\" >> $RH_ROOT/dir2/file.8\n    done\n\tcat $RH_ROOT/dir2/file.8 >/dev/null 2>/dev/null\n}\n\nfunction test_purge\n{\n\t# Realise a unit test for purge functionalities\n\t# \ttest_migration config_file sleep_time countFinal purge_list purgeOpt\n\t#=>\n\t# config_file == config file name\n\t# sleep_time == expected time in second to sleep for the test, if=0 no sleep and no update\n\t# countFinal == number of files not purged at the end\n\t# purge_list == list of purged 
files at the end : \"file.1;file.2;link.2\"\n\t# purgeOpt == an migrate option of robinhood : \"--purge\" \"--purge-ost=1\"\n\n    config_file=$1\n    sleep_time=$2\n    countFinal=$3\n\tpurge_list=$4\n    purge_arr=$(echo $purge_list | tr \";\" \"\\n\")\n    purgeOpt=$5\n\n    # adapt the test for any root\n    export MATCH_PATH2=\"$RH_ROOT/dir2/*\"\n    export MATCH_PATH1=\"$RH_ROOT/dir1/*\"\n\n\tneedPurge=0\n\t((needPurge=10-countFinal))\n\n\tclean_logs\n\n\techo \"Create Files ...\"\n\tcreate_files_Purge\n\n\tsleep 1\n\t$RH -f $RBH_CFG_DIR/$config_file --scan -l DEBUG -L rh_scan.log --once\n\n\t# use robinhood for flushing\n    if (( ($is_hsmlite != 0) || ($is_lhsm != 0) )); then\n\t\techo \"Archiving files\"\n\t\t$RH -f $RBH_CFG_DIR/$config_file $SYNC_OPT -l DEBUG  -L rh_migr.log || error \"executing Archiving files\"\n\tfi\n\n\tif(($sleep_time != 0)); then\n\t    echo \"Sleep $sleep_time\"\n        sleep $sleep_time\n\n\t    echo \"update Files\"\n        update_files_Purge\n\n        if (( ($is_hsmlite != 0) || ($is_lhsm != 0) )); then\n\t        echo \"Update and archiving files\"\n\t        $RH -f $RBH_CFG_DIR/$config_file --scan --run=migration --target=all --once -l DEBUG  -L rh_migr.log\n            (( $is_lhsm > 0 )) && wait_done 60\n\t    fi\n    fi\n\n\techo \"Scan and apply purge policy ($purgeOpt)...\"\n\t$RH -f $RBH_CFG_DIR/$config_file --scan  $purgeOpt --once -l DEBUG -L rh_purge.log\n\n\tnbError=0\n\tnb_purge=`grep \"$REL_STR\" rh_purge.log | wc -l`\n\tif (( $nb_purge != $needPurge )); then\n\t    error \"********** TEST FAILED (Log): $nb_purge files purged, but $needPurge expected\"\n        ((nbError++))\n\tfi\n\n    if (($nbError == 0 )); then\n        echo \"OK: test successful\"\n    else\n        error \"********** TEST FAILED **********\"\n    fi\n}\n\nfunction test_purge_tmp_fs_mgr\n{\n\t# Realise a unit test for purge functionalities for TMP_FS_MGR mod\n\t# \ttest_migration_tmp_fs_mgr config_file sleep_time countFinal purge_list 
purgeOpt\n\t#=>\n\t# config_file == config file name\n\t# sleep_time == expected time in second to sleep for the test, if=0 no sleep and no update\n\t# countFinal == number of files not purged at the end\n\t# purge_list == list of purged files at the end : \"file.1;file.2;link.2\"\n\t# purgeOpt == an migrate option of robinhood : \"--purge\" \"--purge-ost=1\"\n\n    config_file=$1\n    sleep_time=$2\n    countFinal=$3\n\tpurge_list=$4\n    purge_arr=$(echo $purge_list | tr \";\" \"\\n\")\n    purgeOpt=$5\n\n\n    if (( $shook != 0 )) || (( $is_lhsm != 0 )); then\n\t\techo \"No link purge for HSM modes\"\n\t\tset_skipped\n\t\treturn 1\n\tfi\n\n\tneedPurge=0\n\t((needPurge=10-countFinal))\n\n\tclean_logs\n\n\techo \"Create Files ...\"\n\tcreate_files_Purge\n\n\tsleep 1\n\t$RH -f $RBH_CFG_DIR/$config_file --scan -l DEBUG -L rh_scan.log --once\n\n\tif(($sleep_time != 0)); then\n\t    echo \"Sleep $sleep_time\"\n        sleep $sleep_time\n\n\t    echo \"update Files\"\n        update_files_Purge\n    fi\n\n\techo \"Reading changelogs and Applying purge policy...\"\n\t$RH -f $RBH_CFG_DIR/$config_file --scan  $purgeOpt --once -l DEBUG -L rh_purge.log\n\n\tnbError=0\n\tnb_purge=`grep \"$REL_STR\" rh_purge.log | wc -l`\n\tif (( $nb_purge != $needPurge )); then\n\t    error \"********** TEST FAILED (Log): $nb_purge files purged, but $needPurge expected\"\n        ((nbError++))\n\tfi\n\n    countFileDir1=`find $RH_ROOT/dir1 -type f | wc -l`\n    countFileDir2=`find $RH_ROOT/dir2 -type f | wc -l`\n    countLink=`find $RH_ROOT/dir1 -type l | wc -l`\n    count=$(($countFileDir1+$countFileDir2+$countLink))\n    if (($count != $countFinal)); then\n        error \"********** TEST FAILED (File System): $count files stayed in filesystem, but $countFinal expected\"\n        ((nbError++))\n    fi\n\n    for x in $purge_arr\n    do\n        if [ -e \"$RH_ROOT/dir1/$x\" -o -e \"$RH_ROOT/dir2/$x\" ]; then\n\t        error \"********** TEST FAILED (File System): $x is not purged\"\n        
    ((nbError++))\n        fi\n    done\n\n    if (($nbError == 0 )); then\n        echo \"OK: test successful\"\n    else\n        error \"********** TEST FAILED **********\"\n    fi\n}\n\nfunction purge_OST\n{\n\t# Realise a unit test for purge functionalities for OST fileSystem (Lustre)\n\t# \tmigration_OST config_file countFinal purge_list purgeOpt\n\t#=>\n\t# config_file == config file name\n\t# countFinal == number of files not purged at the end\n\t# purge_list == list of purged files at the end : \"file.1;file.2;link.2\"\n\t# purgeOpt == an migrate option of robinhood : \"--purge\" \"--purge-ost=1\"\n\n\tconfig_file=$1\n    countFinal=$2\n\tpurge_list=$3\n    purge_arr=$(echo $purge_list | tr \";\" \"\\n\")\n    purgeOpt=$4\n\n    if [ -n \"$POSIX_MODE\" ]; then\n        echo \"No OST support for POSIX mode\"\n        set_skipped\n        return 1\n    fi\n\n\tneedPurge=0\n\t((needPurge=4-countFinal))\n\n\tclean_logs\n\n\techo \"1-Create Pools ...\"\n\tcreate_pools\n\n\techo \"2-Create Files ...\"\n    for i in `seq 1 2`; do\n\t\t$LFS setstripe  -p lustre.$POOL1 $RH_ROOT/file.$i -c 1 >/dev/null 2>/dev/null\n\tdone\n\n    for i in `seq 3 4`; do\n\t\t$LFS setstripe  -p lustre.$POOL2 $RH_ROOT/file.$i -c 1 >/dev/null 2>/dev/null\n\tdone\n\n    # file with no pool (stripe to OST0 so it is not eligible to purge in test\n    # 614b)\n    $LFS setstripe -c 1 -o 0 $RH_ROOT/file.5  >/dev/null 2>/dev/null\n\n\tsleep 1\n\t$RH -f $RBH_CFG_DIR/$config_file --scan -l DEBUG -L rh_scan.log --once \\\n        2>/dev/null\n    # stripe info should not be missing to check entry pool name\n    grep \"attribute is missing for checking fileset\" rh_scan.log &&\n        error \"Missing stripe info to match pool-based fileclass\"\n\n\n\t# use robinhood for flushing\n\tif (( $is_hsmlite + $is_lhsm > 0 )); then\n\t\techo \"2bis-Archiving files\"\n\t\t$RH -f $RBH_CFG_DIR/$config_file $SYNC_OPT -l DEBUG  -L rh_migr.log \\\n            2>/dev/null || error \"executing Archiving files\"\n 
       (( $is_lhsm > 0 )) && wait_done 60\n\tfi\n\n    $RH -f $RBH_CFG_DIR/$config_file --readlog --once -L rh_chglogs.log \\\n        -l DEBUG 2>/dev/null\n    grep \"attribute is missing for checking fileset\" rh_chglogs.log &&\n        error \"Missing stripe info to match pool-based fileclass\"\n\n\techo \"Scan and apply purge policy...\"\n\t$RH -f $RBH_CFG_DIR/$config_file --scan $purgeOpt -l FULL -L rh_purge.log \\\n        --once 2>/dev/null\n    # stripe info should not be missing to check entry pool name\n    grep \"attribute is missing for checking fileset\" rh_purge.log &&\n        error \"Missing stripe info to match pool-based fileclass\"\n\n    # stripe should not have been updated during the 2nd scan\n    grep \"INSERT INTO STRIPE\" rh_purge.log && error \"No stripe update expected\"\n\n\tnb_purge=`grep \"$REL_STR\" rh_purge.log | wc -l`\n\tif (( $nb_purge != $needPurge )); then\n\t    error \"********** TEST FAILED (Log): $nb_purge files purged, but $needPurge expected\"\n\tfi\n\n\tif (($NB_ERROR == 0 )); then\n        echo \"OK: test successful\"\n    else\n        echo \"********** TEST FAILED **********\"\n    fi\n}\n\n###################################################\n############# End Purge Functions #################\n###################################################\n\n##################################################\n############# Removing Functions #################\n##################################################\n\nfunction test_removing\n{\n\t# remove directory/ies in accordance to the input file and configuration\n\t# \ttest_removing config_file forExtAttributes sleepTime mode_list\n\t#=>\n\t# config_file == config file name\n\t# testKey == 'emptyDir' for testing extended attributes\n\t# \t     'lastAction' for testing last access or modification\n\t# sleepTime == expected time in second to sleep for the test, if=0 no sleep\n\n\t# get input parameters ....................\n\tconfig_file=$1\n\ttestKey=$2  #== key word for 
specific tests\n\tsleepTime=$3\n\n\t#  clean logs ..............................\n\tclean_logs\n\n\t# prepare data..............................\n\techo \"1-Preparing Filesystem...\"\n\tmkdir -p $RH_ROOT/dir1\n\tmkdir -p $RH_ROOT/dir5\n\techo \"data\" > $RH_ROOT/dir5/file.5\n\n\tif [ $testKey == \"emptyDir\" ]; then\n\t\t# wait and write more data\n\t\tif [ $sleepTime != 0 ]; then\n\t\t\techo \"Please wait $sleepTime seconds ...\"\n\t\t\tsleep $sleepTime || error \"sleep time\"\n\t\tfi\n\t\tsleepTime=0\n\t\tmkdir -p $RH_ROOT/dir6\n\t\tmkdir -p $RH_ROOT/dir7\n\t\techo \"data\" > $RH_ROOT/dir7/file.7\n\n\telse\n\t\t# in dir1: manage folder owner and attributes\n\t\tchown testuser $RH_ROOT/dir1 || error \"invalid chown on user 'testuser' for $RH_ROOT/dir1 \"  #change owner\n\t\tsetfattr -n user.foo -v \"abc.1.test\" $RH_ROOT/dir1\n\t\techo \"data\" > $RH_ROOT/dir1/file.1\n\t\tmkdir -p $RH_ROOT/dir1/dir2\n\t\techo \"data\" > $RH_ROOT/dir1/dir2/file.2\n\t\tmkdir -p $RH_ROOT/dir1/dir3\n\t\techo \"data\" > $RH_ROOT/dir1/dir3/file.3\n\t \tmkdir -p $RH_ROOT/dir1/dir4\n\t\tchown testuser $RH_ROOT/dir1/dir4 || error \"invalid chown on user 'testuser' for $RH_ROOT/dir4\" #change owner\n\t\techo \"data\" > $RH_ROOT/dir1/dir4/file.41\n\t\techo \"data\" > $RH_ROOT/dir1/dir4/file.42\n\n\t\t# in dir5:\n\t\tsetfattr -n user.bar -v \"abc.1.test\" $RH_ROOT/dir5\n\t\techo \"data\" > $RH_ROOT/dir5/file.5\n\n\t\t# in dir6:\n\t\tmkdir -p $RH_ROOT/dir6\n\t\tchown testuser $RH_ROOT/dir6 || error \"invalid chown on user 'testuser' for $RH_ROOT/dir6\" #change owner\n\tfi\n\n\t# launch the scan ..........................\n\techo \"2-Scanning directories in filesystem ...\"\n\t$RH -f $RBH_CFG_DIR/$config_file --scan -l DEBUG -L rh_scan.log --once || error \"scanning filesystem\"\n\n\t# optional sleep process ......................\n\tif [ $sleepTime != 0 ]; then\n\t\techo \"Please wait $sleepTime seconds ...\"\n\t\tsleep $sleepTime\n\tfi\n\t# specific optional action after sleep process 
..........\n\tif [ $testKey == \"lastAccess\" ]; then\n\t#\tls -R $RH_ROOT/dir1 || error \"scaning $RH_ROOT/dir1\"\n\t\ttouch $RH_ROOT/dir1/file.touched || error \"touching file in $RH_ROOT/dir1\"\n\telif [ $testKey == \"lastModif\" ]; then\n\t\techo \"data\" > $RH_ROOT/dir1/file.12 || error \"writing in $RH_ROOT/dir1/file.12\"\n\tfi\n\n\t# launch the rmdir ..........................\n\techo \"3-Removing directories in filesystem ...\"\n\tif [ $testKey == \"lastAccess\" ]; then\n\t$RH -f $RBH_CFG_DIR/$config_file --run=rmdir_recurse --target=all -l DEBUG -L rh_rmdir.log --once || error \"performing FS removing\"\n\telse\n\t$RH -f $RBH_CFG_DIR/$config_file --scan --run=rmdir_recurse --target=all -l DEBUG -L rh_rmdir.log --once || error \"performing FS removing\"\n\tfi\n\n\t# launch the validation ..........................\n\techo \"4-Checking results ...\"\n\tlogFile=/tmp/rh_alert.log\n\tcase \"$testKey\" in\n\t\tpathName)\n\t\t\texistedDirs=\"$RH_ROOT/dir5;$RH_ROOT/dir6\"\n\t\t\tnotExistedDirs=\"$RH_ROOT/dir1\"\n\t\t\t;;\n\t\temptyDir)\n\t\t\texistedDirs=\"$RH_ROOT/dir6;$RH_ROOT/dir5;$RH_ROOT/dir7\"\n\t\t\tnotExistedDirs=\"$RH_ROOT/dir1\"\n\t\t\t;;\n\t\towner)\n\t\t\texistedDirs=\"$RH_ROOT/dir5\"\n\t\t\tnotExistedDirs=\"$RH_ROOT/dir1;$RH_ROOT/dir6\"\n\t\t\t;;\n\t\tlastAccess)\n\t\t\texistedDirs=\"$RH_ROOT/dir1\"\n\t\t\tnotExistedDirs=\"$RH_ROOT/dir5;$RH_ROOT/dir6\"\n\t\t\t;;\n\t\tlastModif)\n\t\t\texistedDirs=\"$RH_ROOT/dir1\"\n\t\t\tnotExistedDirs=\"$RH_ROOT/dir5;$RH_ROOT/dir6\"\n\t\t\t;;\n\t\tdircount)\n\t\t\texistedDirs=\"$RH_ROOT/dir5;$RH_ROOT/dir6\"\n\t\t\tnotExistedDirs=\"$RH_ROOT/dir1\"\n\t\t\t;;\n\t\textAttributes)\n\t\t\texistedDirs=\"$RH_ROOT/dir5;$RH_ROOT/dir6\"\n\t\t\tnotExistedDirs=\"$RH_ROOT/dir1\"\n\t\t\t;;\n\t\t*)\n\t\t\terror \"unexpected testKey $testKey\"\n\t\t\treturn 1 ;;\n\tesac\n\t# launch the validation for all remove process\n\texist_dirs_or_not $existedDirs $notExistedDirs\n\tres=$?\n\n\tif (( $res == 1 )); then\n\t\terror \"Test for 
RemovingDir_$testKey failed\"\n    else\n        echo \"OK: Test successful\"\n\tfi\n}\n\nfunction test_rmdir_mix\n{\n    config_file=$1\n    sleepTime=$2 # for age_rm_empty_dirs\n\n    #  clean logs\n    clean_logs\n\n    export NO_RM_TREE=\"$RH_ROOT/no_rm\"\n\n    # prepare data\n    echo \"1-Preparing Filesystem...\"\n    # old dirempty\n    mkdir -p $RH_ROOT/no_rm/dirempty\n    mkdir -p $RH_ROOT/dirempty\n    sleep $sleepTime\n\n    # new dirs\n    mkdir -p $RH_ROOT/no_rm/dir1\n    mkdir -p $RH_ROOT/no_rm/dir2\n    mkdir -p $RH_ROOT/no_rm/dirempty_new\n    mkdir -p $RH_ROOT/dir1\n    mkdir -p $RH_ROOT/dir2\n    mkdir -p $RH_ROOT/dirempty_new\n    echo \"data\" >  $RH_ROOT/no_rm/dir1/file\n    echo \"data\" >  $RH_ROOT/no_rm/dir2/file\n    echo \"data\" >  $RH_ROOT/dir1/file\n    echo \"data\" >  $RH_ROOT/dir2/file\n\n    # launch the scan ..........................\n    echo \"2-Scanning directories in filesystem ...\"\n    $RH -f $RBH_CFG_DIR/$config_file --scan -l DEBUG -L rh_scan.log --once || error \"scanning filesystem\"\n\n    echo \"3-Checking old dirs report\"\n    $REPORT -f $RBH_CFG_DIR/$config_file -l MAJOR -cq --oldest-empty-dirs > report.out\n    [ \"$DEBUG\" = \"1\" ] && cat report.out\n    # must report empty dirs\n    grep \"no_rm/dirempty,\" report.out || error \"no_rm/dirempty not in empty dir report\"\n    grep \"no_rm/dirempty_new,\" report.out || error \"no_rm/dirempty_new not in empty dir report\"\n    grep \"$RH_ROOT/dirempty,\" report.out || error \"$RH_ROOT/dirempty not in empty dir report\"\n    grep \"$RH_ROOT/dirempty_new,\" report.out || error \"$RH_ROOT/dirempty_new not in empty dir report\"\n    # must no report other dirs\n    grep \"no_rm/dir1,\" report.out && error \"no_rm/dir1 in empty dir report\"\n    grep \"no_rm/dir1,\" report.out && error \"no_rm/dir2 in empty dir report\"\n    grep \"$RH_ROOT/dir2,\" report.out && error \"$RH_ROOT/dir1 in empty dir report\"\n    grep \"$RH_ROOT/dir2,\" report.out && error \"$RH_ROOT/dir2 
in empty dir report\"\n\n    # launch the rmdir ..........................\n    echo \"4-Removing directories in filesystem ...\"\n    $RH -f $RBH_CFG_DIR/$config_file --run=rmdir --target=all -l DEBUG -L rh_rmdir.log --once || error \"performing rmdir\"\n\n    echo \"5-Checking results ...\"\n    exist=\"$RH_ROOT/no_rm/dirempty;$RH_ROOT/no_rm/dir1;$RH_ROOT/no_rm/dir2;$RH_ROOT/no_rm/dirempty_new;$RH_ROOT/dir2;$RH_ROOT/dirempty_new\"\n    noexist=\"$RH_ROOT/dir1;$RH_ROOT/dirempty\"\n\n    # launch the validation for all remove process\n    exist_dirs_or_not $exist $noexist\n    res=$?\n\n    if (( $res == 1 )); then\n        error \"Test for RemovingDir_mixed failed\"\n    else\n        echo \"OK: Test successful\"\n    fi\n}\n\n\nfunction test_removing_ost\n{\n\t# remove directory/ies in accordance to the input file and configuration\n\t# \ttest_removing config_file mode_list\n\t#=>\n\t# config_file == config file name\n\n\t# get input parameters ....................\n\tconfig_file=$1\n\n    echo \"Directory stripe is not taken into account for rmdir policies: skipped\"\n\tset_skipped\n\treturn 1\n\n\tclean_logs\n\n\techo \"Create Pools ...\"\n\tcreate_pools\n\n\techo \"Create Files ...\"\n\tmkdir $RH_ROOT/dir1\n\n\t$LFS setstripe  -p lustre.$POOL1 $RH_ROOT/dir1 >/dev/null 2>/dev/null\n\n\t$LFS setstripe  -p lustre.$POOL1 $RH_ROOT/dir1/file.1 -c 1 >/dev/null 2>/dev/null\n\t$LFS setstripe  -p lustre.$POOL1 $RH_ROOT/dir1/file.2 -c 1 >/dev/null 2>/dev/null\n\t$LFS setstripe  -p lustre.$POOL1 $RH_ROOT/dir1/file.3 -c 1 >/dev/null 2>/dev/null\n\n\tmkdir $RH_ROOT/dir2\n\t$LFS setstripe  -p lustre.$POOL2 $RH_ROOT/dir2 >/dev/null 2>/dev/null\n\n    $LFS setstripe  -p lustre.$POOL2 $RH_ROOT/file.1 -c 1 >/dev/null 2>/dev/null\n\t$LFS setstripe  -p lustre.$POOL2 $RH_ROOT/dir2/file.2 -c 1 >/dev/null 2>/dev/null\n\t$LFS setstripe  -p lustre.$POOL2 $RH_ROOT/dir2/file.3 -c 1 >/dev/null 2>/dev/null\n\n\techo \"Removing directories in filesystem ...\"\n\t$RH -f 
$RBH_CFG_DIR/$config_file --scan --run=rmdir_recurse --target=all -l DEBUG -L rh_rmdir.log --once || error \"performing FS removing\"\n\n\t# launch the validation ..........................\n\techo \"Checking results ...\"\n\tlogFile=/tmp/rh_alert.log\n\texistedDirs=\"$RH_ROOT/dir1\"\n\tnotExistedDirs=\"$RH_ROOT/dir2\"\n\t# launch the validation for all remove process\n\texist_dirs_or_not $existedDirs $notExistedDirs\n\tres=$?\n\n\tif (( $res == 1 )); then\n\t\terror \"Test for RemovingDir_ost failed\"\n\tfi\n\n\ttest -f $RH_ROOT/file.1\n\tres=$?\n\n\tif (( $res == 1 )); then\n\t\terror \"Test for RemovingDir_ost failed\"\n\tfi\n}\n\nfunction exist_dirs_or_not\n{\n    # read two lists of folders and check:\n    # 1- the first list must contain existed dirs\n    # 2- the first list must contain not existed dirs\n    #If the both conditions are realized, then the function returns 0, otherwise 1.\n    # \texist_dirs_or_not $existedDirs $notExistedDirs\n    #=> existedDirs & notExistedDirs list of dirs to check separated by ';'\n    # ex: \"$RH_ROOT/dir1;$RH_ROOT/dir5\"\n    # ex: Use \"/\" for giving an empty list\n\n    existedDirs=$1\n    notExistedDirs=$2\n\n    echo \"[$existedDirs] & [$notExistedDirs]\"\n    # launch the command which return 1 if one dir is not \"! -d\" (== does not exist)\n    check_cmd $existedDirs \"! -d\"\n    if [  $? -eq 1 ] ; then\n\t    echo \"error for $existedDirs\"\n\t    return 1\n    else\n    # launch the command which return 1 if one dir is not \"-d\" (== does exist)\n\t    check_cmd $notExistedDirs \"-d\"\n\t    if [  $? 
-eq 1 ] ; then\n\t\t    echo \"error for $notExistedDirs\"\n\t\t    return 1\n\t    fi\n    fi\n}\n\nfunction check_cmd\n{\n    # check if each dir respects the reverse of the given command.\n    # return 0 if it repects, 1 otherwise\n    # check_cmd $listDirs $commande\n    # =>\n    # \t$listDirs = list of dirs separated by ';'\n    #\tex: \"$RH_ROOT/dir1;$RH_ROOT/dir5\"  or \"/\" to no check command\n    #\t$commande = \"-d\" or \"! -d\"\n    #\tex: check_cmd $notExistedDirs \"-d\": checks that all dirs does not exist\n\n    existedDirs=$1\n    cmd=$2\n    # set default output value\n    out=1\n    #get the dirs which must exist\n    if [ $existedDirs != \"/\" ]; then\n\t    splitExDirs=$(echo $existedDirs | tr \";\" \"\\n\")\n\t    for entry in $splitExDirs\n        \tdo\n\t\t    # for each dir check the existence, otherwise return 1\n\t\t    if [ $cmd $entry ]; then\n\t\t\t    return 1\n\t\t    fi\n\t    done\n    fi\n}\n\n######################################################\n############# End Removing Functions #################\n######################################################\n\n###############################################################\n############### Report generation Functions ###################\n###############################################################\n\nfunction test_report_generation_1\n{\n\t# report many statistics in accordance to the input file and configuration\n\t# \ttest_report_generation_1 config_file\n\t#=>\n\t# config_file == config file name\n\n\t# get input parameters ....................\n\tconfig_file=$1\n\n    if [[ $RBH_NUM_UIDGID = \"yes\" ]]; then\n        echo \"Test needs adaptation for numerical UID/GID: skipped\"\n        set_skipped\n        return 1\n    fi\n\n\t#  clean logs ..............................\n\tclean_logs\n\n\t# prepare data..............................\n\techo -e \"\\n 1-Preparing Filesystem...\"\n\t# dir1:\n\tmkdir -p $RH_ROOT/dir1/dir2\n\tprintf \".\" ; sleep 1\n\tdd if=/dev/zero 
of=$RH_ROOT/dir1/file.1 bs=1k count=5 >/dev/null 2>/dev/null || error \"writing file.1\"\n\tprintf \".\" ; sleep 1\n\tdd if=/dev/zero of=$RH_ROOT/dir1/file.2 bs=1M count=1 >/dev/null 2>/dev/null || error \"writing file.2\"\n\tprintf \".\" ; sleep 1\n\tdd if=/dev/zero of=$RH_ROOT/dir1/file.3 bs=1k count=15 >/dev/null 2>/dev/null || error \"writing file.3\"\n\tprintf \".\" ; sleep 1\n\t# link from dir1:\n\tln -s $RH_ROOT/dir1/file.1 $RH_ROOT/link.1 || error \"creating symbolic link $RH_ROOT/link.1\"\n\tprintf \".\" ; sleep 1\n\t# dir2 inside dir1:\n\tln -s $RH_ROOT/dir1/file.3 $RH_ROOT/dir1/dir2/link.2 || error \"creating symbolic link $RH_ROOT/dir1/dir2/link.2\"\n\tprintf \".\" ; sleep 1\n\t# dir3 inside dir1:\n\tmkdir -p $RH_ROOT/dir1/dir3\n\tprintf \".\" ; sleep 1\n\t#dir4:\n\tmkdir -p $RH_ROOT/dir4\n\tprintf \".\" ; sleep 1\n\t#dir5:\n\tmkdir -p $RH_ROOT/dir5\n\tprintf \".\" ; sleep 1\n\tdd if=/dev/zero of=$RH_ROOT/dir5/file.4 bs=1k count=10 >/dev/null 2>/dev/null || error \"writing file.4\"\n\tprintf \".\" ; sleep 1\n\tdd if=/dev/zero of=$RH_ROOT/dir5/file.5 bs=1k count=20 >/dev/null 2>/dev/null || error \"writing file.5\"\n\tprintf \".\" ; sleep 1\n\tdd if=/dev/zero of=$RH_ROOT/dir5/file.6 bs=1k count=21 >/dev/null 2>/dev/null || error \"writing file.6\"\n\tprintf \".\" ; sleep 1\n\tln -s $RH_ROOT/dir1/file.2 $RH_ROOT/dir5/link.3 || error \"creating symbolic link $RH_ROOT/dir5/link.3\"\n\tprintf \".\" ; sleep 1\n\t#dir6 and dir8 inside dir5:\n\tmkdir -p $RH_ROOT/dir5/dir6\n\tprintf \".\" ; sleep 1\n\tmkdir -p $RH_ROOT/dir5/dir8\n\tprintf \".\" ; sleep 1\n\t# dir7:\n\tmkdir -p $RH_ROOT/dir7\n\tprintf \".\" ; sleep 1\n    #2links in dir.1\n    ln -s $RH_ROOT/dir1 $RH_ROOT/dir1/link.0 || error \"creating symbolic link $RH_ROOT/dir1/link.0\"\n    printf \".\" ; sleep 1\n    ln -s $RH_ROOT/dir1 $RH_ROOT/dir1/link.1 || error \"creating symbolic link $RH_ROOT/dir1/link.1\"\n    printf \".\" ; sleep 1\n\n    # make sure all data is on disk\n    sync\n\n\t# manage owner 
and group\n\tfilesList=\"$RH_ROOT/link.1 $RH_ROOT/dir1/dir2/link.2\"\n\tchgrp -h testgroup $filesList || error \"invalid chgrp on group 'testgroup' for $filesList \"\n\tchown -h testuser $filesList || error \"invalid chown on user 'testuser' for $filesList \"\n\tfilesList=\"$RH_ROOT/dir1/file.2 $RH_ROOT/dir1/dir2 $RH_ROOT/dir1/dir3 $RH_ROOT/dir5 $RH_ROOT/dir7 $RH_ROOT/dir5/dir6 $RH_ROOT/dir5/dir8\"\n\tchown testuser:testgroup $filesList || error \"invalid chown on user 'testuser' for $filesList \"\n\tfilesList=\"$RH_ROOT/dir1/file.1 $RH_ROOT/dir5/file.6\"\n\tchgrp testgroup $filesList || error \"invalid chgrp on group 'testgroup' for $filesList \"\n\n\t# launch the scan ..........................\n\techo -e \"\\n 2-Scanning Filesystem...\"\n\t$RH -f $RBH_CFG_DIR/$config_file --scan -l DEBUG -L rh_scan.log  --once || error \"performing FS scan\"\n\n\t# launch another scan ..........................\n\techo -e \"\\n 3-Filesystem content statistics...\"\n\t#$REPORT -f $RBH_CFG_DIR/$config_file --fs-info -c || error \"performing FS statistics (--fs-info)\"\n\t$REPORT -f $RBH_CFG_DIR/$config_file --fs-info --csv > report.out || error \"performing FS statistics (--fs-info)\"\n\tlogFile=report.out\n\n    typeValues=\"dir;file;symlink\"\n    countValues=\"$((8+extra_dir));6;5\"\n    colSearch=2\n    [ \"$DEBUG\" = \"1\" ] && cat report.out\n\tfind_allValuesinCSVreport $logFile $typeValues $countValues $colSearch || error \"validating FS statistics (--fs-info)\"\n\n\n\t# launch another scan ..........................\n\techo -e \"\\n 4-FileClasses summary...\"\n\t$REPORT -f $RBH_CFG_DIR/$config_file --class-info --csv > report.out || error \"performing FileClasses summary (--class)\"\n    if (( $is_lhsm == 0 )); then\n        typeValues=\"test_file_type;test_link_type\"\n        countValues=\"6;5\"\n    else\n        # Lustre/HSM: no fileclass for symlinks\n        typeValues=\"test_file_type\"\n        countValues=\"6\"\n    fi\n\n    colSearch=2\n\t#echo \"arguments= 
$logFile $typeValues $countValues $colSearch**\"\n    [ \"$DEBUG\" = \"1\" ] && cat report.out\n\tfind_allValuesinCSVreport $logFile $typeValues $countValues $colSearch || error \"validating FileClasses summary (--class)\"\n\t# launch another scan ..........................\n\techo -e \"\\n 5-User statistics of root...\"\n\t$REPORT -f $RBH_CFG_DIR/$config_file --user-info -u root --csv > report.out || error \"performing User statistics (--user)\"\n    typeValues=\"root.*dir;root.*file;root.*symlink\"\n    countValues=\"$((2+extra_dir));5;3\"\n\tcolSearch=3\n    [ \"$DEBUG\" = \"1\" ] && cat report.out\n\tfind_allValuesinCSVreport $logFile $typeValues $countValues $colSearch || error \"validating FS User statistics (--user)\"\n\n\t# launch another scan ..........................\n\techo -e \"\\n 6-Group statistics of testgroup...\"\n\t$REPORT -f $RBH_CFG_DIR/$config_file --group-info -g testgroup --csv > report.out || error \"performing Group statistics (--group)\"\n\ttypeValues=\"testgroup.*dir;testgroup.*file;testgroup.*symlink\"\n\tcountValues=\"6;3;2\"\n\tcolSearch=3\n    [ \"$DEBUG\" = \"1\" ] && cat report.out\n\tfind_allValuesinCSVreport $logFile $typeValues $countValues $colSearch || error \"validating Group statistics (--group)\"\n\n    # launch another scan ..........................\n    echo -e \"\\n 6b-Group statistics of testgroup per user...\"\n    $REPORT -f $RBH_CFG_DIR/$config_file -g testgroup --csv -S > report.out || error \"performing Group by-user statistics (-g -S)\"\n    typeValues=\"testgroup.*root.*file;testgroup.*testuser.*dir;testgroup.*testuser.*file;testgroup.*testuser.*symlink\"\n    countValues=\"2;6;1;2\"\n    colSearch=4\n    [ \"$DEBUG\" = \"1\" ] && cat report.out\n    find_allValuesinCSVreport $logFile $typeValues $countValues $colSearch || error \"validation Group by-user statistics (-g -S)\"\n\n\t# launch another scan ..........................\n\techo -e \"\\n 7-Four largest files of Filesystem...\"\n\t$REPORT -f 
$RBH_CFG_DIR/$config_file --top-size=4 --csv > report.out || error \"performing Largest files list (--top-size)\"\n\ttypeValues=\"file\\.2;file\\.6;file\\.5;file\\.3\"\n\tcountValues=\"1;2;3;4\"\n\tcolSearch=1\n    [ \"$DEBUG\" = \"1\" ] && cat report.out\n\tfind_allValuesinCSVreport $logFile $typeValues $countValues $colSearch || error \"validating Largest files list (--top-size)\"\n\n\techo -e \"\\n 8-Largest directories of Filesystem...\"\n\t$REPORT -f $RBH_CFG_DIR/$config_file --top-dirs=3 --csv > report.out || error \"performing Largest folders list (--top-dirs)\"\n\t# 2 possible orders\n\ttypeValues=\"$RH_ROOT/dir1;$RH_ROOT/dir5;$RH_ROOT,\"\n\ttypeValuesAlt=\"$RH_ROOT/dir1;$RH_ROOT,;$RH_ROOT/dir5\"\n\tcountValues=\"1;2;3\"\n\tcolSearch=1\n    [ \"$DEBUG\" = \"1\" ] && cat report.out\n\tfind_allValuesinCSVreport $logFile $typeValues $countValues $colSearch || \\\n\tfind_allValuesinCSVreport $logFile $typeValuesAlt $countValues $colSearch || \\\n\terror \"validating Largest folders list (--top-dirs)\"\n\n\n\t# /!\\ scan/backup modifies files and symlink atime!\n\techo -e \"\\n 9-Four oldest purgeable entries of Filesystem...\"\n    echo \"FIXME: test is disturbed by file and symlink reading\"\n    if (( 0 )); then\n        if (( $is_hsmlite + $is_lhsm != 0 )); then\n        $RH -f $RBH_CFG_DIR/$config_file $SYNC_OPT -l DEBUG -L rh_migr.log  --once || error \"performing migration\"\n        $REPORT -f $RBH_CFG_DIR/$config_file --oldest-files=4 --csv > report.out || error \"performing Oldest entries list (--oldest-files)\"\n        typeValues=\"link\\.3;link\\.1;link\\.2;file\\.1\"\n        countValues=\"1;2;3;4\"\n        else\n        $REPORT -f $RBH_CFG_DIR/$config_file --oldest-files=4 --csv > report.out || error \"performing Oldest entries list (--oldest-files)\"\n        typeValues=\"file\\.3;file\\.4;file\\.5;link\\.3\"\n        countValues=\"1;2;3;4\"\n        fi\n        colSearch=1\n        find_allValuesinCSVreport $logFile $typeValues $countValues 
$colSearch || error \"validating Oldest entries list (--oldest-files)\"\n    fi\n\n    echo -e \"\\n 10-Oldest empty directories of Filesystem...\"\n    $REPORT -f $RBH_CFG_DIR/$config_file --oldest-empty-dirs --csv > report.out || error \"performing oldest empty folders list (--oldest-empty-dirs)\"\n    [ \"$DEBUG\" = \"1\" ] && cat report.out\n    nb_dir3=`grep \"dir3\" $logFile | wc -l`\n    if (( nb_dir3==0 )); then\n        error \"validating Oldest and empty folders list (--oldest-empty-dirs) : dir3 not found\"\n    fi\n    nb_dir4=`grep \"dir4\" $logFile | wc -l`\n    if (( nb_dir4==0 )); then\n        error \"validating Oldest and empty folders list (--oldest-empty-dirs) : dir4 not found\"\n    fi\n    nb_dir6=`grep \"dir6\" $logFile | wc -l`\n    if (( nb_dir6==0 )); then\n        error \"validating Oldest and empty folders list (--oldest-empty-dirs) : dir6 not found\"\n    fi\n    nb_dir7=`grep \"dir7\" $logFile | wc -l`\n    if (( nb_dir7==0 )); then\n        error \"validating Oldest and empty folders list (--oldest-empty-dirs) : dir7 not found\"\n    fi\n\n\t# launch another scan ..........................\n\techo -e \"\\n 11-Top disk space consumers of Filesystem...\"\n\t$REPORT -f $RBH_CFG_DIR/$config_file --top-users --csv > report.out || error \"performing disk space consumers (--top-users)\"\n\ttypeValues=\"testuser;root\"\n\tcountValues=\"1;2\"\n\tcolSearch=1\n    [ \"$DEBUG\" = \"1\" ] && cat report.out\n\tfind_allValuesinCSVreport $logFile $typeValues $countValues $colSearch || error \"validating disk space consumers (--top-users)\"\n\n\t# launch another scan ..........................\n\techo -e \"\\n 12-Dump entries for one user of Filesystem...\"\n\t$REPORT -f $RBH_CFG_DIR/$config_file --dump-user root --csv > report.out || error \"dumping entries for one user 
'root'(--dump-user)\"\n\ttypeValues=\"root.*[root|testgroup].*dir1$;root.*[root|testgroup].*file\\.1;root.*[root|testgroup].*file\\.3;root.*[root|testgroup].*dir4$;\"\n\tcountValues=\"dir;file;file;dir\"\n\tcolSearch=1\n    [ \"$DEBUG\" = \"1\" ] && cat report.out\n\tfind_allValuesinCSVreport $logFile $typeValues $countValues $colSearch || error \"validating entries for one user 'root'(--dump-user)\"\n\ttypeValues=\"root.*[root|testgroup].*file\\.4;root.*[root|testgroup].*file\\.5;root.*[root|testgroup].*file\\.6;root.*[root|testgroup].*link\\.3;\"\n\tcountValues=\"file;file;file;symlink\"\n\tcolSearch=1\n\tfind_allValuesinCSVreport $logFile $typeValues $countValues $colSearch || error \"validating entries for one user 'root'(--dump-user)\"\n\ttypeValue=\"root.*[root|testgroup]\"\n\tif (( $(grep $typeValue $logFile | wc -l) != 10 + extra_dir )) ; then\n\t\t error \"validating entries for one user 'root'(--dump-user)\"\n\tfi\n\t# launch another scan ..........................\n\techo -e \"\\n 13-Dump entries for one group of Filesystem...\"\n\t$REPORT -f $RBH_CFG_DIR/$config_file --dump-group testgroup --csv > report.out || error \"dumping entries for one group 'testgroup'(--dump-group)\"\n\t#$RH -f $RBH_CFG_DIR/$config_file --scan -l DEBUG -L rh_scan.log  --once || error \"performing FS scan\"\n\ttypeValues=\"testgroup.*link\\.1;testgroup.*file\\.1;testgroup.*file\\.2;testgroup.*link\\.2;testgroup.*file\\.6\"\n\tcountValues=\"symlink;file;file;symlink;file\"\n\tcolSearch=1\n    [ \"$DEBUG\" = \"1\" ] && cat report.out\n\tfind_allValuesinCSVreport $logFile $typeValues $countValues $colSearch || error \"validating Group entries for one group 'testgroup'(--dump-group)\"\n\ttypeValues=\"testgroup.*dir2$;testgroup.*dir3$;testgroup.*dir5$;testgroup.*dir6$;testgroup.*dir7$\"\n\tcountValues=\"dir;dir;dir;dir;dir\"\n\tcolSearch=1\n\tfind_allValuesinCSVreport $logFile $typeValues $countValues $colSearch || error \"validating Group entries for one group 
'testgroup'(--dump-group)\"\n\ttypeValue=\"testgroup\"\n\tif (( $(grep $typeValue $logFile | wc -l) != 11 )) ; then\n\t\t error \"validating Group entries for one group 'testgroup'(--dump-group)\"\n\tfi\n}\n\nfunction find_allValuesinCSVreport\n{\n    # The research is based on file CSV format generated by the report Robinhood method (--csv):\n    # one line per information; informations separeted by ','\n    # Search in the file logFile the given series (typeValue & countValue) in the column\n    # colSearch.\n    # return 0 if all is found, 0 otherwise\n    # \tfind_valueInCSVreport $logFile $typeValues $countValues $colSearch\n    # logFile = name of file to scan\n    # typeValues = list of words to extract the line. Each word must be separeted by ';'\n    # countValues = list of associated values (to typeValues) in the extracted line. Each word must be separeted by ';'\n    # colSearch =  column index to find the countValues (each column is separated by ',' in the file)\n\n    # get input parameters\n    logFile=$1\n    typeValues=$2\n    countValues=$3\n    colSearch=$4\n\n    # get typeValue and associated countvalue\n    splitTypes=$(echo $typeValues | tr \";\" \"\\n\")\n    tabTypes=\"\"\n    j=1\n    for entry in $splitTypes\n       do\n       \ttabTypes[$j]=$entry\n\t    j=$(($j+1))\n    done\n    iDataMax=$j\n\n    splitValues=$(echo $countValues | tr \";\" \"\\n\")\n    tabValues=\"\"\n    j=1\n    for entry in $splitValues\n       do\n       \ttabValues[$j]=$entry\n\t    j=$(($j+1))\n    done\n    if [ ${#tabValues[*]} != ${#tabTypes[*]} ]; then\n\t    echo \"Error: The given conditions have different length!!\"\n\t    return 1\n    fi\n    # treatement for each typeValue & countvalue\n    iData=1\n    #iDataMax=${#tabValues[*]}\n    #echo \"... 
length of conditions = $iDataMax\"\n    while (( $iData < $iDataMax ))\n    do\n\t    # get current typeValue & countvalue\n\t    typeValue=${tabTypes[$iData]}\n\t    countValue=${tabValues[$iData]}\n\n\t    find_valueInCSVreport $logFile \"$typeValue\" \"$countValue\" $colSearch\n\t    res=$?\n\t    if (( $res == 1 )); then\n\t\t    #error \"Test for $alertKey failed\"\n\t\t    iData=$iDataMax\n\t\t    return 1\n\t    fi\n\t    # go to next serie\n\t    iData=$(($iData+1))\n    done\n}\n\nfunction find_valueInCSVreport\n{\n    # The research is based on file CSV format generated by the report Robinhood method (--csv):\n    # one line per information; informations separated by ','\n    # Search in the same line the given words typeValue & countValue in the column\n    # colSearch in the file logFile.\n    # return 0 if all is found, 0 otherwise\n    # \tfind_valueInCSVreport $logFile $typeValues $countValues $colSearch\n    # logFile = name of file to scan\n    # typeValue = word to extract the line\n    # countValue = associated value to typeValue in the extracted line\n    # colSearch =  column index to find the countValue (each column is separated by ',')\n\n    # get input parameters\n    logFile=\"$1\"\n    typeValue=\"$2\"\n    countValue=\"$3\"\n    colSearch=\"$4\"\n    #echo '-------------------------------------'\n    #more $logFile\n    #echo \"colSearch=$colSearch\"\n    #echo '-------------------------------------'\n    # find line contains expected value type\n    line=$(grep \"$typeValue\" $logFile)\n    #echo $line\n    if (( ${#line} == 0 )); then\n\t    [ \"$DEBUG\" = \"1\" ] && echo \"=====> NOT found for $typeValue\" >&2\n\t    return 1\n    fi\n\n    # get found value count for this value type\n    foundCount=$(grep \"$typeValue\" $logFile | cut -d ',' -f $colSearch | tr -d ' ')\n    #echo \"foundCount=$foundCount**\"\n    if [[ \"$foundCount\" != \"$countValue\" ]]; then\n\t    [ \"$DEBUG\" = \"1\" ] && echo \"=====> NOT found for $typeValue : 
$countValue != $foundCount\" >&2\n\t    return 1\n    else\n\t    [ \"$DEBUG\" = \"1\" ] && echo \"=====> found for $typeValue (col $colSearch): $countValue \" >&2\n\t    return 0\n    fi\n}\n\nfunction report_generation2\n{\n\t# report many statistics for OST fileSystem\n\t# \treport_generation_2\n\n    if (( ($is_hsmlite == 0) && ($is_lhsm == 0) )); then\n\t\techo \"No report generation for this purpose: skipped\"\n\t\tset_skipped\n\t\treturn 1\n\tfi\n\n\tclean_logs\n\n\techo \"1-Create Pools ...\"\n\tcreate_pools\n\n\techo \"2-Create Files ...\"\n    for i in `seq 1 2`; do\n\t\t$LFS setstripe  -p lustre.$POOL1 $RH_ROOT/file.$i -c 1 >/dev/null 2>/dev/null\n\tdone\n\n    for i in `seq 3 4`; do\n\t\t$LFS setstripe  -p lustre.$POOL2 $RH_ROOT/file.$i -c 1 >/dev/null 2>/dev/null\n\tdone\n\n\tsleep 1\n\t$RH -f $RBH_CFG_DIR/common.conf --scan -l DEBUG -L rh_scan.log --once\n\n\n\techo \"Generate report...\"\n\t$REPORT -f $RBH_CFG_DIR/common.conf --dump-ost 1 >> report.out\n\n\tnbError=0\n\tnb_report=`grep \"$RH_ROOT/file.\" report.out | wc -l`\n\tif (( $nb_report != 2 )); then\n\t    error \"********** TEST FAILED (Log): $nb_report files purged, but 2 expected\"\n        ((nbError++))\n\tfi\n\n\tnb_report=`grep \"$RH_ROOT/file.3\" report.out | wc -l`\n\tif (( $nb_report != 1 )); then\n\t    error \"********** TEST FAILED (Log): No report for file.3\"\n        ((nbError++))\n\tfi\n\n\tnb_report=`grep \"$RH_ROOT/file.4\" report.out | wc -l`\n\tif (( $nb_report != 1 )); then\n\t    error \"********** TEST FAILED (Log): No report for file.4\"\n        ((nbError++))\n\tfi\n\n\tif (($nbError == 0 )); then\n        echo \"OK: test successful\"\n    else\n        error \"********** TEST FAILED **********\"\n    fi\n}\n\n###################################################################\n############### End report generation Functions 
###################\n###################################################################\n\n#######################################################\n############### Changelog functions ###################\n#######################################################\n\nfunction test_changelog\n{\n    config_file=$1\n\n    clean_logs\n\n\tif (( $no_log )); then\n            echo \"Changelogs not supported on this config: skipped\"\n            set_skipped\n            return 1\n    fi\n\n    # create a single file and do several operations on it\n    # This will generate a CREATE+CLOSE+CLOSE+SATTR records\n    echo \"1. Creating initial objects...\"\n    touch $RH_ROOT/file.1 || error \"touch file.1\"\n\ttouch $RH_ROOT/file.1 || error \"touch file.1\"\n\tchmod +x $RH_ROOT/file.1 || error \"chmod file.1\"\n\n    # Reading changelogs\n    echo \"2. Scanning ...\"\n   \t$RH -f $RBH_CFG_DIR/$config_file --readlog --once -l FULL -L rh_scan.log || error \"reading changelog\"\n\tgrep ChangeLog rh_scan.log\n\n    # check that the MARK, CLOSE and SATTR have been ignored, but\n    # CREAT was processed. Some versions of Lustre (2.1) do not issue\n    # a close, so we check whether all the close seen have been ignored.\n    echo \"3. 
Checking ignored records...\"\n    ignore_mark=$(grep -E \"Ignoring event MARK\" rh_scan.log | wc -l)\n    seen_mark=$(grep -E \"ChangeLog.*00MARK\" rh_scan.log | grep -v \"Incoming record\" | wc -l)\n    ignore_creat=$(grep -E \"Ignoring event CREAT\" rh_scan.log | wc -l)\n    seen_close=$(grep -E \"ChangeLog.*11CLOSE\" rh_scan.log | grep -v \"Incoming record\" | wc -l)\n    ignore_close=$(grep -E \"Ignoring event CLOSE\" rh_scan.log | grep -v \"Incoming record\" | wc -l)\n    ignore_sattr=$(grep -E \"Ignoring event SATTR\" rh_scan.log | grep -v \"Incoming record\" | wc -l)\n\n    (( $seen_mark == $ignore_mark ))  || error \"MARK record not ignored\"\n    (( $ignore_creat == 0 )) || error \"CREATE record ignored\"\n    (( $seen_close == $ignore_close )) || error \"CLOSE record not ignored\"\n    (( $ignore_sattr == 1 )) || error \"SATTR record not ignored\"\n}\n\nfunction test_changelog_cancel\n{\n    config_file=$1\n\n    clean_logs\n    :>/tmp/cl.dump\n\n    if (( $no_log )); then\n            echo \"Changelogs not supported on this config: skipped\"\n            set_skipped\n            return 1\n    fi\n\n    echo \"1. 
Create test cases...\"\n    # create multiple batchable operations (interleaved)\n    mkdir $RH_ROOT/dir.1\n    touch $RH_ROOT/file.1\n    touch $RH_ROOT/file.2\n    truncate -s 1024 $RH_ROOT/file.2\n    rm -f $RH_ROOT/file.2\n    rm -f $RH_ROOT/file.1\n    mknod $RH_ROOT/node p\n    rm -f $RH_ROOT/node\n    touch $RH_ROOT/dir.1/subfile\n    rm -f $RH_ROOT/dir.1/subfile\n    rmdir $RH_ROOT/dir.1\n\n    # these ones cannot be batched as there is a record in between\n    touch $RH_ROOT/file.3\n    mv $RH_ROOT/file.3 $RH_ROOT/file.4\n    rm -f $RH_ROOT/file.4\n    touch $RH_ROOT/file.5\n    ln $RH_ROOT/file.5 $RH_ROOT/file.6\n    rm -f $RH_ROOT/file.5 $RH_ROOT/file.6\n    if (( $is_lhsm != 0 )); then\n        echo 123 > $RH_ROOT/file.7\n        $LFS hsm_archive $RH_ROOT/file.7\n        wait_hsm_state $RH_ROOT/file.7 0x00000009\n        rm -f $RH_ROOT/file.7\n    fi\n\n    echo \"2. Read changelogs ...\"\n    $RH -f $RBH_CFG_DIR/$config_file --readlog --once -l DEBUG -L rh_scan.log || error \"reading changelog\"\n    grep EntryProc rh_scan.log | grep RECORD > /tmp/cl_pushed\n\n    echo \"3. 
Checking cancelled records...\"\n    # no record about file1, file2, subfile, dir1 should have been pushed\n    grep file.1 /tmp/cl_pushed && error \"No record about file.1 should have been pushed\"\n    grep file.2 /tmp/cl_pushed && error \"No record about file.2 should have been pushed\"\n    grep subfile /tmp/cl_pushed && error \"No record about subfile should have been pushed\"\n    grep dir.1 /tmp/cl_pushed && error \"No record about dir.1 should have been pushed\"\n    grep node /tmp/cl_pushed && error \"No record about node should have been pushed\"\n\n    # 4 create/rm peers should have been cancelled\n    cancelled=$(grep \"dropped log peer\" /tmp/cl.dump | wc -l)\n    (( $cancelled == 5 )) || error \"5 changelog peers should have been cancelled, $cancelled found\"\n\n    # records about other entries should have been pushed\n    grep -q file.3 /tmp/cl_pushed || error \"Record about file.3 should have been pushed\"\n    grep -q file.4 /tmp/cl_pushed || error \"Record about file.4 should have been pushed\"\n    grep -q file.5 /tmp/cl_pushed || error \"Record about file.5 should have been pushed\"\n    grep -q file.6 /tmp/cl_pushed || error \"Record about file.6 should have been pushed\"\n    if (( $is_lhsm != 0 )); then\n    \tgrep -q file.7 /tmp/cl_pushed || error \"Record about file.7 should have been pushed\"\n    fi\n\n    rm -f /tmp/cl_pushed /tmp/cl.dump\n}\n\n\n# wait for changelog_clear until a given timeout\n# return 0 if change_clear occurs before the timeout\n# return 1 else\nfunction wait_changelog_clear\n{\n    local log=$1\n    local timeout=$2\n    local i=0\n\n    # changelog_clear indicate the changelog processing is done\n    while [ $i -lt $timeout ]; do\n        grep llapi_changelog_clear $log && return 0\n        sleep 1\n        ((i++))\n    done\n    # timeout\n    return 1\n}\n\nfunction test_commit_update\n{\n    local config_file=$1\n\n    clean_logs\n\n    if (( $no_log )); then\n            echo \"Changelogs not supported on this 
config: skipped\"\n            set_skipped\n            return 1\n    fi\n\n    # fill the changelog with 15 records\n    # as the max_delta is 5, we should have about 3 updates\n    echo \"1. Creating initial objects...\"\n    mkdir $RH_ROOT/dir.{1..15}\n\n    # count changelogs\n    local nb_log=$($LFS changelog lustre-MDT0000 | wc -l)\n    echo \"$nb_log pending changelogs\"\n\n    # extra dirs depending on the mode\n    ((nb_log+=$extra_dir))\n\n    # read the log and check last commit is updated every n records\n    echo \"2. Reading changelogs...\"\n        $RH -f $RBH_CFG_DIR/$config_file --readlog --once -l FULL \\\n            -L rh_chglogs.log 2>/dev/null || error \"reading changelog\"\n\n    # count the number of updates of last commit\n    local commit_count=$(grep CL_LastCommit rh_chglogs.log | \\\n                         grep \"INSERT INTO\" | wc -l)\n\n    # expected: nb change_log/5 (+ 1)\n    ((expect=$nb_log/5+1))\n    if (($commit_count != $expect)) && (($commit_count != $expect + 1)); then\n        error \"Unexpected count of commit id update in DB ($commit_count vs. 
$expect (+1))\"\n    else\n        echo \"OK: commit id updated $commit_count times\"\n    fi\n\n    :>rh_chglogs.log\n    # now start in daemon mode (queue 1 changelog to init the last commit time)\n    mkdir $RH_ROOT/dir.16\n    $RH -f $RBH_CFG_DIR/$config_file --readlog -l FULL -L rh_chglogs.log \\\n        -p rh.pid -d 2>/dev/null\n\n    # changelog_clear indicate the changelog processing is done\n    wait_changelog_clear rh_chglogs.log 5 ||\n        error \"No changelog_clear after 5s\"\n\n    :>rh_chglogs.log\n    # wait for the timeout delay and check the commit id is updated\n    # when a new changelog is read\n    sleep 3\n    touch $RH_ROOT/dir.17\n\n    wait_changelog_clear rh_chglogs.log 10 ||\n        error \"No changelog_clear after 10s\"\n\n    # 1 update expected\n    commit_count=$(grep CL_LastCommit_ rh_chglogs.log | \\\n                   grep \"INSERT INTO\" | wc -l)\n\n    if (($commit_count != 1)); then\n        error \"Unexpected count of commit id update in DB ($commit_count vs. 
1)\"\n    else\n        echo \"OK: commit id updated\"\n    fi\n\n    kill_from_pidfile\n}\n\nfunction test_path_gc1\n{\n    local cfg=$RBH_CFG_DIR/$1\n\n    if [ -n \"$POSIX_MODE\" ]; then\n\t\techo \"Cannot fully determine id for POSIX\"\n\t\tset_skipped\n\t\treturn 1\n    fi\n\n    mkdir $RH_ROOT/dir.1\n    mkdir $RH_ROOT/dir.2\n    touch $RH_ROOT/dir.1/file.1\n\n    local fid=$(get_id $RH_ROOT/dir.1/file.1)\n\n    # make robinhood discover this file\n    $RH -f $cfg --scan --once -l DEBUG -L rh_scan.log 2>/dev/null ||\n        error \"scanning\"\n    check_db_error rh_scan.log\n\n    # entry path is known\n    $REPORT -f $cfg -e $fid --csv | grep \"^path,\" ||\n        error \"unknown path for $fid\"\n\n    # create a hardlink of it and run a partial scan\n    ln $RH_ROOT/dir.1/file.1 $RH_ROOT/dir.2/file.1 || error \"hardlink failed\"\n    $RH -f $cfg --scan=$RH_ROOT/dir.2 --once --no-gc -l DEBUG -L rh_scan.log \\\n        2>/dev/null || error \"scanning\"\n    check_db_error rh_scan.log\n\n    # the following request did fails without LIMIT 1\"\n    mysql $RH_DB -e \"select one_path(id) from  NAMES;\" ||\n        error \"one_path function fails\"\n\n    # query the DB to get all known entry paths\n    local paths=($(mysql $RH_DB -Bse \"SELECT this_path(parent_id,name) FROM NAMES WHERE id='$fid'\" | sort))\n\n    [[ ${paths[0]} == *\"dir.1/file.1\" ]] || error \"Missing path dir.1/file.1, found: ${paths[0]}\"\n    [[ ${paths[1]} == *\"dir.2/file.1\" ]] || error \"Missing path dir.2/file.1, found: ${paths[1]}\"\n\n    # partial GC is based on the timestamp of last path update\n    # this ensures the GC is not done the same second as entry discovery\n    # during first scan\n    sleep 1\n\n    # remove the first path and run a partial scan with GC\n    rm -f $RH_ROOT/dir.1/file.1\n    $RH -f $cfg --scan=$RH_ROOT/dir.1 --once -l DEBUG -L rh_scan.log \\\n        2>/dev/null || error \"scanning\"\n    check_db_error rh_scan.log\n\n    # a single path is expected 
now\n    local cnt=$(mysql $RH_DB -Bse \"SELECT count(*) FROM NAMES WHERE id='$fid'\")\n    [[ $cnt == 1 ]] || error \"unexpected path count: $cnt\"\n\n    # check this path\n    local path=$($REPORT -f $cfg -e $fid --csv | grep \"^path,\" | cut -d ',' -f 2 | tr -d \" \")\n    [[ $path == \"$RH_ROOT/dir.2/file.1\" ]] || error \"invalid remaining path $path\"\n}\n\nfunction test_path_gc2\n{\n    local cfg=$RBH_CFG_DIR/$1\n\n    mkdir $RH_ROOT/dir.1\n    touch $RH_ROOT/dir.1/file.1\n    touch $RH_ROOT/dir.1/file.2\n\n    # make robinhood discover this file\n    $RH -f $cfg --scan --once -l DEBUG -L rh_scan.log 2>/dev/null ||\n        error \"scanning\"\n    check_db_error rh_scan.log\n\n    # remove one file and rename directory\n    rm $RH_ROOT/dir.1/file.2\n    mv $RH_ROOT/dir.1 $RH_ROOT/dir.2\n\n    # make sure the moved entry is eligible for GC (path update < scan time)\n    sleep 1\n\n    # GC fails if stored function doesn't support multiple paths\n    $RH -f $cfg --scan --once -l DEBUG -L rh_scan.log 2>/dev/null ||\n            error \"scanning\"\n    check_db_error rh_scan.log\n}\n\nfunction test_scan_only\n{\n    local cfg=$RBH_CFG_DIR/$1\n\n    if [ -n \"$POSIX_MODE\" ]; then\n\t\techo \"Cannot fully determine id for POSIX\"\n\t\tset_skipped\n\t\treturn 1\n    fi\n\n    # filesystem contains multiple directories,\n    # but the config restricts the scan only to some of them\n\n    mkdir -p $RH_ROOT/dir.1/dir.{1..3}\n    mkdir -p $RH_ROOT/dir.2/dir.{1..3}\n    mkdir -p $RH_ROOT/dir.3/dir.{1..3}\n    touch  $RH_ROOT/dir.{1..3}/dir.{1..3}/file.{1..5}\n\n    export SCAN_ONLY1=$RH_ROOT/dir.1/dir.2\n    export SCAN_ONLY2=$RH_ROOT/dir.3\n\n    local SCAN_SET=($(find $SCAN_ONLY1) $(find $SCAN_ONLY2))\n\n    # scan all (initial scan)\n    $RH -f $cfg --scan --once -l DEBUG -L rh_scan.log 2>/dev/null ||\n        error \"scanning\"\n    check_db_error rh_scan.log\n\n    $REPORT -q -f $cfg --dump > rh_report.log\n    # check there are all expected entry from 
$SCAN_ONLY1 and $SCAN_ONLY2\n    for f in ${SCAN_SET[*]}; do\n        grep -q -e \" $f\\$\" rh_report.log || error \"Missing $f in robinhood DB\"\n    done\n    # only directories between root and scanned subdirs are also queried\n    # i.e. $RH_ROOT/dir.1\n    dir=$(grep -v \"$SCAN_ONLY1\" rh_report.log | grep -v \"$SCAN_ONLY2\" \\\n                | awk '{print $(NF)}')\n    [[ \"$dir\" == \"$RH_ROOT/dir.1\" ]] || error \"unexpected entries in dump: $dir\"\n\n    # GC needs 1s delay with previous scan\n    sleep 1\n    # scan again (causes GC)\n    $RH -f $cfg --scan --once -l DEBUG -L rh_scan.log 2>/dev/null ||\n        error \"scanning\"\n    check_db_error rh_scan.log\n\n    $REPORT -q -f $cfg --dump > rh_report.log\n    # check there are all expected entry from $SCAN_ONLY1 and $SCAN_ONLY2\n    for f in ${SCAN_SET[*]}; do\n        grep -q -e \" $f\\$\" rh_report.log || error \"Missing $f in robinhood DB\"\n    done\n    # only directories between root and scanned subdirs are also queried\n    dir=$(grep -v \"$SCAN_ONLY1\" rh_report.log | grep -v \"$SCAN_ONLY2\" \\\n                | awk '{print $(NF)}')\n    [[ \"$dir\" == \"$RH_ROOT/dir.1\" ]] || error \"unexpected entries in dump: $dir\"\n\n}\n\n\n###########################################################\n############### End changelog functions ###################\n###########################################################\n\n##############################################################\n############### Other Parameters Functions ###################\n##############################################################\n\nfunction TEST_OTHER_PARAMETERS_1\n{\n\t# Test for many parameters\n\t# \tTEST_OTHER_PARAMETERS_1 config_file\n\t#=>\n\t# config_file == config file name\n\n\tconfig_file=$1\n\n\tclean_logs\n\n\techo \"Create Files ...\"\n    for i in `seq 1 10` ; do\n    \tdd if=/dev/zero of=$RH_ROOT/file.$i bs=1K count=1 >/dev/null 2>/dev/null || error \"writing file.$i\"\n\t    setfattr -n user.foo -v $i 
$RH_ROOT/file.$i\n\tdone\n\n\techo \"Scan Filesystem\"\n\tsleep 1\n\t$RH -f $RBH_CFG_DIR/$config_file --scan -l DEBUG -L rh_scan.log --once\n\n\techo \"Report : --dump --filter-class test_purge\"\n\t$REPORT -f $RBH_CFG_DIR/$config_file --dump --filter-class test_purge > report.out\n\n    [ \"$DEBUG\" = \"1\" ] && cat report.out\n\tnbError=0\n    # match classes is \"no\"\n\tnb_entries=`grep \"0 entries\" report.out | wc -l`\n\tif (( $nb_entries != 1 )); then\n\t    error \"********** TEST FAILED (Log): not found line \\\" $nb_entries \\\" \"\n        ((nbError++))\n\tfi\n\n\n\t# use robinhood for flushing\n\tif (( ($is_hsmlite == 0 && $is_lhsm == 1 && $shook == 0) || ($is_hsmlite == 1 && $is_lhsm == 0 && $shook == 1) )); then\n\t\techo \"Archiving files\"\n\t\t$RH -f $RBH_CFG_DIR/$config_file $SYNC_OPT -l DEBUG  -L rh_migr.log || error \"executing Archiving files\"\n\tfi\n\n\tif (( $is_hsmlite == 0 || $shook != 0 || $is_lhsm != 0 )); then\n\t    echo \"Reading changelogs and Applying purge policy...\"\n\t    $RH -f $RBH_CFG_DIR/$config_file --scan --run=purge -l DEBUG -L rh_purge.log --once  &\n\n\t    sleep 1\n\n\t    echo \"wait robinhood\"\n\t    wait\n\n\t    nb_purge=`grep \"$REL_STR\" rh_purge.log | wc -l`\n\t    if (( $nb_purge != 10 )); then\n\t        error \"********** TEST FAILED (Log): $nb_purge files purged, but 10 expected\"\n            ((nbError++))\n\t    fi\n    else #backup mod\n\t    echo \"Launch Migration in background\"\n\t    $RH -f $RBH_CFG_DIR/$config_file --scan --run=migration --target=all -l DEBUG -L rh_migr.log --once &\n\n\t    sleep 1\n\n\t    echo \"wait robinhood\"\n\t    wait\n\n        count=`find $BKROOT -type f  -not -name \"*.lov\" | wc -l`\n        if (($count != 10)); then\n            error \"********** TEST FAILED (File System): $count files migrated, but 10 expected\"\n            ((nbError++))\n        fi\n    fi\n\n\tif (($nbError == 0 )); then\n        echo \"OK: test successful\"\n    else\n        error \"********** 
TEST FAILED **********\"\n    fi\n}\n\nfunction get_nb_stat\n{\n    grep \"STATS\" $1 | grep \"Dumping stats at\" | wc -l\n}\n\nfunction TEST_OTHER_PARAMETERS_2\n{\n    # Test for many parameters\n    #     TEST_OTHER_PARAMETERS_2 config_file\n    #=>\n    # config_file == config file name\n\n    config_file=$1\n\n    if (( ($is_hsmlite == 0) && ($is_lhsm == 0) )); then\n        echo \"No TEST_OTHER_PARAMETERS_2 for this purpose: skipped\"\n        set_skipped\n        return 1\n    fi\n\n    clean_logs\n\n    echo \"Create Files ...\"\n    for i in `seq 1 5` ; do\n        dd if=/dev/zero of=$RH_ROOT/file.$i bs=10M count=1 >/dev/null \\\n            2>/dev/null || error \"writing file.$i\"\n    done\n    for i in `seq 6 10` ; do\n        touch $RH_ROOT/file.$i\n    done\n\n    sleep 1\n    $RH -f $RBH_CFG_DIR/$config_file --scan -l DEBUG -L rh_scan.log --once \\\n        2>/dev/null\n\n    echo \"Migrate files\"\n    $RH -f $RBH_CFG_DIR/$config_file --run=migration -l DEBUG -L rh_migr.log \\\n        2>/dev/null &\n    pid=$!\n\n    t0=$(date +%s.%N)\n    if (( $is_lhsm > 0 )); then\n        sleep 2\n        wait_done 60\n    else\n        sleep 5\n    fi\n\n    nbError=0\n    count=`find $BKROOT -type f -not -name \"*.lov\" | wc -l`\n    if (( $count != 10 )); then\n        error \"********** TEST FAILED (File System): $count files migrated,\"\\\n              \"but 10 expected\"\n        ((nbError++))\n    fi\n\n    # Migration dans fs\n    countMigrLog=`grep \"$ARCH_STR\" rh_migr.log | wc -l`\n    if (( $countMigrLog != 10 )); then\n        error \"********** TEST FAILED (Log): $countMigrLog files migrated,\"\\\n              \"but 10 expected\"\n        ((nbError++))\n    fi\n    t1=$(date +%s.%N)\n\n    # count the number of \"STATS\" dump\n    nb_Stats=$(get_nb_stat rh_migr.log)\n\n    local stime=$(echo \"5.5-($t1-$t0)\"| bc -l)\n    echo \"Sleep $stime seconds\"\n    sleep $stime\n\n    # count the number of \"STATS\" dump\n    nb_Stats2=$(get_nb_stat 
rh_migr.log)\n    if (( $nb_Stats2 != $nb_Stats + 1 )); then\n        error \"********** TEST FAILED (Stats): $nb_Stats2 \\\"STATS\\\" detected,\"\\\n              \"but $nb_Stats + 1 \\\"STATS\\\" expected\"\n        ((nbError++))\n    fi\n\n    count=`find $BKROOT -type f  -not -name \"*.lov\" | wc -l`\n    if (( $count != 10 )); then\n        error \"********** TEST FAILED (File System): $count files migrated, \"\\\n              \"but 10 expected\"\n        ((nbError++))\n    fi\n\n    if (($nbError == 0 )); then\n        echo \"OK: test successful\"\n    else\n        error \"********** TEST FAILED **********\"\n    fi\n\n    kill -9 $pid\n}\n\nfunction TEST_OTHER_PARAMETERS_3\n{\n\t# Test for many parameters\n\t# \tTEST_OTHER_PARAMETERS_3 config_file\n\t#=>\n\t# config_file == config file name\n\n\tconfig_file=$1\n\n    if (( ($is_hsmlite == 0) && ($is_lhsm == 0) )); then\n\t\techo \"No TEST_OTHER_PARAMETERS_3 for this purpose: skipped\"\n\t\tset_skipped\n\t\treturn 1\n\tfi\n\n\tclean_logs\n\n\techo \"Create Files ...\"\n    for i in `seq 1 5` ; do\n    \tdd if=/dev/zero of=$RH_ROOT/file.$i bs=1K count=1 >/dev/null \\\n            2>/dev/null || error \"writing file.$i\"\n\tdone\n\n\techo \"Archives files\"\n\t$RH -f $RBH_CFG_DIR/$config_file --scan --run=migration --target=all \\\n        --once -l DEBUG -L rh_migr.log 2>/dev/null\n    (( $is_lhsm > 0 )) && wait_done 60\n\n\tnbError=0\n\tcount=`find $BKROOT -type f  -not -name \"*.lov\" | wc -l`\n    if (( $count != 5 )); then\n        error \"********** TEST FAILED (File System): $count files migrated,\"\\\n              \"but 5 expected\"\n        ((nbError++))\n    fi\n\n    local rmd=()\n    for i in `seq 1 5` ; do\n        local f=$RH_ROOT/file.$i\n        (( $is_lhsm > 0 )) && f=$($LFS path2fid $f | tr -d '[]')\n    \trm -f $RH_ROOT/file.$i && rmd+=($f)\n\tdone\n\n\t$RH -f $RBH_CFG_DIR/$config_file --scan -l DEBUG -L rh_scan.log \\\n        --once 2>/dev/null\n\n    # wait rm_time + 1\n\techo \"sleep 6 
seconds\"\n\tsleep 6\n\n\techo \"HSM Remove\"\n\t$RH -f $RBH_CFG_DIR/$config_file --run=hsm_remove -l DEBUG \\\n        -L rh_purge.log 2>/dev/null &\n\tpid=$!\n\n    # make sure hsm remove pass finished\n\tsleep 2\n\n\tnb_Remove=`grep \"$HSMRM_STR\" rh_purge.log | wc -l`\n\tif (( $nb_Remove != 4 )); then\n        error \"********** TEST FAILED (LOG): $nb_Remove remove detected,\"\\\n              \"but 4 expected\"\n        ((nbError++))\n    fi\n\n    # 1 file out of 5 must remain in the backend\n    local countRemainFile=0\n\tfor i in ${rmd[*]}; do\n        bi=$(basename $i)\n        if (( $is_lhsm > 0 )); then # <fid>\n            [ \"$DEBUG\" = \"1\" ] && find $BKROOT -type f -name \"${bi}\"\n\t        local found=`find $BKROOT -type f -name \"${bi}\" | wc -l`\n        else # <name>__<fid>\n            [ \"$DEBUG\" = \"1\" ] && find $BKROOT -type f -name \"${bi}__*\"\n\t        local found=`find $BKROOT -type f -name \"${bi}__*\" | wc -l`\n        fi\n        (( $found != 0 )) && echo \"$i remains in backend\"\n        ((countRemainFile+=$found))\n\tdone\n    if (($countRemainFile != 1)); then\n        error \"********** TEST FAILED (File System): Wrong count of \"\\\n              \"remaining files: $countRemainFile (1 expected)\"\n        ((nbError++))\n    fi\n\n    # wait check_interval +1\n\techo \"sleep 11 seconds\"\n\tsleep 11\n\n\tnb_Remove=`grep \"$HSMRM_STR\" rh_purge.log | wc -l`\n\tif (( $nb_Remove != 5 )); then\n        error \"********** TEST FAILED (LOG): $nb_Remove remove detected,\"\\\n              \"but 5 expected\"\n        ((nbError++))\n    fi\n\n    # no file must remain in the backend\n    countRemainFile=0\n\tfor i in ${rmd[*]}; do\n        bi=$(basename $i)\n        [ \"$DEBUG\" = \"1\" ] && find $BKROOT -type f -name \"${bi}__*\"\n\t    local found=`find $BKROOT -type f -name \"${bi}__*\" | wc -l`\n        (( $found != 0 )) && echo \"$i remains in backend\"\n        ((countRemainFile+=$found))\n\tdone\n    if (($countRemainFile != 
0)); then\n        error \"********** TEST FAILED (File System): Wrong count of \"\\\n              \"remaining files: $countRemainFile (0 expected)\"\n        ((nbError++))\n    fi\n\n\tif (($nbError == 0 )); then\n        echo \"OK: test successful\"\n    else\n        error \"********** TEST FAILED **********\"\n    fi\n\n    kill -9 $pid\n}\n\nfunction dismount_backend\n{\n    # test initial condition: backend must not be mounted\n    umount $BKROOT || fuser -k $BKROOT\n    grep -q $BKROOT /etc/mtab &&\n\t    umount $BKROOT || umount -f $BKROOT\n}\n\nfunction TEST_OTHER_PARAMETERS_4\n{\n    # Test for many parameters\n    #     TEST_OTHER_PARAMETERS_4 config_file\n    #=>\n    # config_file == config file name\n\n    config_file=$1\n\n    if (( $is_hsmlite == 0 )); then\n        echo \"No TEST_OTHER_PARAMETERS_4 for this purpose: skipped\"\n        set_skipped\n        return 1\n    fi\n\n    clean_logs\n\n    # test initial condition: backend must not be mounted\n    dismount_backend\n\n    echo \"Create Files ...\"\n    for i in `seq 1 11` ; do\n        dd if=/dev/zero of=$RH_ROOT/file.$i bs=1M count=10 >/dev/null 2>/dev/null || error \"writing file.$i\"\n    done\n\n    echo \"Migrate files (must fail)\"\n    $RH -f $RBH_CFG_DIR/$config_file --scan --run=migration --target=all --once -l DEBUG -L rh_migr.log\n    (( $is_lhsm > 0 )) && wait_done 60\n\n    nbError=0\n    count=`find $BKROOT -type f  -not -name \"*.lov\" | wc -l`\n    if (( $count != 0 )); then\n        error \"********** TEST FAILED (File System): $count files migrated, but 0 expected\"\n        ((nbError++))\n    elif grep \"Failed to initialize status manager $STATUS_MGR\" rh_migr.log > /dev/null; then\n        echo \"OK: backend not initialized\"\n    else\n        error \"Backend initialization SHOULD have FAILED\"\n    fi\n    :> rh_migr.log\n\n    ensure_init_backend || error \"Error initializing backend $BKROOT\"\n\n    echo \"Migrate files (once)\"\n    $RH -f $RBH_CFG_DIR/$config_file 
--scan -l DEBUG -L rh_scan.log --once\n    $RH -f $RBH_CFG_DIR/$config_file --run=\"migration(target=all)\" -l DEBUG -L rh_migr.log\n\n    nbError=0\n    count=`find $BKROOT -type f  -not -name \"*.lov\" | wc -l`\n    if (( $count != 0 )); then\n        error \"********** TEST FAILED (File System): $count files migrated, but 0 expected\"\n        ((nbError++))\n    fi\n\n    echo \"Migrate files (daemon)\"\n    $RH -f $RBH_CFG_DIR/$config_file --run=migration -l DEBUG -L rh_migr.log &\n    pid=$!\n\n    # wait for runtime_interval\n    echo \"sleep 11 seconds\"\n    sleep 11\n        (( $is_lhsm > 0 )) && wait_done 60\n\n    nbError=0\n    count=`find $BKROOT -type f  -not -name \"*.lov\" | wc -l`\n    if (( $count != 10 )); then\n        error \"********** TEST FAILED (File System): $count files migrated, but 10 expected\"\n        ((nbError++))\n    fi\n\n    if (($nbError == 0 )); then\n        echo \"OK: test successful\"\n    else\n        error \"********** TEST FAILED **********\"\n    fi\n\n    kill -9 $pid\n    rm -rf $BKROOT/*\n    dismount_backend\n}\n\nfunction assert_nb_scan\n{\n    local log=$1\n    local expect=$2\n\n    grep -E \"Starting scan|Full scan of\" $log >&2\n\n    local nb_scan=`grep \"Full scan of\" $log | wc -l`\n    if (( $nb_scan != $expect )); then\n        error \"********** TEST FAILED (LOG): $nb_scan scan detected,\"\\\n              \"but $expect expected\"\n        return 1\n    else\n        echo \"OK: $nb_scan scan started\"\n    fi\n    return 0\n}\n\nfunction get_scan_interval\n{\n    local log=$1\n    local pid=$2\n    local timeout=$3\n    local interv=\"\"\n\n    local t=0\n    while  [ -z \"$interv\" ]; do\n        # make robinhood dump current scan interval in its log\n        kill -USR1 $pid\n        sleep 1\n        interv=$(grep \"scan interval\" $log | grep STATS | awk '{print $(NF)}' |\n                 sed -e \"s/s$//\" | sed -e \"s/^0\\([^0]\\)/\\1/g\")\n\n        ((t++))\n        if (( $t > $timeout )); then\n    
        interv=\"TIMEOUT\"\n            break\n        fi\n    done\n    echo \"current scan interval: $interv sec\" >&2\n    echo $interv\n}\n\nfunction TEST_OTHER_PARAMETERS_5\n{\n    # Test for many parameters\n    #     TEST_OTHER_PARAMETERS_5 config_file\n    #=>\n    # config_file == config file name\n\n    config_file=$1\n\n    if (( ($shook + $is_lhsm) == 0 )); then\n        echo \"No TEST_OTHER_PARAMETERS_5 for this purpose: skipped\"\n        set_skipped\n        return 1\n    fi\n\n    clean_logs\n    # make sure the initial scan interval in based on empty FS\n    wait_stable_df\n\n    echo \"Launch scan in background...\"\n    $RH -f $RBH_CFG_DIR/$config_file --scan --check-thresholds=purge -l DEBUG \\\n        -L rh_scan.log 2>/dev/null &\n    local pid=$!\n\n    # wait for scan to actually start\n    sleep 1\n\n    nbError=0\n    assert_nb_scan rh_scan.log 1 || ((nbError++))\n\n    # make robinhood dump current scan interval in its log\n    local interv=$(get_scan_interval rh_scan.log $pid 30)\n    ((interv++))\n\n    echo \"sleeping $interv seconds\"\n    sleep $interv\n\n    # terminate the process and flush its log\n    kill $pid\n    sleep 1\n    # check there was a second scan\n    assert_nb_scan rh_scan.log 2 || ((nbError++))\n\n    kill -9 $pid 2>/dev/null\n\n    # create files to fullfill the FS\n    echo \"Create files\"\n    elem=`$LFS df $RH_ROOT | grep \"filesystem summary\" | awk '{ print $6 }' | sed 's/%//'`\n    limit=50\n    indice=1\n    while (( $elem < $limit ))\n    do\n        dd if=/dev/zero of=$RH_ROOT/file.$indice bs=10M count=1 >/dev/null 2>/dev/null\n        if (( $? 
!= 0 )); then\n            echo \"WARNING: fail writing file.$indice (usage $elem/$limit)\"\n            # give it a chance to end the loop\n            ((limit=$limit-1))\n        fi\n        unset elem\n        elem=`$LFS df $RH_ROOT | grep \"filesystem summary\" | awk '{ print $6 }' | sed 's/%//'`\n        ((indice++))\n    done\n\n    :> rh_scan.log\n\n    echo \"Launch scan in background...\"\n    $RH -f $RBH_CFG_DIR/$config_file --scan --check-thresholds=purge -l DEBUG \\\n        -L rh_scan.log 2>/dev/null &\n    pid=$!\n\n    sleep 2\n    local interv2=$(get_scan_interval rh_scan.log $pid 30)\n    ((interv2++))\n\n    # interv2 is expected to be smaller than interv\n    # as the FS is more full\n    (( $interv2 <= $interv )) || error \"2nd scan interval should be smaller\"\n\n    echo \"sleep $interv2 seconds\"\n    sleep $interv2\n\n    # should start 2 scans (1 initial + 1 after 3sec)\n    assert_nb_scan rh_scan.log 2 || ((nbError++))\n\n    if (($nbError == 0 )); then\n        echo \"OK: test successful\"\n    else\n        error \"********** TEST FAILED **********\"\n    fi\n\n    kill -9 $pid\n}\n\n##################################################################\n############### End Other Parameters Functions ###################\n##################################################################\n\n# clear summary\ncp /dev/null $SUMMARY\n\n#init xml report\nif (( $junit )); then\n\tjunit_init\n\ttinit=`date \"+%s.%N\"`\nfi\n\n######### TEST FAMILIES ########\n# 1xx - collecting info and database\n# 2xx - policy matching\n# 3xx - triggers\n# 4xx - reporting\n# 5xx - internals, misc.\n# 6xx - Tests by Sogeti\n################################\n\n##### info collect. 
+ DB tests #####\n\nrun_test 100\ttest_info_collect info_collect.conf 1 1 \"escape string in SQL requests\"\nrun_test 101a    test_info_collect2  info_collect2.conf  1 \"scan x3\"\nrun_test 101b \ttest_info_collect2  info_collect2.conf\t2 \"readlog/scan x2\"\nrun_test 101c \ttest_info_collect2  info_collect2.conf\t3 \"readlog x2 / scan x2\"\nrun_test 101d \ttest_info_collect2  info_collect2.conf\t4 \"scan x2 / readlog x2\"\nrun_test 101e \ttest_info_collect2  info_collect2.conf\t5 \"diff+apply x2\"\nrun_test 102\tupdate_test test_updt.conf 3 14 \"db update policy\"\nrun_test 103a    test_acct_table common.conf 5 \"\" \"Acct table and triggers creation (default)\"\nrun_test 103b    test_acct_table acct.conf 5 \"yes\" \"Acct table and triggers creation (accounting ON)\"\nrun_test 103c    test_acct_table acct.conf 5 \"no\" \"Acct table and triggers creation (accounting OFF)\"\nrun_test 104     test_size_updt test_updt.conf 1 \"test size update\"\nrun_test 105     test_enoent test_pipeline.conf \"readlog with continuous create/unlink\"\nrun_test 106a    test_diff info_collect2.conf \"diff\" \"rbh-diff\"\nrun_test 106b    test_diff info_collect2.conf \"diffapply\" \"rbh-diff --apply\"\nrun_test 106c    test_diff info_collect2.conf \"scan\" \"robinhood --scan --diff\"\nrun_test 106d    test_diff info_collect2.conf \"partdiff\" \"rbh-diff --scan=subdir\"\nrun_test 107a    test_completion test_completion.conf OK        \"scan completion command\"\nrun_test 107b    test_completion test_completion.conf unmatched \"wrong completion command (syntax error)\"\nrun_test 107c    test_completion test_completion.conf invalid_ctx_id \"wrong completion command (using id)\"\nrun_test 107d    test_completion test_completion.conf invalid_ctx_attr \"wrong completion command (using attr)\"\nrun_test 107e    test_completion test_completion.conf invalid_attr \"wrong completion command (unknown var)\"\nrun_test 108a    test_rename info_collect.conf scan \"rename cases (scan)\"\nrun_test 108b  
  test_rename info_collect.conf readlog \"rename cases (readlog)\"\nrun_test 108c    test_rename info_collect.conf partial \"rename cases (partial scans)\"\nrun_test 108d    test_rename info_collect.conf diff \"rename cases (diff+apply)\"\nrun_test 108e    test_rename info_collect.conf partdiff \"rename cases (partial diffs+apply)\"\nrun_test 109a    test_hardlinks info_collect.conf scan \"hardlinks management (scan)\"\nrun_test 109b    test_hardlinks info_collect.conf readlog \"hardlinks management (readlog)\"\nrun_test 109c    test_hardlinks info_collect.conf partial \"hardlinks management (partial scans)\"\nrun_test 109d    test_hardlinks info_collect.conf diff \"hardlinks management (diff+apply)\"\nrun_test 109e    test_hardlinks info_collect.conf partdiff \"hardlinks management (partial diffs+apply)\"\nrun_test 110     test_unlink info_collect.conf \"unlink (readlog)\"\nrun_test 111     test_layout info_collect.conf \"layout changes\"\nrun_test 112     test_hl_count info_collect.conf \"reports with hardlinks\"\nrun_test 113     test_diff_apply_fs info_collect2.conf  \"diff\"  \"rbh-diff --apply=fs\"\nrun_test 114     test_root_changelog info_collect.conf \"changelog record on root entry\"\nrun_test 115     partial_paths info_collect.conf \"test behavior when handling partial paths\"\nrun_test 116     test_mnt_point test_mnt_point.conf \"test with mount point != fs_path\"\n\nfunction runtest_117\n{\n    cfg=common.conf\n    char=a\n    for flavor in scan scandiff1 scandiff2 scandiff3 diff1 diff2 diff3 cl cldiff1 cldiff2 cldiff3; do\n        run_test 117$char stripe_update $cfg $flavor \"stripe information update (flavor=$flavor)\"\n        # increment char\n        char=$(echo $char | tr \"a-y\" \"b-z\")\n    done\n}\nruntest_117\n\nfunction runtest_118\n{\n    cfg=common.conf\n    char=a\n    for flavor in diffna1 diffna2 diffna3; do\n        run_test 118$char stripe_no_update $cfg $flavor \"stripe information => update (flavor=$flavor)\"\n        # increment 
char\n        char=$(echo $char | tr \"a-y\" \"b-z\")\n    done\n}\nruntest_118\n\nrun_test 119 uid_gid_as_numbers uidgidnum.conf \"Store UIDs and GIDs as numbers\"\nrun_test 120 posix_acmtime common.conf \"Test for posix ctimes\"\nrun_test 121 db_schema_convert \"\" \"Test DB schema conversion\"\nrun_test 122 random_names common.conf \"Test random file names\"\nrun_test 123 test_acct_borderline acct.conf \"yes\" \"Test borderline ACCT cases\"\nrun_test 124 test_commit_update commit_update.conf \"Update of last committed changelog\"\nrun_test 125a test_path_gc1 test_rm1.conf \"Test namespace garbage collection with partial scans\"\nrun_test 125b test_path_gc2 test_rm1.conf \"Test namespace garbage collection after rename\"\nrun_test 126  test_scan_only test_scan_only.conf \"Scan on a subset of directories\"\n\n#### policy matching tests  ####\n\nrun_test 200\tpath_test test_path.conf 2 \"path matching policies\"\nrun_test 201\tmigration_test test1.conf 11 6 \"last_mod>5s\"\nrun_test 202\tmigration_test test2.conf 5  6 \"last_mod>5s and name == \\\"*[0-5]\\\"\"\nrun_test 203\tmigration_test test3.conf 5  6 \"complex policy with filesets\"\n# test fileclasses that trigger later\nrun_test 204\tmigration_test test3.conf 10 11 \"complex policy with filesets\"\nrun_test 205\txattr_test test_xattr.conf 2 \"xattr-based fileclass definition\"\nrun_test 206\tpurge_test test_purge.conf 11 16 \"last_access > 15s\"\nrun_test 207\tpurge_size_filesets test_purge2.conf 2 3 \"purge policies using size-based filesets\"\nrun_test 208a\tperiodic_class_match_migr test_updt.conf 10 \"fileclass matching 1 (migration)\"\nrun_test 208b\tpolicy_check_migr test_check_migr.conf 10 \"fileclass matching 2 (migration)\"\nrun_test 209a\tperiodic_class_match_purge test_updt.conf 10 \"fileclass matching 1 (purge)\"\nrun_test 209b\tpolicy_check_purge test_check_purge.conf 10 \"fileclass matching 2 (purge)\"\nrun_test 210\tfileclass_test test_fileclass.conf 2 \"complex policies with unions and 
intersections of filesets\"\nrun_test 211\ttest_pools test_pools.conf 1 \"class matching with condition on pools\"\nrun_test 212a\tlink_unlink_remove_test test_rm1.conf 1 11 \"deferred hsm_remove\"\nrun_test 212b   test_hsm_remove         test_rm1.conf 2 11 \"deciding softrm for removed entries\"\nrun_test 212c   test_lhsm_remove        test_rm1.conf 4 11 \"test archive_id parameter for lhsm_remove\"\nrun_test 213\tmigration_test_single test1.conf 11 6 \"simple migration policy\"\nrun_test 214a  check_disabled  common.conf  purge      \"no purge if not defined in config\"\nrun_test 214b  check_disabled  common.conf  migration  \"no migration if not defined in config\"\nrun_test 214c  check_disabled  common.conf  rmdir      \"no rmdir if not defined in config\"\nrun_test 214d  check_disabled  common.conf  hsm_remove \"hsm_rm is enabled by default\"\nrun_test 214e  check_disabled  common_noclass.conf  class      \"no class matching if none defined in config\"\nrun_test 215\tmass_softrm    test_rm1.conf 11 1000    \"rm are detected between 2 scans\"\nrun_test 216   test_maint_mode test_maintenance.conf 30 45 5 \"pre-maintenance mode\"\nrun_test 217\tmigrate_symlink test1.conf 6 \t\t\"symlink migration\"\nrun_test 218\ttest_rmdir \trmdir.conf 6 \t\t\"rmdir policies\"\nrun_test 219    test_rmdir_mix RemovingDir_Mixed.conf 11 \"mixed rmdir policies\"\n# test sort order by last_archive, last_mod, creation\n# check order of application\n# check request splitting, optimizations, ...\nrun_test 220a test_lru_policy lru_sort_creation.conf \"\" \"0 1 2 3\" 20 \"lru sort on creation\"\nrun_test 220b test_lru_policy lru_sort_mod.conf \"\" \"0 1 5 8 9\" 10 \"lru sort on last_mod\"\nrun_test 220c test_lru_policy lru_sort_mod_2pass.conf \"\" \"0 1 2 3 4 5 6 7 8 9\" 30 \"lru sort on last_mod in 2 pass\"\nrun_test 220d test_lru_policy lru_sort_access.conf \"\" \"0 2 3 6 8 9\" 20 \"lru sort on last_access\"\nrun_test 220e test_lru_policy lru_sort_archive.conf \"0 1 2 3 4 5 6 7 8 9\" 
\"\" 15 \"lru sort on last_archive\"\nrun_test 220f test_lru_policy lru_sort_creat_last_arch.conf \"0 1 2 3\" \"4 5 6 7 8 9\" 10 \"lru sort on creation and last_archive==0\"\nrun_test 220g test_lru_policy lru_sort_size_desc.conf \"3 4 8 9\" \"1 2 6 7\" 10 \"lru sort on size\"\nrun_test 220h test_lru_policy lru_sort_size_asc.conf \"1 2 6 7\" \"3 4 8 9\" 10 \"lru sort on size\"\nrun_test 221  test_suspend_on_error migr_fail.conf  2 \"suspend migration if too many errors\"\nrun_test 222  test_custom_purge test_custom_purge.conf 2 \"custom purge command\"\nrun_test 223  test_default test_default_case.conf \"ignore entries if no default case is specified\"\nrun_test 224  test_undelete test_rm1.conf   \"undelete\"\nrun_test 225  test_compress compress.conf \"compressed archived files\"\nrun_test 226a  test_purge_lru lru_purge.conf last_access \"test purge order (lru_sort_attr=last_access)\"\nrun_test 226b  test_purge_lru lru_purge.conf none \"test purge order (lru_sort_attr=none)\"\nrun_test 227  test_action_params test_action_params.conf \"custom policy actions and parameters\"\nrun_test 228a  test_manual_run test_run.conf 5 run \"test manual policy runs (run)\"\nrun_test 228b  test_manual_run test_run.conf 5 run_all \"test manual policy runs (run all)\"\nrun_test 228c  test_manual_run test_run.conf 5 run_migr \"test manual policy runs (run migr.)\"\nrun_test 228d  test_manual_run test_run.conf 5 run_migr_tgt \"test manual policy runs (run migr with target)\"\nrun_test 228e  test_manual_run test_run.conf 5 run_migr_usage \"test manual policy runs (run migr with target usage)\"\nrun_test 228f  test_manual_run test_run.conf 5 run_both \"test manual policy runs (run migr and purge with targets)\"\nrun_test 229a  test_limits test_limits.conf trig_cnt \"test trigger limit on count\"\nrun_test 229b  test_limits test_limits.conf trig_vol \"test trigger limit on volume\"\nrun_test 229c  test_limits test_limits.conf param_cnt \"test parameter limit on count\"\nrun_test 229d  
test_limits test_limits.conf param_vol \"test parameter limit on volume\"\nrun_test 229e  test_limits test_limits.conf run_cnt \"test run limit on count\"\nrun_test 229f  test_limits test_limits.conf run_vol \"test run limit on volume\"\nrun_test 229g  test_limits test_limits.conf trig_param \"test limit on both trigger and param\"\nrun_test 229h  test_limits test_limits.conf trig_run \"test limit on both trigger and run\"\nrun_test 229i  test_limits test_limits.conf param_run \"test limit on both param and run\"\nrun_test 230   test_checker test_checker.conf \"policies based on 'checker' module\"\nrun_test 231   test_action_check OtherParameters_4.conf \"check status of current actions\"\nrun_test 232a  test_sched_limits test_sched1.conf sched_max_cnt \"check max count enforced by scheduler\"\nrun_test 232b  test_sched_limits test_sched1.conf sched_max_vol \"check max vol enforced by scheduler\"\nrun_test 232c  test_sched_limits test_sched1.conf trigger \"check trigger vs. max_per_run scheduler\"\nrun_test 232d  test_sched_limits test_sched1.conf param \"check policy parameter vs. max_per_run scheduler\"\nrun_test 232e  test_sched_limits test_sched1.conf cmd \"check cmd line vs. 
max_per_run scheduler\"\nrun_test 233   test_basic_sm     test_basic.conf  \"Test basic status manager\"\nrun_test 234   test_modeguard_sm_dir test_modeguard_dir.conf \"Test modeguard status manager with directories\"\nrun_test 235   test_modeguard_sm_file test_modeguard_file.conf \"Test modeguard status manager with files\"\nrun_test 236a  test_prepost_sched test_prepost_sched.conf none none \\\n    common.max_per_run \"pre/post_sched_match=none\"\nrun_test 236b  test_prepost_sched test_prepost_sched.conf cache_only none \\\n    common.max_per_run \"pre_sched_match=cache_only\"\nrun_test 236c  test_prepost_sched test_prepost_sched.conf auto_update_attrs none \\\n    common.max_per_run \"pre_sched_match=auto_update_attrs\"\nrun_test 236d  test_prepost_sched test_prepost_sched.conf auto_update_attrs none \\\n    \"\" \"pre_sched_match=auto_update_attrs (no scheduler)\"\nrun_test 236e  test_prepost_sched test_prepost_sched.conf auto_update_all none \\\n    common.max_per_run \"pre_sched_match=auto_update_all\"\nrun_test 236f  test_prepost_sched test_prepost_sched.conf force_update none \\\n    common.max_per_run \"pre_sched_match=force_update\"\nrun_test 236g  test_prepost_sched test_prepost_sched.conf none cache_only \\\n    common.max_per_run \"post_sched_match=cache_only\"\nrun_test 236h  test_prepost_sched test_prepost_sched.conf none auto_update_attrs \\\n    common.max_per_run \"post_sched_match=auto_update_attrs\"\nrun_test 236i  test_prepost_sched test_prepost_sched.conf none auto_update_attrs \\\n    \"\" \"post_sched_match=auto_update_attrs (no scheduler)\"\nrun_test 236j  test_prepost_sched test_prepost_sched.conf none auto_update_all \\\n    common.max_per_run \"post_sched_match=auto_update_all\"\nrun_test 236k  test_prepost_sched test_prepost_sched.conf none force_update \\\n    common.max_per_run \"post_sched_match=force_update\"\nrun_test 237   test_sched_ratelim test_ratelim.conf \"Check action rate limitations\"\nrun_test 238   test_lhsm_archive 
test_lhsm1.conf \"check sql query string in case of multiple AND/OR\"\nrun_test 239a  test_multirule_select test_multirule.conf cleanup \"check sql query string in case of multiple rules\"\nrun_test 239b  test_multirule_select test_multirule_migr.conf migration \"check sql query string in case of multiple rules\"\nrun_test 240   test_rmdir_depth  test_rmdir_depth.conf \"check sql query for rmdir with depth condition\"\nrun_test 241   test_prepost_cmd  test_prepost_cmd.conf \"test pre/post_run_command\"\nrun_test 242   test_nlink_crit  test_nlink.conf \"test nlink criterion\"\nrun_test 243   test_iname       test_iname.conf \"test iname criterion\"\nrun_test 244   test_copy        test_copy.conf \"test common.copy specific parameters\"\nrun_test 245   test_move        test_move.conf \"test trash policy based on common.move\"\nrun_test 246   test_hsm_invalidate test_hsm_invalidate.conf \"HSM invalidate deleted files\"\nrun_test 247a   test_hsm_remove_order  test_hsm_remove_order.conf \"hsm_remove default order by\"\nrun_test 247b   test_hsm_remove_order  test_hsm_remove_noorder.conf \"hsm_remove override order by\"\n\n#### triggers ####\n\nrun_test 300\ttest_cnt_trigger test_trig.conf 151 21 \"trigger on file count\"\nrun_test 301    test_ost_trigger test_trig2.conf 150 110 \"trigger on OST usage\"\nrun_test 302\ttest_trigger_check test_trig3.conf 60 110 \"triggers check only\" 40 80 5 10 40\nrun_test 303    test_periodic_trigger test_trig4.conf 5 \"periodic trigger\"\nrun_test 304    test_ost_order test_trig2.conf \"OST purge order\"\nrun_test 305a   test_cntpct_trigger test_trig_cntpct.conf \"trigger on inode usage percentage\"\nrun_test 305b   test_cntpct_ost_trigger test_trig_cntpct.conf \"trigger on inode usage percentage on OSTs\"\n\n### projectid related tests\nrun_test 350    projectid_test_find info_collect.conf   \"Lustre project id and rbh-find\"\nrun_test 351    projectid_test_report info_collect.conf \"Lustre project id and rbh-report\"\nrun_test 352    
projectid_test_chglog info_collect.conf \"Lustre project id update with changelogs\"\nrun_test 353    projectid_test_run test_trig3.conf      \"Lustre project id targetted policy run\"\n\n#### reporting ####\nrun_test 400\ttest_rh_report common.conf 3 1 \"reporting tool\"\nrun_test 401a   test_rh_acct_report common.conf 5 \"\" \"reporting tool: config file without acct param\"\nrun_test 401b   test_rh_acct_report acct.conf 5 \"yes\" \"reporting tool: config file with accounting=no\"\nrun_test 401c   test_rh_acct_report acct.conf 5 \"no\" \"reporting tool: config file with accounting=yes\"\nrun_test 402a   test_rh_report_split_user_group common.conf 5 \"\" \"report with split-user-groups option\"\nrun_test 402b   test_rh_report_split_user_group common.conf 5 \"--force-no-acct\" \"report with split-user-groups and force-no-acct option\"\nrun_test 403    test_sort_report common.conf 0 \"Sort options of reporting command\"\nrun_test 404   test_dircount_report common.conf 20  \"dircount reports\"\n\nrun_test 405    test_find   common.conf \"\"  \"rbh-find command\"\nrun_test 406    test_du   common.conf \"\"    \"rbh-du command\"\n\n#### misc, internals #####\nrun_test 500a\ttest_logs log1.conf file_nobatch \t\"file logging without alert batching\"\nrun_test 500b\ttest_logs log2.conf syslog_nobatch \t\"syslog without alert batching\"\nrun_test 500c\ttest_logs log3.conf stdio_nobatch \t\"stdout and stderr without alert batching\"\nrun_test 500d\ttest_logs log1b.conf file_batch \t\"file logging with alert batching\"\nrun_test 500e\ttest_logs log2b.conf syslog_batch \t\"syslog with alert batching\"\nrun_test 500f\ttest_logs log3b.conf stdio_batch \t\"stdout and stderr with alert batching\"\nrun_test 501a \ttest_cfg_parsing basic \t\"parsing of example basic.conf\"\nrun_test 501b \ttest_cfg_parsing generated \t\"parsing of generated template\"\nrun_test 501c \ttest_cfg_parsing example_alerts \"parsing of example_alerts\"\nrun_test 501d \ttest_cfg_parsing example_checksum 
\"parsing of example_checksum\"\nrun_test 501e \ttest_cfg_parsing example_cleanup \"parsing of example_cleanup\"\nrun_test 501f \ttest_cfg_parsing example_lhsm \t\"parsing of example_lhsm\"\nrun_test 501g \ttest_cfg_parsing example_modeguard \t\"parsing of example_modeguard\"\nrun_test 501h \ttest_cfg_parsing example_rmdir \t\"parsing of example_rmdir\"\nrun_test 502a    recovery_test\ttest_recov.conf  full    1 \"FS recovery\"\nrun_test 502b    recovery_test\ttest_recov.conf  delta   1 \"FS recovery with delta\"\nrun_test 502c    recovery_test\ttest_recov.conf  rename  1 \"FS recovery with renamed entries\"\nrun_test 502d    recovery_test\ttest_recov.conf  partial 1 \"FS recovery with missing backups\"\nrun_test 502e    recovery_test\ttest_recov.conf  mixed   1 \"FS recovery (mixed status)\"\nrun_test 503a    recovery_test\ttest_recov2.conf  full    0 \"FS recovery (archive_symlinks=FALSE)\"\nrun_test 503b    recovery_test\ttest_recov2.conf  delta   0 \"FS recovery with delta (archive_symlinks=FALSE)\"\nrun_test 503c    recovery_test\ttest_recov2.conf  rename  0 \"FS recovery with renamed entries (archive_symlinks=FALSE)\"\nrun_test 503d    recovery_test\ttest_recov2.conf  partial 0 \"FS recovery with missing backups (archive_symlinks=FALSE)\"\nrun_test 503e    recovery_test\ttest_recov2.conf  mixed   0 \"FS recovery (mixed status, archive_symlinks=FALSE)\"\nrun_test 504     import_test    test_recov.conf \"Import from backend\"\nrun_test 505a     recov_filters  test_recov.conf  ost    \"FS recovery with OST filter\"\nrun_test 505b     recov_filters  test_recov2.conf  ost    \"FS recovery with OST filter (archive_symlinks=FALSE)\"\nrun_test 506a     recov_filters  test_recov.conf  since    \"FS recovery with time filter\"\nrun_test 506b     recov_filters  test_recov2.conf  since    \"FS recovery with time filter (archive_symlinks=FALSE)\"\nrun_test 507a     recov_filters  test_recov.conf  dir    \"FS recovery with dir filter\"\nrun_test 507b     recov_filters  
test_recov2.conf  dir    \"FS recovery with dir filter (archive_symlinks=FALSE)\"\nrun_test 508    test_tokudb \"Test TokuDB compression\"\nrun_test 509    test_cfg_overflow \"config options too long\"\nrun_test 510    test_rbh_find_printf test_checker.conf \"Test rbh-find with -printf option\"\nrun_test 511    archive_uuid1 test_uuid.conf \"Test UUID presence while scanning\"\nrun_test 512    archive_uuid2 test_uuid.conf \"Archive and undelete file with UUID using changelogs\"\nrun_test 513    test_reload   alert.conf \"Reloading configuration (with alert policy)\"\nrun_test 514    escape_chars    common.conf undelete    \"escape special characters in filters\"\nrun_test 515    escape_chars    common.conf report      \"escape special characters in filters\"\n\n#### Tests by Sogeti ####\nrun_test 600a test_alerts alert.conf \"file1\" 0 \"TEST_ALERT_PATH_NAME\"\nrun_test 600b test_alerts alert.conf \"type_file\" 0 \"TEST_ALERT_TYPE\"\nrun_test 600c test_alerts alert.conf \"root_owner\" 0 \"TEST_ALERT_OWNER\"\nrun_test 600d test_alerts alert.conf \"size10k\" 0 \"TEST_ALERT_SIZE\"\nrun_test 600e test_alerts alert.conf \"last_access_5s\" 6 \"TEST_ALERT_LAST_ACCESS\"\nrun_test 600f test_alerts alert.conf \"last_mod_5s\" 6 \"TEST_ALERT_LAST_MODIFICATION\"\nrun_test 600g test_alerts_OST alert_ost.conf \"ost1\" \"TEST_ALERT_OST\"\nrun_test 600h test_alerts alert.conf \"extended_attribute\" 0 \"TEST_ALERT_EXTENDED_ATTRIBUT\"\nrun_test 600i test_alerts alert.conf \"nonempty_dir\" 0 \"TEST_ALERT_DIRCOUNT\"\n\nrun_test 601a test_migration MigrationStd_Path_Name.conf 0 3 \"file.6;file.7;file.8\" \"--run=migration --target=all\" \"TEST_test_migration_PATH_NAME\"\nrun_test 601b test_migration MigrationStd_Type.conf 0 8 \"file.1;file.2;file.3;file.4;file.5;file.6;file.7;file.8\" \"--run=migration --target=all\" \"TEST_MIGRATION_STD_TYPE\"\nrun_test 601c test_migration MigrationStd_Owner.conf 0 1 \"file.3\" \"--run=migration --target=all\" \"TEST_MIGRATION_STD_OWNER\"\nrun_test 
601d test_migration MigrationStd_Size.conf 0 2 \"file.6;file.7\" \"--run=migration --target=all\" \"TEST_MIGRATION_STD_SIZE\"\nrun_test 601e test_migration MigrationStd_LastAccess.conf 6 9  \"file.1;file.2;file.3;file.4;file.5;file.6;file.7;link.1;link.2\" \"--run=migration --target=all\" \"TEST_MIGRATION_STD_LAST_ACCESS\"\nrun_test 601f test_migration MigrationStd_LastModification.conf 6 2 \"file.8;file.9\" \"--run=migration --target=all\" \"TEST_MIGRATION_STD_LAST_MODIFICATION\"\nrun_test 601g test_migration MigrationStd_ExtendedAttribut.conf 0 1 \"file.4\" \"--run=migration --target=all\" \"TEST_MIGRATION_STD_EXTENDED_ATTRIBUT\"\nrun_test 601h test_migration MigrationClass_Path_Name.conf 0 3 \"file.6;file.7;file.8\" \"--run=migration --target=all\" \"TEST_MIGRATION_CLASS_PATH_NAME\"\nrun_test 601i test_migration MigrationClass_Type.conf 0 2 \"link.1;link.2\" \"--run=migration --target=all\" \"TEST_MIGRATION_CLASS_TYPE\"\nrun_test 601j test_migration MigrationClass_Owner.conf 0 1 \"file.3\" \"--run=migration --target=all\" \"TEST_MIGRATION_CLASS_OWNER\"\nrun_test 601k test_migration MigrationClass_Size.conf 0 2 \"file.6;file.7\" \"--run=migration --target=all\" \"TEST_MIGRATION_CLASS_SIZE\"\nrun_test 601l test_migration MigrationClass_LastAccess.conf 11 8 \"file.1;file.2;file.4;file.5;file.6;file.7;link.1;link.2\" \"--run=migration --target=all\" \"TEST_MIGRATION_CLASS_LAST_ACCESS\"\nrun_test 601m test_migration MigrationClass_LastModification.conf 11 2 \"file.8;file.9\" \"--run=migration --target=all\" \"TEST_MIGRATION_CLASS_LAST_MODIFICATION\"\nrun_test 601n test_migration MigrationClass_ExtendedAttribut.conf 0 1 \"file.4\" \"--run=migration --target=all\" \"TEST_MIGRATION_CLASS_EXTENDED_ATTRIBUT\"\nrun_test 601o test_migration MigrationUser.conf 0 1 \"file.3\" \"--run=migration --target=user:testuser\" \"TEST_MIGRATION_USER\"\nrun_test 601p test_migration MigrationGroup.conf 0 2 \"file.2;file.3\" \"--run=migration --target=group:testgroup\" 
\"TEST_MIGRATION_GROUP\"\nrun_test 601q test_migration MigrationFile_Path_Name.conf 0 1 \"file.1\" \"--run=migration --target=file:$RH_ROOT/dir1/file.1\" \"TEST_MIGRATION_FILE_PATH_NAME\"\nrun_test 601r test_migration MigrationFile_Size.conf 1 1 \"file.8\" \"--run=migration --target=file:$RH_ROOT/dir2/file.8\" \"TEST_MIGRATION_FILE_SIZE\"\n\nrun_test 602a migration_OST MigrationStd_OST.conf 2 \"file.3;file.4\" \"--run=migration --target=all\" \"TEST_MIGRATION_STD_OST\"\nrun_test 602b migration_OST MigrationOST.conf 2 \"file.3;file.4\" \"--run=migration --target=ost:1\" \"TEST_MIGRATION_OST\"\nrun_test 602c migration_OST MigrationClass_OST.conf 2 \"file.3;file.4\" \"--run=migration --target=all\" \"TEST_MIGRATION_CLASS_OST\"\n\nrun_test 603 migration_file_type MigrationFile_Type.conf 0 1 \"link.1\" \"TEST_MIGRATION_FILE_TYPE\"\nrun_test 604 migration_file_owner MigrationFile_Owner.conf 0 1 \"file.3\" \"--run=migration --target=file:$RH_ROOT/dir1/file.3\" \"TEST_MIGRATION_FILE_OWNER\"\nrun_test 605 migration_file_Last MigrationFile_LastAccess.conf 12 1 \"file.1\" \"TEST_MIGRATION_FILE_LAST_ACCESS\"\nrun_test 606 migration_file_Last MigrationFile_LastModification.conf 12 1 \"file.1\" \"TEST_MIGRATION_FILE_LAST_MODIFICATION\"\nrun_test 607 migration_file_OST MigrationFile_OST.conf 1 \"file.3\" \"TEST_MIGRATION_FILE_OST\"\nrun_test 608 migration_file_ExtendedAttribut MigrationFile_ExtendedAttribut.conf 0 1 \"file.4\"  \"TEST_MIGRATION_FILE_EXTENDED_ATTRIBUT\"\n\nrun_test 609 trigger_purge_QUOTA_EXCEEDED TriggerPurge_QuotaExceeded.conf \"TEST_TRIGGER_PURGE_QUOTA_EXCEEDED\"\nrun_test 610 trigger_purge_OST_QUOTA_EXCEEDED TriggerPurge_OstQuotaExceeded.conf \"TEST_TRIGGER_PURGE_OST_QUOTA_EXCEEDED\"\nif [[ $RBH_NUM_UIDGID = \"yes\" ]]; then\n    run_test 611 trigger_purge_USER_GROUP_QUOTA_EXCEEDED TriggerPurge_UserQuotaExceeded.conf \"user '0'\" \"TEST_TRIGGER_PURGE_USER_QUOTA_EXCEEDED\"\n    run_test 612 trigger_purge_USER_GROUP_QUOTA_EXCEEDED 
TriggerPurge_GroupQuotaExceeded.conf \"group '0'\" \"TEST_TRIGGER_PURGE_GROUP_QUOTA_EXCEEDED\"\nelse\n    run_test 611 trigger_purge_USER_GROUP_QUOTA_EXCEEDED TriggerPurge_UserQuotaExceeded.conf \"user 'root'\" \"TEST_TRIGGER_PURGE_USER_QUOTA_EXCEEDED\"\n    run_test 612 trigger_purge_USER_GROUP_QUOTA_EXCEEDED TriggerPurge_GroupQuotaExceeded.conf \"group 'root'\" \"TEST_TRIGGER_PURGE_GROUP_QUOTA_EXCEEDED\"\nfi\n\nrun_test 613a test_purge PurgeStd_Path_Name.conf 0 7 \"file.6;file.7;file.8\" \"--run=purge --target=all\" \"TEST_PURGE_STD_PATH_NAME\"\nrun_test 613b test_purge_tmp_fs_mgr PurgeStd_Type.conf 0 8 \"link.1;link.2\" \"--run=purge --target=all\" \"TEST_PURGE_STD_TYPE\"\nrun_test 613c test_purge PurgeStd_Owner.conf 0 9 \"file.3\" \"--run=purge --target=all\" \"TEST_PURGE_STD_OWNER\"\nrun_test 613d test_purge PurgeStd_Size.conf 0 8 \"file.6;file.7\" \"--run=purge --target=all\" \"TEST_PURGE_STD_SIZE\"\nrun_test 613e test_purge PurgeStd_LastAccess.conf 10 9 \"file.8\" \"--run=purge --target=all\" \"TEST_PURGE_STD_LAST_ACCESS\"\nrun_test 613f test_purge PurgeStd_LastModification.conf 30 9 \"file.8\" \"--run=purge --target=all\" \"TEST_PURGE_STD_LAST_MODIFICATION\"\nrun_test 613g test_purge PurgeStd_ExtendedAttribut.conf 0 9 \"file.4\" \"--run=purge --target=all\" \"TEST_PURGE_STD_EXTENDED_ATTRIBUT\"\nrun_test 613h test_purge PurgeClass_Path_Name.conf 0 9 \"file.1\" \"--run=purge --target=all\" \"TEST_PURGE_CLASS_PATH_NAME\"\nrun_test 613i test_purge PurgeClass_Type.conf 0 2 \"file.1;file.2;file.3;file.4;file.5;file.6;file.7;file.8\" \"--run=purge --target=all\" \"TEST_PURGE_CLASS_TYPE\"\nrun_test 613j test_purge PurgeClass_Owner.conf 0 3 \"file.1;file.2;file.4;file.5;file.6;file.7;file.8\" \"--run=purge --target=all\" \"TEST_PURGE_CLASS_OWNER\"\nrun_test 613k test_purge PurgeClass_Size.conf 0 8 \"file.6;file.7\" \"--run=purge --target=all\" \"TEST_PURGE_CLASS_SIZE\"\nrun_test 613l test_purge PurgeClass_LastAccess.conf 20 9 \"file.8\" \"--run=purge --target=all\" 
\"TEST_PURGE_CLASS_LAST_ACCESS\"\nrun_test 613m test_purge PurgeClass_LastModification.conf 20 9 \"file.8\" \"--run=purge --target=all\" \"TEST_PURGE_CLASS_LAST_MODIFICATION\"\nrun_test 613n test_purge PurgeClass_ExtendedAttribut.conf 0 9 \"file.4\" \"--run=purge --target=all\" \"TEST_PURGE_CLASS_EXTENDED_ATTRIBUT\"\n\nrun_test 614a purge_OST PurgeStd_OST.conf 2 \"file.3;file.4\" \"--run=purge --target=all\" \"TEST_PURGE_STD_OST\"\nrun_test 614b purge_OST PurgeOST.conf 2 \"file.3;file.4\" \"--run=purge --target=ost:1 --target-usage=0\" \"TEST_PURGE_OST\"\nrun_test 614c purge_OST PurgeClass_OST.conf 2 \"file.3;file.4\" \"--run=purge --target=all\" \"TEST_PURGE_CLASS_OST\"\n\nrun_test 615a test_removing RemovingEmptyDir.conf \"emptyDir\" 11 \"TEST_REMOVING_EMPTY_DIR\"\nrun_test 615b test_removing RemovingDir_Path_Name.conf \"pathName\" 0 \"TEST_REMOVING_DIR_PATH_NAME\"\nrun_test 615c test_removing RemovingDir_Owner.conf \"owner\" 0 \"TEST_REMOVING_DIR_OWNER\"\nrun_test 615d test_removing RemovingDir_LastAccess.conf \"lastAccess\" 11 \"TEST_REMOVING_DIR_LAST_ACCESS\"\nrun_test 615e test_removing RemovingDir_LastModification.conf \"lastModif\" 11 \"TEST_REMOVING_DIR_LAST_MODIFICATION\"\nrun_test 615f test_removing RemovingDir_ExtendedAttribute.conf \"extAttributes\" 0 \"TEST_REMOVING_DIR_EXTENDED_ATTRIBUT\"\nrun_test 615g test_removing RemovingDir_Dircount.conf \"dircount\" 0 \"TEST_REMOVING_DIR_DIRCOUNT\"\n\nrun_test 616 test_removing_ost RemovingDir_OST.conf \"TEST_REMOVING_DIR_OST\"\n\nrun_test 617 test_report_generation_1 Generation_Report_1.conf \"TEST_REPORT_GENERATION_1\"\nrun_test 618 report_generation2 \"TEST_REPORT_GENERATION_2\"\n\nrun_test 619 TEST_OTHER_PARAMETERS_1 OtherParameters_1.conf \"TEST_OTHER_PARAMETERS_1\"\nrun_test 620 TEST_OTHER_PARAMETERS_2 OtherParameters_2.conf \"TEST_OTHER_PARAMETERS_2\"\nrun_test 621 TEST_OTHER_PARAMETERS_3 OtherParameters_3.conf \"TEST_OTHER_PARAMETERS_3\"\nrun_test 622 TEST_OTHER_PARAMETERS_4 OtherParameters_4.conf 
\"TEST_OTHER_PARAMETERS_4\"\nrun_test 623 TEST_OTHER_PARAMETERS_5 OtherParameters_5.conf \"TEST_OTHER_PARAMETERS_5\"\n\nrun_test 700 test_changelog common.conf \"Changelog record suppression\"\nrun_test 701 test_changelog_cancel cl_batch.conf \"Changelog record cancellation\"\n\necho\necho \"========== TEST SUMMARY ($PURPOSE) ==========\"\ncat $SUMMARY\necho \"=============================================\"\n\n#init xml report\nif (( $junit )); then\n\ttfinal=`date \"+%s.%N\"`\n\tdur=`echo \"($tfinal-$tinit)\" | bc -l`\n\techo \"total test duration: $dur sec\"\n\tjunit_write_xml \"$dur\" $RC $(( $RC + $SUCCESS ))\n\trm -f $TMPXML_PREFIX.stderr $TMPXML_PREFIX.stdout $TMPXML_PREFIX.tc\nfi\n\nrm -f $SUMMARY\nif (( $RC > 0 )); then\n\techo \"$RC tests FAILED, $SUCCESS successful, $SKIP skipped\"\nelse\n\techo \"All tests passed ($SUCCESS successful, $SKIP skipped)\"\nfi\nrm -f $TMPERR_FILE\nexit $RC\n"
  },
  {
    "path": "tests/test_suite/3-tests-lustre.sh",
    "content": "FILE=/mnt/lustre/file_test$$\n\nfunction test1\n{\n\techo \"Writing file\"\n\tdd if=/dev/zero of=$FILE bs=1M count=10 >/dev/null 2>/dev/null || echo \"Error writing $FILE\"\n\techo \"Archiving file\"\n\tlfs hsm_archive $FILE || echo \"Error archiving file $FILE\"\n\techo \"Removing file\"\n\trm -f $FILE || echo \"Error removing file $FILE\"\n}\n\nDIRCOUNT=2\n\nfunction test2\n{\n\tfor d in `seq 1 $DIRCOUNT`; do\n\t\tdir=/mnt/lustre/dir.$d\n\t\tmkdir -p $dir\n\t\tfor f in `seq 1 15`; do\n\t\t\tdd if=/dev/zero of=$dir/file.$f bs=1M count=10\n\t\tdone\n\t\tlfs hsm_archive $dir/* || echo \"ERROR\"\n\tdone\n\tkill %1\n}\n\nfunction test2bis\n{\n\tfor d in `seq 1 $DIRCOUNT`; do\n\t\tdir=/mnt/lustre/dir.$d\n\t\tlfs hsm_release $dir/* || echo \"ERROR\"\n\tdone\n}\n\nfunction test3\n{\n\tFILE=/mnt/lustre/file.$$\n\t# try to release a file while it is opened\n\tdd if=/dev/zero of=$FILE bs=1M count=10\n\tlfs hsm_archive $FILE\n\t# wait for end of archive\n\twhile (( `lfs hsm_state $FILE | grep archived | wc -l` != 1 )); do\n\t\tsleep 1;\n\tdone\n\t# open the file\n\tperl -e \"open F, \\\" < $FILE\\\"; sleep(5); my \\$toto = <F>; if ( !\\$toto) { print \\\"EOF\\\\n\\\" };\" &\n\techo \"Process using file:\"\n\tlsof $FILE\n\tlfs hsm_release $FILE && echo \"ERROR: Should not allow releasing opened file\"\n\tif (( `lfs hsm_state $FILE | grep released | wc -l` != 0 )); then\n\t\techo \"File is released: Test failed\"\n\tfi\n}\n\n\ntest1\ntest2\ntest2bis\ntest3\n"
  },
  {
    "path": "tests/test_suite/Makefile.am",
    "content": "rbhtestsdir=@datarootdir@/@PACKAGE_NAME@/tests\nif COMMON_RPMS\nrbhtests_PROGRAMS = create-random\nendif\n"
  },
  {
    "path": "tests/test_suite/README.rst",
    "content": "==============================\nRobinhood testsuite for Lustre\n==============================\n\nHow to run the tests for Lustre\n-------------------------------\n\n\nHave a Lustre filesystem\n~~~~~~~~~~~~~~~~~~~~~~~~\n\nLustre must be mounted as /mnt/lustre before running setup step and\ntests. The filesystem should have at least 4 OSTs.\n\nIf you don't have a Lustre filesystem handy, you can use llmount.sh to\ncreate the filesystem.\n\nFor instance, as root, run::\n\n  OSTSIZE=400000 OSTCOUNT=4 /usr/lib64/lustre/tests/llmount.sh\n\n\nSet the purpose environment variable\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nThe available values for PURPOSE are::\n\n  - TMP_FS_MGR\n  - LUSTRE_HSM\n  - BACKUP\n  - SHOOK\n\nIf unset, the default for PURPOSE is TMP_FS_MGR.\n\nThe PURPOSE variable can be exported like this::\n\n  export PURPOSE=LUSTRE_HSM\n\nor it can prefix the tests like that::\n\n  PURPOSE=LUSTRE_HSM ... <test>\n\n\nSetup the test environment\n~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nAs root, run::\n\n  ./1-test_setup_lustre.sh\n\nThis will initialize the SQL database, create the changelog client,\nstart the Lustre posix copytool, and enable the HSM coordinator.\n\n\nRun the tests\n~~~~~~~~~~~~~\n\nFor brief output::\n\n  ./2-run-tests.sh -q\n\nFor full output::\n\n  ./2-run-tests.sh\n\nTo run only some tests (here, test 3 and 4)::\n\n  ./2-run-tests.sh 3,4\n\nTo get a debug output, set DEBUG to 1::\n\n  DEBUG=1 ./2-run-tests.sh\n\nRBH_NUM_UIDGID can be set to use numerical UIDs and GIDs instead of their\nstrings counterparts::\n\n  RBH_NUM_UIDGID=yes ./2-run-tests.sh 119\n\n\nRun the tests under Valgrind\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nThe test suite, and just a subset of the tests, can be run under\nvalgrind by setting the WITH_VALGRIND environment variable. 
For\ninstance::\n\n  WITH_VALGRIND=1 PURPOSE=LUSTRE_HSM ./2-run-tests.sh 301\n\nEach test instance will create a valgrind log file called\n`vg-test_<test number>-<pid>.log`\n\nIt is possible to pass some extra parameters to valgrind by setting\nits VALGRIND_OPTS::\n\n  WITH_VALGRIND=1 PURPOSE=LUSTRE_HSM VALGRIND_OPTS=\"--tool=cachegrind\" ./2-run-tests.sh 100\n\nBy default, the output in the log files include a suppression rule for\neach error. These suppressions can be added to `valgrind.supp` to\nsuppress the corresponding warning(s) in a subsequent run.\n\nHow to run the tests for non-lustre POSIX filesystem\n----------------------------------------------------\n\nSet environment variables\n~~~~~~~~~~~~~~~~~~~~~~~~~\n\nFor POSIX tests, set the following environment variables:\n\n* unset PURPOSE, or set PURPOSE=TMP_FS_MGR\n\n* set POSIX_MODE=1\n\n* set NOLOG=1\n\nSetup the test environment\n~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nAs root, run::\n\n  ./1-test_setup_posix.sh\n\nThis will initialize the SQL database, create and mount a loopback filesystem in /tmp.\n\nRun the tests\n~~~~~~~~~~~~~\n\nRun the test as previously described in the Lustre section.\n\nRunning the tests with robinhood installed\n------------------------------------------\n\nAfter installing the three rpms robinhood, robinhood-adm and\nrobinhood-tests, create a test repository, cd into it, and run both\n/usr/share/robinhood/tests/test_suite/1-test_setup_lustre.sh and\n/usr/share/robinhood/tests/test_suite/2-run-tests.sh as shown in the\nprevious sections.\n"
  },
  {
    "path": "tests/test_suite/bench_rpc.sh",
    "content": "#!/bin/sh\n\n# This benchmark performs several kind of metadata changes:\n# pass 1) rename\n# pass 2) link/unlink\n# pass 3) time change\n# pass 4) other attr change (owner/group)\n\nfunction stats_init\n{\n\t/usr/sbin/lctl get_param mdc.*.stats > /tmp/rpc.init\n}\n\nfunction stats_end\n{\n\t/usr/sbin/lctl get_param mdc.*.stats > /tmp/rpc.end\n\n\techo \"Request summary:\"\n\n\tfor param in `grep -v '=' /tmp/rpc.end | awk '{print $1}'`; do\n\n\t\tv_init=`egrep \"$param \" /tmp/rpc.init | awk '{print $2}' | tail -n 1`\n\t\tv_end=`egrep \"$param \" /tmp/rpc.end | awk '{print $2}' | tail -n 1`\n\n\t\tif [[ $v_init = *.* ]]; then\n\t\t \techo \"    $param:\" `echo $v_end-$v_init | bc -l`\n\t\telse\n\t\t\techo \"    $param:\" $(($v_end-$v_init))\n\t\tfi\n\n\tdone\n\n}\n\nfunction err\n{\n\tmsg=$*\n\techo \"ERROR executing $msg: $?\"\n\texit 1\n}\n\nfunction run\n{\n\t$* || err \"$*\"\n}\n\n# rename operations\nfunction test_1\n{\n\tfile=$1\n\tdir=`dirname $file`\n\tname1=`basename $file`\n\tname2=$name1.rnm\n\techo \"#1: RENAME\"\n\tfor i in `seq 1 1000`; do\n\t\trun mv $dir/$name1 $dir/$name2\n\t\trun mv $dir/$name2 $dir/$name1\n\tdone\n}\n\n# link/unlink operations\nfunction test_2\n{\n\tfile=$1\n\tdir=`dirname $file`\n\tlink1=`basename $file`\n\tlink2=$name1.lnk\n\techo \"#2: LINK/UNLINK\"\n\tfor i in `seq 1 1000`; do\n\t\trun ln $dir/$link1 $dir/$link2\n\t\trun unlink $dir/$link1\n\t\trun ln $dir/$link2 $dir/$link1\n\t\trun unlink $dir/$link2\n\tdone\n}\n\n# read operations (atime change)\nfunction test_3a\n{\n\tfile=$1\n\techo \"#3a: atime change (read)\"\n\tfor i in `seq 1 100`; do\n\t\t# read operation (can cause atime change)\n\t\trun od -x $file > /dev/null\n\tdone\n}\n\n# time change operations\nfunction test_3b\n{\n\tfile=$1\n\techo \"#3b.1: mtime change (append)\"\n\tfor i in `seq 1 1000`; do\n\t\t# append operation (can cause mtime change)\n                run od -x -v -N 100 /dev/zero >> $file\n\tdone\n\techo \"#3b.2: all times change 
(touch)\"\n\tfor i in `seq 1 1000`; do\n\t\t# touch opration (change all times)\n                run touch $file\n\tdone\n}\n\n\n# attr change operations\nfunction test_4\n{\n\tfile=$1\n\techo \"#4.1: mode change (chmod)\"\n\tfor i in `seq 1 1000`; do\n\t\trun chmod 600 $file\n\t\trun chmod 644 $file\n\tdone\n\techo \"#4.2: owner change (chown)\"\n\tfor i in `seq 1 1000`; do\n                run chown bin $file\n                run chown root $file\n\tdone\n}\n\n\narg=$1\nif [ -z $arg ]; then\n\techo \"Usage: $0 <file>\"\n\texit 1\nfi\n\nstats_init\ntest_1 $arg\ntest_2 $arg\ntest_3a $arg\ntest_3b $arg\n#test_4 $arg\nstats_end\n"
  },
  {
    "path": "tests/test_suite/cfg/Generation_Report_1.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\n#-----------------------------------------------------\n# reportingt in accordance to the data\n#-----------------------------------------------------\n\n%include \"common.conf\"\n\nFileClass test_file_type\n{\n    definition\n    {\n        type == \"file\"\n    }\n}\n\nFileClass test_link_type\n{\n    definition\n    {\n        type == \"symlink\"\n    }\n}\n\nPurge_rules\n{\n    Policy test-generation-report-purge\n    {\n        target_fileclass = test_file_type;\n\t    target_fileclass = test_link_type;\n\t    condition {  last_mod >= 0 }\n    }\n\n    policy default\n    {\n\t    condition {  last_mod >= 1h }\n    }\n}\n\n\nMigration_rules\n{\n    Policy test-generation-report-migr\n    {\n        target_fileclass = test_file_type;\n\t    target_fileclass = test_link_type;\n\t    condition {  last_mod >= 0 }\n    }\n\n    policy default\n    {\n\t    condition {  last_mod >= 1h }\n    }\n}\n"
  },
  {
    "path": "tests/test_suite/cfg/MigrationClass_ExtendedAttribut.conf",
    "content": "%include \"common.conf\"\n\nFileClass test_xattr\n{\n\tdefinition\n\t{\n\t    xattr.user.foo == \"1\"\n\t}\n}\n\nmigration_rules\n{\n    policy migr_test_xattr\n    {\n        target_fileclass = test_xattr;\n        condition\n        {\n            owner == \"root\"\n        }\n    }\n\n    policy default\n    {\n        condition { last_mod > 1h }\n    }\n}\n\n"
  },
  {
    "path": "tests/test_suite/cfg/MigrationClass_LastAccess.conf",
    "content": "%include \"common.conf\"\n\nFileClass test_last_access\n{\n\tdefinition\n\t{\n\t    last_access >= 10sec\n\t}\n}\n\nmigration_rules\n{\n    policy migr_test_last_access\n    {\n        target_fileclass = test_last_access;\n        condition\n        {\n            owner == \"root\"\n        }\n    }\n\n    policy default\n    {\n        condition { last_mod > 1h }\n    }\n}\n\n"
  },
  {
    "path": "tests/test_suite/cfg/MigrationClass_LastModification.conf",
    "content": "%include \"common.conf\"\n\nFilesets\n{\n    FileClass test_last_mod\n    {\n        definition\n        {\n            last_mod <= 10sec\n        }\n    }\n}\n\nmigration_rules\n{\n    policy migr_test_last_mod\n    {\n        target_fileclass = test_last_mod;\n        condition\n        {\n            owner == \"root\"\n        }\n    }\n\n    policy default\n    {\n        condition { last_mod > 1h }\n    }\n}\n\n"
  },
  {
    "path": "tests/test_suite/cfg/MigrationClass_OST.conf",
    "content": "%include \"common.conf\"\n\nFileClass test_ost\n{\n\tdefinition\n\t{\n\t    ost_pool == \"ost1\"\n\t}\n}\n\nmigration_rules\n{\n    policy migr_test_ost\n    {\n        target_fileclass = test_ost;\n        condition\n        {\n            owner == \"root\"\n        }\n    }\n\n    policy default\n    {\n        condition { last_mod > 1h }\n    }\n}\n\n"
  },
  {
    "path": "tests/test_suite/cfg/MigrationClass_Owner.conf",
    "content": "%include \"common.conf\"\n\nFileClass test_owner\n{\n\tdefinition\n\t{\n\t    owner == \"testuser\"\n\t}\n}\n\nmigration_rules\n{\n    policy migr_test_owner\n    {\n        target_fileclass = test_owner;\n        condition\n        {\n            last_mod >= 0\n        }\n    }\n\n    policy default\n    {\n        condition { last_mod > 1h }\n    }\n}\n"
  },
  {
    "path": "tests/test_suite/cfg/MigrationClass_Path_Name.conf",
    "content": "%include \"common.conf\"\n\nFileClass test_path\n{\n\tdefinition\n\t{\n\t    path == \"/mnt/lustre/dir2/*\"\n\t}\n}\n\nmigration_rules\n{\n    policy migr_test_path\n    {\n        target_fileclass = test_path;\n        condition\n        {\n            owner == \"root\"\n        }\n    }\n\n    policy default\n    {\n        condition { last_mod > 1h }\n    }\n}\n\n"
  },
  {
    "path": "tests/test_suite/cfg/MigrationClass_Size.conf",
    "content": "%include \"common.conf\"\n\nFileClass test_size\n{\n\tdefinition\n\t{\n\t    size >= 10KB\n\t}\n}\n\nmigration_rules\n{\n    policy migr_test_size\n    {\n        target_fileclass = test_size;\n        condition\n        {\n            owner == \"root\"\n        }\n    }\n\n    policy default\n    {\n        condition { last_mod > 1h }\n    }\n}\n"
  },
  {
    "path": "tests/test_suite/cfg/MigrationClass_Type.conf",
    "content": "%include \"common.conf\"\n\nFileClass test_type\n{\n\tdefinition\n\t{\n\t    type == \"symlink\"\n\t}\n}\n\nmigration_rules\n{\n    policy migr_test_type\n    {\n        target_fileclass = test_type;\n        condition\n        {\n            owner == \"root\"\n        }\n    }\n\n    policy default\n    {\n        condition { last_mod > 1h }\n    }\n}\n"
  },
  {
    "path": "tests/test_suite/cfg/MigrationFile_ExtendedAttribut.conf",
    "content": "%include \"common.conf\"\n\nmigration_rules\n{\n\tpolicy default\n\t{\n\t\tcondition\n        {\n            xattr.user.foo == \"1\"\n        }\n    }\n}\n\n"
  },
  {
    "path": "tests/test_suite/cfg/MigrationFile_LastAccess.conf",
    "content": "%include \"common.conf\"\n\nmigration_rules\n{\n\tpolicy default\n\t{\n\t\tcondition\n        {\n            last_access >= 10sec\n        }\n    }\n}\n\n"
  },
  {
    "path": "tests/test_suite/cfg/MigrationFile_LastModification.conf",
    "content": "%include \"common.conf\"\n\nmigration_rules\n{\n\tpolicy default\n\t{\n\t\tcondition\n        {\n            last_mod >= 10sec\n        }\n    }\n}\n\n"
  },
  {
    "path": "tests/test_suite/cfg/MigrationFile_OST.conf",
    "content": "%include \"common.conf\"\n\nmigration_rules\n{\n\tpolicy default\n\t{\n\t\tcondition\n        {\n            ost_pool == \"ost1\"\n        }\n    }\n}\n\n"
  },
  {
    "path": "tests/test_suite/cfg/MigrationFile_Owner.conf",
    "content": "%include \"common.conf\"\n\nmigration_rules\n{\n\tpolicy default\n\t{\n\t\tcondition\n        {\n            owner == \"testuser\"\n        }\n    }\n}\n\n"
  },
  {
    "path": "tests/test_suite/cfg/MigrationFile_Path_Name.conf",
    "content": "%include \"common.conf\"\n\nmigration_rules\n{\n\tpolicy default\n\t{\n\t\tcondition\n        {\n            name == \"file.1\"\n        }\n    }\n}\n\n"
  },
  {
    "path": "tests/test_suite/cfg/MigrationFile_Size.conf",
    "content": "%include \"common.conf\"\n\nmigration_rules\n{\n\tpolicy default\n\t{\n\t\tcondition\n        {\n            size >= 10KB\n        }\n    }\n}\n\n"
  },
  {
    "path": "tests/test_suite/cfg/MigrationFile_Type.conf",
    "content": "%include \"common.conf\"\n\nmigration_rules\n{\n\tpolicy default\n\t{\n\t\tcondition\n        {\n            type == \"symlink\"\n        }\n    }\n}\n\n"
  },
  {
    "path": "tests/test_suite/cfg/MigrationGroup.conf",
    "content": "%include \"common.conf\"\n\nmigration_rules\n{\n    policy default\n    {\n        condition\n        {\n            last_mod >= 0sec\n        }\n    }\n}\n"
  },
  {
    "path": "tests/test_suite/cfg/MigrationOST.conf",
    "content": "%include \"common.conf\"\n\nmigration_rules\n{\n\tpolicy default\n\t{\n\t\tcondition\n        {\n            owner == \"root\"\n        }\n    }\n}\n\n"
  },
  {
    "path": "tests/test_suite/cfg/MigrationStd_ExtendedAttribut.conf",
    "content": "%include \"common.conf\"\n\nmigration_rules\n{\n\tpolicy default\n\t{\n\t\tcondition\n        {\n            xattr.user.foo == \"1\"\n        }\n    }\n}\n\n"
  },
  {
    "path": "tests/test_suite/cfg/MigrationStd_LastAccess.conf",
    "content": "%include \"common.conf\"\n\nmigration_rules\n{\n\tpolicy default\n\t{\n\t\tcondition\n        {\n            last_access >= 5sec\n        }\n    }\n}\n\n"
  },
  {
    "path": "tests/test_suite/cfg/MigrationStd_LastModification.conf",
    "content": "%include \"common.conf\"\n\nmigration_rules\n{\n\tpolicy default\n\t{\n\t\tcondition\n        {\n            last_mod <= 5sec\n        }\n    }\n}\n\n"
  },
  {
    "path": "tests/test_suite/cfg/MigrationStd_OST.conf",
    "content": "%include \"common.conf\"\n\nmigration_rules\n{\n\tpolicy default\n\t{\n\t\tcondition\n        {\n            ost_pool == \"ost1\"\n        }\n    }\n}\n\n"
  },
  {
    "path": "tests/test_suite/cfg/MigrationStd_Owner.conf",
    "content": "%include \"common.conf\"\n\nmigration_rules\n{\n\tpolicy default\n\t{\n\t\tcondition\n        {\n            owner == \"testuser\"\n        }\n    }\n}\n\n"
  },
  {
    "path": "tests/test_suite/cfg/MigrationStd_Path_Name.conf",
    "content": "%include \"common.conf\"\n\nmigration_rules\n{\n\tpolicy default\n\t{\n\t\tcondition\n        {\n            path == \"/mnt/lustre/dir2/*\"\n        }\n    }\n}\n\n"
  },
  {
    "path": "tests/test_suite/cfg/MigrationStd_Size.conf",
    "content": "%include \"common.conf\"\n\nmigration_rules\n{\n\tpolicy default\n\t{\n\t\tcondition\n        {\n            size >= 10KB\n        }\n    }\n}\n\n"
  },
  {
    "path": "tests/test_suite/cfg/MigrationStd_Type.conf",
    "content": "%include \"common.conf\"\n\nmigration_rules\n{\n\tpolicy default\n\t{\n\t\tcondition\n        {\n            type != \"symlink\"\n        }\n    }\n}\n\n"
  },
  {
    "path": "tests/test_suite/cfg/MigrationUser.conf",
    "content": "%include \"common.conf\"\n\nmigration_rules\n{\n    policy default\n    {\n        condition\n        {\n            last_mod >= 0sec\n        }\n    }\n}\n"
  },
  {
    "path": "tests/test_suite/cfg/OtherParameters_1.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\nEntryProcessor\n{\n    match_classes = FALSE;\n}\n\nGeneral\n{\n\tfs_path = $RH_ROOT;\n\tfs_type = $FS_TYPE;\n}\n\nChangeLog\n{\n    # 1 MDT block for each MDT :\n    MDT\n    {\n        # name of the first MDT\n        mdt_name  = \"MDT0000\" ;\n\n        # id of the persistent changelog reader\n        # as returned by \"lctl changelog_register\" command\n        reader_id = \"cl1\" ;\n    }\n    force_polling = TRUE;\n    polling_interval = 1s;\n}\n\nLog\n{\n    # Log verbosity level\n    # Possible values are: CRIT, MAJOR, EVENT, VERB, DEBUG, FULL\n    debug_level = EVENT;\n\n    # Log file\n    log_file = stdout;\n\n    # File for reporting purge events\n    report_file = \"/dev/null\";\n\n    # set alert_file, alert_mail or both depending on the alert method you wish\n    alert_file = \"/dev/null\";\n\n}\n\nListManager\n{\n\tMySQL\n\t{\n\t\tserver = \"localhost\";\n\t\tdb = $RH_DB;\n        user = \"robinhood\";\n\t\t# password or password_file are mandatory\n\t\tpassword = \"robinhood\";\n        engine = InnoDB;\n\t}\n\n\tSQLite {\n\t        db_file = \"/tmp/robinhood_sqlite_db\" ;\n        \tretry_delay_microsec = 1000 ;\n\t}\n}\n\n# for tests with backup purpose\nbackup_config\n{\n    root = \"/tmp/backend\";\n    mnt_type=ext4;\n    check_mounted = FALSE;\n    recovery_action = common.copy;\n}\n# for tests with shook purpose\nshook_config\n{\n    root = \"/tmp/backend\";\n    mnt_type=ext4;\n    check_mounted = FALSE;\n    recovery_action = common.copy;\n}\n\n\n%include \"$RBH_TEST_POLICIES\"\n\nFileClass test_purge\n{\n    Definition\n    {\n        xattr.user.foo == \"[1-5]\"\n    }\n}\n\npurge_rules\n{\n    policy default\n    {\n        condition { last_mod >= 0sec }\n    }\n}\n\npurge_trigger\n{\n    trigger_on         = global_usage;\n    high_threshold_pct = 0%;\n    low_threshold_pct  = 0%;\n    check_interval     = 
5min;\n}\n\nmigration_rules\n{\n    policy default\n    {\n        condition\n        {\n            last_mod >= 0sec\n        }\n    }\n}\n\n"
  },
  {
    "path": "tests/test_suite/cfg/OtherParameters_2.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\n\nGeneral\n{\n\tfs_path = $RH_ROOT;\n\tfs_type = $FS_TYPE;\n\tstay_in_fs = TRUE;\n}\n\nChangeLog\n{\n    MDT\n    {\n        mdt_name  = \"MDT0000\" ;\n        reader_id = \"cl1\" ;\n    }\n    force_polling = TRUE;\n    polling_interval = 1s;\n}\n\nLog\n{\n    debug_level = EVENT;\n    log_file = stdout;\n    report_file = \"/dev/null\";\n    alert_file = \"/dev/null\";\n    stats_interval = 5sec;\n}\n\nListManager\n{\n\tMySQL\n\t{\n\t\tserver = \"localhost\";\n\t\tdb = $RH_DB;\n        user = \"robinhood\";\n\t\tpassword = \"robinhood\";\n        engine = InnoDB;\n\t}\n\n\tSQLite {\n        db_file = \"/tmp/robinhood_sqlite_db\" ;\n    \tretry_delay_microsec = 1000 ;\n\t}\n}\n\n# for tests with backup purpose\nbackup_config\n{\n    root = \"/tmp/backend\";\n    mnt_type=ext4;\n    check_mounted = FALSE;\n    recovery_action = common.copy;\n}\n# for tests with shook purpose\nshook_config\n{\n    root = \"/tmp/backend\";\n    mnt_type=ext4;\n    check_mounted = FALSE;\n    recovery_action = common.copy;\n}\n\n\n%include \"$RBH_TEST_POLICIES\"\n\nmigration_rules\n{\n    policy default\n    {\n        condition\n        {\n            last_mod >= 0sec\n        }\n    }\n}\n\nmigration_trigger {\n    trigger_on = periodic;\n    check_interval = 1min;\n}\n\nmigration_parameters {\n    max_action_count = 10;\n}\n\n"
  },
  {
    "path": "tests/test_suite/cfg/OtherParameters_3.conf",
    "content": "%include \"common.conf\"\n\nhsm_remove_rules\n{\n    rule default {\n        condition { rm_time > 5s }\n    }\n}\n\nhsm_remove_trigger {\n    trigger_on = periodic;\n    check_interval = 10s;\n}\n\nhsm_remove_parameters\n{\n    nb_threads = 2;\n    queue_size = 6;\n    max_action_count = 4;\n}\n\nmigration_rules\n{\n\tpolicy default\n\t{\n\t\tcondition\n        {\n            owner == \"root\"\n        }\n    }\n}\n\n"
  },
  {
    "path": "tests/test_suite/cfg/OtherParameters_4.conf",
    "content": "General\n{\n\tfs_path = $RH_ROOT;\n\tfs_type = $FS_TYPE;\n}\n\nListManager\n{\n\tMySQL\n\t{\n\t\tserver = \"localhost\";\n\t\tdb = $RH_DB;\n        user = \"robinhood\";\n\t\t# password or password_file are mandatory\n\t\tpassword = \"robinhood\";\n        engine = InnoDB;\n\t}\n}\n\nbackup_config\n{\n    root = \"/tmp/backend\";\n    mnt_type = ext4;\n    check_mounted = yes;\n    recovery_action = common.copy;\n}\n# for tests with shook purpose\nshook_config\n{\n    root = \"/tmp/backend\";\n    mnt_type = ext4;\n    check_mounted = yes;\n    recovery_action = common.copy;\n}\n\n\n%include \"$RBH_TEST_POLICIES\"\n\nmigration_rules\n{\n\tpolicy default\n\t{\n\t\tcondition\n        {\n            last_mod >= 10\n        }\n    }\n}\n\nmigration_parameters\n{\n    max_action_volume = 100MB;\n    check_actions_on_startup = TRUE;\n    check_actions_interval = 10sec;\n    action_timeout = 4;\n}\n\nmigration_trigger {\n    trigger_on = scheduled;\n    check_interval = 10;\n}\n"
  },
  {
    "path": "tests/test_suite/cfg/OtherParameters_5.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\nGeneral\n{\n\tfs_path = $RH_ROOT;\n\tfs_type = $FS_TYPE;\n}\n\n# ChangeLog Reader configuration\n# Parameters for processing MDT changelogs :\nChangeLog\n{\n    # 1 MDT block for each MDT :\n    MDT\n    {\n        # name of the first MDT\n        mdt_name  = \"MDT0000\" ;\n\n        # id of the persistent changelog reader\n        # as returned by \"lctl changelog_register\" command\n        reader_id = \"cl1\" ;\n    }\n    force_polling = TRUE;\n    polling_interval = 1s;\n}\n\nLog\n{\n    # Log verbosity level\n    # Possible values are: CRIT, MAJOR, EVENT, VERB, DEBUG, FULL\n    debug_level = EVENT;\n\n    # Log file\n    log_file = stdout;\n\n    # File for reporting purge events\n    report_file = \"/dev/null\";\n\n    # set alert_file, alert_mail or both depending on the alert method you wish\n    alert_file = \"/dev/null\";\n\n}\n\nListManager\n{\n\tMySQL\n\t{\n\t\tserver = \"localhost\";\n\t\tdb = $RH_DB;\n        user = \"robinhood\";\n\t\t# password or password_file are mandatory\n\t\tpassword = \"robinhood\";\n        engine = InnoDB;\n\t}\n}\n\n# for tests with backup purpose\nbackup_config\n{\n    root = \"/tmp/backend\";\n    mnt_type=ext4;\n    check_mounted = FALSE;\n    recovery_action = common.copy;\n}\n# for tests with shook purpose\nshook_config\n{\n    root = \"/tmp/backend\";\n    mnt_type=ext4;\n    check_mounted = FALSE;\n    recovery_action = common.copy;\n}\n\n\nFS_Scan\n{\n    min_scan_interval = 3sec;\n    max_scan_interval = 10sec;\n\n    spooler_check_interval = 1s;\n}\n\n%include \"$RBH_TEST_POLICIES\"\n\npurge_trigger\n{\n    trigger_on = global_usage;\n    high_threshold_pct = 100%;\n    low_threshold_pct = 99%;\n    check_interval = 5sec;\n}\n\npurge_rules {\n    rule default {condition {last_mod > 1 }}\n}\n"
  },
  {
    "path": "tests/test_suite/cfg/PurgeClass_ExtendedAttribut.conf",
    "content": "%include \"common.conf\"\n\nFileClass test_attr\n{\n\tdefinition\n\t{\n\t    xattr.user.foo == \"1\"\n\t}\n}\n\npurge_rules\n{\n    policy purge_test_attr\n    {\n        target_fileclass = test_attr;\n        condition\n        {\n            owner == \"root\"\n        }\n    }\n\n    policy default\n    {\n        condition { last_mod > 1h }\n    }\n}\n\nmigration_rules\n{\n    policy default\n    {\n        # migrate all files ASAP\n        condition\n        {\n            last_mod >= 1sec\n        }\n    }\n}\n\n# run migration every sec\nmigration_trigger {\n    trigger_on = periodic;\n    check_interval = 1s;\n}\n\npurge_trigger\n{\n    trigger_on         = global_usage;\n    high_threshold_pct = 0%;\n    low_threshold_pct  = 0%;\n    check_interval     = 5min;\n}\n\n"
  },
  {
    "path": "tests/test_suite/cfg/PurgeClass_LastAccess.conf",
    "content": "%include \"common.conf\"\n\nFileClass test_lastAccess\n{\n\tdefinition\n\t{\n\t    owner == \"root\"\n\t}\n}\n\npurge_rules\n{\n    policy purge_test_lastAccess\n    {\n        target_fileclass = test_lastAccess;\n        condition\n        {\n            last_access < 20s\n        }\n    }\n\n    policy default\n    {\n        condition { last_mod > 1h }\n    }\n}\n\nmigration_rules\n{\n    policy default\n    {\n        condition\n        {\n            last_access <= 10sec\n        }\n    }\n}\n\npurge_trigger\n{\n    trigger_on         = global_usage;\n    high_threshold_pct = 0%;\n    low_threshold_pct  = 0%;\n    check_interval     = 5min;\n}\n\n"
  },
  {
    "path": "tests/test_suite/cfg/PurgeClass_LastModification.conf",
    "content": "%include \"common.conf\"\n\nFileClass test_lastModif\n{\n\tdefinition\n\t{\n\t    owner == \"root\"\n\t}\n}\n\npurge_rules\n{\n    policy purge_test_lastModif\n    {\n        target_fileclass = test_lastModif;\n        condition\n        {\n            last_mod < 20s\n        }\n    }\n\n    policy default\n    {\n        condition { last_mod > 1h }\n    }\n}\n\nmigration_rules\n{\n    policy default\n    {\n        condition\n        {\n            last_access <= 10sec\n        }\n    }\n}\n\n# run migration every sec\nmigration_trigger {\n    trigger_on = periodic;\n    check_interval = 1s;\n}\n\npurge_trigger\n{\n    trigger_on         = global_usage;\n    high_threshold_pct = 0%;\n    low_threshold_pct  = 0%;\n    check_interval     = 5min;\n}\n\n"
  },
  {
    "path": "tests/test_suite/cfg/PurgeClass_OST.conf",
    "content": "%include \"common.conf\"\n\nFileClass test_ost\n{\n\tdefinition { type == file and ost_pool == \"ost1\" }\n}\n\nFileclass no_pool\n{\n\tdefinition { type == file and ost_pool == \"\" }\n}\n\npurge_rules\n{\n    ignore_fileclass = no_pool;\n\n    policy purge_test_ost\n    {\n        target_fileclass = test_ost;\n        condition\n        {\n            owner == \"root\"\n        }\n    }\n\n    policy default\n    {\n        condition { last_mod > 1h }\n    }\n}\n\nmigration_rules\n{\n    policy default\n    {\n        # migrate all files ASAP\n        condition\n        {\n            last_mod >= 1sec\n        }\n    }\n}\n\n# run migration every sec\nmigration_trigger {\n    trigger_on = periodic;\n    check_interval = 1s;\n}\n\npurge_trigger\n{\n    trigger_on         = global_usage;\n    high_threshold_pct = 0%;\n    low_threshold_pct  = 0%;\n    check_interval     = 5min;\n}\n\n"
  },
  {
    "path": "tests/test_suite/cfg/PurgeClass_Owner.conf",
    "content": "%include \"common.conf\"\n\nFileClass test_owner\n{\n\tdefinition\n\t{\n\t    owner == \"root\"\n\t}\n}\n\npurge_rules\n{\n    policy purge_test_owner\n    {\n        target_fileclass = test_owner;\n        condition\n        {\n            last_mod >= 0sec and type == \"file\"\n        }\n    }\n\n    policy default\n    {\n        condition { last_mod > 1h }\n    }\n}\n\npurge_trigger\n{\n    trigger_on         = global_usage;\n    high_threshold_pct = 0%;\n    low_threshold_pct  = 0%;\n    check_interval     = 5min;\n}\n\nmigration_rules\n{\n    policy default\n    {\n        # migrate all files ASAP\n        condition\n        {\n            last_mod >= 1sec\n        }\n    }\n}\n\n# run migration every sec\nmigration_trigger {\n    trigger_on = periodic;\n    check_interval = 1s;\n}\n\n\n"
  },
  {
    "path": "tests/test_suite/cfg/PurgeClass_Path_Name.conf",
    "content": "%include \"common.conf\"\n\nFileClass test_purge_path\n{\n\tDefinition\n\t{\n\t    path == $MATCH_PATH1\n\t    and\n\t    owner == \"root\"\n\t}\n}\n\npurge_rules\n{\n    Policy purge_path\n    {\n        target_fileclass = test_purge_path;\n        condition\n        {\n            type == file\n            and\n            name == \"file.1\"\n        }\n    }\n\n    policy default\n    {\n       condition { last_mod > 1h }\n    }\n}\n\nmigration_rules\n{\n    policy default\n    {\n        # migrate all files ASAP\n        condition\n        {\n            last_mod >= 1sec\n        }\n    }\n}\n\n# run migration every sec\nmigration_trigger {\n    trigger_on = periodic;\n    check_interval = 1s;\n}\n\npurge_trigger\n{\n    trigger_on         = global_usage;\n    high_threshold_pct = 0%;\n    low_threshold_pct  = 0%;\n    check_interval     = 5min;\n}\n\n"
  },
  {
    "path": "tests/test_suite/cfg/PurgeClass_Size.conf",
    "content": "%include \"common.conf\"\n\nFileClass test_size\n{\n\tdefinition\n\t{\n\t    owner == \"root\"\n\t}\n}\n\npurge_rules\n{\n    policy purge_test_size\n    {\n        target_fileclass = test_size;\n        condition\n        {\n            size >= 10KB\n        }\n    }\n\n    policy default\n    {\n        condition { last_mod > 1h }\n    }\n}\n\nmigration_rules\n{\n    policy default\n    {\n        # migrate all files ASAP\n        condition\n        {\n            last_mod >= 1sec\n        }\n    }\n}\n\n# run migration every sec\nmigration_trigger {\n    trigger_on = periodic;\n    check_interval = 1s;\n}\n\npurge_trigger\n{\n    trigger_on         = global_usage;\n    high_threshold_pct = 0%;\n    low_threshold_pct  = 0%;\n    check_interval     = 5min;\n}\n\n"
  },
  {
    "path": "tests/test_suite/cfg/PurgeClass_Type.conf",
    "content": "%include \"common.conf\"\n\nFileClass test_type\n{\n\tdefinition\n\t{\n\t    type == \"file\"\n\t}\n}\n\npurge_rules\n{\n    policy purge_test_type\n    {\n        target_fileclass = test_type;\n        condition\n        {\n            last_mod >= 0sec\n        }\n    }\n\n    policy default\n    {\n        condition { last_mod > 1h }\n    }\n}\n\nmigration_rules\n{\n    policy default\n    {\n        # migrate all files ASAP\n        condition\n        {\n            last_mod >= 1sec\n        }\n    }\n}\n\n# run migration every sec\nmigration_trigger {\n    trigger_on = periodic;\n    check_interval = 1s;\n}\n\npurge_trigger\n{\n    trigger_on         = global_usage;\n    high_threshold_pct = 0%;\n    low_threshold_pct  = 0%;\n    check_interval     = 5min;\n}\n\n"
  },
  {
    "path": "tests/test_suite/cfg/PurgeOST.conf",
    "content": "%include \"common.conf\"\n\npurge_rules\n{\n\tpolicy default\n\t{\n\t\tcondition { owner == \"root\" }\n    }\n}\n\nmigration_rules\n{\n    policy default\n    {\n        # migrate all files ASAP\n        condition\n        {\n            last_mod >= 1sec\n        }\n    }\n}\n\n# run migration every sec\nmigration_trigger {\n    trigger_on = periodic;\n    check_interval = 1s;\n}\n\npurge_trigger\n{\n    trigger_on         = OST_usage;\n    high_threshold_pct = 0%;\n    low_threshold_pct  = 0%;\n    check_interval     = 5min;\n}\n\n"
  },
  {
    "path": "tests/test_suite/cfg/PurgeStd_ExtendedAttribut.conf",
    "content": "%include \"common.conf\"\n\npurge_rules\n{\n\tpolicy default\n\t{\n\t\tcondition { xattr.user.foo == \"1\" }\n    }\n}\n\nmigration_rules\n{\n    policy default\n    {\n        # migrate all files ASAP\n        condition\n        {\n            last_mod >= 1sec\n        }\n    }\n}\n\n# run migration every sec\nmigration_trigger {\n    trigger_on = periodic;\n    check_interval = 1s;\n}\n\npurge_trigger\n{\n    trigger_on         = global_usage;\n    high_threshold_pct = 0%;\n    low_threshold_pct  = 0%;\n    check_interval     = 5min;\n}\n\n"
  },
  {
    "path": "tests/test_suite/cfg/PurgeStd_LastAccess.conf",
    "content": "%include \"common.conf\"\n\npurge_rules\n{\n\tpolicy default\n\t{\n\t\tcondition { type == file and last_access <= 10sec }\n    }\n}\n\npurge_trigger\n{\n    trigger_on         = global_usage;\n    high_threshold_pct = 0%;\n    low_threshold_pct  = 0%;\n    check_interval     = 5min;\n}\n\nmigration_rules\n{\n    policy default\n    {\n        condition\n        {\n            type == file\n            and\n            last_access <= 10sec\n        }\n    }\n}\n\n# run migration every sec\nmigration_trigger {\n    trigger_on = periodic;\n    check_interval = 1s;\n}\n"
  },
  {
    "path": "tests/test_suite/cfg/PurgeStd_LastModification.conf",
    "content": "%include \"common.conf\"\n\npurge_rules\n{\n\tpolicy default\n\t{\n\t\tcondition { last_mod <= 30sec }\n    }\n}\n\nmigration_rules\n{\n    policy default\n    {\n        condition\n        {\n            last_access >= 0sec\n        }\n    }\n}\n\n# run migration every sec\nmigration_trigger {\n    trigger_on = periodic;\n    check_interval = 1s;\n}\n\n\npurge_trigger\n{\n    trigger_on         = global_usage;\n    high_threshold_pct = 0%;\n    low_threshold_pct  = 0%;\n    check_interval     = 5min;\n}\n\n"
  },
  {
    "path": "tests/test_suite/cfg/PurgeStd_OST.conf",
    "content": "%include \"common.conf\"\n\npurge_rules\n{\n\tpolicy default\n\t{\n\t\tcondition { ost_pool == \"ost1\" }\n    }\n}\n\nmigration_rules\n{\n    policy default\n    {\n        # migrate all files ASAP\n        condition\n        {\n            last_mod >= 1sec\n        }\n    }\n}\n\n# run migration every sec\nmigration_trigger {\n    trigger_on = periodic;\n    check_interval = 1s;\n}\n\npurge_trigger\n{\n    trigger_on         = OST_usage;\n    high_threshold_pct = 0%;\n    low_threshold_pct  = 0%;\n    check_interval     = 5min;\n}\n\n"
  },
  {
    "path": "tests/test_suite/cfg/PurgeStd_Owner.conf",
    "content": "%include \"common.conf\"\n\npurge_rules\n{\n\tpolicy default\n\t{\n\t\tcondition\n        {\n            owner == \"testuser\"\n        }\n    }\n}\n\nmigration_rules\n{\n    policy default\n    {\n        # migrate all files ASAP\n        condition\n        {\n            last_mod >= 1sec\n        }\n    }\n}\n\n# run migration every sec\nmigration_trigger {\n    trigger_on = periodic;\n    check_interval = 1s;\n}\n\npurge_trigger\n{\n    trigger_on         = global_usage;\n    high_threshold_pct = 0%;\n    low_threshold_pct  = 0%;\n    check_interval     = 1sec;\n}\n\n"
  },
  {
    "path": "tests/test_suite/cfg/PurgeStd_Path_Name.conf",
    "content": "%include \"common.conf\"\n\npurge_rules\n{\n\tpolicy default\n\t{\n\t\tcondition\n        {\n            path == $MATCH_PATH2\n        }\n    }\n}\n\nmigration_rules\n{\n    policy default\n    {\n        # migrate all files ASAP\n        condition\n        {\n            last_mod >= 1sec\n        }\n    }\n}\n\n# run migration every sec\nmigration_trigger {\n    trigger_on = periodic;\n    check_interval = 1s;\n}\n\npurge_trigger\n{\n    trigger_on         = global_usage;\n    high_threshold_pct = 0%;\n    low_threshold_pct  = 0%;\n    check_interval     = 5min;\n}\n\n"
  },
  {
    "path": "tests/test_suite/cfg/PurgeStd_Size.conf",
    "content": "%include \"common.conf\"\n\npurge_rules\n{\n\tpolicy default\n\t{\n\t\tcondition\n        {\n            size >= 10KB\n        }\n    }\n}\n\nmigration_rules\n{\n    policy default\n    {\n        # migrate all files ASAP\n        condition\n        {\n            last_mod >= 1sec\n        }\n    }\n}\n\n# run migration every sec\nmigration_trigger {\n    trigger_on = periodic;\n    check_interval = 1s;\n}\n\npurge_trigger\n{\n    trigger_on         = global_usage;\n    high_threshold_pct = 0%;\n    low_threshold_pct  = 0%;\n    check_interval     = 5min;\n}\n\n"
  },
  {
    "path": "tests/test_suite/cfg/PurgeStd_Type.conf",
    "content": "%include \"common.conf\"\n\npurge_rules\n{\n\tpolicy default\n\t{\n\t\tcondition\n        {\n            type == \"symlink\"\n        }\n    }\n}\n\nmigration_rules\n{\n    policy default\n    {\n        # migrate all files ASAP\n        condition\n        {\n            last_mod >= 1sec\n        }\n    }\n}\n\n# run migration every sec\nmigration_trigger {\n    trigger_on = periodic;\n    check_interval = 1s;\n}\n\npurge_trigger\n{\n    trigger_on         = global_usage;\n    high_threshold_pct = 0%;\n    low_threshold_pct  = 0%;\n    check_interval     = 5min;\n}\n\n"
  },
  {
    "path": "tests/test_suite/cfg/RemovingDir_Dircount.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\n#-----------------------------------------------------\n# remove directory in accordance to the number of data\n#-----------------------------------------------------\n\n%include \"common.conf\"\n\nrmdir_recurse_rules\n{\n    rule default\n    {\n        condition{ dircount > 1 }\n    }\n}\n"
  },
  {
    "path": "tests/test_suite/cfg/RemovingDir_ExtendedAttribute.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\n#-----------------------------------------------------\n# remove directory in accordance to the attributes of data\n#-----------------------------------------------------\n\n%include \"common.conf\"\n\nrmdir_recurse_rules\n{\n    rule default\n    {\n        condition { xattr.user.foo == \"abc.[1-5].*\" }\n    }\n}\n"
  },
  {
    "path": "tests/test_suite/cfg/RemovingDir_LastAccess.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\n#-----------------------------------------------------\n# remove directory in accordance to the last access\n#-----------------------------------------------------\n\n%include \"common.conf\"\n\nrmdir_recurse_rules\n{\n    rule default\n    {\n        condition { last_access > 10sec }\n    }\n}\n"
  },
  {
    "path": "tests/test_suite/cfg/RemovingDir_LastModification.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\n#-----------------------------------------------------\n# remove directory in accordance to the last modification\n#-----------------------------------------------------\n\n%include \"common.conf\"\n\nrmdir_recurse_rules\n{\n    rule default\n    {\n        condition { last_mod > 10sec }\n    }\n}\n"
  },
  {
    "path": "tests/test_suite/cfg/RemovingDir_Mixed.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\n#-----------------------------------------------------\n# remove directory in accordance to their name\n#-----------------------------------------------------\n\n%include \"common.conf\"\n\nfileclass empty_dir { definition{ type == directory and dircount == 0 } }\nfileclass dir1 { definition { type == directory and name == \"dir1\" } }\n\ndefine_policy rmdir\n{\n    scope { type == directory }\n    status_manager = none;\n    default_action = common.rmdir;\n    default_lru_sort_attr = last_mod;\n}\n\nrmdir_rules {\n    ignore { tree == $NO_RM_TREE }\n\n    rule rmdir_empty {\n        target_fileclass = empty_dir;\n        condition { last_mod > 10s }\n    }\n\n    rule rmdir_recurse {\n        target_fileclass = dir1;\n        action = cmd(\"rm -rf {fullpath}\");\n        condition { last_mod >= 0s }\n    }\n}\n"
  },
  {
    "path": "tests/test_suite/cfg/RemovingDir_OST.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\n#-----------------------------------------------------\n# remove directory in accordance to their owner\n#-----------------------------------------------------\n\n%include \"common.conf\"\n\nrmdir_recurse_rules\n{\n    rule default\n    {\n        condition { ost_index = 1 }\n    }\n}\n"
  },
  {
    "path": "tests/test_suite/cfg/RemovingDir_Owner.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\n#-----------------------------------------------------\n# remove directory in accordance to their owner\n#-----------------------------------------------------\n\n%include \"common.conf\"\n\nrmdir_recurse_rules\n{\n    rule default\n    {\n        condition { owner == \"testuser\" }\n    }\n}\n"
  },
  {
    "path": "tests/test_suite/cfg/RemovingDir_Path_Name.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\n#-----------------------------------------------------\n# remove directory in accordance to their name\n#-----------------------------------------------------\n\n%include \"common.conf\"\n\nrmdir_recurse_rules\n{\n    rule default\n    {\n        condition { name == \"dir1\" }\n    }\n}\n"
  },
  {
    "path": "tests/test_suite/cfg/RemovingEmptyDir.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\n#-----------------------------------------------------\n# remove empty directory in accordance to a duration\n#-----------------------------------------------------\n\n%include \"common.conf\"\n\nrmdir_recurse_rules\n{\n    rule default\n    {\n        condition { dircount == 0 and last_mod > 10sec }\n    }\n}\n\n"
  },
  {
    "path": "tests/test_suite/cfg/TriggerPurge_GroupQuotaExceeded.conf",
    "content": "%include \"common.conf\"\n\nPurge_Trigger\n{\n    trigger_on = group_usage(root);\n    high_threshold_pct = 25%;\n    low_threshold_pct = 15%;\n    check_interval = 5min;\n}\n\npurge_rules { policy default { condition { last_mod >= 0 } } }\n"
  },
  {
    "path": "tests/test_suite/cfg/TriggerPurge_OstQuotaExceeded.conf",
    "content": "%include \"common.conf\"\n\nPurge_Trigger\n{\n    trigger_on = OST_usage;\n    high_threshold_pct = 75%;\n    low_threshold_pct = 65%;\n    check_interval = 5min;\n}\n\npurge_rules { policy default { condition { last_mod >= 0 } } }\n"
  },
  {
    "path": "tests/test_suite/cfg/TriggerPurge_QuotaExceeded.conf",
    "content": "%include \"common.conf\"\n\npurge_trigger\n{\n    trigger_on = global_usage;\n    high_threshold_pct = 75%;\n    low_threshold_pct = 74.5%;\n    check_interval = 5min;\n}\n\npurge_rules { policy default { condition { last_mod >= 0 } } }\n"
  },
  {
    "path": "tests/test_suite/cfg/TriggerPurge_UserQuotaExceeded.conf",
    "content": "%include \"common.conf\"\n\nPurge_Trigger\n{\n    trigger_on = user_usage(root);\n    high_threshold_pct = 25%;\n    low_threshold_pct = 15%;\n    check_interval = 5min;\n}\n\npurge_rules { policy default { condition { last_mod >= 0 } } }\n"
  },
  {
    "path": "tests/test_suite/cfg/acct.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\nGeneral\n{\n\tfs_path = $RH_ROOT;\n\tfs_type = $FS_TYPE;\n}\n\n# ChangeLog Reader configuration\n# Parameters for processing MDT changelogs :\nChangeLog\n{\n    # 1 MDT block for each MDT :\n    MDT\n    {\n        # name of the first MDT\n        mdt_name  = \"MDT0000\" ;\n\n        # id of the persistent changelog reader\n        # as returned by \"lctl changelog_register\" command\n        reader_id = \"cl1\" ;\n    }\n    force_polling = TRUE;\n    polling_interval = 1s;\n}\n\nLog\n{\n    # Log verbosity level\n    # Possible values are: CRIT, MAJOR, EVENT, VERB, DEBUG, FULL\n    debug_level = EVENT;\n\n    # Log file\n    log_file = stdout;\n\n    # File for reporting purge events\n    report_file = \"/dev/null\";\n\n    # set alert_file, alert_mail or both depending on the alert method you wish\n    alert_file = \"/dev/null\";\n\n}\n\nListManager\n{\n\tMySQL\n\t{\n\t\tserver = \"localhost\";\n\t\tdb = $RH_DB;\n        user = \"robinhood\";\n\t\t# password or password_file are mandatory\n\t\tpassword = \"robinhood\";\n        engine = InnoDB;\n\t}\n\n\tSQLite {\n\t        db_file = \"/tmp/robinhood_sqlite_db\" ;\n        \tretry_delay_microsec = 1000 ;\n\t}\n    accounting = $ACCT_SWITCH;\n}\n\n# for tests with backup purpose\nbackup_config\n{\n    root = \"/tmp/backend\";\n    mnt_type=ext4;\n    check_mounted = FALSE;\n    recovery_action = common.copy;\n}\n\n# for tests with shook purpose\nshook_config\n{\n    root = \"/tmp/backend\";\n    mnt_type=ext4;\n    check_mounted = FALSE;\n    recovery_action = common.copy;\n}\n"
  },
  {
    "path": "tests/test_suite/cfg/alert.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\n#-----------------------------------------------------\n# send alert in accordance to the number of data\n#-----------------------------------------------------\n\n%include \"common.conf\"\n\n# alert policy definition\n%include \"../../../doc/templates/includes/alerts.inc\"\n\nfileclass nonempty_dir {\n    definition { type == directory and dircount > 1 }\n}\nfileclass extended_attribute {\n    definition { type == \"file\" and xattr.user.foo == \"abc.[1-5].*\" }\n}\nfileclass last_access_5s {\n    definition { type == \"file\" and last_access < 5s }\n}\nfileclass last_mod_5s {\n    definition {  type == \"file\" and last_mod < 5s }\n}\nfileclass root_owner {\n    definition { type == \"file\" and owner == \"root\" }\n}\nfileclass file1 {\n    definition { type == \"file\" and name == \"file.1\" }\n}\nfileclass size10k {\n    definition { type == \"file\" and size >= 10KB }\n}\nfileclass type_file {\n    definition { type == \"file\" }\n}\n\n### Alerts specification\nalert_rules {\n    ignore_fileclass = special;\n\n    rule raise_alert {\n        ## HERE, list all fileclasses that must raise an alert:\n        target_fileclass = $ALERT_CLASS;\n\n        # customize alert title:\n        action_params { title = \"entry matches '{fileclass}' ({rule})\"; }\n\n        # apply to all matching fileclasses in the policy scope\n        condition = true;\n    }\n\n    # clear alert status\n    rule default {\n        action = none;\n        action_params { alert = clear; }\n        # apply to all entries that don't match 'alert_check'\n        condition = true;\n    }\n}\n"
  },
  {
    "path": "tests/test_suite/cfg/alert_ost.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\n#-----------------------------------------------------\n# send alert in accordance to the number of data\n#-----------------------------------------------------\n\n%include \"common.conf\"\n\n# alert policy definition\n%include \"../../../doc/templates/includes/alerts.inc\"\n\nfileclass nonempty_dir {\n    definition { type == directory and dircount > 1 }\n}\nfileclass extended_attribute {\n    definition { type == \"file\" and xattr.user.foo == \"abc.[1-5].*\" }\n}\nfileclass last_access_1min {\n    definition { type == \"file\" and last_access < 1min }\n}\nfileclass last_mod_1min {\n    definition {  type == \"file\" and last_mod < 1min }\n}\nfileclass ost1 {\n    definition { type == \"file\" and ost_index == 1 }\n}\nfileclass root_owner {\n    definition { type == \"file\" and owner == \"root\" }\n}\nfileclass file1 {\n    definition { type == \"file\" and name == \"file.1\" }\n}\nfileclass size10k {\n    definition { type == \"file\" and size >= 10KB }\n}\nfileclass type_file {\n    definition { type == \"file\" }\n}\n\n### Alerts specification\nalert_rules {\n    rule raise_alert {\n        ## HERE, list all fileclasses that must raise an alert:\n        target_fileclass = $ALERT_CLASS;\n\n        # customize alert title:\n        action_params { title = \"entry matches '{fileclass}' ({rule})\"; }\n\n        # apply to all matching fileclasses in the policy scope\n        condition = true;\n    }\n\n    # clear alert status\n    rule default {\n        action = none;\n        action_params { alert = clear; }\n        # apply to all entries that don't match 'alert_check'\n        condition = true;\n    }\n}\n"
  },
  {
    "path": "tests/test_suite/cfg/cl_batch.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\nGeneral\n{\n    fs_path = $RH_ROOT;\n    fs_type = $FS_TYPE;\n    uid_gid_as_numbers = $RBH_NUM_UIDGID;\n    last_access_only_atime = $RBH_TEST_LAST_ACCESS_ONLY_ATIME;\n}\n\n# ChangeLog Reader configuration\n# Parameters for processing MDT changelogs :\nChangeLog\n{\n    # 1 MDT block for each MDT :\n    MDT\n    {\n        # name of the first MDT\n        mdt_name  = \"MDT0000\" ;\n\n        # id of the persistent changelog reader\n        # as returned by \"lctl changelog_register\" command\n        reader_id = \"cl1\" ;\n    }\n    force_polling = TRUE;\n    polling_interval = 1s;\n    queue_max_age = 5s;\n    queue_max_size = 1000;\n\n    mds_has_lu543 = FALSE;\n    mds_has_lu1331 = FALSE;\n}\n\nLog\n{\n    # Log verbosity level\n    # Possible values are: CRIT, MAJOR, EVENT, VERB, DEBUG, FULL\n    debug_level = EVENT;\n\n    # Log file\n    log_file = stdout;\n\n    # File for reporting purge events\n    report_file = \"/dev/null\";\n\n    # set alert_file, alert_mail or both depending on the alert method you wish\n    alert_file = \"/tmp/rh_alert.log\";\n\n    changelogs_file=\"/tmp/cl.dump\";\n}\n\nListManager\n{\n\tMySQL\n\t{\n\t\tserver = \"localhost\";\n\t\tdb = $RH_DB;\n        user = \"robinhood\";\n\t\t# password or password_file are mandatory\n\t\tpassword = \"robinhood\";\n        engine = InnoDB;\n\t}\n\n\tSQLite {\n\t        db_file = \"/tmp/robinhood_sqlite_db\" ;\n        \tretry_delay_microsec = 1000 ;\n\t}\n}\n\n# for tests with backup purpose\nbackup_config\n{\n    root = \"/tmp/backend\";\n    mnt_type = ext4;\n    check_mounted = no;\n    recovery_action = common.copy;\n}\n# for tests with shook purpose\nshook_config\n{\n    root = \"/tmp/backend\";\n    mnt_type=ext4;\n    check_mounted = FALSE;\n    recovery_action = common.copy;\n}\n\nfileclass special {\n\tdefinition { tree == \".shook\" }\n}\n\n# Lustre/HSM specific 
configuration\nlhsm_config {\n    rebind_cmd = \"/usr/sbin/lhsmtool_posix --hsm_root=/tmp/backend --archive {archive_id} --rebind {oldfid} {newfid} {fsroot}\";\n}\n\n# this one is generated from original template\n%include \"$RBH_TEST_POLICIES\"\n# always include rmdir policies (tested with all tests flavors)\n%include \"../../../doc/templates/includes/rmdir_old.inc\"\n"
  },
  {
    "path": "tests/test_suite/cfg/commit_update.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\nGeneral\n{\n    fs_path = $RH_ROOT;\n    fs_type = $FS_TYPE;\n    uid_gid_as_numbers = $RBH_NUM_UIDGID;\n    last_access_only_atime = $RBH_TEST_LAST_ACCESS_ONLY_ATIME;\n}\n\n# ChangeLog Reader configuration\n# Parameters for processing MDT changelogs :\nChangeLog\n{\n    # 1 MDT block for each MDT :\n    MDT\n    {\n        # name of the first MDT\n        mdt_name  = \"MDT0000\" ;\n\n        # id of the persistent changelog reader\n        # as returned by \"lctl changelog_register\" command\n        reader_id = \"cl1\" ;\n    }\n    force_polling = TRUE;\n    polling_interval = 1s;\n    mds_has_lu543 = FALSE;\n    mds_has_lu1331 = FALSE;\n    queue_max_age = 1;\n\n    commit_update_max_delay = 3;                                                 \n    commit_update_max_delta = 5;\n}\n\nLog\n{\n    # Log verbosity level\n    # Possible values are: CRIT, MAJOR, EVENT, VERB, DEBUG, FULL\n    debug_level = EVENT;\n\n    # Log file\n    log_file = stdout;\n\n    # File for reporting purge events\n    report_file = \"/dev/null\";\n\n    # set alert_file, alert_mail or both depending on the alert method you wish\n    alert_file = \"/tmp/rh_alert.log\";\n\n}\n\nListManager\n{\n\tMySQL\n\t{\n\t\tserver = \"localhost\";\n\t\tdb = $RH_DB;\n        user = \"robinhood\";\n\t\t# password or password_file are mandatory\n\t\tpassword = \"robinhood\";\n        engine = InnoDB;\n\t}\n\n\tSQLite {\n\t        db_file = \"/tmp/robinhood_sqlite_db\" ;\n        \tretry_delay_microsec = 1000 ;\n\t}\n}\n\n# for tests with backup purpose\nbackup_config\n{\n    root = \"/tmp/backend\";\n    mnt_type = ext4;\n    check_mounted = no;\n    recovery_action = common.copy;\n}\n# for tests with shook purpose\nshook_config\n{\n    root = \"/tmp/backend\";\n    mnt_type=ext4;\n    check_mounted = FALSE;\n    recovery_action = common.copy;\n}\n\n# Lustre/HSM specific 
configuration\nlhsm_config {\n    rebind_cmd = \"/usr/sbin/lhsmtool_posix --hsm_root=/tmp/backend --archive {archive_id} --rebind {oldfid} {newfid} {fsroot}\";\n}\n\n# this one is generated from original template\n%include \"$RBH_TEST_POLICIES\"\n# always include rmdir policies (tested with all tests flavors)\n%include \"../../../doc/templates/includes/rmdir_old.inc\"\n"
  },
  {
    "path": "tests/test_suite/cfg/common.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\nGeneral\n{\n    fs_path = $RH_ROOT;\n    fs_type = $FS_TYPE;\n    uid_gid_as_numbers = $RBH_NUM_UIDGID;\n    last_access_only_atime = $RBH_TEST_LAST_ACCESS_ONLY_ATIME;\n    lustre_projid = yes;\n}\n\n# ChangeLog Reader configuration\n# Parameters for processing MDT changelogs :\nChangeLog\n{\n    # 1 MDT block for each MDT :\n    MDT\n    {\n        # name of the first MDT\n        mdt_name  = \"MDT0000\" ;\n\n        # id of the persistent changelog reader\n        # as returned by \"lctl changelog_register\" command\n        reader_id = \"cl1\" ;\n    }\n    force_polling = TRUE;\n    polling_interval = 1s;\n    queue_max_age = 1s;\n\n    mds_has_lu543 = FALSE;\n    mds_has_lu1331 = FALSE;\n}\n\nLog\n{\n    # Log verbosity level\n    # Possible values are: CRIT, MAJOR, EVENT, VERB, DEBUG, FULL\n    debug_level = EVENT;\n\n    # Log file\n    log_file = stdout;\n\n    # File for reporting purge events\n    report_file = \"/dev/null\";\n\n    # set alert_file, alert_mail or both depending on the alert method you wish\n    alert_file = \"/tmp/rh_alert.log\";\n\n}\n\nListManager\n{\n\tMySQL\n\t{\n\t\tserver = \"localhost\";\n\t\tdb = $RH_DB;\n        user = \"robinhood\";\n\t\t# password or password_file are mandatory\n\t\tpassword = \"robinhood\";\n        engine = InnoDB;\n\t}\n\n\tSQLite {\n\t        db_file = \"/tmp/robinhood_sqlite_db\" ;\n        \tretry_delay_microsec = 1000 ;\n\t}\n}\n\n# for tests with backup purpose\nbackup_config\n{\n    root = \"/tmp/backend\";\n    mnt_type = ext4;\n    check_mounted = no;\n    recovery_action = common.copy;\n}\n# for tests with shook purpose\nshook_config\n{\n    root = \"/tmp/backend\";\n    mnt_type=ext4;\n    check_mounted = FALSE;\n    recovery_action = common.copy;\n}\n\nfileclass special {\n\tdefinition { tree == \".shook\" }\n}\n\n# Lustre/HSM specific configuration\nlhsm_config {\n    
rebind_cmd = \"/usr/sbin/lhsmtool_posix --hsm_root=/tmp/backend --archive {archive_id} --rebind {oldfid} {newfid} {fsroot}\";\n}\n\n# this one is generated from original template\n%include \"$RBH_TEST_POLICIES\"\n# always include rmdir policies (tested with all tests flavors)\n%include \"../../../doc/templates/includes/rmdir_old.inc\"\n"
  },
  {
    "path": "tests/test_suite/cfg/common_noclass.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\nGeneral\n{\n    fs_path = $RH_ROOT;\n    fs_type = $FS_TYPE;\n    uid_gid_as_numbers = $RBH_NUM_UIDGID;\n    last_access_only_atime = $RBH_TEST_LAST_ACCESS_ONLY_ATIME;\n}\n\n# ChangeLog Reader configuration\n# Parameters for processing MDT changelogs :\nChangeLog\n{\n    # 1 MDT block for each MDT :\n    MDT\n    {\n        # name of the first MDT\n        mdt_name  = \"MDT0000\" ;\n\n        # id of the persistent changelog reader\n        # as returned by \"lctl changelog_register\" command\n        reader_id = \"cl1\" ;\n    }\n    force_polling = TRUE;\n    polling_interval = 1s;\n    queue_max_age = 1s;\n\n    mds_has_lu543 = FALSE;\n    mds_has_lu1331 = FALSE;\n}\n\nLog\n{\n    # Log verbosity level\n    # Possible values are: CRIT, MAJOR, EVENT, VERB, DEBUG, FULL\n    debug_level = EVENT;\n\n    # Log file\n    log_file = stdout;\n\n    # File for reporting purge events\n    report_file = \"/dev/null\";\n\n    # set alert_file, alert_mail or both depending on the alert method you wish\n    alert_file = \"/tmp/rh_alert.log\";\n\n}\n\nListManager\n{\n\tMySQL\n\t{\n\t\tserver = \"localhost\";\n\t\tdb = $RH_DB;\n        user = \"robinhood\";\n\t\t# password or password_file are mandatory\n\t\tpassword = \"robinhood\";\n        engine = InnoDB;\n\t}\n\n\tSQLite {\n\t        db_file = \"/tmp/robinhood_sqlite_db\" ;\n        \tretry_delay_microsec = 1000 ;\n\t}\n}\n\n# for tests with backup purpose\nbackup_config\n{\n    root = \"/tmp/backend\";\n    mnt_type = ext4;\n    check_mounted = no;\n    recovery_action = common.copy;\n}\n# for tests with shook purpose\nshook_config\n{\n    root = \"/tmp/backend\";\n    mnt_type=ext4;\n    check_mounted = FALSE;\n    recovery_action = common.copy;\n}\n\n# Lustre/HSM specific configuration\nlhsm_config {\n    rebind_cmd = \"/usr/sbin/lhsmtool_posix --hsm_root=/tmp/backend --archive {archive_id} --rebind 
{oldfid} {newfid} {fsroot}\";\n}\n\n# this one is generated from original template\n%include \"$RBH_TEST_POLICIES\"\n# always include rmdir policies (tested with all tests flavors)\n%include \"../../../doc/templates/includes/rmdir_old.inc\"\n"
  },
  {
    "path": "tests/test_suite/cfg/compress.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\nGeneral\n{\n\tfs_path = $RH_ROOT;\n\tfs_type = $FS_TYPE;\n}\n\n# ChangeLog Reader configuration\n# Parameters for processing MDT changelogs :\nChangeLog\n{\n    # 1 MDT block for each MDT :\n    MDT\n    {\n        # name of the first MDT\n        mdt_name  = \"MDT0000\" ;\n\n        # id of the persistent changelog reader\n        # as returned by \"lctl changelog_register\" command\n        reader_id = \"cl1\" ;\n    }\n    force_polling = TRUE;\n    polling_interval = 1s;\n    mds_has_lu543 = FALSE;\n    mds_has_lu1331 = FALSE;\n}\n\nLog\n{\n    # Log verbosity level\n    # Possible values are: CRIT, MAJOR, EVENT, VERB, DEBUG, FULL\n    debug_level = EVENT;\n\n    # Log file\n    log_file = stdout;\n\n    # File for reporting purge events\n    report_file = \"/dev/null\";\n\n    # set alert_file, alert_mail or both depending on the alert method you wish\n    alert_file = \"/tmp/rh_alert.log\";\n\n}\n\nListManager\n{\n\tMySQL\n\t{\n\t\tserver = \"localhost\";\n\t\tdb = $RH_DB;\n        user = \"robinhood\";\n\t\t# password or password_file are mandatory\n\t\tpassword = \"robinhood\";\n        engine = InnoDB;\n\t}\n\n\tSQLite {\n\t        db_file = \"/tmp/robinhood_sqlite_db\" ;\n        \tretry_delay_microsec = 1000 ;\n\t}\n}\n\nbackup_config\n{\n    root = \"/tmp/backend\";\n    mnt_type=ext4;\n    check_mounted = FALSE;\n    compress = $compress;\n    recovery_action = common.copy;\n}\n# for tests with shook purpose\nshook_config\n{\n    root = \"/tmp/backend\";\n    mnt_type=ext4;\n    check_mounted = FALSE;\n    compress = $compress;\n    recovery_action = common.copy;\n}\n\n%include \"$RBH_TEST_POLICIES\"\n\nmigration_rules\n{\n    policy default\n    {\n        condition { last_access >= 0 }\n    }\n}\n"
  },
  {
    "path": "tests/test_suite/cfg/info_collect.conf",
    "content": "%include \"common.conf\"\n\nEntryProcessor\n{\n\tmatch_classes = FALSE;\n}\n\nMigration_rules\n{\n    policy default\n    {\n        condition { last_access >= 0 }\n    }\n}\n"
  },
  {
    "path": "tests/test_suite/cfg/info_collect2.conf",
    "content": "%include \"common.conf\"\n\nEntryProcessor\n{\n\tmatch_classes = TRUE;\n}\n\nMigration_rules\n{\n\tPolicy default\n\t{\n\t\tcondition { last_mod > 1h }\n\t}\n}\n\nPurge_rules\n{\n\tPolicy default\n\t{\n\t\tcondition { last_access > 1h }\n\t}\n}\n"
  },
  {
    "path": "tests/test_suite/cfg/log1.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\n%include \"log_common.conf\"\n\n# test log1: logging to files\n\nLog\n{\n    # Log verbosity level\n    # Possible values are: CRIT, MAJOR, EVENT, VERB, DEBUG, FULL\n    debug_level = EVENT;\n\n    # Log file\n    log_file = /tmp/test_log.1 ;\n\n    # File for reporting purge events\n    report_file = /tmp/test_report.1 ;\n\n    # set alert_file, alert_mail or both depending on the alert method you wish\n    alert_file = /tmp/test_alert.1 ;\n\n    batch_alert_max = 1; # no batching\n    alert_show_attrs = FALSE ; # no attrs\n}\n\nPurge_rules {\n    Policy default { condition { last_access >= 0s } }\n}\n"
  },
  {
    "path": "tests/test_suite/cfg/log1b.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\n%include \"log_common.conf\"\n\n# test log1: logging to files\n\nLog\n{\n    # Log verbosity level\n    # Possible values are: CRIT, MAJOR, EVENT, VERB, DEBUG, FULL\n    debug_level = EVENT;\n\n    # Log file\n    log_file = /tmp/test_log.1 ;\n\n    # File for reporting purge events\n    report_file = /tmp/test_report.1 ;\n\n    # set alert_file, alert_mail or both depending on the alert method you wish\n    alert_file = /tmp/test_alert.1 ;\n\n    batch_alert_max = 10; # batch up to 10 alerts\n    alert_show_attrs = FALSE ; # no attrs\n}\n\nPurge_rules {\n    Policy default { condition { last_access >= 0s } }\n}\n"
  },
  {
    "path": "tests/test_suite/cfg/log2.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\n%include \"log_common.conf\"\n\n# test log2: logging to syslog\n\nLog\n{\n    # Log verbosity level\n    # Possible values are: CRIT, MAJOR, EVENT, VERB, DEBUG, FULL\n    debug_level = EVENT;\n\n    syslog_facility = local1.info;\n\n    # Log file\n    log_file = syslog;\n\n    # File for reporting purge events\n    report_file = syslog;\n\n    # set alert_file, alert_mail or both depending on the alert method you wish\n    alert_file = syslog;\n\n    batch_alert_max = 1; # no batching\n    alert_show_attrs = FALSE ; # no attrs\n}\n\nPurge_rules {\n    Policy default { condition { last_access >= 0s } }\n}\n"
  },
  {
    "path": "tests/test_suite/cfg/log2b.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\n%include \"log_common.conf\"\n\n# test log2: logging to syslog\n\nLog\n{\n    # Log verbosity level\n    # Possible values are: CRIT, MAJOR, EVENT, VERB, DEBUG, FULL\n    debug_level = EVENT;\n\n    syslog_facility = local1.info;\n\n    # Log file\n    log_file = syslog;\n\n    # File for reporting purge events\n    report_file = syslog;\n\n    # set alert_file, alert_mail or both depending on the alert method you wish\n    alert_file = syslog;\n\n    batch_alert_max = 10; # batch up to 10 alerts\n    alert_show_attrs = FALSE ; # no attrs\n}\n\nPurge_rules {\n    Policy default { condition { last_access >= 0s } }\n}\n"
  },
  {
    "path": "tests/test_suite/cfg/log3.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\n%include \"log_common.conf\"\n\n# test log3: logging to stdout/stderr\n\nLog\n{\n    # Log verbosity level\n    # Possible values are: CRIT, MAJOR, EVENT, VERB, DEBUG, FULL\n    debug_level = EVENT;\n\n    # Log file\n    log_file = stderr;\n\n    # File for reporting purge events\n    report_file = stdout;\n\n    # set alert_file, alert_mail or both depending on the alert method you wish\n    alert_file = stdout;\n\n    batch_alert_max = 1; # no batching\n    alert_show_attrs = FALSE ; # no attrs\n}\n\nPurge_rules {\n    Policy default { condition { last_access >= 0s } }\n}\n"
  },
  {
    "path": "tests/test_suite/cfg/log3b.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\n%include \"log_common.conf\"\n\n# test log3: logging to stdout/stderr\n\nLog\n{\n    # Log verbosity level\n    # Possible values are: CRIT, MAJOR, EVENT, VERB, DEBUG, FULL\n    debug_level = EVENT;\n\n    # Log file\n    log_file = stderr;\n\n    # File for reporting purge events\n    report_file = stdout;\n\n    # set alert_file, alert_mail or both depending on the alert method you wish\n    alert_file = stdout;\n\n    batch_alert_max = 10; # batch up to 10 alerts\n    alert_show_attrs = FALSE ; # no attrs\n}\n\nPurge_rules {\n    Policy default { condition { last_access >= 0s } }\n}\n"
  },
  {
    "path": "tests/test_suite/cfg/log_common.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\nGeneral\n{\n\tfs_path = $RH_ROOT;\n\tfs_type = $FS_TYPE;\n}\n\n# ChangeLog Reader configuration\n# Parameters for processing MDT changelogs :\nChangeLog\n{\n    # 1 MDT block for each MDT :\n    MDT\n    {\n        # name of the first MDT\n        mdt_name  = \"MDT0000\" ;\n\n        # id of the persistent changelog reader\n        # as returned by \"lctl changelog_register\" command\n        reader_id = \"cl1\" ;\n    }\n    force_polling = TRUE;\n    polling_interval = 1s;\n}\n\nListManager\n{\n\tMySQL\n\t{\n\t\tserver = \"localhost\";\n\t\tdb = $RH_DB;\n\t\tuser = \"robinhood\";\n\t\t# password or password_file are mandatory\n\t\tpassword = \"robinhood\";\n        engine = InnoDB;\n\t}\n\n\tSQLite {\n\t        db_file = \"/tmp/robinhood_sqlite_db\" ;\n        \tretry_delay_microsec = 1000 ;\n\t}\n}\n\nFS_Scan {\n\tscan_interval = 100;\n}\n\n# for tests with backup purpose\nbackup_config\n{\n    root = \"/tmp/backend\";\n    mnt_type=ext4;\n    check_mounted = FALSE;\n    recovery_action = common.copy;\n}\n# for tests with shook purpose\nshook_config\n{\n    root = \"/tmp/backend\";\n    mnt_type=ext4;\n    check_mounted = FALSE;\n    recovery_action = common.copy;\n}\n\n\n# \"legacy policies\"\n%include \"$RBH_TEST_POLICIES\"\n# alert policy definition\n%include \"../../../doc/templates/includes/alerts.inc\"\n\n# define common alert rules for all log* tests\nfileclass file1 { definition { name == \"file.1\" } }\nfileclass file2 { definition { name == \"file.2\" } }\n\n### Alerts specification\nalert_rules {\n    # don't check entries more frequently than daily\n    ignore { last_check < 1d }\n    # don't check entries while they are modified\n    ignore { last_mod < 1h }\n\n    rule raise_alert {\n        ## HERE, list all fileclasses that must raise an alert:\n        target_fileclass = file1;\n        target_fileclass = file2;\n\n        # 
customize alert title:\n        action_params { title = \"entry matches '{fileclass}' ({rule})\"; }\n\n        # apply to all matching fileclasses in the policy scope\n        condition = true;\n    }\n\n    # clear alert status\n    rule default {\n        action = none;\n        action_params { alert = clear; }\n        # apply to all entries that don't match 'alert_check'\n        condition = true;\n    }\n}\n"
  },
  {
    "path": "tests/test_suite/cfg/lru_purge.conf",
    "content": "%include \"common.conf\"\n\nmigration_rules\n{\n\trule default { condition { last_mod > 0 } }\n}\n\npurge_rules\n{\n\trule default { condition { last_mod > 0 } }\n}\n\npurge_parameters {\n\t# serialize processing to make the check easy in test output\n\tnb_threads = 1;\n\tqueue_size = 1;\n\n\t# set a small result size to check request continuation\n\tdb_result_size_max = 2;\t\n\n\tlru_sort_attr = $SORT_PARAM;\n}\n"
  },
  {
    "path": "tests/test_suite/cfg/lru_sort_access.conf",
    "content": "%include \"common.conf\"\n\nmigration_rules\n{\n\tpolicy default { condition { last_access > 29s } }\n}\n\nmigration_parameters {\n\t# serialize processing to make the check easy in test output\n\tnb_threads = 1;\n\tqueue_size = 1;\n\n\t# set a small result size to check request continuation\n\tdb_result_size_max = 5;\t\n\n\tlru_sort_attr = last_access;\n}\n"
  },
  {
    "path": "tests/test_suite/cfg/lru_sort_archive.conf",
    "content": "%include \"common.conf\"\n\nmigration_rules\n{\n\tpolicy default { condition { last_archive == 0 or last_archive > 10s } }\n}\n\nmigration_parameters {\n\t# serialize processing to make the check easy in test output\n\tnb_threads = 1;\n\tqueue_size = 1;\n\n\t# set a small result size to check request continuation\n\tdb_result_size_max = 4;\t\n\n\tlru_sort_attr = last_archive;\n}\n\n"
  },
  {
    "path": "tests/test_suite/cfg/lru_sort_creat_last_arch.conf",
    "content": "%include \"common.conf\"\n\nmigration_rules\n{\n\tpolicy default { condition { last_archive == 0 and creation > 19s } }\n}\n\nmigration_parameters {\n\t# serialize processing to make the check easy in test output\n\tnb_threads = 1;\n\tqueue_size = 1;\n\n\t# set a small result size to check request continuation\n\tdb_result_size_max = 5;\t\n\n\tlru_sort_attr = creation;\n}\n\n"
  },
  {
    "path": "tests/test_suite/cfg/lru_sort_creation.conf",
    "content": "%include \"common.conf\"\n\nmigration_rules\n{\n\tpolicy default { condition { creation > 39s } }\n}\n\nmigration_parameters {\n\t# serialize processing to make the check easy in test output\n\tnb_threads = 1;\n\tqueue_size = 1;\n\n\t# set a small result size to check request continuation\n\tdb_result_size_max = 5;\t\n\n\tlru_sort_attr = creation;\n}\n"
  },
  {
    "path": "tests/test_suite/cfg/lru_sort_mod.conf",
    "content": "%include \"common.conf\"\n\nmigration_rules\n{\n\tpolicy default { condition { last_mod > 24s } }\n}\n\nmigration_parameters {\n\t# serialize processing to make the check easy in test output\n\tnb_threads = 1;\n\tqueue_size = 1;\n\n\t# set a small result size to check request continuation\n\tdb_result_size_max = 3;\t\n\n\tlru_sort_attr = last_mod;\n}\n"
  },
  {
    "path": "tests/test_suite/cfg/lru_sort_mod_2pass.conf",
    "content": "%include \"common.conf\"\n\nmigration_rules\n{\n\tpolicy default { condition { last_mod > 30s } }\n}\n\nmigration_parameters {\n\t# serialize processing to make the check easy in test output\n\tnb_threads = 1;\n\tqueue_size = 1;\n\n\t# set a small result size to check request continuation\n\tdb_result_size_max = 5;\t\n\n\tlru_sort_attr = last_mod;\n}\n"
  },
  {
    "path": "tests/test_suite/cfg/lru_sort_size_asc.conf",
    "content": "%include \"common.conf\"\n\nmigration_rules\n{\n\tpolicy default { condition { size >= 2M } }\n}\n\nmigration_parameters {\n\t# serialize processing to make the check easy in test output\n\tnb_threads = 1;\n\tqueue_size = 1;\n\n\t# set a small result size to check request continuation\n\tdb_result_size_max = 3;\n\n\tmax_action_count = 4;\n\n\tlru_sort_attr = size(asc);\n}\n"
  },
  {
    "path": "tests/test_suite/cfg/lru_sort_size_desc.conf",
    "content": "%include \"common.conf\"\n\nmigration_rules\n{\n\tpolicy default { condition { size >= 2M } }\n}\n\nmigration_parameters {\n\t# serialize processing to make the check easy in test output\n\tnb_threads = 1;\n\tqueue_size = 1;\n\n\t# set a small result size to check request continuation\n\tdb_result_size_max = 3;\n\n\tmax_action_count = 4;\n\n\tlru_sort_attr = size(desc);\n}\n"
  },
  {
    "path": "tests/test_suite/cfg/migr_fail.conf",
    "content": "%include \"common.conf\"\n\nFileclass tofail { definition { name == \"*.fail\" } }\n\nmigration_parameters {\n    # suspend migration if error rate > 50% and nb errors > 5\n    suspend_error_pct = 50%;\n    suspend_error_min = 5;\n\n    max_action_count = 10;\n\n\tlru_sort_attr = creation;\n\n    action = cmd(\"./cfg/migr_failer.sh {arg}\");\n    action_params { arg = \"ok\"; }\n}\n\nmigration_rules {\n\trule fail_me {\n                     target_fileclass = tofail;\n                     condition { creation > 1s }\n                     action_params { arg = \"fail\"; }\n                 }\n\trule default { condition { creation > 1s } }\n}\n"
  },
  {
    "path": "tests/test_suite/cfg/migr_failer.sh",
    "content": "#!/bin/bash\n\nhints=$1\n\n[ \"$hints\" = \"fail\" ] && exit 1\nexit 0\n"
  },
  {
    "path": "tests/test_suite/cfg/overflow.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\nGeneral\n{\n\tfs_path = $RH_ROOT;\n\tfs_type = $FS_TYPE;\n    fs_key = $FS_KEY ;\n}\n\nListManager\n{\n    MySQL\n    {\n        server = \"localhost\";\n\t\tdb = $RH_DB;\n        user = \"robinhood\";\n        # password or password_file are mandatory\n        password = \"robinhood\";\n    }\n}\n\n"
  },
  {
    "path": "tests/test_suite/cfg/rbh25.sql",
    "content": "-- MySQL dump 10.13  Distrib 5.1.73, for redhat-linux-gnu (x86_64)\n--\n-- Host: localhost    Database: robinhood_lustre\n-- ------------------------------------------------------\n-- Server version\t5.1.73\n\n/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;\n/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;\n/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;\n/*!40101 SET NAMES utf8 */;\n/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;\n/*!40103 SET TIME_ZONE='+00:00' */;\n/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;\n/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;\n/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;\n/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;\n\n--\n-- Table structure for table `ACCT_STAT`\n--\n\nDROP TABLE IF EXISTS `ACCT_STAT`;\n/*!40101 SET @saved_cs_client     = @@character_set_client */;\n/*!40101 SET character_set_client = utf8 */;\nCREATE TABLE `ACCT_STAT` (\n  `owner` varchar(127) NOT NULL DEFAULT '',\n  `gr_name` varchar(127) NOT NULL DEFAULT '',\n  `type` enum('symlink','dir','file','chr','blk','fifo','sock') NOT NULL DEFAULT 'symlink',\n  `status` int(11) NOT NULL DEFAULT '0',\n  `size` bigint(20) unsigned DEFAULT NULL,\n  `blocks` bigint(20) unsigned DEFAULT NULL,\n  `count` bigint(20) unsigned DEFAULT NULL,\n  `sz0` bigint(20) unsigned DEFAULT '0',\n  `sz1` bigint(20) unsigned DEFAULT '0',\n  `sz32` bigint(20) unsigned DEFAULT '0',\n  `sz1K` bigint(20) unsigned DEFAULT '0',\n  `sz32K` bigint(20) unsigned DEFAULT '0',\n  `sz1M` bigint(20) unsigned DEFAULT '0',\n  `sz32M` bigint(20) unsigned DEFAULT '0',\n  `sz1G` bigint(20) unsigned DEFAULT '0',\n  `sz32G` bigint(20) unsigned DEFAULT '0',\n  `sz1T` bigint(20) unsigned DEFAULT '0',\n  PRIMARY KEY (`owner`,`gr_name`,`type`,`status`)\n) ENGINE=InnoDB DEFAULT CHARSET=latin1;\n/*!40101 SET character_set_client = @saved_cs_client 
*/;\n\n--\n-- Dumping data for table `ACCT_STAT`\n--\n\nLOCK TABLES `ACCT_STAT` WRITE;\n/*!40000 ALTER TABLE `ACCT_STAT` DISABLE KEYS */;\nINSERT INTO `ACCT_STAT` VALUES ('root','root','file',1,0,0,0,0,0,0,0,0,0,0,0,0,0);\n/*!40000 ALTER TABLE `ACCT_STAT` ENABLE KEYS */;\nUNLOCK TABLES;\n\n--\n-- Table structure for table `ANNEX_INFO`\n--\n\nDROP TABLE IF EXISTS `ANNEX_INFO`;\n/*!40101 SET @saved_cs_client     = @@character_set_client */;\n/*!40101 SET character_set_client = utf8 */;\nCREATE TABLE `ANNEX_INFO` (\n  `id` varchar(64) NOT NULL,\n  `creation_time` int(10) unsigned DEFAULT NULL,\n  `last_archive` int(10) unsigned DEFAULT NULL,\n  `last_restore` int(10) unsigned DEFAULT NULL,\n  `link` text,\n  `archive_id` int(10) unsigned DEFAULT NULL,\n  PRIMARY KEY (`id`)\n) ENGINE=InnoDB DEFAULT CHARSET=latin1;\n/*!40101 SET character_set_client = @saved_cs_client */;\n\n--\n-- Dumping data for table `ANNEX_INFO`\n--\n\nLOCK TABLES `ANNEX_INFO` WRITE;\n/*!40000 ALTER TABLE `ANNEX_INFO` DISABLE KEYS */;\n/*!40000 ALTER TABLE `ANNEX_INFO` ENABLE KEYS */;\nUNLOCK TABLES;\n\n--\n-- Table structure for table `ENTRIES`\n--\n\nDROP TABLE IF EXISTS `ENTRIES`;\n/*!40101 SET @saved_cs_client     = @@character_set_client */;\n/*!40101 SET character_set_client = utf8 */;\nCREATE TABLE `ENTRIES` (\n  `id` varchar(64) NOT NULL,\n  `owner` varchar(127) DEFAULT 'unknown',\n  `gr_name` varchar(127) DEFAULT 'unknown',\n  `size` bigint(20) unsigned DEFAULT NULL,\n  `blocks` bigint(20) unsigned DEFAULT NULL,\n  `last_access` int(10) unsigned DEFAULT NULL,\n  `last_mod` int(10) unsigned DEFAULT NULL,\n  `type` enum('symlink','dir','file','chr','blk','fifo','sock') DEFAULT NULL,\n  `mode` smallint(5) unsigned DEFAULT NULL,\n  `nlink` int(10) unsigned DEFAULT NULL,\n  `status` int(11) DEFAULT '0',\n  `md_update` int(10) unsigned DEFAULT NULL,\n  `no_release` tinyint(1) DEFAULT NULL,\n  `no_archive` tinyint(1) DEFAULT NULL,\n  `archive_class` varchar(127) DEFAULT NULL,\n  `arch_cl_update` 
int(10) unsigned DEFAULT NULL,\n  `release_class` varchar(127) DEFAULT NULL,\n  `rel_cl_update` int(10) unsigned DEFAULT NULL,\n  PRIMARY KEY (`id`)\n) ENGINE=InnoDB DEFAULT CHARSET=latin1;\n/*!40101 SET character_set_client = @saved_cs_client */;\n\n--\n-- Dumping data for table `ENTRIES`\n--\n\nLOCK TABLES `ENTRIES` WRITE;\n/*!40000 ALTER TABLE `ENTRIES` DISABLE KEYS */;\n/*!40000 ALTER TABLE `ENTRIES` ENABLE KEYS */;\nUNLOCK TABLES;\n/*!50003 SET @saved_cs_client      = @@character_set_client */ ;\n/*!50003 SET @saved_cs_results     = @@character_set_results */ ;\n/*!50003 SET @saved_col_connection = @@collation_connection */ ;\n/*!50003 SET character_set_client  = latin1 */ ;\n/*!50003 SET character_set_results = latin1 */ ;\n/*!50003 SET collation_connection  = latin1_swedish_ci */ ;\n/*!50003 SET @saved_sql_mode       = @@sql_mode */ ;\n/*!50003 SET sql_mode              = '' */ ;\nDELIMITER ;;\n/*!50003 CREATE*/ /*!50017 DEFINER=`robinhood`@`localhost`*/ /*!50003 TRIGGER ACCT_ENTRY_INSERT AFTER INSERT ON ENTRIES FOR EACH ROW BEGIN DECLARE val BIGINT UNSIGNED; SET val=FLOOR(LOG2(NEW.size)/5);INSERT INTO ACCT_STAT(owner,gr_name,type,status,size,blocks, count, sz0, sz1, sz32, sz1K, sz32K, sz1M, sz32M, sz1G, sz32G, sz1T) VALUES (NEW.owner,NEW.gr_name,NEW.type,NEW.status,NEW.size,NEW.blocks, 1, NEW.size=0, IFNULL(val=0,0), IFNULL(val=1,0), IFNULL(val=2,0), IFNULL(val=3,0), IFNULL(val=4,0), IFNULL(val=5,0), IFNULL(val=6,0), IFNULL(val=7,0), IFNULL(val>=8,0)) ON DUPLICATE KEY UPDATE size=CAST(size as SIGNED)+CAST(NEW.size as SIGNED) , blocks=CAST(blocks as SIGNED)+CAST(NEW.blocks as SIGNED) , count=count+1, sz0=CAST(sz0 as SIGNED)+CAST((NEW.size=0) as SIGNED), sz1=CAST(sz1 as SIGNED)+CAST(IFNULL(val=0,0) as SIGNED), sz32=CAST(sz32 as SIGNED)+CAST(IFNULL(val=1,0) as SIGNED), sz1K=CAST(sz1K as SIGNED)+CAST(IFNULL(val=2,0) as SIGNED), sz32K=CAST(sz32K as SIGNED)+CAST(IFNULL(val=3,0) as SIGNED), sz1M=CAST(sz1M as SIGNED)+CAST(IFNULL(val=4,0) as SIGNED), 
sz32M=CAST(sz32M as SIGNED)+CAST(IFNULL(val=5,0) as SIGNED), sz1G=CAST(sz1G as SIGNED)+CAST(IFNULL(val=6,0) as SIGNED), sz32G=CAST(sz32G as SIGNED)+CAST(IFNULL(val=7,0) as SIGNED), sz1T=CAST(sz1T as SIGNED)+CAST(IFNULL(val>=8,0) as SIGNED); END */;;\nDELIMITER ;\n/*!50003 SET sql_mode              = @saved_sql_mode */ ;\n/*!50003 SET character_set_client  = @saved_cs_client */ ;\n/*!50003 SET character_set_results = @saved_cs_results */ ;\n/*!50003 SET collation_connection  = @saved_col_connection */ ;\n/*!50003 SET @saved_cs_client      = @@character_set_client */ ;\n/*!50003 SET @saved_cs_results     = @@character_set_results */ ;\n/*!50003 SET @saved_col_connection = @@collation_connection */ ;\n/*!50003 SET character_set_client  = latin1 */ ;\n/*!50003 SET character_set_results = latin1 */ ;\n/*!50003 SET collation_connection  = latin1_swedish_ci */ ;\n/*!50003 SET @saved_sql_mode       = @@sql_mode */ ;\n/*!50003 SET sql_mode              = '' */ ;\nDELIMITER ;;\n/*!50003 CREATE*/ /*!50017 DEFINER=`robinhood`@`localhost`*/ /*!50003 TRIGGER ACCT_ENTRY_UPDATE AFTER UPDATE ON ENTRIES FOR EACH ROW BEGIN DECLARE val_old, val_new BIGINT UNSIGNED;SET val_old=FLOOR(LOG2(OLD.size)/5); SET val_new=FLOOR(LOG2(NEW.size)/5);\nIF NEW.owner=OLD.owner AND NEW.gr_name=OLD.gr_name AND NEW.type=OLD.type AND NEW.status=OLD.status THEN \n\t IF NEW.size<>OLD.size OR NEW.blocks<>OLD.blocks THEN \n\t\t UPDATE ACCT_STAT SET  size=size+CAST(NEW.size as SIGNED)-CAST(OLD.size as SIGNED) , blocks=blocks+CAST(NEW.blocks as SIGNED)-CAST(OLD.blocks as SIGNED) , sz0=CAST(sz0 as SIGNED)-CAST(((OLD.size=0)+(NEW.size=0)) as SIGNED), sz1=CAST(sz1 as SIGNED)-CAST(IFNULL(val_old=0,0) as SIGNED)+CAST(IFNULL(val_new=0,0) as SIGNED), sz32=CAST(sz32 as SIGNED)-CAST(IFNULL(val_old=1,0) as SIGNED)+CAST(IFNULL(val_new=1,0) as SIGNED), sz1K=CAST(sz1K as SIGNED)-CAST(IFNULL(val_old=2,0) as SIGNED)+CAST(IFNULL(val_new=2,0) as SIGNED), sz32K=CAST(sz32K as SIGNED)-CAST(IFNULL(val_old=3,0) as 
SIGNED)+CAST(IFNULL(val_new=3,0) as SIGNED), sz1M=CAST(sz1M as SIGNED)-CAST(IFNULL(val_old=4,0) as SIGNED)+CAST(IFNULL(val_new=4,0) as SIGNED), sz32M=CAST(sz32M as SIGNED)-CAST(IFNULL(val_old=5,0) as SIGNED)+CAST(IFNULL(val_new=5,0) as SIGNED), sz1G=CAST(sz1G as SIGNED)-CAST(IFNULL(val_old=6,0) as SIGNED)+CAST(IFNULL(val_new=6,0) as SIGNED), sz32G=CAST(sz32G as SIGNED)-CAST(IFNULL(val_old=7,0) as SIGNED)+CAST(IFNULL(val_new=7,0) as SIGNED), sz1T=CAST(sz1T as SIGNED)-CAST(IFNULL(val_old>=8,0) as SIGNED)+CAST(IFNULL(val_new>=8,0) as SIGNED) WHERE owner=NEW.owner AND gr_name=NEW.gr_name AND type=NEW.type AND status=NEW.status ; \n\t END IF; \nELSEIF NEW.owner<>OLD.owner OR NEW.gr_name<>OLD.gr_name OR NEW.type<>OLD.type OR NEW.status<>OLD.status THEN \n\tINSERT INTO ACCT_STAT(owner,gr_name,type,status,size,blocks, count, sz0, sz1, sz32, sz1K, sz32K, sz1M, sz32M, sz1G, sz32G, sz1T) VALUES (NEW.owner,NEW.gr_name,NEW.type,NEW.status,NEW.size,NEW.blocks, 1, NEW.size=0, IFNULL(val_new=0,0), IFNULL(val_new=1,0), IFNULL(val_new=2,0), IFNULL(val_new=3,0), IFNULL(val_new=4,0), IFNULL(val_new=5,0), IFNULL(val_new=6,0), IFNULL(val_new=7,0), IFNULL(val_new>=8,0)) \n\tON DUPLICATE KEY UPDATE size=CAST(size as SIGNED)+CAST(NEW.size as SIGNED) , blocks=CAST(blocks as SIGNED)+CAST(NEW.blocks as SIGNED) , count=count+1, sz0=CAST(sz0 as SIGNED)+CAST((NEW.size=0) as SIGNED), sz1=CAST(sz1 as SIGNED)+CAST(IFNULL(val_new=0,0) as SIGNED), sz32=CAST(sz32 as SIGNED)+CAST(IFNULL(val_new=1,0) as SIGNED), sz1K=CAST(sz1K as SIGNED)+CAST(IFNULL(val_new=2,0) as SIGNED), sz32K=CAST(sz32K as SIGNED)+CAST(IFNULL(val_new=3,0) as SIGNED), sz1M=CAST(sz1M as SIGNED)+CAST(IFNULL(val_new=4,0) as SIGNED), sz32M=CAST(sz32M as SIGNED)+CAST(IFNULL(val_new=5,0) as SIGNED), sz1G=CAST(sz1G as SIGNED)+CAST(IFNULL(val_new=6,0) as SIGNED), sz32G=CAST(sz32G as SIGNED)+CAST(IFNULL(val_new=7,0) as SIGNED), sz1T=CAST(sz1T as SIGNED)+CAST(IFNULL(val_new>=8,0) as SIGNED);\n\tUPDATE ACCT_STAT SET size=CAST(size as 
SIGNED)-CAST(OLD.size as SIGNED) , blocks=CAST(blocks as SIGNED)-CAST(OLD.blocks as SIGNED) , count=count-1 , sz0=CAST(sz0 as SIGNED)-CAST((OLD.size=0) as SIGNED), sz1=CAST(sz1 as SIGNED)-CAST(IFNULL(val_old=0,0) as SIGNED), sz32=CAST(sz32 as SIGNED)-CAST(IFNULL(val_old=1,0) as SIGNED), sz1K=CAST(sz1K as SIGNED)-CAST(IFNULL(val_old=2,0) as SIGNED), sz32K=CAST(sz32K as SIGNED)-CAST(IFNULL(val_old=3,0) as SIGNED), sz1M=CAST(sz1M as SIGNED)-CAST(IFNULL(val_old=4,0) as SIGNED), sz32M=CAST(sz32M as SIGNED)-CAST(IFNULL(val_old=5,0) as SIGNED), sz1G=CAST(sz1G as SIGNED)-CAST(IFNULL(val_old=6,0) as SIGNED), sz32G=CAST(sz32G as SIGNED)-CAST(IFNULL(val_old=7,0) as SIGNED), sz1T=CAST(sz1T as SIGNED)-CAST(IFNULL(val_old>=8,0) as SIGNED) WHERE owner=OLD.owner AND gr_name=OLD.gr_name AND type=OLD.type AND status=OLD.status ;\nEND IF;\n END */;;\nDELIMITER ;\n/*!50003 SET sql_mode              = @saved_sql_mode */ ;\n/*!50003 SET character_set_client  = @saved_cs_client */ ;\n/*!50003 SET character_set_results = @saved_cs_results */ ;\n/*!50003 SET collation_connection  = @saved_col_connection */ ;\n/*!50003 SET @saved_cs_client      = @@character_set_client */ ;\n/*!50003 SET @saved_cs_results     = @@character_set_results */ ;\n/*!50003 SET @saved_col_connection = @@collation_connection */ ;\n/*!50003 SET character_set_client  = latin1 */ ;\n/*!50003 SET character_set_results = latin1 */ ;\n/*!50003 SET collation_connection  = latin1_swedish_ci */ ;\n/*!50003 SET @saved_sql_mode       = @@sql_mode */ ;\n/*!50003 SET sql_mode              = '' */ ;\nDELIMITER ;;\n/*!50003 CREATE*/ /*!50017 DEFINER=`robinhood`@`localhost`*/ /*!50003 TRIGGER ACCT_ENTRY_DELETE BEFORE DELETE ON ENTRIES FOR EACH ROW BEGIN DECLARE val BIGINT UNSIGNED; SET val=FLOOR(LOG2(OLD.size)/5);UPDATE ACCT_STAT SET size=CAST(size as SIGNED)-CAST(OLD.size as SIGNED) , blocks=CAST(blocks as SIGNED)-CAST(OLD.blocks as SIGNED) , count=count-1, sz0=CAST(sz0 as SIGNED)-CAST((OLD.size=0) as SIGNED), sz1=CAST(sz1 as 
SIGNED)-CAST(IFNULL(val=0,0) as SIGNED), sz32=CAST(sz32 as SIGNED)-CAST(IFNULL(val=1,0) as SIGNED), sz1K=CAST(sz1K as SIGNED)-CAST(IFNULL(val=2,0) as SIGNED), sz32K=CAST(sz32K as SIGNED)-CAST(IFNULL(val=3,0) as SIGNED), sz1M=CAST(sz1M as SIGNED)-CAST(IFNULL(val=4,0) as SIGNED), sz32M=CAST(sz32M as SIGNED)-CAST(IFNULL(val=5,0) as SIGNED), sz1G=CAST(sz1G as SIGNED)-CAST(IFNULL(val=6,0) as SIGNED), sz32G=CAST(sz32G as SIGNED)-CAST(IFNULL(val=7,0) as SIGNED), sz1T=CAST(sz1T as SIGNED)-CAST(IFNULL(val>=8,0) as SIGNED) WHERE owner=OLD.owner AND gr_name=OLD.gr_name AND type=OLD.type AND status=OLD.status ; END */;;\nDELIMITER ;\n/*!50003 SET sql_mode              = @saved_sql_mode */ ;\n/*!50003 SET character_set_client  = @saved_cs_client */ ;\n/*!50003 SET character_set_results = @saved_cs_results */ ;\n/*!50003 SET collation_connection  = @saved_col_connection */ ;\n\n--\n-- Table structure for table `NAMES`\n--\n\nDROP TABLE IF EXISTS `NAMES`;\n/*!40101 SET @saved_cs_client     = @@character_set_client */;\n/*!40101 SET character_set_client = utf8 */;\nCREATE TABLE `NAMES` (\n  `id` varchar(64) DEFAULT NULL,\n  `pkn` varchar(40) NOT NULL,\n  `parent_id` varchar(64) DEFAULT NULL,\n  `name` varchar(255) DEFAULT NULL,\n  `path_update` int(10) unsigned DEFAULT NULL,\n  PRIMARY KEY (`pkn`),\n  KEY `parent_id_index` (`parent_id`),\n  KEY `id_index` (`id`)\n) ENGINE=InnoDB DEFAULT CHARSET=latin1;\n/*!40101 SET character_set_client = @saved_cs_client */;\n\n--\n-- Dumping data for table `NAMES`\n--\n\nLOCK TABLES `NAMES` WRITE;\n/*!40000 ALTER TABLE `NAMES` DISABLE KEYS */;\n/*!40000 ALTER TABLE `NAMES` ENABLE KEYS */;\nUNLOCK TABLES;\n\n--\n-- Table structure for table `SOFT_RM`\n--\n\nDROP TABLE IF EXISTS `SOFT_RM`;\n/*!40101 SET @saved_cs_client     = @@character_set_client */;\n/*!40101 SET character_set_client = utf8 */;\nCREATE TABLE `SOFT_RM` (\n  `fid` varchar(64) NOT NULL,\n  `fullpath` varchar(4095) DEFAULT NULL,\n  `soft_rm_time` int(10) unsigned DEFAULT NULL,\n  
`real_rm_time` int(10) unsigned DEFAULT NULL,\n  `archive_id` int(10) unsigned DEFAULT NULL,\n  PRIMARY KEY (`fid`),\n  KEY `rm_time` (`real_rm_time`)\n) ENGINE=InnoDB DEFAULT CHARSET=latin1;\n/*!40101 SET character_set_client = @saved_cs_client */;\n\n--\n-- Dumping data for table `SOFT_RM`\n--\n\nLOCK TABLES `SOFT_RM` WRITE;\n/*!40000 ALTER TABLE `SOFT_RM` DISABLE KEYS */;\nINSERT INTO `SOFT_RM` VALUES ('0x200000400:0x1:0x0','/mnt/lustre/file.1',1467228579,1467228609,0);\n/*!40000 ALTER TABLE `SOFT_RM` ENABLE KEYS */;\nUNLOCK TABLES;\n\n--\n-- Table structure for table `STRIPE_INFO`\n--\n\nDROP TABLE IF EXISTS `STRIPE_INFO`;\n/*!40101 SET @saved_cs_client     = @@character_set_client */;\n/*!40101 SET character_set_client = utf8 */;\nCREATE TABLE `STRIPE_INFO` (\n  `id` varchar(64) NOT NULL,\n  `validator` int(11) DEFAULT NULL,\n  `stripe_count` int(10) unsigned DEFAULT NULL,\n  `stripe_size` int(10) unsigned DEFAULT NULL,\n  `pool_name` varchar(16) DEFAULT NULL,\n  PRIMARY KEY (`id`)\n) ENGINE=InnoDB DEFAULT CHARSET=latin1;\n/*!40101 SET character_set_client = @saved_cs_client */;\n\n--\n-- Dumping data for table `STRIPE_INFO`\n--\n\nLOCK TABLES `STRIPE_INFO` WRITE;\n/*!40000 ALTER TABLE `STRIPE_INFO` DISABLE KEYS */;\n/*!40000 ALTER TABLE `STRIPE_INFO` ENABLE KEYS */;\nUNLOCK TABLES;\n\n--\n-- Table structure for table `STRIPE_ITEMS`\n--\n\nDROP TABLE IF EXISTS `STRIPE_ITEMS`;\n/*!40101 SET @saved_cs_client     = @@character_set_client */;\n/*!40101 SET character_set_client = utf8 */;\nCREATE TABLE `STRIPE_ITEMS` (\n  `id` varchar(64) DEFAULT NULL,\n  `stripe_index` int(10) unsigned DEFAULT NULL,\n  `ostidx` int(10) unsigned DEFAULT NULL,\n  `details` binary(20) DEFAULT NULL,\n  KEY `id_index` (`id`),\n  KEY `st_index` (`ostidx`)\n) ENGINE=InnoDB DEFAULT CHARSET=latin1;\n/*!40101 SET character_set_client = @saved_cs_client */;\n\n--\n-- Dumping data for table `STRIPE_ITEMS`\n--\n\nLOCK TABLES `STRIPE_ITEMS` WRITE;\n/*!40000 ALTER TABLE `STRIPE_ITEMS` DISABLE 
KEYS */;\n/*!40000 ALTER TABLE `STRIPE_ITEMS` ENABLE KEYS */;\nUNLOCK TABLES;\n\n--\n-- Table structure for table `VARS`\n--\n\nDROP TABLE IF EXISTS `VARS`;\n/*!40101 SET @saved_cs_client     = @@character_set_client */;\n/*!40101 SET character_set_client = utf8 */;\nCREATE TABLE `VARS` (\n  `varname` varchar(255) NOT NULL,\n  `value` text,\n  PRIMARY KEY (`varname`)\n) ENGINE=InnoDB DEFAULT CHARSET=latin1;\n/*!40101 SET character_set_client = @saved_cs_client */;\n\n--\n-- Dumping data for table `VARS`\n--\n\nLOCK TABLES `VARS` WRITE;\n/*!40000 ALTER TABLE `VARS` DISABLE KEYS */;\nINSERT INTO `VARS` VALUES ('ChangelogLastCommit_MDT0000','9'),('FS_Path','/mnt/lustre'),('VersionFunctionSet','1.1'),('VersionTriggerSet','1.1');\n/*!40000 ALTER TABLE `VARS` ENABLE KEYS */;\nUNLOCK TABLES;\n/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;\n\n/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;\n/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;\n/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;\n/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;\n/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;\n/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;\n/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;\n\n-- Dump completed on 2016-06-29 21:30:38\n"
  },
  {
    "path": "tests/test_suite/cfg/rmdir.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\n# TEST: directory removal\n\n%include \"common.conf\"\n\nfileclass remove_it {\n    definition { path == $MATCH_PATH }\n}\n\nrmdir_empty_rules\n{\n    ignore_fileclass = special;\n\n    rule default {\n        condition { last_mod > 10s }\n    }\n}\n\nrmdir_recurse_rules {\n    ignore_fileclass = special;\n\n    rule recursive_rmdir\n    {\n        target_fileclass = remove_it;\n        condition { last_mod > 5s }\n    }\n}\n\nrmdir_empty_parameters { nb_threads = 1; }\nrmdir_recurse_parameters { nb_threads = 1; }\n"
  },
  {
    "path": "tests/test_suite/cfg/test1.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\n# TEST: basic migration rule based on last_mod\n\n%include \"common.conf\"\n\n######## Policies for this test ###########\n\nmigration_rules\n{\n    rule default\n    {\n        # Archive 'dirty' files that have not been modified\n        # for more than 6 hours, or backup them daily\n        # if they are continuously appended.\n        condition { last_mod > 5sec }\n        action_params { archive_id=1; }\n    }\n}\n\n######## most basic space release rule ##########\n\npurge_rules\n{\n    rule default\n    {\n        # We can release files that have not been accessed\n        # for more than a day\n        condition\n        {\n            last_access > 1h\n        }\n    }\n}\n\n####### Purge trigger ########\n\n# trigger purge on OST if its usage exceeds 85%\npurge_trigger\n{\n    trigger_on         = OST_usage ;\n    high_threshold_pct = 85% ;\n    low_threshold_pct  = 80% ;\n    check_interval     = 5min ;\n}\n\n"
  },
  {
    "path": "tests/test_suite/cfg/test2.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\n# TEST: migration policy based on last_mod and filename\n\n%include \"common.conf\"\n\n######## Policies for this test ###########\n\nmigration_rules\n{\n    policy default\n    {\n        # Archive 'dirty' files that have not been modified\n        # for more than 6 hours, or backup them daily\n        # if they are continuously appended.\n        condition\n        {\n            last_mod > 5sec\n            and\n            name == \"*[1-5]\"\n        }\n        action_params { archive_id=1; }\n    }\n}\n\n######## most basic space release policy ##########\n\npurge_rules\n{\n    policy default\n    {\n        # We can release files that have not been accessed\n        # for more than a day\n        condition\n        {\n            last_access > 1h\n        }\n    }\n}\n\n####### Purge trigger ########\n\n# trigger purge on OST if its usage exceeds 85%\npurge_trigger\n{\n    trigger_on         = OST_usage ;\n    high_threshold_pct = 85% ;\n    low_threshold_pct  = 80% ;\n    check_interval     = 5min ;\n}\n"
  },
  {
    "path": "tests/test_suite/cfg/test3.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\n# TEST: - Fileset definition based on filename\n#       - Migration action params\n#       - Migration rule using fileclasses\n\n%include \"common.conf\"\n\nFileClass even_files\n{\n        definition\n        {\n                name == \"*[02468]\"\n        }\n        migration_action_params {\n            case = \"{rule}\";\n            archive_id = 1;\n        }\n}\n\nFileClass odd_files\n{\n        definition\n        {\n                name == \"*[13579]\"\n        }\n        migration_action_params {\n            case = \"{rule}\";\n            archive_id = 1;\n        }\n}\n\n\n######## Policies for this test ###########\n\nmigration_rules\n{\n    # migrate even files 5s after they have been modified\n    rule even_migr\n    {\n        target_fileclass = even_files;\n        condition {\n                last_mod > 5s\n        }\n    }\n\n    # migrate odd files 10s after they have been modified\n    rule odd_migr\n    {\n        target_fileclass = odd_files;\n        condition {\n                last_mod > 10s\n        }\n    }\n\n    # migrate other files after 15s\n    rule default\n    {\n        condition {\n                last_mod > 15s\n        }\n        action_params {archive_id = 1;}\n    }\n\n}\n\n######## most basic space release rule ##########\n\npurge_rules\n{\n    rule default\n    {\n        # We can release files that have not been accessed\n        # for more than a day\n        condition\n        {\n            last_access > 1h\n        }\n    }\n}\n\n####### Purge trigger ########\n\n# trigger purge on OST if its usage exceeds 85%\npurge_trigger\n{\n    trigger_on         = OST_usage ;\n    high_threshold_pct = 85% ;\n    low_threshold_pct  = 80% ;\n    check_interval     = 5min ;\n}\n\n"
  },
  {
    "path": "tests/test_suite/cfg/test_action_params.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\n%include \"common.conf\"\n\nfileclass class1a {\n    definition { name == \"*1a\" }\n    migration_action_params { prio = 4; }\n}\nfileclass class1b {\n    definition { name == \"*1b\" }\n    migration_action_params { prio = 5; }\n    purge_action_params { arg = 55; }\n}\n\nfileclass class2 {\n    definition { name == \"*2\" }\n    migration_action_params { cos = 4; }\n    purge_action_params { grouping = \"{parent_fid}\"; }\n}\nfileclass class3 {\n    definition { name == \"*3\" }\n}\n\nmigration_parameters {\n    # override policy default_action with a cmd\n    action = cmd(\"lfs hsm_archive -a '{archive_id}' '/mnt/lustre/.lustre/fid/{fid}' --data 'cos={cos}'\");\n    # default action params for the policy\n    action_params {\n        archive_id = 1;\n        cos = 1;\n        status = \"{lhsm.status}\";\n        status2 = \"{status}\";\n        prev_arch = \"{lhsm.archive_id}\";\n        prev_arch2 = \"{archive_id}\";\n    }\n}\n\nmigration_rules {\n    rule migr1 {\n        target_fileclass = class1a;\n        target_fileclass = class1b;\n        condition { last_mod > 1 }\n        # rule-specific action cmd\n        action = cmd(\"lfs hsm_archive -a '{archive_id}' '{fullpath}' --data 'cos={cos},class={fileclass}'\");\n        action_params { cos = 2; }\n    }\n\n    rule migr2 {\n        target_fileclass = class2;\n        condition { last_mod > 1 }\n        # rule-specific action function\n        action = lhsm.archive;\n        action_params { cos = 3; }\n    }\n\n    rule migr3 {\n        target_fileclass = class3;\n        condition { last_mod > 1 }\n        # use default policy action\n        action_params {\n            archive_id = 2; # override policy default\n            mode = over1; # override trigger param\n        }\n    }\n\n    rule default {\n        condition { last_mod > 1 }\n        action_params {\n            mode = 
over2; # override trigger param\n        }\n    }\n}\n\nmigration_trigger {\n    trigger_on = periodic;\n    check_interval = 10s;\n    action_params { mode = trigger; }\n}\n\npurge_parameters {\n    # keep default policy action\n    # default action params\n    action_params { arg = 1; }\n}\n\npurge_rules {\n    rule purge1 {\n        target_fileclass = class1a;\n        target_fileclass = class1b;\n        action = cmd(\"rm -f '{fullpath}'\");\n        action_params { arg = 2; }\n        condition { last_access > 1 }\n    }\n    rule purge2 {\n        target_fileclass = class2;\n        action = cmd(\"echo '{fid}' '{rule}' '{parent_fid}' '{arg}' >> /tmp/purge.log\");\n        action_params { arg = 3; }\n        condition { last_access > 1 }\n    }\n    rule purge3 {\n        target_fileclass = class3;\n        condition { last_access > 1 }\n    }\n    rule default {\n        action = cmd(\"echo '{fid}' '{rule}' '{arg}' >> /tmp/purge.log\");\n        action_params { arg = 4; }\n        condition { last_access > 1 }\n    }\n}\n\n# trigger purge on OST if its usage exceeds 85%\npurge_trigger {\n    trigger_on         = OST_usage ;\n    high_threshold_pct = 85% ;\n    low_threshold_pct  = 80% ;\n    check_interval     = 5min ;\n\n    action_params {\n        arg = 66;\n        mode = trigger;\n    }\n}\n"
  },
  {
    "path": "tests/test_suite/cfg/test_basic.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n%include \"common.conf\"\n\ndefine_policy touch {\n     status_manager = basic;\n     scope { type == file and status != 'ok' }\n     default_action = cmd(\"touch {fullpath}\");\n     default_lru_sort_attr = none;\n}\n\nfileclass tofail { definition { name == \"*.fail\" } }\n\ntouch_rules {\n\n      rule fail {\n        target_fileclass = tofail;\n        action = cmd(\"./cfg/migr_failer.sh fail\");\n\n        condition  { last_mod < 1h } \n      }\n\n      rule default {\n         condition { last_mod < 1h }\n      }\n}\n\ntouch_trigger {\n      trigger_on = scheduled;\n      check_interval = 5min;\n}\n\n"
  },
  {
    "path": "tests/test_suite/cfg/test_check_migr.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\n# TEST: - check on_event_periodic path update when processing changelogs\n#       - check on_event_periodic md update when processing changelogs\n#       - check periodic fileclass matching\n\n%include \"common.conf\"\n\ndb_update_params\n{\n    path_update = on_event_periodic(5s,30s);\n    md_update   = on_event_periodic(5s,30s);\n    fileclass_update = periodic(10s);\n}\n\nfileclass to_be_ignored\n{\n    definition { name == \"ign*\"}\n}\n\nfileclass to_be_migr\n{\n    definition { name == \"migr*\"}\n}\n\nfileclass to_be_released\n{\n    definition { name == \"purg*\"}\n}\n\nmigration_rules\n{\n   ignore { name == \"whitelist*\" }\n   ignore_fileclass = to_be_ignored;\n\n   policy migr_match\n   {\n        target_fileclass = to_be_migr;\n        condition { last_mod >= 0 }\n   }\n\n   policy default\n   {\n        condition { last_mod >= 0 }\n   }\n}\n\npurge_rules\n{\n   policy default\n   {\n        condition { last_access >= 0 }\n   }\n}\n\nEntryProcessor\n{\n    match_classes = TRUE;\n}\n"
  },
  {
    "path": "tests/test_suite/cfg/test_check_purge.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\n# TEST: - check on_event_periodic path update when processing changelogs\n#       - check on_event_periodic md update when processing changelogs\n#       - check periodic fileclass matching\n\n%include \"common.conf\"\n\ndb_update_params\n{\n    path_update = on_event_periodic(5s,30s);\n    md_update   = on_event_periodic(5s,30s);\n    fileclass_update = periodic(10s);\n}\n\nfileclass to_be_ignored\n{\n    definition { name == \"ign*\"}\n}\n\nfileclass to_be_migr\n{\n    definition { name == \"migr*\"}\n}\n\nfileclass to_be_released\n{\n    definition { name == \"purg*\"}\n}\n\nmigration_rules\n{\n   policy default\n   {\n        condition { last_mod >= 0 }\n   }\n}\n\npurge_rules\n{\n   ignore { name == \"whitelist*\" }\n   ignore_fileclass = to_be_ignored;\n\n   policy purge_match\n   {\n        target_fileclass = to_be_released;\n        condition { last_access >= 0 }\n   }\n\n   policy default\n   {\n        condition { last_access >= 0 }\n   }\n}\n\nEntryProcessor\n{\n    match_classes = TRUE;\n}\n"
  },
  {
    "path": "tests/test_suite/cfg/test_checker.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n%include \"common.conf\"\n%include \"../../../doc/templates/includes/check.inc\"\n\nfileclass never_checked {\n    definition { checksum.last_check == 0 or checksum.output == \"\" }\n}\n\nchecksum_parameters {\n    # override default command path\n    action = cmd(\"rbh_cksum.sh '{output}' '{path}'\");\n}\n\nchecksum_rules {\n    ignore { last_check < 5 }\n    ignore { last_mod < 5 }\n\n    rule initial_check {\n        target_fileclass = never_checked;\n        condition { last_mod >= 5 }\n    }\n\n    rule default {\n       condition { last_mod >= 5 and last_check >= 5 }\n    }\n}\n"
  },
  {
    "path": "tests/test_suite/cfg/test_checker_invert.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\n# invert common and checker definitions to test listmgr_init behavior in that case\n%include \"../../../doc/templates/includes/check.inc\"\n%include \"common.conf\"\n\nfileclass never_checked {\n    definition { checksum.last_check == 0 or checksum.output == \"\" }\n}\n\nchecksum_parameters {\n    # override default command path\n    action = cmd(\"rbh_cksum.sh '{output}' '{path}'\");\n}\n\nchecksum_rules {\n    ignore { last_check < 5 }\n    ignore { last_mod < 5 }\n\n    rule initial_check {\n        target_fileclass = never_checked;\n        condition { last_mod >= 5 }\n    }\n\n    rule default {\n       condition { last_mod >= 5 and last_check >= 5 }\n    }\n}\n"
  },
  {
    "path": "tests/test_suite/cfg/test_completion.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\nGeneral\n{\n\tfs_path = $RH_ROOT;\n\tfs_type = $FS_TYPE;\n}\n\n# ChangeLog Reader configuration\n# Parameters for processing MDT changelogs :\nChangeLog\n{\n    # 1 MDT block for each MDT :\n    MDT\n    {\n        # name of the first MDT\n        mdt_name  = \"MDT0000\" ;\n\n        # id of the persistent changelog reader\n        # as returned by \"lctl changelog_register\" command\n        reader_id = \"cl1\" ;\n    }\n    force_polling = TRUE;\n    polling_interval = 1s;\n}\n\nLog\n{\n    # Log verbosity level\n    # Possible values are: CRIT, MAJOR, EVENT, VERB, DEBUG, FULL\n    debug_level = EVENT;\n\n    # Log file\n    log_file = stdout;\n\n    # File for reporting purge events\n    report_file = \"/dev/null\";\n\n    # set alert_file, alert_mail or both depending on the alert method you wish\n    alert_file = \"/tmp/rh_alert.log\";\n\n}\n\nListManager\n{\n\tMySQL\n\t{\n\t\tserver = \"localhost\";\n\t\tdb = $RH_DB;\n        user = \"robinhood\";\n\t\t# password or password_file are mandatory\n\t\tpassword = \"robinhood\";\n        engine = InnoDB;\n\t}\n\n\tSQLite {\n\t        db_file = \"/tmp/robinhood_sqlite_db\" ;\n        \tretry_delay_microsec = 1000 ;\n\t}\n}\n\n# for tests with backup purpose\nbackup_config\n{\n    root = \"/tmp/backend\";\n    mnt_type=ext4;\n    check_mounted = FALSE;\n    recovery_action = common.copy;\n}\n# for tests with shook purpose\nshook_config\n{\n    root = \"/tmp/backend\";\n    mnt_type=ext4;\n    check_mounted = FALSE;\n    recovery_action = common.copy;\n}\n\n\nFS_Scan\n{\n    completion_command = $TEST_CMD;\n}\n\n%include \"$RBH_TEST_POLICIES\"\n"
  },
  {
    "path": "tests/test_suite/cfg/test_copy.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n%include \"common.conf\"\n\n#define a custom policy to backup files\ndefine_policy copy {\n     status_manager = basic;\n     scope { type == file and status != 'ok' }\n     default_action = common.copy;\n     default_lru_sort_attr = none;\n}\n\nfileclass f1 {definition { name == \"*.1\" }}\nfileclass f2 {definition { name == \"*.2\" }}\nfileclass f3 {definition { name == \"*.3\" }}\nfileclass f5 {definition { name == \"*.5\" }}\n\ncopy_rules {\n\trule copy_compress {\n\t\ttarget_fileclass = f1;\n\t\taction_params {\n\t\t\tcompress = yes;\n\t\t\ttargetpath = \"{path}.gz\";\n\t\t}\n\t\tcondition = true;\n\t}\n\n\trule copy_mkdir {\n\t\ttarget_fileclass = f2;\n\t\taction_params {\n\t\t\tmkdir = yes;\n\t\t\ttargetpath = \"{fsroot}/backup/{fid}/{name}\";\n\t\t}\n\t\tcondition = true;\n\t}\n\n\trule copy_link_to_dir {\n\t\ttarget_fileclass = f5;\n\t\taction_params {\n\t\t\tmkdir = yes;\n\t\t\ttargetpath = \"{fsroot}/one_link/{fid}/{name}\";\n\t\t}\n\t\tcondition = true;\n\t}\n\n\trule copy_nomkdir {\n\t\t# this should fail 'no mkdir'\n\t\ttarget_fileclass = f3;\n\t\taction_params {\n\t\t\tmkdir = no;\n\t\t\ttargetpath = \"{fsroot}/backup/{fid}/{name}\";\n\t\t}\n\t\tcondition = true;\n\t}\n}\n"
  },
  {
    "path": "tests/test_suite/cfg/test_custom_purge.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\n# TEST: basic migration policy based on last_mod\n\n%include \"common.conf\"\n\n######## Policies for this test ###########\n\nmigration_rules\n{\n    policy default\n    {\n        # migrate all files ASAP\n        condition\n        {\n            last_mod > 1sec\n        }\n    }\n}\n\n# run migration every sec\nmigration_trigger {\n    trigger_on = periodic;\n    check_interval = 1s;\n}\n\n\n######## basic space release policy ##########\n\npurge_rules\n{\n    policy default\n    {\n        # We can release files that have not been accessed\n        # for more than 1s\n        condition\n        {\n            last_access > 1s\n        }\n    }\n}\n\npurge_parameters\n{\n    action = cmd(\"./rm_script {fsname} {fid} {path}\");\n}\n\n"
  },
  {
    "path": "tests/test_suite/cfg/test_default_case.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\n# TEST: basic migration policy based on last_mod\n\n%include \"common.conf\"\n\n######## Policies for this test ###########\n\nFileclass A_files { definition { name == \"*.A\" } }\nFileclass B_files { definition { name == \"*.B\" } }\n\nFileclass X_files { definition { name == \"X*\" } }\nFileclass Y_files { definition { name == \"Y*\" } }\n\nmigration_rules\n{\n    ignore_fileclass = A_files;\n\n    policy migr_B {\n        target_fileclass = B_files;\n        condition { last_mod > 1s }\n    }\n\n    # default: no migration\n}\n\n# run migration every sec\nmigration_trigger {\n    trigger_on = periodic;\n    check_interval = 1s;\n}\n\n\n######## basic space release policy ##########\n\npurge_rules\n{\n    ignore_fileclass = X_files;\n    \n    policy purge_Y {\n        target_fileclass = Y_files;\n        condition { last_access > 1s }\n    }\n\n    # default: no purge\n}\n"
  },
  {
    "path": "tests/test_suite/cfg/test_fileclass.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\n# TEST: - Fileset definition based on filename\n#       - Migration params\n#       - Migration policy using filesets\n\n%include \"common.conf\"\n\nFileClass even_files\n{\n    Definition { name == \"*[02468]\" }\n    migration_action_params {class=\"{fileclass}\";}\n    report = no;\n}\n\nFileClass odd_files\n{\n    Definition { name == \"*[13579]\" }\n    migration_action_params {class=\"{fileclass}\";}\n    report = no;\n}\n\nFileClass in_dir_A\n{\n    definition { tree == \"/mnt/lustre/dir_A/\" }\n    migration_action_params {class=\"{fileclass}\" ; archive_id=1;}\n    report = no;\n}\n\nFileClass in_dir_B\n{\n    definition { tree == \"/mnt/lustre/dir_B\" }\n    migration_action_params {class=\"{fileclass}\"; archive_id = 1; }\n    report = no;\n}\n\nFileClass odd_or_A\n{\n    definition { odd_files UNION in_dir_A }\n    migration_action_params {class=\"{fileclass}\"; archive_id = 1; }\n}\n\nFileClass even_and_B\n{\n    definition { even_files INTER in_dir_B }\n    migration_action_params {class=\"{fileclass}\"; archive_id = 1; }\n}\n\nFileClass even_and_not_B\n{\n    definition { even_files INTER NOT in_dir_B }\n    migration_action_params {class=\"{fileclass}\"; archive_id = 1; }\n}\n\n\n######## Policies for this test ###########\n\nmigration_rules\n{\n\n    # migrate files based on INTER => matched first\n    policy inter_migr\n    {\n        target_fileclass = even_and_B;\n        condition { last_mod > 1s }\n    }\n\n    # migrate files based on inter not\n    policy not_migr\n    {\n        target_fileclass = even_and_not_B;\n        condition { last_mod > 1s }\n    }\n\n    # migrate files based on UNION\n    policy union_migr\n    {\n        target_fileclass = odd_or_A;\n        condition { last_mod > 1s }\n    }\n\n    # migrate other files after 45s\n    policy default\n    {\n        condition { last_mod > 1s }\n        
action_params {class = \"unmatched\"; archive_id=1;}\n    }\n\n}\n\n######## most basic space release policy ##########\n\npurge_rules\n{\n    policy default\n    {\n        # We can release files that have not been accessed\n        # for more than a day\n        condition\n        {\n            last_access > 1h\n        }\n    }\n}\n\n####### Purge trigger ########\n\n# trigger purge on OST if its usage exceeds 85%\npurge_trigger\n{\n    trigger_on         = OST_usage ;\n    high_threshold_pct = 85% ;\n    low_threshold_pct  = 80% ;\n    check_interval     = 5min ;\n}\n"
  },
  {
    "path": "tests/test_suite/cfg/test_hsm_invalidate.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n%include \"common.conf\"\n\ndefine_policy lhsm_archive {\n    scope { type == file\n            and no_archive != 1\n            and (status == new or status == modified) }\n    status_manager = lhsm(archive);\n    status_current = archiving;\n    default_action = lhsm.archive;\n    default_lru_sort_attr = last_mod;\n}\n\nlhsm_archive_parameters {\n    nb_threads = 8;\n\n    max_action_volume  = 10TB;\n    max_action_count   = 380000;\n    db_result_size_max = 100000;\n    # lru_sort_attr      = default_lru_sort_attr;  # (from 'define_policy' block)\n\n    # suspend policy run if action error rate > 50% (after 100 errors)\n    suspend_error_pct  = 50%;\n    suspend_error_min  = 100;\n    report_actions     = yes;\n    queue_size         = 4096;\n\n    report_interval    = 10min;\n    action_timeout     = 2h;\n\n    # overrides policy default action\n    # action = cmd(\"lfs hsm_archive --archive {archive_id} /lustre/.lustre/fid/{fid}\");\n\n    # check_actions_on_startup= no\n    # check_actions_interval  = 0 # (disabled)\n    # recheck_ignored_entries = no\n    # pre_maintenance_window  = 0 # (disabled)\n    # maint_min_apply_delay   = 30min\n\n    # default action parameters\n    action_params {\n        archive_id = 1;\n    }\n}\n\nlhsm_archive_rules {\n    rule archive_std {\n        target_fileclass = lustre_files;\n        condition {\n            last_mod > 1\n        }\n    }\n}\n\nlhsm_archive_trigger {\n    trigger_on = periodic;\n    check_interval = 60min;\n}\n\nFileClass empty_files {\n    definition { type == file and size == 0}\n    # report = yes (default)\n}\n\nFileClass lustre_files {\n    definition { type == file and size > 0}\n    # report = yes (default)\n}\n"
  },
  {
    "path": "tests/test_suite/cfg/test_hsm_remove_noorder.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n%include \"common.conf\"\n\ndefine_policy lhsm_remove {\n    # scope { type == file }\n    scope = all;\n    status_manager = lhsm(removed);\n    default_action = lhsm.hsm_remove;\n    default_lru_sort_attr = rm_time;\n}\n\nlhsm_remove_parameters {\n     nb_threads = 2;\n     max_action_count = 290000;\n     db_result_size_max = 295000;\n     lru_sort_attr      = none;\n}\n\n# Deferred HSM file removal\nlhsm_remove_rules {\n  rule default {\n    condition { rm_time > 10d }\n  }\n}\n\nlhsm_remove_trigger {\n    trigger_on = periodic;\n    check_interval = 10min;\n}\n\nFileClass empty_files {\n    definition { type == file and size == 0}\n    # report = yes (default)\n}\n\nFileClass lustre_files {\n    definition { type == file and size > 0}\n    # report = yes (default)\n}\n"
  },
  {
    "path": "tests/test_suite/cfg/test_hsm_remove_order.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n%include \"common.conf\"\n\ndefine_policy lhsm_remove {\n    # scope { type == file }\n    scope = all;\n    status_manager = lhsm(removed);\n    default_action = lhsm.hsm_remove;\n    default_lru_sort_attr = rm_time;\n}\n\nlhsm_remove_parameters {\n     nb_threads = 2;\n     max_action_count = 290000;\n     db_result_size_max = 295000;\n}\n\n# Deferred HSM file removal\nlhsm_remove_rules {\n  rule default {\n    condition { rm_time > 10d }\n  }\n}\n\nlhsm_remove_trigger {\n    trigger_on = periodic;\n    check_interval = 10min;\n}\n\nFileClass empty_files {\n    definition { type == file and size == 0}\n    # report = yes (default)\n}\n\nFileClass lustre_files {\n    definition { type == file and size > 0}\n    # report = yes (default)\n}\n"
  },
  {
    "path": "tests/test_suite/cfg/test_iname.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\n# test sensitive/insensitive name matching\n\n%include \"common.conf\"\n\nFileClass fmaj_name { definition { name == \"F*\" } }\nFileClass fmin_name { definition { name == \"f*\" } }\nFileClass fmaj_iname { definition { iname == \"F*\" } }\nFileClass fmin_iname { definition { iname == \"f*\" } }\nFileClass other_name { definition { NOT fmin_iname } }\n"
  },
  {
    "path": "tests/test_suite/cfg/test_lhsm1.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n%include \"common.conf\"\n\ndefine_policy lhsm_archive {\n    scope { type == file\n            and no_archive != 1\n            and (status == new or status == modified) }\n    status_manager = lhsm(archive);\n    status_current = archiving;\n    default_action = lhsm.archive;\n    default_lru_sort_attr = last_mod;\n}\n\nlhsm_archive_parameters {\n    nb_threads = 8;\n\n    max_action_volume  = 10TB;\n    max_action_count   = 380000;\n    db_result_size_max = 100000;\n    # lru_sort_attr      = default_lru_sort_attr;  # (from 'define_policy' block)\n\n    # suspend policy run if action error rate > 50% (after 100 errors)\n    suspend_error_pct  = 50%;\n    suspend_error_min  = 100;\n    report_actions     = yes;\n    queue_size         = 4096;\n\n    report_interval    = 10min;\n    action_timeout     = 2h;\n\n    # overrides policy default action\n    # action = cmd(\"lfs hsm_archive --archive {archive_id} /lustre/.lustre/fid/{fid}\");\n\n    # check_actions_on_startup= no\n    # check_actions_interval  = 0 # (disabled)\n    # recheck_ignored_entries = no\n    # pre_maintenance_window  = 0 # (disabled)\n    # maint_min_apply_delay   = 30min\n\n    # default action parameters\n    action_params {\n        archive_id = 1;\n    }\n}\n\nlhsm_archive_rules {\n    rule archive_std {\n        target_fileclass = lustre_files;\n        condition {\n            last_mod > 1 AND\n            ((last_archive == 0 and creation > 4h)\n              OR (last_archive > 1d and last_mod > 8h))\n             AND\n             last_access > 1\n        }\n    }\n}\n\nlhsm_archive_trigger {\n    trigger_on = periodic;\n    check_interval = 60min;\n}\n\nFileClass empty_files {\n    definition { type == file and size == 0}\n    # report = yes (default)\n}\n\nFileClass lustre_files {\n    definition { type == file and size > 0}\n    # report = yes (default)\n}\n"
  },
  {
    "path": "tests/test_suite/cfg/test_limits.conf",
    "content": "%include \"common.conf\"\n\nfileclass file1 {\n\tdefinition { name == \"file*1\" }\n}\nfileclass file2 {\n\tdefinition { name == \"file*2\" }\n}\nfileclass file3 {\n\tdefinition { name == \"file*3\" }\n}\nfileclass file4 {\n\tdefinition { name == \"file*4\" }\n}\n\nmigration_trigger {\n\ttrigger_on = periodic;\n\tcheck_interval = 5s;\n\tmax_action_count = $trig_cnt;\n\tmax_action_volume = $trig_vol;\n}\n\nmigration_parameters {\n\tmax_action_count = $param_cnt;\n\tmax_action_volume = $param_vol;\n}\n\nmigration_rules {\n    rule all {\n\ttarget_fileclass = file1;\n\ttarget_fileclass = file2;\n\ttarget_fileclass = file3;\n\ttarget_fileclass = file4;\n\n\tcondition { last_mod >= 1s }\n\taction_params {class = \"{fileclass}\";}\n    }\n\n    rule default {\n        condition { last_mod >= 1s }\n        action_params {class = \"unmatched\";}\n    }\n}\n\npurge_trigger {\n\ttrigger_on = periodic;\n\tcheck_interval = 5s;\n}\n\npurge_rules {\n    rule all {\n\ttarget_fileclass = file1;\n\ttarget_fileclass = file2;\n\ttarget_fileclass = file3;\n\ttarget_fileclass = file4;\n\n\tcondition { last_mod >= 1s }\n\taction_params {class = \"{fileclass}\";}\n    }\n\n    rule default {\n        condition { last_mod >= 1s }\n        action_params {class = \"unmatched\";}\n    }\n}\n"
  },
  {
    "path": "tests/test_suite/cfg/test_maintenance.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\n# TEST: - Fileset definition based on filename\n#       - Migration params\n#       - Migration policy using fileclasses\n\n%include \"common.conf\"\n\n######## Policies for this test ###########\n\nmigration_rules\n{\n    rule default\n    {\n        condition { last_mod > 45s }\n    }\n}\n\nmigration_parameters\n{\n    pre_maintenance_window = 30s;\n    maint_min_apply_delay = 5s;\n}\n"
  },
  {
    "path": "tests/test_suite/cfg/test_mnt_point.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\nGeneral\n{\n\tfs_path = $fs_path;\n\tfs_type = $FS_TYPE;\n}\n\n# ChangeLog Reader configuration\n# Parameters for processing MDT changelogs :\nChangeLog\n{\n    # 1 MDT block for each MDT :\n    MDT\n    {\n        # name of the first MDT\n        mdt_name  = \"MDT0000\" ;\n\n        # id of the persistent changelog reader\n        # as returned by \"lctl changelog_register\" command\n        reader_id = \"cl1\" ;\n    }\n    force_polling = TRUE;\n    polling_interval = 1s;\n    mds_has_lu543 = FALSE;\n    mds_has_lu1331 = FALSE;\n}\n\nLog\n{\n    # Log verbosity level\n    # Possible values are: CRIT, MAJOR, EVENT, VERB, DEBUG, FULL\n    debug_level = EVENT;\n\n    # Log file\n    log_file = stdout;\n\n    # File for reporting purge events\n    report_file = \"/dev/null\";\n\n    # set alert_file, alert_mail or both depending on the alert method you wish\n    alert_file = \"/tmp/rh_alert.log\";\n\n}\n\nListManager\n{\n\tMySQL\n\t{\n\t\tserver = \"localhost\";\n\t\tdb = $RH_DB;\n        user = \"robinhood\";\n\t\t# password or password_file are mandatory\n\t\tpassword = \"robinhood\";\n        engine = InnoDB;\n\t}\n\n\tSQLite {\n\t        db_file = \"/tmp/robinhood_sqlite_db\" ;\n        \tretry_delay_microsec = 1000 ;\n\t}\n}\n\n# for tests with backup purpose\nbackup_config\n{\n    root = \"/tmp/backend\";\n    mnt_type=ext4;\n    check_mounted = FALSE;\n    recovery_action = common.copy;\n}\n# for tests with shook purpose\nshook_config\n{\n    root = \"/tmp/backend\";\n    mnt_type=ext4;\n    check_mounted = FALSE;\n    recovery_action = common.copy;\n}\n\n\n%include \"$RBH_TEST_POLICIES\"\n\nmigration_rules\n{\n    policy default { condition { last_access > 1s } }\n}\n"
  },
  {
    "path": "tests/test_suite/cfg/test_modeguard_dir.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n%include \"common.conf\"\n\ndefine_policy modeguard {\n    status_manager = modeguard;\n    scope { type == directory }\n    default_action = modeguard.enforce_mode;\n    default_lru_sort_attr = last_mod;\n}\n\nmodeguard_config {\n        set_mask = \"2000\";\n        clear_mask = \"0002\";\n}\n\nmodeguard_rules {\n    ignore_fileclass = special;\n    rule default {\n        condition { modeguard.status != ok }\n    }\n}\n\nmodeguard_trigger {\n    trigger_on = scheduled;\n    check_interval = 5m;\n}\n"
  },
  {
    "path": "tests/test_suite/cfg/test_modeguard_file.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n%include \"common.conf\"\n\ndefine_policy modeguard {\n    status_manager = modeguard;\n    scope { type != directory }\n    default_action = modeguard.enforce_mode;\n    default_lru_sort_attr = last_mod;\n}\n\nmodeguard_config {\n        set_mask = \"0000\";\n        clear_mask = \"0007\";\n}\n\nmodeguard_rules {\n    ignore_fileclass = special;\n    rule default {\n        condition { modeguard.status != ok }\n    }\n}\n\nmodeguard_trigger {\n    trigger_on = scheduled;\n    check_interval = 5m;\n}\n"
  },
  {
    "path": "tests/test_suite/cfg/test_move.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n%include \"common.conf\"\n\n#define a custom policy to backup files\ndefine_policy trash {\n     status_manager = basic;\n     scope { type == file and status != 'ok' }\n     default_action = common.move;\n     default_lru_sort_attr = none;\n}\n\nfileclass logs {definition { name == \"*.log\" }}\nfileclass over {definition { name == \"*.1\" }}\n\ntrash_rules {\n\n    ignore { tree == \"{fsroot}/.trash\" }\n\n    rule movelogs {\n        target_fileclass = logs;\n        action_params {\n            # append fid to filename to avoid conflicts\n            targetpath = \"{fsroot}/.trash/{path}__{fid}\";\n        }\n        condition = true;\n    }\n\n    rule moveover {\n        target_fileclass = over;\n        action_params {\n            # to test if move overwrites previous file\n            targetpath = \"{fsroot}/.trash/{path}\";\n        }\n        condition = true;\n    }\n\n}\n"
  },
  {
    "path": "tests/test_suite/cfg/test_multirule.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\n%include \"common.conf\"\n\n# used to be rbh 2.5 \"purge\" policy in TMPFS mode\ndefine_policy cleanup {\n    scope { type != directory }\n    status_manager = none;\n    default_action = common.unlink;\n    default_lru_sort_attr = last_access;\n}\n\n######## file classes ##########\nFileClass scratch_files {\n    definition { type == file and tree == \"/mnt/lustre/scratch\" \n                 and not tree == \"/mnt/lustre/scratch/tmp\" }\n    report = yes; # (default)\n}\n\nFileClass scratch_tmp_files {\n    definition { type == file and tree == \"/mnt/lustre/scratch/tmp\" }\n    report = yes; # (default)\n}\n\nFileClass files1 {\n    definition { type == file and name == \"*1\" }\n    report = yes; # (default)\n}\n\nFileClass files2 {\n    definition { type == file and name == \"*2\" }\n    report = yes; # (default)\n}\n\nFileClass files3 {\n    definition { type == file and name == \"*3\" }\n    report = yes; # (default)\n}\n\nFileClass foo_files {\n    definition { type == file and name == \"*.foo\" }\n    report = yes; # (default)\n}\n\nFileClass bar_files {\n    definition { type == file and name == \"*.bar\" }\n    report = yes; # (default)\n}\n\nFileClass root_files {\n    definition { type == file and owner == root }\n    report = yes; # (default)\n}\n\nFileClass default_files {\n    definition { type == file and tree != \"/mnt/lustre/scratch\" and\n        name != \"*.foo\" and name != \"*.bar\" and owner != root }\n    report = yes; # (default)\n}\n\n#### Deleting old unused files in scratch#######\n\ncleanup_parameters {\n    nb_threads = 4;\n\n    # max_action_volume  = 10TB;\n    # max_action_count   = 380000;\n    # db_result_size_max = 300000;\n    lru_sort_attr      = none;\n\n    # suspend policy run if action error rate > 50% (after 100 errors)\n    suspend_error_pct  = 50%;\n    suspend_error_min  = 100;\n    report_actions    
 = yes;\n    queue_size         = 4096;\n\n    report_interval    = 10min;\n    # action_timeout     = 2h;\n\n\n    # check_actions_on_startup= no\n    # check_actions_interval  = 0 # (disabled)\n    # recheck_ignored_entries = no\n    # pre_maintenance_window  = 0 # (disabled)\n    # maint_min_apply_delay   = 30min\n\n}\n\ncleanup_rules {\n    ignore { owner == tutu or owner == babar }\n    ignore_fileclass = foo_files;\n    ignore_fileclass = bar_files;\n\n    rule scratch_cleanup {\n        target_fileclass = scratch_files;\n        condition { last_access > 1h and last_mod > 1h }\n    }\n\n    rule nocond_cleanup1 {\n        target_fileclass = files1;\n        condition = true;\n    }\n\n    rule nocond_cleanup2 {\n        target_fileclass = files2;\n        target_fileclass = files3;\n        condition = true;\n    }\n\n    rule scratch_tmp_cleanup {\n        target_fileclass = scratch_tmp_files;\n        target_fileclass = root_files;\n        condition { last_access > 1h and last_mod > 1h }\n    }\n\n    rule default {\n        condition {last_mod > 1h}\n    }\n}\n\ncleanup_trigger {\n    trigger_on = periodic;\n    check_interval = 24h;\n}\n\n"
  },
  {
    "path": "tests/test_suite/cfg/test_multirule_migr.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\n%include \"common.conf\"\n\n######## file classes ##########\nFileClass scratch_files {\n    definition { type == file and tree == \"/mnt/lustre/scratch\" \n                 and not tree == \"/mnt/lustre/scratch/tmp\" }\n    report = yes; # (default)\n}\n\nFileClass scratch_tmp_files {\n    definition { type == file and tree == \"/mnt/lustre/scratch/tmp\" }\n    report = yes; # (default)\n}\n\nFileClass files1 {\n    definition { type == file and name == \"*1\" }\n    report = yes; # (default)\n}\n\nFileClass files2 {\n    definition { type == file and name == \"*2\" }\n    report = yes; # (default)\n}\n\nFileClass files3 {\n    definition { type == file and name == \"*3\" }\n    report = yes; # (default)\n}\n\nFileClass foo_files {\n    definition { type == file and name == \"*.foo\" }\n    report = yes; # (default)\n}\n\nFileClass bar_files {\n    definition { type == file and name == \"*.bar\" }\n    report = yes; # (default)\n}\n\nFileClass root_files {\n    definition { type == file and owner == root }\n    report = yes; # (default)\n}\n\nFileClass default_files {\n    definition { type == file and tree != \"/mnt/lustre/scratch\" and\n        name != \"*.foo\" and name != \"*.bar\" and owner != root }\n    report = yes; # (default)\n}\n\n#### Deleting old unused files in scratch#######\n\nmigration_parameters {\n    nb_threads = 4;\n\n    # max_action_volume  = 10TB;\n    # max_action_count   = 380000;\n    # db_result_size_max = 300000;\n    lru_sort_attr      = creation;\n\n    # suspend policy run if action error rate > 50% (after 100 errors)\n    suspend_error_pct  = 50%;\n    suspend_error_min  = 100;\n    report_actions     = yes;\n    queue_size         = 4096;\n\n    report_interval    = 10min;\n    # action_timeout     = 2h;\n\n\n    # check_actions_on_startup= no\n    # check_actions_interval  = 0 # (disabled)\n    # 
recheck_ignored_entries = no\n    # pre_maintenance_window  = 0 # (disabled)\n    # maint_min_apply_delay   = 30min\n\n}\n\nmigration_rules {\n    ignore { owner == tutu or owner == babar }\n    ignore_fileclass = foo_files;\n    ignore_fileclass = bar_files;\n\n    rule scratch_migration {\n        target_fileclass = scratch_files;\n        condition { (last_access > 1h and last_mod > 1h) or last_archive == 0 }\n    }\n\n    rule nocond_migration1 {\n        target_fileclass = files1;\n        condition = true;\n    }\n\n    rule nocond_migration2 {\n        target_fileclass = files2;\n        target_fileclass = files3;\n        condition = true;\n    }\n\n    rule scratch_tmp_migration {\n        target_fileclass = scratch_tmp_files;\n        target_fileclass = root_files;\n        condition {(last_archive == 0 and creation > 7d and last_mod > 1h)\n\t\t   or last_archive > 7d}\n    }\n\n    rule default {\n        condition {(last_archive == 0 and creation > 7d and last_mod > 1h)\n\t\t   or last_archive > 7d}\n    }\n}\n\nmigration_trigger {\n    trigger_on = periodic;\n    check_interval = 24h;\n}\n\n"
  },
  {
    "path": "tests/test_suite/cfg/test_nlink.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\n%include \"common.conf\"\n\ndefine_policy cleanup {\n    scope { type != directory }\n    status_manager = none;\n    default_action = common.unlink;\n    default_lru_sort_attr = none;\n}\n\n######## file classes ##########\nFileClass dual {\n    definition { type == file and nlink > 1 }\n}\n\nFileClass single {\n    definition { type == file and nlink == 1 }\n}\n\nFileClass other {\n    definition { type == file and nlink == 0 }\n}\n\n\ncleanup_parameters {\n    nb_threads = 4;\n}\n\ncleanup_rules {\n    rule link_cleanup {\n        target_fileclass = dual;\n        condition = true;\n    }\n\n    rule file_cleanup {\n        target_fileclass = single;\n        target_fileclass = other;\n        condition = true;\n    }\n}\n\ncleanup_trigger {\n    trigger_on = periodic;\n    check_interval = 24h;\n}\n"
  },
  {
    "path": "tests/test_suite/cfg/test_path.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\n# TEST: - Fileset definition based on xattrs\n#       - Migration params\n#       - Migration policies using fileclasses\n\n%include \"common.conf\"\n\nFileClass absolute_path\n{\n        definition { path == \"/mnt/lustre/dir1/*/A\" }\n}\n\nFileClass absolute_tree\n{\n        definition { tree == \"/mnt/lustre/dir2\" }\n}\n\nFileClass path_depth2\n{\n        definition { path == \"*/dir3/*\" }\n}\n\nFileClass tree_depth2\n{\n        definition { tree == \"*/dir4\" }\n}\n\nFileClass relative_path\n{\n        definition { path == \"dir5/*\" }\n}\n\nFileClass relative_tree\n{\n        definition { tree == \"dir6\" or tree == \"file.6\" }\n}\n\nFileClass any_root_tree\n{\n        definition { tree == \"**/dir7\" }\n}\n\nFileClass any_root_path\n{\n        definition { path == \"**/dir8/file.1\" }\n}\n\nFileClass any_level_tree\n{\n        definition { tree == \"/mnt/lustre/dir9/**/dir10\" }\n}\n\nFileClass any_level_path\n{\n        definition { path == \"/mnt/lustre/dir11/**/file\" }\n}\n\n\n######## Policies for this test ###########\n\nmigration_rules\n{\n    rule all\n    {\n\ttarget_fileclass = absolute_path;\n\ttarget_fileclass = absolute_tree;\n\ttarget_fileclass = path_depth2;\n\ttarget_fileclass = tree_depth2;\n\ttarget_fileclass = relative_path;\n\ttarget_fileclass = relative_tree;\n\ttarget_fileclass = any_root_tree;\n\ttarget_fileclass = any_root_path;\n\ttarget_fileclass = any_level_tree;\n\ttarget_fileclass = any_level_path;\n\n\tcondition { last_mod > 1s }\n    action_params {class = \"{fileclass}\";}\n    }\n\n    # migrate even files 15s after they have been modified\n    rule default\n    {\n        condition { last_mod > 1s }\n        action_params {class = \"unmatched\";}\n    }\n}\n\nmigration_parameters\n{\n    nb_threads = 1;\n}\n"
  },
  {
    "path": "tests/test_suite/cfg/test_pipeline.conf",
    "content": "%include \"common.conf\"\n\nEntryProcessor\n{\n\tmatch_classes = TRUE;\n}\n\n# test all possible criteria on files\nFileClass TestCrit\n{\nDefinition { name == \"*[02468]\"\n\t     or\n\t     tree == \"/mnt/lustre/match\"\n\t     or\n\t     path == \"**/file.match\"\n\t     or\n\t     type == symlink\n\t     or\n\t     size > 1MB\n\t     or\n\t     owner == \"testuser\"\n\t     or\n\t     group == \"group*\"\n\t     or\n\t     depth > 5\n\t     or\n\t     ost_index == 1\n\t     or\n\t     ost_pool == \"pool1\"\n\t     or\n\t     xattr.user.test == \"test\" }\n}\n\nmigration_rules\n{\n\n    # migrate files based on INTER => matched first\n    policy migr_match\n    {\n        target_fileclass = TestCrit;\n        condition { last_mod > 1s }\n    }\n\n    # migrate other files after 45s\n    policy default\n    {\n        condition { last_mod > 1s }\n    }\n}\npurge_rules\n{\n    policy purge_match\n    {\n        target_fileclass = TestCrit;\n        condition { last_access > 1s }\n    }\n\n    policy default\n    {\n        condition { last_access > 1h }\n    }\n}\n\n####### Purge trigger ########\n\n# trigger purge on OST if its usage exceeds 85%\npurge_trigger\n{\n    trigger_on         = OST_usage ;\n    high_threshold_pct = 85% ;\n    low_threshold_pct  = 80% ;\n    check_interval     = 5min ;\n}\n"
  },
  {
    "path": "tests/test_suite/cfg/test_pools.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\n# TEST: - Fileset definition based on xattrs\n#       - Migration params\n#       - Migration policies using Filesets\n\n%include \"common.conf\"\n\nfileclass pool_1\n{\n    definition { ost_pool == \"ost0\" }\n}\n\nfileclass pool_2\n{\n    definition { ost_pool == \"ost1\" }\n}\n\nPurge_rules\n{\n    rule PoolFiles\n    {\n        target_fileclass = pool_1;\n        target_fileclass = pool_2;\n        condition { last_access > 5s }\n    }\n    rule default\n    {\n        condition { last_access > 5s }\n    }\n}\n\ndb_update_params\n{\n    fileclass_update = always;\n}\n\nMigration_rules\n{\n    rule PoolFiles\n    {\n        target_fileclass = pool_1;\n        target_fileclass = pool_2;\n        condition { last_mod > 5s }\n    }\n    rule default\n    {\n        condition { last_mod > 5s }\n    }\n}\n"
  },
  {
    "path": "tests/test_suite/cfg/test_prepost_cmd.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\n# TEST: basic migration policy based on last_mod\n\n%include \"common.conf\"\n\n######## Policies for this test ###########\n\nmigration_rules\n{\n    policy default\n    {\n        # migrate all files ASAP\n        condition\n        {\n            last_mod > 1sec\n        }\n    }\n}\n\n# run migration every sec\nmigration_trigger {\n    trigger_on = periodic;\n    check_interval = 1s;\n}\n\n######## basic space release policy ##########\n\npurge_rules\n{\n    policy default\n    {\n        # We can release files that have not been accessed\n        # for more than 10s\n        condition\n        {\n            last_access > 15s\n        }\n    }\n}\n\npurge_trigger {\n    trigger_on = global_usage;\n    high_threshold_pct = 80%;\n    low_threshold_pct = 70%;\n    check_interval = 1s;\n}\n\npurge_parameters\n{\n    pre_run_command = $pre_command ;\n    post_run_command = $post_command ;\n}\n"
  },
  {
    "path": "tests/test_suite/cfg/test_prepost_sched.conf",
    "content": "%include \"common.conf\"\n\nfileclass file1 {\n    definition { name == \"file*1\" }\n}\nfileclass file2 {\n    definition { name == \"file*2\" }\n}\nfileclass file3 {\n    definition { name == \"file*3\" }\n}\nfileclass file4 {\n    definition { name == \"file*4\" }\n}\n\nmigration_trigger {\n    trigger_on = periodic;\n    check_interval = 5s;\n}\n\nmigration_parameters {\n    schedulers = $sched;\n    recheck_ignored_entries = true;\n\n    pre_sched_match  = $pre_sched;\n    post_sched_match = $post_sched;\n\n    # not really a limit... just make sure pre/post/check is taken into\n    # account\n    max_per_run {\n        max_count = 1000;\n        max_vol   = 1TB;\n    }\n}\n\nmigration_rules {\n    rule all {\n    target_fileclass = file1;\n    target_fileclass = file2;\n    target_fileclass = file3;\n    target_fileclass = file4;\n\n    # depth can't be translated to a DB request\n    # so we are sure to need matching in the program\n    # for this criteria\n    condition { last_mod >= 12h and depth == 0 }\n    action_params {class = \"{fileclass}\";}\n    }\n}\n\n# this second policy is to check the behavious for a single POSIX attr\ndefine_policy cleanup {\n    scope { type != directory }\n    status_manager = none;\n    default_action = common.unlink;\n    default_lru_sort_attr = last_access;\n}\n\ncleanup_rules {\n    rule default {condition {last_mod >= 12h}}\n}\n\ncleanup_trigger {\n    trigger_on = periodic;\n    check_interval = 5s;\n}\n\ncleanup_parameters {\n    schedulers = $sched;\n    recheck_ignored_entries = true;\n\n    pre_sched_match  = $pre_sched;\n    post_sched_match = $post_sched;\n\n    # not really a limit... just make sure pre/post/check is taken into\n    # account\n    max_per_run {\n        max_count = 1000;\n        max_vol   = 1TB;\n    }\n}\n"
  },
  {
    "path": "tests/test_suite/cfg/test_purge.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\n# TEST: basic migration policy based on last_mod\n\n%include \"common.conf\"\n\n######## Policies for this test ###########\n\nmigration_rules\n{\n    policy default\n    {\n        # migrate all files ASAP\n        condition\n        {\n            last_mod > 1sec\n        }\n    }\n}\n\n# run migration every sec\nmigration_trigger {\n    trigger_on = periodic;\n    check_interval = 1s;\n}\n\n\n######## basic space release policy ##########\n\npurge_rules\n{\n    policy default\n    {\n        # We can release files that have not been accessed\n        # for more than 10s\n        condition\n        {\n            last_access > 15s\n        }\n    }\n}\n"
  },
  {
    "path": "tests/test_suite/cfg/test_purge2.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\n# TEST: basic migration policy based on last_mod\n\n%include \"common.conf\"\n\n######## Policies for this test ###########\n\nmigration_rules\n{\n    policy default\n    {\n        # migrate all files ASAP\n        condition\n        {\n            last_mod > 1sec\n        }\n    }\n}\n\n# run migration every sec\nmigration_trigger {\n    trigger_on = periodic;\n    check_interval = 1s;\n}\n\n\n######## space release policy based on size ##########\n\nFileclass empty\n{\n    definition\n    {\n        size == 0\n    }\n}\n\nFileclass very_small\n{\n    definition\n    {\n        size <= 16kB\n    }\n}\n\nFileclass mid_file\n{\n    definition\n    {\n        size > 16kB\n        and\n        size < 1MB\n    }\n}\n\npurge_rules\n{\n    ignore_fileclass = empty;\n\n    policy very_small\n    {\n        target_fileclass = very_small;\n        condition { last_access > 1s }\n    }\n\n    policy mid_file\n    {\n        target_fileclass = mid_file;\n        condition { last_access > 1s }\n    }\n\n    policy default\n    {\n        condition { last_access > 1s }\n    }\n}\n"
  },
  {
    "path": "tests/test_suite/cfg/test_ratelim.conf",
    "content": "%include \"common.conf\"\n\nfileclass test1 {\n\tdefinition { name == file1.* }\n}\n\nfileclass test2 {\n\tdefinition { name == file2.* }\n}\n\nfileclass test3 {\n\tdefinition { name == file3.* }\n}\n\nmigration_trigger {\n\ttrigger_on = periodic;\n\tcheck_interval = 5s;\n}\n\nmigration_parameters {\n\tschedulers = common.rate_limit;\n\trate_limit {\n                # TBF sets a limit of <max_count> and <max_size> per <period_ms>\n\t\tmax_count = $ratelim_capacity;\n\t\tmax_size = $ratelim_size;\n\t\tperiod_ms = $ratelim_refill;\n\t}\n}\n\nmigration_rules {\n    rule all {\n\ttarget_fileclass = test1;\n\ttarget_fileclass = test2;\n\ttarget_fileclass = test3;\n\n\tcondition = true;\n\taction_params {class = \"{fileclass}\";}\n    }\n\n    rule default {\n        condition = true;\n        action_params {class = \"unmatched\";}\n    }\n}\n"
  },
  {
    "path": "tests/test_suite/cfg/test_recov.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\n# TEST: basic migration policy based on last_mod\n\n%include \"common.conf\"\n\n######## Policies for this test ###########\n\nmigration_rules\n{\n    policy default\n    {\n        # Archive 'dirty' files that have not been modified\n        # for more than 6 hours, or backup them daily\n        # if they are continuously appended.\n        condition\n        {\n            last_mod > 1sec\n        }\n    }\n}\n\n######## most basic space release policy ##########\n\npurge_rules\n{\n    policy default\n    {\n        # We can release files that have not been accessed\n        # for more than a day\n        condition\n        {\n            last_access > 1h\n        }\n    }\n}\n\n####### Purge trigger ########\n\n# trigger purge on OST if its usage exceeds 85%\npurge_trigger\n{\n    trigger_on         = OST_usage ;\n    high_threshold_pct = 85% ;\n    low_threshold_pct  = 80% ;\n    check_interval     = 5min ;\n}\n"
  },
  {
    "path": "tests/test_suite/cfg/test_recov2.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\nGeneral\n{\n\tfs_path = $RH_ROOT;\n\tfs_type = $FS_TYPE;\n}\n\n# ChangeLog Reader configuration\n# Parameters for processing MDT changelogs :\nChangeLog\n{\n    # 1 MDT block for each MDT :\n    MDT\n    {\n        # name of the first MDT\n        mdt_name  = \"MDT0000\" ;\n\n        # id of the persistent changelog reader\n        # as returned by \"lctl changelog_register\" command\n        reader_id = \"cl1\" ;\n    }\n    force_polling = TRUE;\n    polling_interval = 1s;\n}\n\nLog\n{\n    # Log verbosity level\n    # Possible values are: CRIT, MAJOR, EVENT, VERB, DEBUG, FULL\n    debug_level = EVENT;\n\n    # Log file\n    log_file = stdout;\n\n    # File for reporting purge events\n    report_file = \"/dev/null\";\n\n    # set alert_file, alert_mail or both depending on the alert method you wish\n    alert_file = \"/tmp/rh_alert.log\";\n\n}\n\nListManager\n{\n\tMySQL\n\t{\n\t\tserver = \"localhost\";\n\t\tdb = $RH_DB;\n        user = \"robinhood\";\n\t\t# password or password_file are mandatory\n\t\tpassword = \"robinhood\";\n        engine = InnoDB;\n\t}\n\n\tSQLite {\n\t        db_file = \"/tmp/robinhood_sqlite_db\" ;\n        \tretry_delay_microsec = 1000 ;\n\t}\n}\n\n# for tests with backup purpose\nbackup_config\n{\n    root = \"/tmp/backend\";\n    mnt_type=ext4;\n    check_mounted = FALSE;\n    archive_symlinks = FALSE;\n    recovery_action = common.copy;\n}\n# for tests with shook purpose\nshook_config\n{\n    root = \"/tmp/backend\";\n    mnt_type=ext4;\n    check_mounted = FALSE;\n    recovery_action = common.copy;\n}\n\n\n######## Policies for this test ###########\n%include \"$RBH_TEST_POLICIES\"\n\nmigration_rules\n{\n    policy default\n    {\n        # Archive 'dirty' files that have not been modified\n        # for more than 6 hours, or backup them daily\n        # if they are continuously appended.\n        condition\n       
 {\n            last_mod > 1sec\n        }\n    }\n}\n\n######## most basic space release policy ##########\n\npurge_rules\n{\n    policy default\n    {\n        # We can release files that have not been accessed\n        # for more than a day\n        condition\n        {\n            last_access > 1h\n        }\n    }\n}\n\n####### Purge trigger ########\n\n# trigger purge on OST if its usage exceeds 85%\npurge_trigger\n{\n    trigger_on         = OST_usage ;\n    high_threshold_pct = 85% ;\n    low_threshold_pct  = 80% ;\n    check_interval     = 5min ;\n}\n"
  },
  {
    "path": "tests/test_suite/cfg/test_rm1.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\n# TEST: defered removal in HSM\n\n%include \"common.conf\"\n\n######## Policies for this test ###########\n\nmigration_rules\n{\n    policy default\n    {\n        # Archive 'dirty' files that have not been modified\n        # for more than 6 hours, or backup them daily\n        # if they are continuously appended.\n        condition\n        {\n            last_mod > 30sec\n        }\n    }\n}\n\n######## most basic space release policy ##########\n\npurge_rules\n{\n    policy default\n    {\n        # We can release files that have not been accessed\n        # for more than a day\n        condition\n        {\n            last_access > 1h\n        }\n    }\n}\n\n####### Purge trigger ########\n\n# trigger purge on OST if its usage exceeds 85%\npurge_trigger\n{\n    trigger_on         = OST_usage ;\n    high_threshold_pct = 85% ;\n    low_threshold_pct  = 80% ;\n    check_interval     = 5min ;\n}\n\n##### basic HSM remove policy ######\n\nhsm_remove_parameters {\n    # test the impact of this parameter on SOFT_RM table select\n    db_result_size_max = 2;\n}\n\nhsm_remove_rules\n{\n    rule default {\n        condition { rm_time >= 10 }\n    }\n}\n"
  },
  {
    "path": "tests/test_suite/cfg/test_rmdir_depth.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\n# TEST: empty directory removal with depth constraint\n\n%include \"common.conf\"\n\nfileclass remove_it {\n    definition { path == $MATCH_PATH }\n}\n\nrmdir_empty_rules {\n    ignore { depth < 5 or owner == \"foo\" }\n    rule default\n    {\n        condition { last_mod > 10s }\n    }\n}\n\nrmdir_empty_parameters { nb_threads = 1; }\n\nrmdir_empty_trigger {\n    trigger_on = periodic;\n    check_interval = 24h;\n}\n"
  },
  {
    "path": "tests/test_suite/cfg/test_run.conf",
    "content": "%include \"common.conf\"\n\nfileclass file1 {\n\tdefinition { name == \"file*1\" }\n}\nfileclass file2 {\n\tdefinition { name == \"file*2\" }\n}\nfileclass file3 {\n\tdefinition { name == \"file*3\" }\n}\nfileclass file4 {\n\tdefinition { name == \"file*4\" }\n}\n\nmigration_trigger {\n\ttrigger_on = periodic;\n\tcheck_interval = 5s;\n}\n\nmigration_rules {\n    rule all {\n\ttarget_fileclass = file1;\n\ttarget_fileclass = file2;\n\ttarget_fileclass = file3;\n\ttarget_fileclass = file4;\n\n\tcondition { last_mod >= 1s }\n\taction_params {class = \"{fileclass}\";}\n    }\n\n    rule default {\n        condition { last_mod >= 1s }\n        action_params {class = \"unmatched\";}\n    }\n}\n\npurge_trigger {\n\ttrigger_on = periodic;\n\tcheck_interval = 5s;\n}\n\npurge_rules {\n    rule all {\n\ttarget_fileclass = file1;\n\ttarget_fileclass = file2;\n\ttarget_fileclass = file3;\n\ttarget_fileclass = file4;\n\n\tcondition { last_mod >= 1s }\n\taction_params {class = \"{fileclass}\";}\n    }\n\n    rule default {\n        condition { last_mod >= 1s }\n        action_params {class = \"unmatched\";}\n    }\n}\n"
  },
  {
    "path": "tests/test_suite/cfg/test_scan_only.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\nGeneral\n{\n    fs_path = $RH_ROOT;\n    fs_type = $FS_TYPE;\n    uid_gid_as_numbers = $RBH_NUM_UIDGID;\n    last_access_only_atime = $RBH_TEST_LAST_ACCESS_ONLY_ATIME;\n}\n\nfs_scan {\n    scan_only = $SCAN_ONLY1;\n    scan_only = $SCAN_ONLY2;\n}\n\n# ChangeLog Reader configuration\n# Parameters for processing MDT changelogs :\nChangeLog\n{\n    # 1 MDT block for each MDT :\n    MDT\n    {\n        # name of the first MDT\n        mdt_name  = \"MDT0000\" ;\n\n        # id of the persistent changelog reader\n        # as returned by \"lctl changelog_register\" command\n        reader_id = \"cl1\" ;\n    }\n    force_polling = TRUE;\n    polling_interval = 1s;\n    queue_max_age = 1s;\n\n    mds_has_lu543 = FALSE;\n    mds_has_lu1331 = FALSE;\n}\n\nLog\n{\n    # Log verbosity level\n    # Possible values are: CRIT, MAJOR, EVENT, VERB, DEBUG, FULL\n    debug_level = EVENT;\n\n    # Log file\n    log_file = stdout;\n\n    # File for reporting purge events\n    report_file = \"/dev/null\";\n\n    # set alert_file, alert_mail or both depending on the alert method you wish\n    alert_file = \"/tmp/rh_alert.log\";\n\n}\n\nListManager\n{\n\tMySQL\n\t{\n\t\tserver = \"localhost\";\n\t\tdb = $RH_DB;\n        user = \"robinhood\";\n\t\t# password or password_file are mandatory\n\t\tpassword = \"robinhood\";\n        engine = InnoDB;\n\t}\n\n\tSQLite {\n\t        db_file = \"/tmp/robinhood_sqlite_db\" ;\n        \tretry_delay_microsec = 1000 ;\n\t}\n}\n\n# for tests with backup purpose\nbackup_config\n{\n    root = \"/tmp/backend\";\n    mnt_type = ext4;\n    check_mounted = no;\n    recovery_action = common.copy;\n}\n# for tests with shook purpose\nshook_config\n{\n    root = \"/tmp/backend\";\n    mnt_type=ext4;\n    check_mounted = FALSE;\n    recovery_action = common.copy;\n}\n\n\n# Lustre/HSM specific configuration\nlhsm_config {\n    rebind_cmd = 
\"/usr/sbin/lhsmtool_posix --hsm_root=/tmp/backend --archive {archive_id} --rebind {oldfid} {newfid} {fsroot}\";\n}\n\n# this one is generated from original template\n%include \"$RBH_TEST_POLICIES\"\n# always include rmdir policies (tested with all tests flavors)\n%include \"../../../doc/templates/includes/rmdir.inc\"\n"
  },
  {
    "path": "tests/test_suite/cfg/test_sched1.conf",
    "content": "%include \"common.conf\"\n\nfileclass file1 {\n\tdefinition { name == \"file*1\" }\n}\nfileclass file2 {\n\tdefinition { name == \"file*2\" }\n}\nfileclass file3 {\n\tdefinition { name == \"file*3\" }\n}\nfileclass file4 {\n\tdefinition { name == \"file*4\" }\n}\n\nmigration_trigger {\n\ttrigger_on = periodic;\n\tcheck_interval = 5s;\n\tmax_action_count = $trig_cnt;\n\tmax_action_volume = $trig_vol;\n}\n\nmigration_parameters {\n\tmax_action_count = $param_cnt;\n\tmax_action_volume = $param_vol;\n\n\tschedulers = common.max_per_run;\n\n\tmax_per_run {\n\t\tmax_count = $sched_max_cnt;\n\t\tmax_vol   = $sched_max_vol;\n\t}\n}\n\nmigration_rules {\n    rule all {\n\ttarget_fileclass = file1;\n\ttarget_fileclass = file2;\n\ttarget_fileclass = file3;\n\ttarget_fileclass = file4;\n\n\tcondition { last_mod >= 1s }\n\taction_params {class = \"{fileclass}\";}\n    }\n\n    rule default {\n        condition { last_mod >= 1s }\n        action_params {class = \"unmatched\";}\n    }\n}\n\npurge_trigger {\n\ttrigger_on = periodic;\n\tcheck_interval = 5s;\n}\n\npurge_rules {\n    rule all {\n\ttarget_fileclass = file1;\n\ttarget_fileclass = file2;\n\ttarget_fileclass = file3;\n\ttarget_fileclass = file4;\n\n\tcondition { last_mod >= 1s }\n\taction_params {class = \"{fileclass}\";}\n    }\n\n    rule default {\n        condition { last_mod >= 1s }\n        action_params {class = \"unmatched\";}\n    }\n}\n"
  },
  {
    "path": "tests/test_suite/cfg/test_trig.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\n# TEST: migration policy based on last_mod and filename\n\n%include \"common.conf\"\n\n######## Policies for this test ###########\n\nmigration_rules\n{\n    rule default\n    {\n        condition\n        {\n            last_mod >= 0\n        }\n        action_params {archive_id = 1;}\n    }\n}\n\npurge_rules\n{\n    rule default\n    {\n        condition\n        {\n            last_access >= 0\n        }\n    }\n}\n\n####### Purge trigger ########\n\n# trigger on inode count\npurge_trigger\n{\n    # purge if file count > $high_cnt\n    # until file count is $low_cnt\n    trigger_on         = global_usage;\n    high_threshold_cnt = $high_cnt;\n    low_threshold_cnt  = $low_cnt;\n    check_interval     = 5min ;\n    post_trigger_wait = 0;\n}\n"
  },
  {
    "path": "tests/test_suite/cfg/test_trig2.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\n# TEST: migration policy based on last_mod and filename\n\n%include \"common.conf\"\n\n######## Policies for this test ###########\n\nmigration_rules\n{\n    rule default\n    {\n        condition\n        {\n            last_mod >= 0\n        }\n        action_params { archive_id = 1; }\n    }\n}\n\npurge_rules\n{\n    rule default\n    {\n        condition\n        {\n            last_access >= 0\n        }\n    }\n}\n\n####### Purge trigger ########\n\n# trigger on inode count\npurge_trigger\n{\n    # purge if volume on OST1 > 150MB\n    # until volume is 110MB\n    trigger_on         = OST_usage;\n    high_threshold_vol = $ost_high_vol;\n    low_threshold_vol  = $ost_low_vol;\n    check_interval     = 5min ;\n    post_trigger_wait = 10;\n}\n"
  },
  {
    "path": "tests/test_suite/cfg/test_trig3.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\n# TEST: migration policy based on last_mod and filename\n\n%include \"common.conf\"\n\n######## Policies for this test ###########\n\nmigration_rules\n{\n    rule default\n    {\n        condition\n        {\n            last_mod >= 0\n        }\n        action_params { archive_id = 1; }\n    }\n}\n\npurge_rules\n{\n    rule default\n    {\n        condition\n        {\n            last_access >= 0\n        }\n    }\n}\n\n####### Purge trigger ########\n\n# trigger on inode count\npurge_trigger\n{\n    # purge if file count > 50\n    trigger_on         = global_usage;\n    high_threshold_cnt = 50;\n    low_threshold_cnt  = 40;\n    check_interval     = 5min ;\n    alert_high = yes;\n    alert_low = yes;\n    post_trigger_wait = 0;\n}\n\npurge_trigger\n{\n    # purge if volume > 100M\n    trigger_on         = global_usage;\n    high_threshold_vol = 100MB;\n    low_threshold_vol  = 80MB;\n    check_interval     = 5min ;\n    alert_high = yes;\n    alert_low = yes;\n    post_trigger_wait = 0;\n}\n\npurge_trigger\n{\n    # purge root usage > 10M\n    trigger_on         = user_usage;\n    high_threshold_vol = 10MB;\n    low_threshold_vol  = 5MB;\n    check_interval     = 5min ;\n    alert_high = yes;\n    alert_low = yes;\n}\n\npurge_trigger\n{\n    # purge root usage > 50 inodes\n    trigger_on         = user_usage(root);\n    high_threshold_cnt = 50;\n    low_threshold_cnt  = 40;\n    check_interval     = 5min ;\n    alert_high = yes;\n    alert_low = yes;\n}\n"
  },
  {
    "path": "tests/test_suite/cfg/test_trig4.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\n# TEST: migration policy based on last_mod and filename\n\n%include \"common.conf\"\n\n######## Policies for this test ###########\n\nFileClass File1 {\n    definition { name == \"*.1\" }\n}\n\nFileClass File2 {\n    definition { name == \"*.2\" }\n}\n\nFileClass File3 {\n    definition { name == \"*.3\" }\n}\n\nmigration_rules\n{\n    policy default\n    {\n        condition { last_mod >= 1 }\n    }\n}\n\npurge_rules\n{\n    policy one_sec {\n        target_fileclass = File1;\n        condition { last_access >= 1s }\n    }\n\n    policy ten_sec {\n        target_fileclass = File2;\n        condition { last_access >= 5s }\n    }\n\n    policy twenty_sec {\n        target_fileclass = File3;\n        condition { last_access >= 10s }\n    }\n\n    policy default {\n        condition { last_access > 1h }\n    }\n}\n\n####### Purge trigger ########\n\n# periodic trigger\npurge_trigger\n{\n    trigger_on         = periodic;\n    check_interval     = 5s;\n    post_trigger_wait = 0;\n}\n"
  },
  {
    "path": "tests/test_suite/cfg/test_trig_cntpct.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\n%include \"common.conf\"\n\ndefine_policy cleanup {\n    scope { type != directory }\n    status_manager = none;\n    default_action = common.unlink;\n    default_lru_sort_attr = last_access;\n}\n\n######## Policies for this test ###########\n\ncleanup_rules\n{\n    rule default\n    {\n        condition\n        {\n            last_access >= 0\n        }\n    }\n}\n\n####### Purge trigger ########\n\n# trigger on inode count\ncleanup_trigger\n{\n    trigger_on         = $trig_type;\n    high_threshold_cntpct = $high_pct;\n    low_threshold_cntpct  = $low_pct;\n    check_interval     = 5min ;\n    post_trigger_wait = 0;\n}\n"
  },
  {
    "path": "tests/test_suite/cfg/test_updt.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\n# TEST: - check on_event_periodic path update when processing changelogs\n#       - check on_event_periodic md update when processing changelogs\n#       - check periodic fileclass matching\n\nGeneral\n{\n\tfs_path = $RH_ROOT;\n\tfs_type = $FS_TYPE;\n}\n\n# ChangeLog Reader configuration\n# Parameters for processing MDT changelogs :\nChangeLog\n{\n    # 1 MDT block for each MDT :\n    MDT\n    {\n        # name of the first MDT\n        mdt_name  = \"MDT0000\" ;\n\n        # id of the persistent changelog reader\n        # as returned by \"lctl changelog_register\" command\n        reader_id = \"cl1\" ;\n    }\n    force_polling = TRUE;\n    polling_interval = 1s;\n\n    # batch log records 1sec max\n    queue_max_age = 1;\n}\n\nLog\n{\n    # Log verbosity level\n    # Possible values are: CRIT, MAJOR, EVENT, VERB, DEBUG, FULL\n    debug_level = EVENT;\n\n    # Log file\n    log_file = stdout;\n\n    # File for reporting purge events\n    report_file = \"/dev/null\";\n\n    # set alert_file, alert_mail or both depending on the alert method you wish\n    alert_file = \"/tmp/rh_alert.log\";\n\n}\n\nListManager\n{\n\tMySQL\n\t{\n\t\tserver = \"localhost\";\n\t\tdb = $RH_DB;\n        user = \"robinhood\";\n\t\t# password or password_file are mandatory\n\t\tpassword = \"robinhood\";\n        engine = InnoDB;\n\t}\n\n\tSQLite {\n\t        db_file = \"/tmp/robinhood_sqlite_db\" ;\n        \tretry_delay_microsec = 1000 ;\n\t}\n}\n\n# for tests with backup purpose\nbackup_config\n{\n    root = \"/tmp/backend\";\n    mnt_type=ext4;\n    check_mounted = FALSE;\n    recovery_action = common.copy;\n}\n# for tests with shook purpose\nshook_config\n{\n    root = \"/tmp/backend\";\n    mnt_type=ext4;\n    check_mounted = FALSE;\n    recovery_action = common.copy;\n}\n\n\ndb_update_params\n{\n    # 2nd number must be > 4x(1st number)\n    # 14s > 4x3s\n    
path_update = on_event_periodic(3s,14s);\n    md_update   = on_event_periodic(3s,14s);\n    fileclass_update = periodic(5s);\n}\n\n%include \"$RBH_TEST_POLICIES\"\n\nfileclass to_be_ignored\n{\n    definition { name == \"ign*\"}\n}\n\nfileclass to_be_migr\n{\n    definition { name == \"migr*\"}\n}\n\nfileclass to_be_released\n{\n    definition { name == \"purg*\"}\n}\n\nmigration_parameters\n{\n    recheck_ignored_entries = yes;\n}\n\nmigration_rules\n{\n   ignore { name == \"whitelist*\" }\n   ignore_fileclass = to_be_ignored;\n\n   policy migr_match\n   {\n        target_fileclass = to_be_migr;\n        condition { last_mod >= 0 }\n   }\n\n   policy default\n   {\n        condition { last_mod >= 0 }\n   }\n}\n\npurge_parameters\n{\n    recheck_ignored_entries = yes;\n}\n\npurge_rules\n{\n   ignore { name == \"whitelist*\" }\n   ignore_fileclass = to_be_ignored;\n\n   policy purge_match\n   {\n        target_fileclass = to_be_released;\n        condition { last_access >= 0 }\n   }\n\n   policy default\n   {\n        condition { last_access >= 0 }\n   }\n}\n\nEntryProcessor\n{\n    # don't match when scanning\n    match_classes = FALSE;\n}\n"
  },
  {
    "path": "tests/test_suite/cfg/test_uuid.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\n# TEST: basic migration rule based on last_mod\n\n######## Policies for this test ###########\n\nGeneral\n{\n    fs_path = $RH_ROOT;\n    fs_type = $FS_TYPE;\n}\n\n# ChangeLog Reader configuration\n# Parameters for processing MDT changelogs :\nChangeLog\n{\n    # 1 MDT block for each MDT :\n    MDT\n    {\n        # name of the first MDT\n        mdt_name  = \"MDT0000\" ;\n\n        # id of the persistent changelog reader\n        # as returned by \"lctl changelog_register\" command\n        reader_id = \"cl1\" ;\n    }\n    force_polling = TRUE;\n    polling_interval = 1s;\n    mds_has_lu543 = FALSE;\n    mds_has_lu1331 = FALSE;\n}\n\nLog\n{\n    # Log verbosity level\n    # Possible values are: CRIT, MAJOR, EVENT, VERB, DEBUG, FULL\n    debug_level = EVENT;\n\n    # Log file\n    log_file = stdout;\n\n    # File for reporting purge events\n    report_file = \"/dev/null\";\n\n    # set alert_file, alert_mail or both depending on the alert method you wish\n    alert_file = \"/tmp/rh_alert.log\";\n\n}\n\nListManager\n{\n    MySQL\n    {\n        server = \"localhost\";\n        db = $RH_DB;\n        user = \"robinhood\";\n        # password or password_file are mandatory\n        password = \"robinhood\";\n        engine = InnoDB;\n    }\n}\n\n# Lustre/HSM specific configuration\nlhsm_config {\n    uuid {\n        xattr = \"trusted.lhsm.uuid\";\n    }\n}\n\n# this one is generated from original template\n%include \"$RBH_TEST_POLICIES\"\n"
  },
  {
    "path": "tests/test_suite/cfg/test_xattr.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\n# TEST: - Fileset definition based on xattrs\n#       - Migration params\n#       - Migration policies using Filesets\n\n%include \"common.conf\"\n\nFileClass xattr_foo\n{\n        Definition\n        {\n                # xattr foo is set\n                xattr.user.foo != \"\"\n        }\n        migration_action_params { archive_id = 1; }\n}\n\nFileClass xattr_bar\n{\n        Definition\n        {\n                # xattr bar is set\n                xattr.user.bar != \"\"\n        }\n        migration_action_params { archive_id = 2; }\n}\n\n\n######## Policies for this test ###########\n\nmigration_rules\n{\n    rule xattr_foo_policy\n    {\n\t target_fileclass = xattr_foo;\n\t condition { last_mod >= 2s }\n     migration_action_params { class = \"{fileclass}\"; }\n    }\n\n    rule xattr_bar_policy\n    {\n\t target_fileclass = xattr_bar;\n\t condition { last_mod >= 2s }\n     migration_action_params { class = \"{fileclass}\"; }\n    }\n\n    rule default\n    {\n        condition {\n                last_mod >= 2s\n        }\n        action_params {archive_id = 3;}\n    }\n}\n"
  },
  {
    "path": "tests/test_suite/cfg/tokudb1.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\nGeneral\n{\n\tfs_path = $RH_ROOT;\n\tfs_type = $FS_TYPE;\n}\n\nListManager\n{\n\tMySQL\n\t{\n\t\tserver = \"localhost\";\n\t\tdb = $RH_DB;\n        user = \"robinhood\";\n\t\t# password or password_file are mandatory\n\t\tpassword = \"robinhood\";\n        engine = TokuDB;\n\t}\n}\n"
  },
  {
    "path": "tests/test_suite/cfg/tokudb2.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\nGeneral\n{\n\tfs_path = $RH_ROOT;\n\tfs_type = $FS_TYPE;\n}\n\nListManager\n{\n\tMySQL\n\t{\n\t\tserver = \"localhost\";\n\t\tdb = $RH_DB;\n        user = \"robinhood\";\n\t\t# password or password_file are mandatory\n\t\tpassword = \"robinhood\";\n        engine = TokuDB;\n        tokudb_compression = $RBH_TOKU_COMPRESS ;\n\t}\n}\n"
  },
  {
    "path": "tests/test_suite/cfg/uidgidnum.conf",
    "content": "# -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-\n# vim:expandtab:shiftwidth=4:tabstop=4:\n\n\nGeneral\n{\n\tfs_path = $RH_ROOT;\n\tfs_type = $FS_TYPE;\n\tuid_gid_as_numbers = yes;\n}\n\nListManager\n{\n\tMySQL\n\t{\n\t\tserver = \"localhost\";\n\t\tdb = $RH_DB;\n        user = \"robinhood\";\n\t\t# password or password_file are mandatory\n\t\tpassword = \"robinhood\";\n\t}\n}\n\nfileclass uroot1 { definition { owner == root } }\nfileclass uroot2 { definition { owner == 0 } }\nfileclass u7856568 { definition { owner == 7856568 } }\nfileclass g645767 { definition { group == 645767 } }\nfileclass groot { definition { group == root } }\nfileclass mix { definition { group == root or owner == 7856568 } }\n"
  },
  {
    "path": "tests/test_suite/cleanup.sh",
    "content": "#!/bin/sh\nrm -f rh_*.log\n"
  },
  {
    "path": "tests/test_suite/create-random.c",
    "content": "/* Copyright 2015 Cray Inc. All rights reserved.\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n/* Create several files with random names. Names can include any byte\n * except NUL and /, which are not legal in filenames. */\n\n#include <sys/types.h>\n#include <sys/stat.h>\n#include <fcntl.h>\n#include <stdio.h>\n#include <string.h>\n#include <errno.h>\n#include <unistd.h>\n#include <stdlib.h>\n\n#include <glib.h>\n\nstatic int urandom;\nstatic GString *filename;\n\n/* Create a filename. Not the fastest algorithm, but it's good enough\n * for our purpose. */\nstatic char *make_name(const char *dirname, size_t len)\n{\n\tint rc;\n\n\tg_string_printf(filename, \"%s/\", dirname);\n\twhile (len)\n\t{\n\t\tchar byte;\n\n\t\trc = read(urandom, &byte, 1);\n\t\tif (rc == -1)\n\t\t{\n\t\t\tfprintf(stderr, \"urandom read failed: %d\\n\", errno);\n\t\t\treturn NULL;\n\t\t}\n\n\t\tif (byte == '\\0' || byte == '/')\n\t\t\tcontinue;\n\n\t\tg_string_append_c(filename, byte);\n\n\t\tlen--;\n\t}\n\n\treturn filename->str;\n}\n\nint main(int argc, char *argv[])\n{\n\tint i;\n\tconst char *name;\n\tint rc;\n\tint num;\n\tsize_t length;\n\tchar *dirname;\n\n\tif (argc != 4)\n\t{\n\t\tfprintf(stderr, \"%s needs 3 arguments:\\n\", argv[0]);\n\t\tfprintf(stderr, \"  - number of files\\n\");\n\t\tfprintf(stderr, \"  - length of file names (1 to 255)\\n\");\n\t\tfprintf(stderr, \"  - directory where to create the files\\n\");\n\t\treturn EXIT_FAILURE;\n\t}\n\n\tnum = atoi(argv[1]);\n\tif (num <= 0)\n\t{\n\t\tfprintf(stderr, \"invalid number of files to create: %d\\n\", num);\n\t\treturn EXIT_FAILURE;\n\t}\n\n\tlength = atoi(argv[2]);\n\tif (length <= 0 || length >= 256)\n\t{\n\t\tfprintf(stderr,\n\t\t\t\t\"invalid length 
of file names to create: %zu\\n\", length);\n\t\treturn EXIT_FAILURE;\n\t}\n\n\tdirname = argv[3];\n\n\turandom = open(\"/dev/urandom\", O_RDONLY);\n\tif (urandom == -1)\n\t{\n\t\tfprintf(stderr, \"can't open urandom: %d\\n\", errno);\n\t\treturn EXIT_FAILURE;\n\t}\n\n\tfilename = g_string_sized_new(1000);\n\n\tfor (i = 0; i < num; i++)\n\t{\n\t\tchar buf[100];\n\t\tint fd;\n\n\t\tname = make_name(dirname, length);\n\t\tif (name == NULL)\n\t\t\treturn EXIT_FAILURE;\n\n\t\tfd = creat(name, S_IRUSR);\n\t\tif (fd == -1) {\n\t\t\tfprintf(stderr, \"creat failed: %d\\n\", errno);\n\t\t\treturn EXIT_FAILURE;\n\t\t}\n\n\t\tsprintf(buf, \"file with weird name #%d\", i);\n\t\trc = write(fd, buf, strlen(buf));\n\t\tif (rc == -1) {\n\t\t\tfprintf(stderr, \"write failed: %d\\n\", errno);\n\t\t\treturn EXIT_FAILURE;\n\t\t}\n\n\t\tclose(fd);\n\t}\n\n\tclose(urandom);\n\tg_string_free(filename, TRUE);\n\n\treturn EXIT_SUCCESS;\n}\n"
  },
  {
    "path": "tests/test_suite/lsetup.sh",
    "content": "#!/bin/bash\n\nfunction project_quota_supported()\n{\n\tif [ ! -f ./test-framework.sh ]; then\n\t\techo \"test-framework.sh not found\"\n\t\texit 1\n\tfi\n\tgrep \"ENABLE_PROJECT_QUOTAS\" ./test-framework.sh\n}\n\nexport OSTSIZE=400000\nexport OSTCOUNT=4\nexport ENABLE_QUOTA=true\nexport ENABLE_PROJECT_QUOTAS=true\nLUSTRE_SRC_DIR=${LUSTRE_SRC_DIR:-/usr/lib64}\n\nif [[ \"$1\" == \"mount\" || -z \"$1\" ]]; then\n\n\t# moving to lustre test dir\n\tif [[ -d $LUSTRE_SRC_DIR ]]; then\n\t\tcd $LUSTRE_SRC_DIR/lustre/tests\n\telse\n\t\techo \"$LUSTRE_SRC_DIR: no such directory\"\n\t\texit 1\n\tfi\n\n\t# first check if lustre is already mounted\n\tmounted=`mount | grep /mnt/lustre | wc -l`\n\tif (( $mounted > 0 )); then\n\t\techo \"Lustre is already mounted:\"\n\t\tmount | grep /mnt/lustre\n\t\techo \"Unmounting previous instance:\"\n\t\t./llmountcleanup.sh\n\t\tumount /mnt/lustre\n\tfi\n\n\texport QUOTA_USERS=$(head -n 3 /etc/passwd | cut -d ':' -f 1 | \\\n\t\tgrep -v root | xargs)\n\n\techo \"Mounting lustre...\"\n\t./llmount.sh\n\tif ! 
project_quota_supported; then\n\t\tlctl conf_param lustre.quota.ost=ugp ||\n\t\t\techo \"Could not enable project quota\"\n\t\tlctl conf_param lustre.quota.mdt=ugp ||\n\t\t\techo \"Could not enable project quota\"\n\n\t\techo \"Dismounting to apply project quota\"\n\t\t./llmountcleanup.sh\n\t\tfor t in /tmp/lustre-mdt1 /tmp/lustre-ost*; do\n\t\t\ttune2fs -O project,quota $t\n\t\t\ttune2fs -Q prjquota,usrquota,grpquota \"$t\"\n\n\t\t\tdumpe2fs $t | grep quota\n\t\tdone\n\n\t\techo \"Re-mounting lustre...\"\n\t\tNOFORMAT=1 ./llmount.sh\n\t\tlctl conf_param lustre.quota.ost=ugp ||\n\t\t\techo \"Could not enable project quota\"\n\t\tlctl conf_param lustre.quota.mdt=ugp ||\n\t\t\techo \"Could not enable project quota\"\n\tfi\n\n\tmount | grep /mnt/lustre\n    exit $?\n\nelif [[ \"$1\" == \"umount\" ]]; then\n\n\t# moving to lustre test dir\n\tif [[ -d $LUSTRE_SRC_DIR ]]; then\n\t\tcd $LUSTRE_SRC_DIR/lustre/tests\n\telse\n\t\techo \"$LUSTRE_SRC_DIR: no such directory\"\n\t\texit 1\n\tfi\n\n\t# first check if lustre is already mounted\n\tmounted=`mount | grep /mnt/lustre | wc -l`\n\tif (( $mounted > 0 )); then\n\t\techo \"Lustre is mounted:\"\n\t\tmount | grep /mnt/lustre\n\t\techo \"Unmounting Lustre filesystem:\"\n\t\t./llmountcleanup.sh\n\t\tumount /mnt/lustre\n\tfi\n        mount | grep \"/mnt/lustre\" && ( echo \"Filesystem is still mounted\"; exit 1 )\n        exit 0\nelse\n\techo \"Usage: $0 mount|umount\"\n\texit 1\nfi\n"
  },
  {
    "path": "tests/test_suite/prepost_cmd.sh",
    "content": "#!/bin/bash\nmode=$1\nfile=$2\ncontents=$3\n\nif [[ \"$mode\" == \"append\" ]]; then\n\techo \"$contents\" >> $file\nelse\n\techo \"$contents\" > $file\nfi\n"
  },
  {
    "path": "tests/test_suite/rm_script",
    "content": "#!/bin/bash\nfsname=$1\nfid=$2\npath=\"$3\"\n\nrm -f \"$path\"\n"
  },
  {
    "path": "tests/test_suite/valgrind.supp",
    "content": "# These leaks don't appear to be in Robinhood, but may be they are.\n# This file is a work in progress, and should be cleaned up once\n# Robinhood 3 stabilizes.\n\n# ???\n{\n   <insert_a_suppression_name_here>\n   Memcheck:Leak\n   fun:calloc\n   fun:my_thread_init\n   fun:mysql_server_init\n   fun:mysql_init\n   fun:db_connect\n   fun:ListMgr_InitAccess\n   ...\n}\n\n# The threads started by start_worker_threads() or\n# scan_starter_thread() aren't stopped on exit. So silence the\n# allocations they are making.\n{\n   <insert_a_suppression_name_here>\n   Memcheck:Leak\n   fun:calloc\n   fun:_dl_allocate_tls\n   fun:pthread_create@@GLIBC_2.2.5\n   ...\n}\n{\n   <insert_a_suppression_name_here>\n   Memcheck:Leak\n   fun:calloc\n   fun:_dl_allocate_tls\n   fun:pthread_create@@GLIBC_2.2.5\n   ...\n}\n{\n   <insert_a_suppression_name_here>\n   Memcheck:Leak\n   fun:calloc\n   fun:_dl_allocate_tls\n   fun:pthread_create@@GLIBC_2.2.5\n   ...\n}\n\n\n\n# ??\n{\n   <insert_a_suppression_name_here>\n   Memcheck:Param\n   ioctl(generic)\n   fun:ioctl\n   fun:root_ioctl\n   fun:llapi_changelog_clear\n   fun:clear_changelog_records\n   fun:log_record_callback\n   fun:EntryProc_chglog_clr\n   fun:entry_proc_worker_thr\n   fun:start_thread\n   fun:clone\n}\n{\n   <insert_a_suppression_name_here>\n   Memcheck:Param\n   ioctl(generic)\n   fun:ioctl\n   fun:root_ioctl\n   fun:changelog_ioctl\n   fun:llapi_changelog_clear\n   fun:clear_changelog_records\n   fun:log_record_callback\n   fun:EntryProc_chglog_clr\n   fun:entry_proc_worker_thr\n   fun:start_thread\n   fun:clone\n}\n\n# test 214d - read after free\n\n"
  },
  {
    "path": "web_gui/Makefile.am",
    "content": "EXTRA_DIST=gui_v3 gui_v3/README.txt robinhood.conf\n\ndistname=robinhood-webgui-$(VERSION)\n\ndist: dist-webgui\n\ndist-webgui: dist-dir\n\ttar zcvf $(distname).tar.gz $(distname)\n\trm -rf $(distname)\n\ndist-dir:\n\trm -rf $(distname)\n\tcp -r gui_v3 $(distname)\n"
  },
  {
    "path": "web_gui/gui_v3/README.txt",
    "content": "Robinhood new web interface (gui_v3)\n\nI - License\n===========\n\nCopyright (C) 2016 CEA/DAM\n\nThis program is free software; you can redistribute it and/or modify\nit under the terms of the CeCILL-C License.\n\nThe fact that you are presently reading this means that you have had\nknowledge of the CeCILL-C license (http://www.cecill.info) and that you\naccept its terms.\n\nlibs:\njquery: MIT\nDatatables.js: MIT\ngraphjs: MIT\nbootstrap: MIT\nbootstrap-treeview: Apache Licence 2\nMoment.js: MIT\n\nII - Installation\n=================\n\n2.1 Requirements\n\nphp, php-pdo, php-mysql\n\n2.2 Install from tarball\n\ncopy web_gui/gui_v3 to your webserver folder (ex: /var/www/robinhood)\nconfigure your web server (your can use the default configuration robinhood.conf in web_gui for apache)\n\n2.3 Install from RPM\n\nyum install robinhood-webgui\n\nIII - Configuration\n===================\n\nLocal configuration:\nYou can setup a local config_local.php.template file instead of the config.php with default settings\nRename the config_local.php.template in config_local.php and use it instead of config.php\n\nDatabase:\nIn \"config.php\" change the following fields with your db settings\n$DB_HOST     = \"\";\n$DB_NAME     = \"\";\n$DB_USER     = \"\";\n$DB_PWD      = \"\";\n\nSupported DB: Mysql/MariaDB\n\nAuthentication:\nYou need to authenticate the user with your web server (apache: basic auth, ...)\n\nAccess list:\nIn \"config.php\" add permission to $ACCESS_LIST array\n\n\"*\" is Everyone,\n\"$AUTH\" is Authenticated user only\n\"username\" Allow to give access to a specific person\n\"$SELF\" Allow user to access to his own data (match the remote username to uid)\n\nex:\n$ACCESS_LIST['api-ro'][] = '*'; // Give every access to the api in read only\n$ACCESS_LIST['webgui'][] = 'marian'; // Give marian access to the webgui\n$ACCESS_LIST['graphs'][] = '$SELF'; // Allow all users to see their own data as graphs\n\nExample of permissions for a self service 
dashboard (with apache auth):\n$ACCESS_LIST['webgui'][] = '*';\n$ACCESS_LIST['api-ro'][] = '*';\n$ACCESS_LIST['datatables'][] = '$SELF';\n$ACCESS_LIST['graphs'][] = '$SELF';\n$ACCESS_LIST['native_vars'][] = '$AUTH';\n$ACCESS_LIST['native_accts'][] = '$SELF';\n\n\nMisc:\nMAX_ROWS: SQL max results\nJSON_OPTIONS (default: JSON_PRETTY_PRINT): Set default json output\n    JSON_PRETTY_PRINT is slower but human readable, set to 0 to disable\n\nApache:\n\nThe rpm install the configuration robinhood.conf in /etc/httpd/conf.d/\nYou might need to customize this file.\n\nYou can access the webgui at http://yourservername/robinhood\n\n\nIV - API\n=========\n\nFunctions provided by the API\n    *<server-url>/api/robinhood\n        -return a Robinhood quote (just for testing purpose)\n    *<server-url>/api/test\n        -return args as json (debug purposes)\n    *<server-url>/api/current_auth\n        -return your current authentification\n    *<server-url>/api/db_info\n        -return database configuration with status and errors\n    *<server-url>/api/graph/(uid/gid/sizes/files/*_status)\n        -return datas as json using graphjs datasets format\n    *<server-url>/api/data/(uid/gid/files/*_status)\n        -return datas as json using datatables.js format\n    *<server-url>/api/native/table/fields.operator1.operator2/...\n            ex: native/acct/gid.group/size.avg/ #return average size by group\n            ex: ... 
(see bellow)\n        -return direct data from ACCT_STAT\n\nnative syntax:\n\nURL syntax: <server-url>/api/native/<table>/[field.operator[/operator...]]\nnative: request type\n<table>: table requested (acct, vars, files , entries or names)\nlist of request / separated:\n    *field.operator/operator_parameter\n    *...\noperators:\n    *group        -Group result\n    *groupbytime  -Floor a value in seconds by hour/day/week/month/year and group\n    *groupbylog2  -Floor(log2(value)) and group (return NULL if value is 0)\n    *max          -Get the max value when group is used\n    *min          -Get the min value when group is used\n    *count        -Get number of entries when group is used\n    *avg          -Get average when group is used\n    *sum          -Get sum when group is used\n    *concat       -Concat string\n    *remove       -Hide field from result\n    *filter       -Filter result with sql \"LIKE\" (mandatory parameter, wildcard: *)\n    *nfilter      -Filter result with sql \"NOT LIKE\" (mandatory parameter, wildcard: *)\n    *equal        -Filter result with sql \"=\" (mandatory parameter)\n    *less         -Filter result with sql \"<\" (mandatory parameter)\n    *bigger       -Filter result with sql \">\" (mandatory parameter)\n    *soundslike   -Filter result with sql \"SOUNDS LIKE\" (mandatory parameters)\n    *asc          -Sort asc by\n    *desc         -Sort desc by\n\nparameters:\n    *whitelist  -hide all field by default, you have to select them explicitly\n    *limit/int  -Limit the number of results\nthem\n\n<server-url>/api/native/acct request all the table:\n[\n {\n        \"uid\": \"jenkins\",\n        \"gid\": \"jenkins\",\n        \"type\": \"file\",\n        \"lhsm_status\": \"new\",\n        \"checksum_status\": \"\",\n        \"size\": \"9148416\",\n        \"blocks\": \"18280\",\n        \"count\": \"138\",\n        \"sz0\": \"1\",\n        \"sz1\": \"0\",\n        \"sz32\": \"0\",\n        \"sz1K\": \"32\",\n        \"sz32K\": 
\"105\",\n        \"sz1M\": \"0\",\n        \"sz32M\": \"0\",\n        \"sz1G\": \"0\",\n        \"sz32G\": \"0\",\n        \"sz1T\": \"0\"\n    },...\n\nSame, grouped by uid\n<server-url>/api/native/acct/uid.group\n[\n    {\n        \"uid\": \"jenkins\",\n        \"gid_set\": \"jenkins,mysql,testgroup,testuser\",\n        \"type_set\": \"file\",\n        \"lhsm_status_set\": \"new\",\n        \"checksum_status_set\": \",ok\",\n        \"size\": \"61790208\",\n        \"blocks\": \"123488\",\n        \"count\": \"938\",\n        \"sz0\": \"4\",\n        \"sz1\": \"0\",\n        \"sz32\": \"0\",\n        \"sz1K\": \"212\",\n        \"sz32K\": \"722\",\n        \"sz1M\": \"0\",\n        \"sz32M\": \"0\",\n        \"sz1G\": \"0\",\n        \"sz32G\": \"0\",\n        \"sz1T\": \"0\"\n    },...\nThe api automatically makes comma separated list from strings and sum numbers\n\n<server-url>/api/native/acct/uid.group/uid.filter/mysql (native/acct/uid.group.filter/mysql also works)\n[\n    {\n        \"uid\": \"mysql\",\n        \"gid_set\": \"jenkins,mysql,testgroup,testuser\",\n        \"type_set\": \"file\",\n        \"lhsm_status_set\": \"new\",\n        \"checksum_status_set\": \",ok\",\n        \"size\": \"62610432\",\n        \"blocks\": \"125072\",\n        \"count\": \"974\",\n        \"sz0\": \"10\",\n        \"sz1\": \"0\",\n        \"sz32\": \"0\",\n        \"sz1K\": \"231\",\n        \"sz32K\": \"733\",\n        \"sz1M\": \"0\",\n        \"sz32M\": \"0\",\n        \"sz1G\": \"0\",\n        \"sz32G\": \"0\",\n        \"sz1T\": \"0\"\n    }\n]\n\n\nAll together:\n<server-url>/api/native/acct/gid.group/size.avg/blocks.max/checksum_status.remove/lhsm_status.remove/type.remove.filter/file/\n[\n    {\n        \"gid\": \"jenkins\",\n        \"uid_set\": \"jenkins,tcpdump,testuser,sshd,puppet,oprofile,mysql\",\n        \"size_avg\": \"7059387.7333\",\n        \"blocks_max\": \"19464\",\n        \"count\": \"1617\",\n        \"sz0\": \"12\",\n        \"sz1\": \"0\",\n    
    \"sz32\": \"0\",\n        \"sz1K\": \"367\",\n        \"sz32K\": \"1238\",\n        \"sz1M\": \"0\",\n        \"sz32M\": \"0\",\n        \"sz1G\": \"0\",\n        \"sz32G\": \"0\",\n        \"sz1T\": \"0\"\n    },...\n\nSide notes on groupbytime and groupbylog2:\n\nYou can specify the interval in groupbytime by adding .week/.day/.hour\n\nThe following query return the number of files (and size sum) by modification date\n<server-url>/api/native/entries/whitelist/id.count/last_mod.groupbytime.day/size.sum\n[{\n        \"last_mod_by\": \"1495929600\",\n        \"id_count\": \"44\",\n        \"size\": \"244133029\"\n    },\n    {\n        \"last_mod_by\": \"1496016000\",\n        \"id_count\": \"168\",\n        \"size\": \"1747690453\"\n    },...\n]\n\nThe last_mod_by value is an unix timestamp round to an interval.\n\nYou can group entries by size range using groupbylog2 (with option .unit and .hunit to divide the log by 10 or 5).\n\nThe following query return the number of size in range <B, B to KB, KB to MB, ... 
>:\n<server-url>/api/native/entries/whitelist/id.count/size.groupbylog2.unit/size.max\n[\n    {\n        \"size_by\": null,\n        \"id_count\": \"126131\",\n        \"size_max\": \"0\"\n    },\n    {\n        \"size_by\": \"0\",\n        \"id_count\": \"172346\",\n        \"size_max\": \"1023\"\n    },\n    {\n        \"size_by\": \"1\",\n        \"id_count\": \"659586\",\n        \"size_max\": \"1048506\"\n    },\n    {\n        \"size_by\": \"2\",\n        \"id_count\": \"626440\",\n        \"size_max\": \"1041403917\"\n    },\n    {\n        \"size_by\": \"3\",\n        \"id_count\": \"3935\",\n        ...\n]\n\nYou can recreate accounting file size stats with:\n<server-url>/api/native/entries/whitelist/id.count/size.groupbylog2.hunit/\n[\n  {\n    \"size_by\": null,\n    \"id_count\": \"126131\"\n  },\n  {\n    \"size_by\": \"0\",\n    \"id_count\": \"21145\"\n  },\n  {\n    \"size_by\": \"1\",\n    \"id_count\": \"151201\"\n  },...\n]\n\"size_by 0\" count files from 1B to 32B, \"size_by 1\" 32B to 1K, \"size by 2\" 1K to 32KB ...\n\nCommon Robinhood queries:\n\nrbh-report command:\n    rbh-report -u foo -S\nEquivalent URL:\n    native/acct/uid.filter/foo\n\nrbh-report command:\n    rbh-report --top-users --by-count\nEquivalent URL:\n    native/acct/uid.group/count.desc/limit/20\n\nV - Web UD & Plugins\n====================\n5.1 Link to a specific graph with filters\n\nYou can set the filter by passing parameters:\nhttp://hostname/robinhood/index.php?formUID=ROBIN&formFilename=HOOD\nwill preset the UID and Filename filter to ROBIN and HOOD\n\nYou can choose the graph to display:\nhttp://hostname/robinhood/index.php?formUID=user&callGraph=Sizes (uid, gid, Sizes, Files, ...)\n\n5.2 Plugins\n\nPlease refer to the README.txt in plugins folder for development\n\nVI - TODO\n========\n\n*Improve REST api\n*Move from graph/data to native for graph and datatable\n*Allow custom query directly in the interface\n*Move builtin graphs in plugins (Files, ...)\n\nVII - 
Changelog\n==============\nv0.3 @ 18/09/2018\n     -Update GraphJS to 2.7.2 (previously 2.6)\n      -Add plugins\n        -tasks (Copy data in other databases)\n        -netauth (Give access by ip source/hostname)\n        -customgraph (create custom graphs)\n     -Add parameter to disable files view in the UI\n     -Improve console plugin\n     -Partial support for multidatabases\n\nv0.2 @ 25/09/2017\n    -Add plugins\n        -stackgraph (group small values)\n        -internal stats (display robinhood vars)\n        -browser (browse the filesystem)\n        -colorgraph (set readable colors)\n        -ldapauth (use ldap for data access)\n        -console (run custom query from ui)\n        -output (export data as csv)\n    -Allow users to request a specific page\n         and filters by settings parameter\n         in the url\n    -Improve UI\n    -Add native/files API request\n    -Upgrade libs\n\nv0.1 @ 09/09/2016:\n   -Ready for landing (robinhood 3.0-rc2)\n\n"
  },
  {
    "path": "web_gui/gui_v3/api/.htaccess",
    "content": "<IfModule mod_rewrite.c>\nRewriteEngine On\nRewriteCond %{REQUEST_FILENAME} !-f\nRewriteCond %{REQUEST_FILENAME} !-d\nRewriteRule (.*)$ index.php?request=$1 [QSA,NC,L]\n</IfModule>\n"
  },
  {
    "path": "web_gui/gui_v3/api/index.php",
    "content": "<?php\n/*\n * Copyright (C) 2016 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\nrequire_once(\"robinhood.php\");\nrequire_once(\"../common.php\");\nrequire_once(\"../plugin.php\");\n\nif (!check_access(\"api-ro\"))\n    return \"Permission denied\";\n\nif (!array_key_exists('request', $_REQUEST)) {\n    $_REQUEST['request']=\"\";\n}\n\ntry {\n    plugins_call(\"api_preprocess\", $_REQUEST['request']);\n    $API = new MyAPI($_REQUEST['request']);\n    echo $API->processAPI();\n} catch (Exception $e) {\n    echo json_encode(Array('error' => $e->getMessage()));\n}\n\n?>\n"
  },
  {
    "path": "web_gui/gui_v3/api/rest.class.php",
    "content": "<?php\n/*\n * This class is based on Corey Maynard sample available at\n * http://coreymaynard.com/blog/creating-a-restful-api-with-php/\n */\n\nrequire_once(\"robinhood.php\");\nrequire_once(\"../common.php\");\nrequire_once(\"../plugin.php\");\n\nabstract class API\n{\n    /**\n     * Property: method\n     * The HTTP method this request was made in, either GET, POST, PUT or DELETE\n     */\n    protected $method = '';\n    /**\n     * Property: endpoint\n     * The Model requested in the URI. eg: /files\n     */\n    protected $endpoint = '';\n    /**\n     * Property: verb\n     * An optional additional descriptor about the endpoint, used for things that can\n     * not be handled by the basic methods. eg: /files/process\n     */\n    protected $verb = '';\n    /**\n     * Property: args\n     * Any additional URI components after the endpoint and verb have been removed, in our\n     * case, an integer ID for the resource. eg: /<endpoint>/<verb>/<arg0>/<arg1>\n     * or /<endpoint>/<arg0>\n     */\n    protected $args = Array();\n    /**\n     * Property: file\n     * Stores the input of the PUT request\n     */\n    protected $file = Null;\n\n    /**\n     * Constructor: __construct\n     * Allow for CORS, assemble and pre-process the data\n     */\n    public function __construct($request) {\n\n        $this->args = explode('/', rtrim($request, '/'));\n        $this->endpoint = array_shift($this->args);\n        if (array_key_exists(0, $this->args) && !is_numeric($this->args[0])) {\n            $this->verb = array_shift($this->args);\n        }\n\n        if (array_key_exists('REQUEST_METHOD', $_SERVER)) {\n            $this->method = $_SERVER['REQUEST_METHOD'];\n        } else {\n            $this->method = 'GET';\n        }\n        if ($this->method == 'POST' && array_key_exists('HTTP_X_HTTP_METHOD', $_SERVER)) {\n            if ($_SERVER['HTTP_X_HTTP_METHOD'] == 'DELETE') {\n                $this->method = 'DELETE';\n            } else if 
($_SERVER['HTTP_X_HTTP_METHOD'] == 'PUT') {\n                $this->method = 'PUT';\n            } else {\n                throw new Exception(\"Unexpected Header\");\n            }\n        }\n\n        switch($this->method) {\n        case 'DELETE':\n        case 'POST':\n            $this->request = $this->_cleanInputs($_POST);\n            break;\n        case 'GET':\n            $this->request = $this->_cleanInputs($_GET);\n            break;\n        case 'PUT':\n            $this->request = $this->_cleanInputs($_GET);\n            $this->file = file_get_contents(\"php://input\");\n            break;\n        default:\n            $this->_response('Invalid Method', 405);\n            break;\n        }\n    }\n\n    public function processAPI() {\n        if (method_exists($this, $this->endpoint)) {\n            plugins_call(\"api_process\", array($this->endpoint, $this->args));\n            return $this->_response($this->{$this->endpoint}($this->args));\n        }\n        return $this->_response(\"No Endpoint: $this->endpoint\", 404);\n    }\n\n    private function _response($data, $status = 200) {\n        global $JSON_OPTIONS;\n        header(\"Access-Control-Allow-Origin: *\");\n        header(\"Access-Control-Allow-Methods: *\");\n        header(plugins_call(\"api_header_type\", \"Content-Type: application/json\"));\n        header(\"HTTP/1.1 \" . $status . \" \" . 
$this->_requestStatus($status));\n        $response_data = plugins_call(\"api_response\", $data);\n        if ($response_data != $data)\n            return $response_data;\n        return json_encode($data, $JSON_OPTIONS);\n    }\n\n    private function _cleanInputs($data) {\n        $clean_input = Array();\n        if (is_array($data)) {\n            foreach ($data as $k => $v) {\n                $clean_input[$k] = $this->_cleanInputs($v);\n            }\n        } else {\n            $clean_input = trim(strip_tags($data));\n        }\n        return $clean_input;\n    }\n\n    private function _requestStatus($code) {\n        $status = array(\n            200 => 'OK',\n            404 => 'Not Found',\n            405 => 'Method Not Allowed',\n            500 => 'Internal Server Error',\n        );\n        return ($status[$code])?$status[$code]:$status[500];\n    }\n}\n\n?>\n"
  },
  {
    "path": "web_gui/gui_v3/api/robinhood.php",
    "content": "<?php\n/*\n * Copyright (C) 2016-2017 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n\nrequire_once \"../config.php\";\nrequire_once \"../common.php\";\nrequire_once \"../plugin.php\";\nrequire_once 'rest.class.php';\n\nclass MyAPI extends API\n{\n    protected $User;\n    /***************************************\n     * Test function to check the api works\n     **************************************/\n    protected function robinhood() {\n        if ($this->method == 'GET') {\n            return \"\\\"Fear not, my friends. This will be my greatest performance.\\\"\";\n        } else {\n            return \"\\\"Faint hearts never won fair ladies.\\\"\";\n        }\n    }\n\n    /*****************************************\n     * Test function that returns args as json\n     ****************************************/\n    protected function test() {\n        if ($this->method == 'GET') {\n            return $this->args;\n        } else {\n            return \"\\\"Faint hearts never won fair ladies.\\\"\";\n        }\n    }\n\n    /***************************************\n     * return your current AUTH\n     **************************************/\n    protected function current_auth() {\n        if ($this->method == 'GET') {\n            return check_access('api-ro');\n        } else {\n            return \"\\\"Faint hearts never won fair ladies.\\\"\";\n        }\n    }\n\n\n    /***************************************\n     * return your current AUTH\n     **************************************/\n    protected function db_info() {\n\tglobal $DBA;\n        if ($this->method == 'GET') {\n            if (!check_access('dbinfo'))\n\t\treturn \"Permission denied\";\n\t    return $DBA;\n        } else 
{\n            return \"\\\"Faint hearts never won fair ladies.\\\"\";\n        }\n    }\n\n\n    /***************************************\n     * return differents kinds of graph\n     * JSON output with graphjs format\n     **************************************/\n    protected function native() {\n        global $db;\n\tglobal $CURRENT_DB;\n\n        if ($this->method == 'GET') {\n\n            $content_requested = $this->verb;\n            switch ($content_requested) {\n            case 'vars':\n                $self='$SELF';\n                if (!check_access(\"native_vars\")) {\n                    $self = check_self_access(\"native_vars\");\n                    if (!$self)\n                        return \"Permission denied\";\n                }\n\n                $req = $db[$CURRENT_DB]->prepare(\"SELECT * from VARS;\");\n                $req->execute();\n                $data = array();\n                while($sqldata = $req->fetch(PDO::FETCH_ASSOC)){\n                    $data[$sqldata['varname']] = $sqldata['value'];\n                }\n                break;\n\n            case 'acct':\n                $self = '$SELF';\n                if (!check_access(\"native_acct\")) {\n                    $self = check_self_access(\"native_acct\");\n                    if (!$self)\n                        return \"Permission denied\";\n                }\n                $fullfilter = build_advanced_filter($this->args, $self, \"ACCT_STAT\");\n                $req = $db[$CURRENT_DB]->prepare($fullfilter[0]);\n                $req->execute($fullfilter[1]);\n                $data = $req->fetchall(PDO::FETCH_ASSOC);\n                break;\n\n            case 'files':\n                $self = '$SELF';\n                if (!check_access(\"native_files\")) {\n                    $self = check_self_access(\"native_files\");\n                    if (!$self)\n                        return \"Permission denied\";\n                }\n                $fullfilter = 
build_advanced_filter($this->args, $self, \"NAMES\", \"ENTRIES\");\n                $req = $db[$CURRENT_DB]->prepare($fullfilter[0]);\n                $req->execute($fullfilter[1]);\n                $data = $req->fetchall(PDO::FETCH_ASSOC);\n                break;\n\n            case 'entries':\n                $self = '$SELF';\n                if (!check_access(\"native_entries\")) {\n                    $self = check_self_access(\"native_entries\");\n                    if (!$self)\n                        return \"Permission denied\";\n                }\n                $fullfilter = build_advanced_filter($this->args, $self, \"ENTRIES\");\n                $req = $db[$CURRENT_DB]->prepare($fullfilter[0]);\n                $req->execute($fullfilter[1]);\n                $data = $req->fetchall(PDO::FETCH_ASSOC);\n                break;\n\n            case 'names':\n                $self = '$SELF';\n                if (!check_access(\"native_names\")) {\n                    $self = check_self_access(\"native_names\");\n                    if (!$self)\n                        return \"Permission denied\";\n                }\n                $fullfilter = build_advanced_filter($this->args, $self, \"NAMES\");\n                $req = $db[$CURRENT_DB]->prepare($fullfilter[0]);\n                $req->execute($fullfilter[1]);\n                $data = $req->fetchall(PDO::FETCH_ASSOC);\n                break;\n\n            default:\n                $data = plugins_call(\"api_native\", [$content_requested, $this->args]);\n            }\n            return $data;\n\n        } else {\n            return \"\\\"Faint hearts never won fair ladies.\\\"\";\n        }\n\n\n    }\n    /***************************************\n     * return differents kinds of graph\n     * JSON output with graphjs format\n     **************************************/\n    protected function graph() {\n        global $db;\n\tglobal $CURRENT_DB;\n        if ($this->method == 'GET') {\n            $self 
= '$SELF';\n            if (!check_access(\"graphs\")) {\n                $self = check_self_access(\"graphs\");\n                if (!$self)\n                    return \"Permission denied\";\n            }\n\n            $content_requested = $this->verb;\n            $data = array();\n            $labels = array();\n            $size = array();\n            $count = array();\n            $color = array();\n\n            switch ($content_requested) {\n            case 'uid':\n            case 'gid':\n                $fullfilter = build_filter($this->args, array('uid'=>'uid', 'gid'=>'gid','maxsize'=>'SUM(size)', 'minsize'=>'SUM(size)'), $self);\n                $sqlfilter = $fullfilter[0];\n                $havingfilter = $fullfilter[2];\n                $sqlreq = \"SELECT $content_requested, SUM(size) AS ssize, SUM(count) AS scount FROM ACCT_STAT $sqlfilter GROUP BY $content_requested $havingfilter\";\n                $sqlreq = plugins_call(\"graph_presql_uid\", $sqlreq);\n                $req = $db[$CURRENT_DB]->prepare($sqlreq);\n                $req->execute($fullfilter[1]);\n                while($sqldata = $req->fetch(PDO::FETCH_ASSOC)) {\n                    $labels[] = $sqldata[$content_requested];\n                    $size[] = $sqldata['ssize'];\n                    $count[] = $sqldata['scount'];\n                    $color[] = string_color($sqldata[$content_requested]);\n                }\n\n                $data = array(\n                    'labels' => $labels,\n                    'default_graph' => 'doughnut',\n                    'filter' => array(),\n                    'datasets' => array()\n                );\n                $data['datasets'][] = array('data'=>$size, 'backgroundColor'=>$color, 'label'=>'size', 'unit'=>'size');\n                $data['datasets'][] = array('data'=>$count, 'backgroundColor'=>$color, 'label'=>'count', 'unit'=>'count');\n\n                $data = plugins_call(\"graph_postdata_uid\", $data);\n                
break;\n\n            case 'Sizes':\n                $fullfilter = build_filter($this->args, array('uid'=>'uid', 'gid'=>'gid'), $self);\n                $sqlfilter = $fullfilter[0];\n                $ssize = array(\"sz0\", \"sz1\", \"sz32\", \"sz1K\", \"sz32K\", \"sz1M\", \"sz32M\", \"sz1G\", \"sz32G\", \"sz1T\");\n                $select_str = \"SUM(sz0) AS ssz0\";\n                foreach ($ssize as $ssz)\n                    $select_str = $select_str.\", SUM($ssz) AS s$ssz\";\n                $sqlreq = \"SELECT $select_str FROM ACCT_STAT $sqlfilter;\";\n                $sqlreq = plugins_call(\"graph_presql_sizes\", $sqlreq);\n                $req = $db[$CURRENT_DB]->prepare($sqlreq);\n                $req->execute($fullfilter[1]);\n                while($sqldata = $req->fetch(PDO::FETCH_ASSOC)) {\n                    foreach ($ssize as $ssz) {\n                        $labels[] = l($ssz);\n                        $count[] = $sqldata['s'.$ssz];\n                        $color[] = \"#DDDDFF\";\n                    }\n                }\n\n                $data = array(\n                    'labels' => $labels,\n                    'default_graph' => 'bar',\n                    'filter' => array(),\n                    'datasets' => array()\n                );\n                $data['datasets'][] = array('data'=>$count, 'backgroundColor'=>$color, 'label'=>'Number of files', 'unit'=>'count');\n\n                $data = plugins_call(\"graph_postdata_sizes\", $data);\n                break;\n\n            case 'Files':\n                global $MAX_ROWS;\n                $fullfilter = build_filter($this->args, array('filename'=>'name', 'uid'=>'uid', 'gid'=>'gid', 'offset'=>'offset','maxsize'=>'size', 'minsize'=>'size'), $self);\n                $sqlfilter=$fullfilter[0];\n                $offset = 0;\n                if (array_key_exists('k_offset',$fullfilter[1])) {\n                    $offset = intval($fullfilter[1]['k_offset']);\n                    
unset($fullfilter[1]['k_offset']);\n                }\n                $req = $db[$CURRENT_DB]->prepare(\"SELECT uid, gid, size, blocks, name, type, creation_time, last_access, last_mod \".\n                    \"FROM ENTRIES LEFT JOIN NAMES ON ENTRIES.id = NAMES.id $sqlfilter LIMIT $MAX_ROWS OFFSET $offset\");\n                $req->execute($fullfilter[1]);\n                $count = $req->rowCount();\n                while($sqldata = $req->fetch(PDO::FETCH_ASSOC)) {\n                    $labels[] = $sqldata['uid'];\n                    $size[] = array('x' => $sqldata['last_access'], 'y'=> $sqldata['size'], 'r'=>'2');\n                    $color[] = string_color($sqldata['gid']);\n                }\n\n                $data = array(\n                    'labels' => $labels,\n                    'limited' => ($count == $MAX_ROWS) ? $MAX_ROWS : false,\n                    'offset' => $offset,\n                    'default_graph' => 'bubble',\n                    'filter' => array(),\n                    'datasets' => array()\n                );\n\n                $data['datasets'][] = array('data'=>$size, 'backgroundColor'=>$color, 'label'=>'Last Access VS Size');\n\n                break;\n\n\n\n            default:\n                $fullfilter = build_filter($this->args, array('filename'=>'name', 'uid'=>'uid', 'gid'=>'gid'), $self);\n                $sqlfilter = $fullfilter[0];\n\n                if (endsWith($content_requested, \"_status\")) {\n                    $req = $db[$CURRENT_DB]->prepare(\"SELECT $content_requested AS sstatus, SUM(size) AS ssize, SUM(count) AS scount FROM ACCT_STAT $sqlfilter GROUP BY $content_requested;\");\n                    $req->execute($fullfilter[1]);\n                    while($sqldata = $req->fetch(PDO::FETCH_ASSOC)) {\n                        $labels[] = ($sqldata['sstatus'] == '') ? 
'None': $sqldata['sstatus'];\n                        $size[] = $sqldata['ssize'];\n                        $count[] = $sqldata['scount'];\n                        $color[] = string_color($sqldata['sstatus']);\n                    }\n\n                    $data = array(\n                        'labels' => $labels,\n                        'default_graph' => 'doughnut',\n                        'filter' => array(),\n                        'datasets' => array()\n                    );\n                    $data['datasets'][] = array('data'=>$size, 'backgroundColor'=>$color, 'label'=>'size', 'unit'=>'size');\n                    $data['datasets'][] = array('data'=>$count, 'backgroundColor'=>$color, 'label'=>'count', 'unit'=>'count');\n\n                } else {\n                    //provide sample data\n                    $data = array(\n                        'labels' => array(\"un\", \"deux\", \"trois\"),\n                        'datasets' => array()\n                    );\n                    $data['datasets'][] = array('data'=>array(\"300\", \"50\", \"100\"));\n                }\n                break;\n            }\n            return $data;\n\n        } else {\n            return \"\\\"Faint hearts never won fair ladies.\\\"\";\n        }\n    }\n\n    /****************************************************\n     * return your data as json in \"datatables.js\" format\n     ***************************************************/\n    protected function data() {\n        if ($this->method == 'GET') {\n            global $db;\n\t    global $CURRENT_DB;\n\n            $self='$SELF';\n            if (!check_access(\"datatables\")) {\n                $self = check_self_access(\"datatables\");\n                if (!$self)\n                    return \"Permission denied\";\n            }\n\n            $content_requested = $this->verb;\n            $data = array();\n            $columns = array();\n            $columnsDefs = array();\n            $datasets = 
array();\n            switch ($content_requested) {\n            case 'uid':\n            case 'gid':\n                $fullfilter = build_filter($this->args, array('uid'=>'uid', 'gid'=>'gid'), $self);\n                $sqlfilter = $fullfilter[0];\n\n                $columns[] = array('title' => $content_requested);\n                $columns[] = array('title' => 'Size');\n                $columns[] = array('title' => 'File Count');\n                $columnsDefs[] = array('type' => 'file-size', 'targets' => 1);\n                $req = $db[$CURRENT_DB]->prepare(\"SELECT $content_requested, SUM(size) AS ssize, SUM(count) AS scount FROM ACCT_STAT $sqlfilter GROUP BY $content_requested;\");\n                $req->execute($fullfilter[1]);\n                while($sqldata = $req->fetch(PDO::FETCH_ASSOC)) {\n                    $datasets[] = array( $sqldata[$content_requested],formatSizeNumber($sqldata['ssize']),$sqldata['scount']);\n                }\n                break;\n\n            case 'Sizes':\n                $fullfilter = build_filter($this->args, array('uid'=>'uid', 'gid'=>'gid'), $self);\n                $sqlfilter = $fullfilter[0];\n\n                $columns[] = array('title' => 'Owner');\n                $ssize = array(\"sz0\", \"sz1\", \"sz32\", \"sz1K\", \"sz32K\", \"sz1M\", \"sz32M\", \"sz1G\", \"sz32G\", \"sz1T\");\n                $select_str = \"SUM(sz0) AS ssz0\";\n                foreach ($ssize as $ssz) {\n                    $select_str = $select_str.\", SUM($ssz) AS s$ssz\";\n                    $columns[] = array('title' => l($ssz));\n                }\n                $req = $db[$CURRENT_DB]->prepare(\"SELECT uid, $select_str FROM ACCT_STAT $sqlfilter GROUP BY uid;\");\n                $req->execute($fullfilter[1]);\n                while($sqldata = $req->fetch(PDO::FETCH_ASSOC)) {\n                    $list = array();\n                    $list[] = $sqldata[\"uid\"];\n                    foreach ($ssize as $ssz) {\n                        
$list[] = $sqldata[\"s\".$ssz];\n                    }\n                    $datasets[] = $list;\n                }\n                break;\n\n            case 'Files':\n                global $MAX_ROWS;\n                $fullfilter = build_filter($this->args, array('filename'=>'name', 'uid'=>'uid', 'gid'=>'gid','offset'=>'offset','maxsize'=>'size', 'minsize'=>'size'), $self);\n                $sqlfilter=$fullfilter[0];\n                $offset = 0;\n                if (array_key_exists('k_offset',$fullfilter[1])) {\n                    $offset=intval($fullfilter[1]['k_offset']);\n                    unset($fullfilter[1]['k_offset']);\n                }\n                $req = $db[$CURRENT_DB]->prepare(\"SELECT uid, gid, size, blocks, name, type, from_unixtime(creation_time) AS creation_time\".\n                    \", from_unixtime(last_access) AS last_access, from_unixtime(last_mod) AS last_mod\".\n                    \" FROM ENTRIES LEFT JOIN NAMES ON ENTRIES.id = NAMES.id $sqlfilter LIMIT $MAX_ROWS OFFSET $offset\");\n                $req->execute($fullfilter[1]);\n\n                //we should autorize the user to see his own files\n                $columns[] = array('title' => 'uid');\n                $columns[] = array('title' => 'gid');\n                $columns[] = array('title' => 'size');\n                $columns[] = array('title' => 'blocks');\n                $columns[] = array('title' => 'name');\n                $columns[] = array('title' => 'type');\n                $columns[] = array('title' => 'creation_time');\n                $columns[] = array('title' => 'last_access');\n                $columns[] = array('title' => 'last_mod');\n                $columnsDefs[] = array('type' => 'file-size', 'targets' => 3);\n                $count = $req->rowCount();\n                $data['limited'] = ($count == $MAX_ROWS) ? 
$MAX_ROWS : false;\n                $data['offset'] = $offset;\n                while($sqldata = $req->fetch(PDO::FETCH_ASSOC)) {\n\n                    $datasets[] = array_values($sqldata);\n                }\n                break;\n\n\n            default:\n                if (endsWith($content_requested, \"_status\")) {\n                    $fullfilter = build_filter($this->args, array('filename'=>'name', 'uid'=>'uid', 'gid'=>'gid'), $self);\n                    $sqlfilter = $fullfilter[0];\n\n                    $columns[] = array('title' => 'Status');\n                    $columns[] = array('title' => 'Size');\n                    $columns[] = array('title' => 'File Count');\n                    $columnsDefs[] = array('type' => 'file-size', 'targets' => 1);\n                    $req = $db[$CURRENT_DB]->prepare(\"SELECT $content_requested AS sstatus, SUM(size) AS ssize, SUM(count) AS scount FROM ACCT_STAT $sqlfilter GROUP BY $content_requested;\");\n                    $req->execute($fullfilter[1]);\n\n                    while($sqldata = $req->fetch()) {\n                        $datasets[] = array( ($sqldata['sstatus'] == '') ? 'None': $sqldata['sstatus'],formatSizeNumber($sqldata['ssize']),$sqldata['scount']);\n                    }\n                }\n                break;\n            }\n            $data['columns'] = $columns;\n            $data['datasets'] = $datasets;\n            $data['columnsDefs'] = $columnsDefs;\n            return $data;\n\n        } else {\n            return \"\\\"Faint hearts never won fair ladies.\\\"\";\n        }\n    }\n\n\n}\n\n?>\n"
  },
  {
    "path": "web_gui/gui_v3/common.php",
    "content": "<?php\n/*\n * Copyright (C) 2016 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\nfunction is_assoc($var)\n{\n        return is_array($var) && array_diff_key($var,array_keys(array_keys($var)));\n}\n\n/**\n *\n * Just the \"standard\" startsWith\n *\n * @param string $haystack haystack.\n * @param string $needle needle.\n * @return bool \"startsWith\" or false is needle not a string\n */\nfunction startsWith($haystack, $needle) {\n    // search backwards starting from haystack length characters from the end\n    return $needle === \"\" || strrpos($haystack, $needle, -strlen($haystack)) !== false;\n}\n\n/**\n *\n * Just the \"standard\" endsWith\n *\n * @param string $haystack haystack.\n * @param string $needle needle.\n * @return bool \"endsWith\" or false is needle not a string\n */\nfunction endsWith($haystack, $needle) {\n    // search forward starting from end minus needle length characters\n    return $needle === \"\" || (($temp = strlen($haystack) - strlen($needle)) >= 0 && strpos($haystack, $needle, $temp) !== false);\n}\n\n/**\n *\n * Convert size to human readable format\n *\n * @param int $number number to convert\n * @param int optional $precision number of digit\n * @return string human readable size\n */\nfunction formatSizeNumber( $number, $precision=2 )\n{\n    if ($number === 0)\n\treturn '0';\n\n    $base = log($number, 1024);\n    $suffixes = array('B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZT', 'YT');\n\n    return round(pow(1024, $base - floor($base)), $precision) .' '. 
$suffixes[floor($base)];\n}\n\n\n/**\n *\n * Get Columns from ACCT_STAT tables and clean/convert them\n *\n * @return array array of columns names as string\n */\nfunction get_acct_columns($all=false) {\n    global $FIELD_LIST;\n    global $DB_LASTERROR;\n    global $DBA;\n    global $CURRENT_DB;\n    global $db;\n    $final = array();\n    if (!$db[$CURRENT_DB])\n        return $final;\n    $result = $db[$CURRENT_DB]->query(\"select column_name from information_schema.columns where table_name = 'ACCT_STAT' AND TABLE_SCHEMA = '\".$DBA[$CURRENT_DB][\"DB_NAME\"].\"';\");\n    if ($result->rowCount() <1) {\n        $DB_LASTERROR = 'Something goes wrong with db schema: ACCT_STAT doesn\\'t exist';\n        return $final;\n    }\n    if ($result->rowCount() > 0) {\n        while ($row = $result->fetch()) {\n            if (array_key_exists($row[0], $FIELD_LIST) and !$all) {\n                if (!in_array($FIELD_LIST[$row[0]], $final) and $FIELD_LIST[$row[0]]) {\n                    $final[] = $FIELD_LIST[$row[0]];\n                }\n            } else {\n                $final[] = $row[0];\n            }\n        }\n    }\n    return array_unique($final);\n}\n\n\n\n/**\n *\n * get user\n *\n * @return string user\n */\nfunction get_user()\n{\n    $user = False;\n    if (isset($_SERVER['PHP_AUTH_USER'])) {\n        $user = $_SERVER['PHP_AUTH_USER'];\n    } else {\n            $user = '$NOAUTH';\n    }\n    $user = plugins_call(\"get_user\", $user);\n    return $user;\n}\n/**\n *\n * check user access\n *\n * @param string $part Name of the access to be check\n * @return bool\n */\nfunction check_access($part)\n{\n    GLOBAl $ACCESS_LIST;\n    $user = '';\n    if (get_user()) {\n        $user = get_user();\n    } else {\n            $user='$NOAUTH';\n    }\n    if (in_array('*', $ACCESS_LIST[$part]))\n        return $user;\n    if (in_array('$AUTH', $ACCESS_LIST[$part]))\n        return $user;\n    if (in_array($user, $ACCESS_LIST[$part]))\n        return $user;\n    
return False;\n}\n\n\n/**\n *\n * build sql filter from privileges\n *\n * @param array list of uid, uidnumber and groups\n * @return string SQL filter\n */\nfunction build_sql_access($part)\n{\n    $sql_where = \"(\";\n    $sql_where.= \"uid IN (\".implode(\",\", $part['uids']).\") OR gid IN (\";\n    $sql_where.= implode(\",\", $part['groups']).\")\";\n    return $sql_where;\n}\n\n\n/**\n *\n * check user self access\n *\n * Allow user to access to his own data\n *\n * @param string $part Name of the access to be checked\n * @return bool\n */\nfunction check_self_access($part)\n{\n    GLOBAl $ACCESS_LIST;\n    $user='';\n    if (get_user()) {\n        $user = get_user();\n    } else {\n        return False;\n    }\n    if (in_array('$SELF', $ACCESS_LIST[$part]))\n        return $user;\n    return False;\n}\n\n/**\n *\n * generate HEX color\n *\n * @return string \"#RRGGBB\"\n */\nfunction rand_color() {\n    return sprintf('#%06X', mt_rand(0, 0xFFFFFF));\n}\n\n/**\n *\n * generate HEX color from string\n *\n * @param string $str\n * @return string \"#RRGGBB\"\n */\nfunction string_color($str){\n    return '#'.substr(md5($str), 0, 6);\n}\n\n\nfunction get_filter_from_list($datalist, $term)\n{\n    $i = array_search($term, $datalist);\n    if ($i === false) {\n        return false;\n    }\n    if ($i+1 < count($datalist)) {\n        if ($datalist[$i+1] == '')\n            return False;\n        else\n            return $datalist[$i+1];\n    }\n    return false;\n}\n\n/**\n *\n * Build SQLRequest/Table from REST Args\n *\n * @param array $args REST args as key/val/key/val/... 
list\n * @param self Filter to show only user data (for self service)\n * @return array String,Array with sql request and array of filter\n */\nfunction build_filter($args, $filter, $self='$SELF') {\n    $sqlfilter = \"\";\n    $havingfilter = \"\";\n    $values = array();\n\n    //Ensure uid if present for self usage\n    if ($self!='$SELF')\n        $filter['uid'] = 'uid';\n\n    foreach ($filter as $k => $v) {\n        $op=\"LIKE\";\n        if (startsWith($k, \"min\"))\n            $op=\">\";\n        if (startsWith($k, \"max\"))\n            $op=\"<\";\n        if(get_filter_from_list($args, $k)) {\n            $val = get_filter_from_list($args, $k);\n            if ($v != 'offset') {\n                if (strstr($v, \"(\") != false){\n                    if ($havingfilter != \"\")\n                        $havingfilter = $havingfilter.\" AND \";\n                    $havingfilter = $havingfilter.\"$v $op :k_$k \";\n                }else{\n                    if ($sqlfilter != \"\")\n                        $sqlfilter = $sqlfilter.\" AND \";\n                    $sqlfilter = $sqlfilter.\"$v $op :k_$k \";\n                }\n            }\n            $values[\"k_$k\"] = $val;\n        } elseif ($self != '$SELF' && $k == 'uid') {\n            if ($v!='offset') {\n                if (strstr($v, \"(\") != false){\n                    if ($havingfilter != \"\")\n                        $havingfilter = $havingfilter.\" AND \";\n                    $havingfilter = $havingfilter.\"$v $op :k_$k \";\n                }else{\n                    if ($sqlfilter != \"\")\n                        $sqlfilter = $sqlfilter.\" AND \";\n                    $sqlfilter = $sqlfilter.\"$v $op :k_$k \";\n                }\n            }\n            $values[\"k_$k\"] = $self;\n        }\n    }\n\n    if ($sqlfilter != \"\")\n        $sqlfilter = \" WHERE \".$sqlfilter;\n\n    if ($havingfilter != \"\")\n        $havingfilter = \" HAVING \".$havingfilter;\n\n    return 
array($sqlfilter, $values, $havingfilter);\n}\n\n/**\n *\n * Build SQLRequest from REST args\n *\n * @param array $args REST args (=>README.txt)\n * @param array $access User identity\n * @param string $table mysql table\n * @param string $join mysql table to join\n * @return array String,Array with sql request and array of filter\n */\nfunction build_advanced_filter($args, $access = '$SELF', $table, $join = false) {\n    global $db;\n    global $DB_LASTERROR;\n    global $DBA;\n    global $CURRENT_DB;\n\n    $shortcuts = array();\n    $fields = array();\n    $select = array();\n    $filter = array();\n    $operator = array();\n    $group = array();\n    $group_select = array();\n    $order_by = array();\n    $select_cache = array();\n    $values = array();\n    $whitelist=false;\n    $limit=false;\n\n    $shortcuts['GROUP_CONCAT'] = \"_set\";\n    $shortcuts['COUNT'] = \"_count\";\n    $shortcuts['MAX'] = \"_max\";\n    $shortcuts['MIN'] = \"_min\";\n    $shortcuts['AVG'] = \"_avg\";\n    $shortcuts['*'] = \"_all\";\n\n    $sqlrequest = \"SELECT \";\n    $ttable = \"table_name ='$table'\";\n\n    if (in_array(\"whitelist\", $args))\n        $whitelist=true;\n\n    $i = array_search(\"limit\", $args);\n    if ($i)\n        $limit=intval($args[$i+1]);\n\n    if ($join)\n        $ttable = $ttable.\" OR table_name='$join'\";\n\n    $result = $db[$CURRENT_DB]->query(\"SELECT column_name,column_type,table_name FROM information_schema.columns WHERE ($ttable) AND TABLE_SCHEMA = '\".$DBA[$CURRENT_DB][\"DB_NAME\"].\"';\");\n    if ($result->rowCount() <1) {\n        $DB_LASTERROR = 'Something goes wrong with db schema: $TABLE doesn\\'t exist';\n        exit;\n    }\n    if ($result->rowCount() > 0) {\n        while ($row = $result->fetch()) {\n            $fields[$row[0]] = $row[1];\n            $grouptype = false;\n            if (strstr($row[1], \"int\")!=false)\n                $grouptype=\"SUM\";\n            elseif (strstr($row[1], \"var\")!=false)\n                
$grouptype=\"GROUP_CONCAT\";\n            elseif (strstr($row[1], \"enum\")!=false)\n                $grouptype=\"GROUP_CONCAT\";\n            if (!$whitelist) {\n                if ($join)\n                    $select[$row[2].'.'.$row[0]] = $grouptype;\n                else\n                    $select[$row[0]] = $grouptype;\n            }\n        }\n        $i=0;\n        foreach ($args as $arg) {\n            if (strstr($arg, \".\")!=false) {\n                $prop = explode(\".\", $arg);\n                $field = $prop[0];\n                if ($join && $field==\"id\")\n                    $field = $table.\".\".$field;\n                unset($prop[0]);\n                if (array_key_exists($field, $fields) OR $field==\"*\") {\n                    if (in_array(\"group\", $prop)) {\n                        $group[] = $field;\n                        $group_select[] = $field;\n                        unset($select[$field]);\n                    }\n                    if (in_array(\"groupbytime\", $prop)) {\n                        $interval = \"86400\";\n                        if (in_array(\"hour\", $prop))\n                            $interval = \"3600\";\n                        if (in_array(\"day\", $prop))\n                            $interval = \"86400\";\n                        if (in_array(\"week\", $prop))\n                            $interval = \"604800\";\n                        if (in_array(\"month\", $prop))\n                            $interval = \"26280030\";\n                        if (in_array(\"year\", $prop))\n                            $interval = \"315360365\";\n\n                        $group[] = \"FLOOR(\".$field.\"/\".$interval.\")*\".$interval;\n                        $group_select[] = \"FLOOR(\".$field.\"/\".$interval.\")*\".$interval.\" AS \".$field.\"_by\";\n                        unset($select[$field]);\n                    }\n                    if (in_array(\"groupbylog2\", $prop)) {\n                        $div = \"\";\n 
                       if (in_array(\"unit\", $prop))\n                                $div = \"/10\";\n                        if (in_array(\"hunit\", $prop))\n                            $div = \"/5\";\n                        $group[] = \"FLOOR(LOG2(\".$field.\")$div)\";\n                        $group_select[] = \"FLOOR(LOG2(\".$field.\")$div)  AS \".$field.\"_by\";\n                        unset($select[$field]);\n                    }\n                    if (in_array(\"count\", $prop)) {\n                        $select[$field] = \"COUNT\";\n                    }\n                    if (in_array(\"max\", $prop)) {\n                        $select[$field] = \"MAX\";\n                    }\n                    if (in_array(\"min\", $prop)) {\n                        $select[$field] = \"MIN\";\n                    }\n                    if (in_array(\"avg\", $prop)) {\n                        $select[$field] = \"AVG\";\n                    }\n                    if (in_array(\"sum\", $prop)) {\n                        $select[$field] = \"SUM\";\n                    }\n                    if (in_array(\"concat\", $prop)) {\n                        $select[$field] = \"GROUP_CONCAT\";\n                    }\n                    if (in_array(\"remove\", $prop)) {\n                        unset($select[$field]);\n                    }\n                    if (in_array(\"filter\", $prop)) {\n                        $filter[$field] = $args[$i+1];\n                        $operator[$field] = \"LIKE\";\n                    }\n                    if (in_array(\"nfilter\", $prop)) {\n                        $filter[$field] = $args[$i+1];\n                        $operator[$field] = \"NOT LIKE\";\n                    }\n                    if (in_array(\"equal\", $prop)) {\n                        $filter[$field] = $args[$i+1];\n                        $operator[$field] = \"=\";\n                    }\n                    if (in_array(\"less\", $prop)) {\n                 
       $filter[$field] = $args[$i+1];\n                        $operator[$field] = \"<\";\n                    }\n                    if (in_array(\"bigger\", $prop)) {\n                        $filter[$field] = $args[$i+1];\n                        $operator[$field] = \">\";\n                    }\n                    if (in_array(\"soundslike\", $prop)) {\n                        $filter[$field] = $args[$i+1];\n                        $operator[$field] = \"SOUNDS LIKE\";\n                    }\n                    if (in_array(\"asc\", $prop)) {\n                        $order_by[$field] = \"ASC\";\n                    }\n                    if (in_array(\"desc\", $prop)) {\n                        $order_by[$field] = \"DESC\";\n                    }\n                }\n            }\n            $i++;\n        }\n\n        //build select\n        if (sizeof($group)!=0)\n            $sqlrequest = $sqlrequest.\"\".implode(\", \", $group_select);\n        $first = true;\n        foreach ($select as $k => $v) {\n            if($v && sizeof($group)!=0) {\n                $attr= \"\";\n                if ($v == \"GROUP_CONCAT\")\n                    $attr=\"DISTINCT \";\n                $ext= \"\";\n                if (array_key_exists($v, $shortcuts))\n                        $ext = $shortcuts[$v];\n                $kk = str_replace(\"*\", \"_all\", $k);\n                $sqlrequest = $sqlrequest.\", $v($attr$k) AS $kk$ext\";\n                $select_cache[$k] = $kk.$ext;\n            } elseif (sizeof($group) == 0) {\n                if ($first)\n                    $sqlrequest = $sqlrequest.\"$k\";\n                else\n                    $sqlrequest = $sqlrequest.\", $k\";\n                $select_cache[$k] = $k;\n                $first = false;\n            }\n        }\n        $sqlrequest = $sqlrequest.\" FROM $table \";\n        if ($join)\n            $sqlrequest = $sqlrequest.\" LEFT JOIN $join ON $table.id = $join.id \";\n        //build where\n        if 
(sizeof($filter) !=0 )\n            $sqlrequest = $sqlrequest.\" WHERE \";\n        $first = true;\n        foreach ($filter as $k => $v) {\n            if (!$first)\n                $sqlrequest = $sqlrequest.\" AND \";\n            $sqlrequest = $sqlrequest.\"$k \".$operator[$k].\" :k_$k \";\n            $values[\"k_$k\"] = str_replace('*', '%', $v);\n            $first=false;\n        }\n        if ($access != '$SELF') {\n                if (sizeof($filter) != 0)\n                        $sqlrequest.= \" AND \";\n                else\n                        $sqlrequest.= \" WHERE \";\n\n             $values[\"k_uid\"] = $access;\n             $data = plugins_call(\"access_sql_filter\",[\" uid LIKE :k_uid \", $table, $values]);\n             $sqlrequest = $sqlrequest.$data[0];\n             $values = $data[2];\n        }\n        //build group by\n        if (sizeof($group)!=0)\n                $sqlrequest = $sqlrequest.\" GROUP BY \".implode(\", \", $group);\n\n        //order by\n        if (sizeof($order_by) != 0) {\n                $sqlrequest = $sqlrequest.\" ORDER BY \";\n                $first = true;\n                foreach ($order_by as $k => $v) {\n                    if (!$first)\n                        $sqlrequest = $sqlrequest.\", \";\n                    $sqlrequest = $sqlrequest.$select_cache[$k].\" \".$v;\n                    $first = false;\n                }\n        }\n        if ($limit) {\n            $sqlrequest = $sqlrequest.\" LIMIT $limit\";\n        }\n\n    }\n    return array($sqlrequest, $values);\n}\n\n/**\n *\n * Translate string\n *\n * @param string $str word to translate\n * @return string Translation\n */\nfunction l($text)\n{\n    global $lang;\n    if (array_key_exists($text, $lang)) {\n        return $lang[$text];\n    } elseif (endsWith($text, \"_status\")) {\n        return ucfirst(str_replace(\"_\", \" \", $text));\n    }\n    return $text;\n}\n\n/**\n *\n * Check file permissions\n *\n * @param string $file path to 
file\n * @return string File permissions in \"xxx\" format\n */\nfunction getFilePermission($file) {\n    $length = strlen(decoct(fileperms($file)))-3;\n    return substr(decoct(fileperms($file)), $length);\n}\n\n\n/**\n *\n * Set form values from url\n *\n * @return string javascript which set the form value\n */\nfunction setFormValues() {\n\n    $js = \"<script>\\n\";\n    foreach ($_GET as $k => $v) {\n        if (startsWith($k, \"form\")) {\n            $js.=\"document.getElementById('$k').value='$v';\\n\";\n        }\n    }\n    $js.= \"</script>\\n\";\n    return $js;\n}\n\n\n/**\n *\n * Call a specific graph from URL parameter\n *\n * @return string javascript which set the graph call\n */\nfunction callGraph() {\n\n    $js = \"<script>\\n\";\n    foreach ($_GET as $k => $v) {\n        if ($k==\"callGraph\") {\n            $js.=\"GetGraph('$v');\\n\";\n        }\n    }\n    $js.= \"</script>\\n\";\n    return $js;\n}\n\n\n/**\n *\n * Return DBs from type\n *\n * @param string filter\n * @return list of db name\n */\nfunction getDB($filter)\n{\n    global $DBA;\n    $result = array();\n    foreach ($DBA as $k=>$v) {\n\tif (in_array($filter,$v[\"DB_USAGE\"])) {\n\t\tarray_push($result,$k);\n\t}\n    }\n    return $result;\n}\n"
  },
  {
    "path": "web_gui/gui_v3/config.php",
    "content": "<?php\n/*\n * Copyright (C) 2016-2017 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n\n/*****************************\n*        Database            *\n*****************************/\n\n$DEFAULT_DB = \"main\";\n//Support at least mysql/pgsql/sqlite\n\n$DBA = array();\n\n$DBA[$DEFAULT_DB] = [\n\"DB_TYPE\"     => \"mysql\",\n\"DB_HOST\"     => \"localhost\",\n\"DB_NAME\"     => \"\",\n\"DB_USER\"     => \"\",\n\"DB_PASSWD\"   => \"\",\n\"DB_USAGE\"    => array(\"data\",\"config\"),\n];\n/*****************************\n*        Access              *\n*****************************/\n\n$ACCESS_LIST = array();\n$ACCESS_LIST['webgui'] = array();\n$ACCESS_LIST['api-ro'] = array();\n$ACCESS_LIST['dbinfo'] = array();\n$ACCESS_LIST['datatables'] = array();\n$ACCESS_LIST['graphs'] = array();\n$ACCESS_LIST['native_vars'] = array();\n$ACCESS_LIST['native_acct'] = array();\n$ACCESS_LIST['native_files'] = array();\n$ACCESS_LIST['native_entries'] = array();\n$ACCESS_LIST['native_names'] = array();\n\n\n$ACCESS_LIST['tasks'] = array();\n$ACCESS_LIST['customgraph'] = array();\n/* Beware, by default everyone can access to everything */\n\n//Web GUI with graphs and lists\n$ACCESS_LIST['webgui'][] = '*';\n//Read Only API, required for webgui\n$ACCESS_LIST['api-ro'][] = '*';\n//Show database configuration\n$ACCESS_LIST['dbinfo'][] = '*';\n//Datatables\n$ACCESS_LIST['datatables'][] = '*';\n//Graphs\n$ACCESS_LIST['graphs'][] = '*';\n//Native (raw data)\n$ACCESS_LIST['native_vars'][] = '*';\n$ACCESS_LIST['native_acct'][] = '*';\n$ACCESS_LIST['native_files'][] = '*';\n$ACCESS_LIST['native_entries'][] = '*';\n$ACCESS_LIST['native_names'][] = '*';\n\n$ACCESS_LIST['tasks'][] = '*';\n$ACCESS_LIST['customgraph'][] = 
'*';\n/*****************************\n*        General parameters  *\n*****************************/\n//Max row per result\n$MAX_ROWS = 1000;\n\n$JSON_OPTIONS = null;\nif (version_compare(phpversion(), '5.4.0', '>='))\n    $JSON_OPTIONS |= JSON_PRETTY_PRINT;\n\n//Only allow cron from console\n$CONSOLE_CRON_ONLY = true;\n\n//Disable webgui files access\n//Use the access list to disable completly the access to files\n$DISABLE_FILES_PAGE = false;\n\n/*****************************\n*       ChartJS/dataTable    *\n*****************************/\n$CHARTJS = array();\n$CHARTJS['Chart.defaults.global.title.display'] = \"true\";\n$CHARTJS['Chart.defaults.global.defaultFontSize'] = 13;\n$CHARTJS['responsiveChart'] = \"true\";\n$CHARTJS['animationChart'] = \"false\";\n$CHARTJS['showAllTooltipsChart'] = \"true\";\n$CHARTJS['maxdisplayedrows'] = 50000;\n\n/*****************************\n*        Customization       *\n*****************************/\n$CUSTOM['vendor_logo'] = 'images/logoCEA20.jpg';\n$CUSTOM['vendor_url'] = 'http://www-hpc.cea.fr/index-en.htm';\n\n/*****************************\n*        Dynamic Fields      *\n*****************************/\n$FIELD_LIST = array();\n$FIELD_LIST['sz0'] = 'Sizes';\n$FIELD_LIST['sz1'] = 'Sizes';\n$FIELD_LIST['sz32'] = 'Sizes';\n$FIELD_LIST['sz1K'] = 'Sizes';\n$FIELD_LIST['sz32K'] = 'Sizes';\n$FIELD_LIST['sz1M'] = 'Sizes';\n$FIELD_LIST['sz32M'] = 'Sizes';\n$FIELD_LIST['sz1G'] = 'Sizes';\n$FIELD_LIST['sz32G'] = 'Sizes';\n$FIELD_LIST['sz1T'] = 'Sizes';\n$FIELD_LIST['type'] = null;\n$FIELD_LIST['size'] = null;\n$FIELD_LIST['blocks'] = null;\n$FIELD_LIST['count'] = null;\n\n/*****************************\n *        Language            *\n *****************************/\nif (array_key_exists('HTTP_ACCEPT_LANGUAGE', $_SERVER))\n    $lang = substr($_SERVER['HTTP_ACCEPT_LANGUAGE'], 0, 2);\nelse\n    $lang = \"en\";\n\nswitch ($lang) {\ncase \"fr\":\n        $lang_file = 'fr.php';\n        break;\ndefault:\n        $lang_file = 
'en.php';\n}\n\ninclude_once 'lang/sys.php';\ninclude_once 'lang/'.$lang_file;\n\n\n/*****************************\n *        Plugins            *\n *****************************/\n\n$PLUGINS_REG = array();\n$PLUGINS_INST = array();\n\n$PLUGINS_REG[] = \"stackgraph\";\n$PLUGINS_REG[] = \"colorgraph\";\n$PLUGINS_REG[] = \"plugdisplay\";\n$PLUGINS_REG[] = \"internalstats\";\n$PLUGINS_REG[] = \"browser\";\n$PLUGINS_REG[] = \"console\";\n$PLUGINS_REG[] = \"output\";\n#$PLUGINS_REG[] = \"tasks\";\n#$PLUGINS_REG[] = \"customgraph\";\n//This plugin requires a valid ldap conf.\n//$PLUGINS_REG[] = \"ldapauth\";\n\n/*****************************\n *        Local config        *\n *****************************/\n//Allow to override config with local file\nif (!@include \"config_local.php\") {\n        $err = error_get_last();\n        if ($err[\"type\"] == 2){\n                //Clear the last error if the file is not found\n                if (version_compare(phpversion(), '7.0.0', '>='))\n                    error_clear_last();\n        } else {\n                //something get wrong in the file, notify !\n                print_r(get_last_error());\n        }\n}\n\n/****************************\n*        DB Connection       *\n*****************************/\n$DB_LASTERROR = \"\";\n\nforeach ($DBA as $k=>$v) {\n    try {\n        $db[$k] = new PDO($DBA[$k][\"DB_TYPE\"].\":host=\".$DBA[$k][\"DB_HOST\"].\";dbname=\".$DBA[$k][\"DB_NAME\"], $DBA[$k][\"DB_USER\"], $DBA[$k][\"DB_PASSWD\"]);\n    \t$DBA[$k][\"DB_PASSWD\"]=\"****\";\n\t    $db[$k]->exec(\"USE \".$DBA[$k][\"DB_NAME\"].\";\");\n    \t$DBA[$k][\"DB_STATUS\"] = \"Ok\";\n    } catch(Exception $e) {\n    \t$DBA[$k][\"DB_PASSWD\"]=\"****\";\n\t    $DBA[$k][\"DB_STATUS\"] = \"Error\";\n    \t$DBA[$k][\"DB_ERROR\"] = $e->getMessage();\n        $DB_LASTERROR .= $e->getMessage();\n    }\n}\n//Set defaut DB as current\n$CURRENT_DB = $DEFAULT_DB;\n\n?>\n"
  },
  {
    "path": "web_gui/gui_v3/config_local.php.template",
    "content": "<?php\n/*\n * Copyright (C) 2016-2017 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n/*\nComplete the following parameters with your local configuration and\nrename the file to config_local.php\n\nYou can redefine any of the parameter from config.php.\n*/\n\n/*****************************\n*        Database            *\n*****************************/\n\n$DBA[$DEFAULT_DB] = [\n\"DB_TYPE\"     => \"mysql\",\n\"DB_HOST\"     => \"localhost\",\n\"DB_NAME\"     => \"\",\n\"DB_USER\"     => \"\",\n\"DB_PASSWD\"   => \"\",\n\"DB_USAGE\"    => array(\"data\",\"config\"),\n];\n\n/*****************************\n*        Access              *\n*****************************/\n\n/* Beware, by default everyone can access to everything */\n\n/* Uncomment the following lines to reset default access */\n//$ACCESS_LIST['webgui'] = array();\n//$ACCESS_LIST['api-ro'] = array();\n//$ACCESS_LIST['datatables'] = array();\n//$ACCESS_LIST['graphs'] = array();\n//$ACCESS_LIST['native_vars'] = array();\n//$ACCESS_LIST['native_acct'] = array();\n//$ACCESS_LIST['native_files'] = array();\n\n//Web GUI with graphs and lists\n//$ACCESS_LIST['webgui'][] = '*';\n//Read Only API, required for webgui\n//$ACCESS_LIST['api-ro'][] = '*';\n//Datatables\n//$ACCESS_LIST['datatables'][] = '*';\n//Graphs\n//$ACCESS_LIST['graphs'][] = '*';\n//Native (raw data)\n//$ACCESS_LIST['native_vars'][] = '*';\n//$ACCESS_LIST['native_acct'][] = '*';\n//$ACCESS_LIST['native_files'][] = '*';\n\n/*****************************\n*        General parameters  *\n*****************************/\n\n//Max row per result\n//$MAX_ROWS = 1000;\n\n/*****************************\n*           ChartJS          *\n*****************************/\n\n//$CHARTJS = 
array();\n//$CHARTJS['Chart.defaults.global.title.display'] = \"true\";\n//$CHARTJS['Chart.defaults.global.defaultFontSize'] = 13;\n//$CHARTJS['responsiveChart'] = \"true\";\n//$CHARTJS['animationChart'] = \"false\";\n//CHARTJS['showAllTooltipsChart'] = \"true\";\n\n/*****************************\n *        Plugins            *\n *****************************/\n\n/* Uncomment to reset the plugin list */\n//$PLUGINS_REG = array();\n\n/* Plugin names must match directories names plugins folder */\n//$PLUGINS_REG[] = \"stackgraph\";\n//$PLUGINS_REG[] = \"colorgraph\";\n//$PLUGINS_REG[] = \"plugdisplay\";\n//$PLUGINS_REG[] = \"fsinfo\";\n//$PLUGINS_REG[] = \"browser\";\n//$PLUGINS_REG[] = \"ldapauth\";\n//$PLUGINS_REG[] = \"console\";\n\n?>\n"
  },
  {
    "path": "web_gui/gui_v3/cron.php",
    "content": "<?php\n/*\n * Copyright (C) 2018 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n\nrequire_once \"config.php\";\nrequire_once \"common.php\";\nrequire_once \"plugin.php\";\n\nif ( (! PHP_SAPI === \"cli\") && $CONSOLE_CRON_ONLY == true) {\n\techo \"You are not allowed to run cron.php, please check config.php\";\n\texit(1);\n}\n\nplugins_call(\"init\");\n\n\nplugins_call(\"cron\");\n\n?>\n"
  },
  {
    "path": "web_gui/gui_v3/css/bootstrap-datetimepicker.css",
    "content": "/*!\n * Datetimepicker for Bootstrap 3\n * version : 4.17.46\n * https://github.com/Eonasdan/bootstrap-datetimepicker/\n */\n.bootstrap-datetimepicker-widget {\n  list-style: none;\n}\n.bootstrap-datetimepicker-widget.dropdown-menu {\n  display: block;\n  margin: 2px 0;\n  padding: 4px;\n  width: 19em;\n}\n@media (min-width: 768px) {\n  .bootstrap-datetimepicker-widget.dropdown-menu.timepicker-sbs {\n    width: 38em;\n  }\n}\n@media (min-width: 992px) {\n  .bootstrap-datetimepicker-widget.dropdown-menu.timepicker-sbs {\n    width: 38em;\n  }\n}\n@media (min-width: 1200px) {\n  .bootstrap-datetimepicker-widget.dropdown-menu.timepicker-sbs {\n    width: 38em;\n  }\n}\n.bootstrap-datetimepicker-widget.dropdown-menu:before,\n.bootstrap-datetimepicker-widget.dropdown-menu:after {\n  content: '';\n  display: inline-block;\n  position: absolute;\n}\n.bootstrap-datetimepicker-widget.dropdown-menu.bottom:before {\n  border-left: 7px solid transparent;\n  border-right: 7px solid transparent;\n  border-bottom: 7px solid #ccc;\n  border-bottom-color: rgba(0, 0, 0, 0.2);\n  top: -7px;\n  left: 7px;\n}\n.bootstrap-datetimepicker-widget.dropdown-menu.bottom:after {\n  border-left: 6px solid transparent;\n  border-right: 6px solid transparent;\n  border-bottom: 6px solid white;\n  top: -6px;\n  left: 8px;\n}\n.bootstrap-datetimepicker-widget.dropdown-menu.top:before {\n  border-left: 7px solid transparent;\n  border-right: 7px solid transparent;\n  border-top: 7px solid #ccc;\n  border-top-color: rgba(0, 0, 0, 0.2);\n  bottom: -7px;\n  left: 6px;\n}\n.bootstrap-datetimepicker-widget.dropdown-menu.top:after {\n  border-left: 6px solid transparent;\n  border-right: 6px solid transparent;\n  border-top: 6px solid white;\n  bottom: -6px;\n  left: 7px;\n}\n.bootstrap-datetimepicker-widget.dropdown-menu.pull-right:before {\n  left: auto;\n  right: 6px;\n}\n.bootstrap-datetimepicker-widget.dropdown-menu.pull-right:after {\n  left: auto;\n  right: 
7px;\n}\n.bootstrap-datetimepicker-widget .list-unstyled {\n  margin: 0;\n}\n.bootstrap-datetimepicker-widget a[data-action] {\n  padding: 6px 0;\n}\n.bootstrap-datetimepicker-widget a[data-action]:active {\n  box-shadow: none;\n}\n.bootstrap-datetimepicker-widget .timepicker-hour,\n.bootstrap-datetimepicker-widget .timepicker-minute,\n.bootstrap-datetimepicker-widget .timepicker-second {\n  width: 54px;\n  font-weight: bold;\n  font-size: 1.2em;\n  margin: 0;\n}\n.bootstrap-datetimepicker-widget button[data-action] {\n  padding: 6px;\n}\n.bootstrap-datetimepicker-widget .btn[data-action=\"incrementHours\"]::after {\n  position: absolute;\n  width: 1px;\n  height: 1px;\n  margin: -1px;\n  padding: 0;\n  overflow: hidden;\n  clip: rect(0, 0, 0, 0);\n  border: 0;\n  content: \"Increment Hours\";\n}\n.bootstrap-datetimepicker-widget .btn[data-action=\"incrementMinutes\"]::after {\n  position: absolute;\n  width: 1px;\n  height: 1px;\n  margin: -1px;\n  padding: 0;\n  overflow: hidden;\n  clip: rect(0, 0, 0, 0);\n  border: 0;\n  content: \"Increment Minutes\";\n}\n.bootstrap-datetimepicker-widget .btn[data-action=\"decrementHours\"]::after {\n  position: absolute;\n  width: 1px;\n  height: 1px;\n  margin: -1px;\n  padding: 0;\n  overflow: hidden;\n  clip: rect(0, 0, 0, 0);\n  border: 0;\n  content: \"Decrement Hours\";\n}\n.bootstrap-datetimepicker-widget .btn[data-action=\"decrementMinutes\"]::after {\n  position: absolute;\n  width: 1px;\n  height: 1px;\n  margin: -1px;\n  padding: 0;\n  overflow: hidden;\n  clip: rect(0, 0, 0, 0);\n  border: 0;\n  content: \"Decrement Minutes\";\n}\n.bootstrap-datetimepicker-widget .btn[data-action=\"showHours\"]::after {\n  position: absolute;\n  width: 1px;\n  height: 1px;\n  margin: -1px;\n  padding: 0;\n  overflow: hidden;\n  clip: rect(0, 0, 0, 0);\n  border: 0;\n  content: \"Show Hours\";\n}\n.bootstrap-datetimepicker-widget .btn[data-action=\"showMinutes\"]::after {\n  position: absolute;\n  width: 1px;\n  height: 1px;\n  
margin: -1px;\n  padding: 0;\n  overflow: hidden;\n  clip: rect(0, 0, 0, 0);\n  border: 0;\n  content: \"Show Minutes\";\n}\n.bootstrap-datetimepicker-widget .btn[data-action=\"togglePeriod\"]::after {\n  position: absolute;\n  width: 1px;\n  height: 1px;\n  margin: -1px;\n  padding: 0;\n  overflow: hidden;\n  clip: rect(0, 0, 0, 0);\n  border: 0;\n  content: \"Toggle AM/PM\";\n}\n.bootstrap-datetimepicker-widget .btn[data-action=\"clear\"]::after {\n  position: absolute;\n  width: 1px;\n  height: 1px;\n  margin: -1px;\n  padding: 0;\n  overflow: hidden;\n  clip: rect(0, 0, 0, 0);\n  border: 0;\n  content: \"Clear the picker\";\n}\n.bootstrap-datetimepicker-widget .btn[data-action=\"today\"]::after {\n  position: absolute;\n  width: 1px;\n  height: 1px;\n  margin: -1px;\n  padding: 0;\n  overflow: hidden;\n  clip: rect(0, 0, 0, 0);\n  border: 0;\n  content: \"Set the date to today\";\n}\n.bootstrap-datetimepicker-widget .picker-switch {\n  text-align: center;\n}\n.bootstrap-datetimepicker-widget .picker-switch::after {\n  position: absolute;\n  width: 1px;\n  height: 1px;\n  margin: -1px;\n  padding: 0;\n  overflow: hidden;\n  clip: rect(0, 0, 0, 0);\n  border: 0;\n  content: \"Toggle Date and Time Screens\";\n}\n.bootstrap-datetimepicker-widget .picker-switch td {\n  padding: 0;\n  margin: 0;\n  height: auto;\n  width: auto;\n  line-height: inherit;\n}\n.bootstrap-datetimepicker-widget .picker-switch td span {\n  line-height: 2.5;\n  height: 2.5em;\n  width: 100%;\n}\n.bootstrap-datetimepicker-widget table {\n  width: 100%;\n  margin: 0;\n}\n.bootstrap-datetimepicker-widget table td,\n.bootstrap-datetimepicker-widget table th {\n  text-align: center;\n  border-radius: 4px;\n}\n.bootstrap-datetimepicker-widget table th {\n  height: 20px;\n  line-height: 20px;\n  width: 20px;\n}\n.bootstrap-datetimepicker-widget table th.picker-switch {\n  width: 145px;\n}\n.bootstrap-datetimepicker-widget table th.disabled,\n.bootstrap-datetimepicker-widget table th.disabled:hover 
{\n  background: none;\n  color: #777777;\n  cursor: not-allowed;\n}\n.bootstrap-datetimepicker-widget table th.prev::after {\n  position: absolute;\n  width: 1px;\n  height: 1px;\n  margin: -1px;\n  padding: 0;\n  overflow: hidden;\n  clip: rect(0, 0, 0, 0);\n  border: 0;\n  content: \"Previous Month\";\n}\n.bootstrap-datetimepicker-widget table th.next::after {\n  position: absolute;\n  width: 1px;\n  height: 1px;\n  margin: -1px;\n  padding: 0;\n  overflow: hidden;\n  clip: rect(0, 0, 0, 0);\n  border: 0;\n  content: \"Next Month\";\n}\n.bootstrap-datetimepicker-widget table thead tr:first-child th {\n  cursor: pointer;\n}\n.bootstrap-datetimepicker-widget table thead tr:first-child th:hover {\n  background: #eeeeee;\n}\n.bootstrap-datetimepicker-widget table td {\n  height: 54px;\n  line-height: 54px;\n  width: 54px;\n}\n.bootstrap-datetimepicker-widget table td.cw {\n  font-size: .8em;\n  height: 20px;\n  line-height: 20px;\n  color: #777777;\n}\n.bootstrap-datetimepicker-widget table td.day {\n  height: 20px;\n  line-height: 20px;\n  width: 20px;\n}\n.bootstrap-datetimepicker-widget table td.day:hover,\n.bootstrap-datetimepicker-widget table td.hour:hover,\n.bootstrap-datetimepicker-widget table td.minute:hover,\n.bootstrap-datetimepicker-widget table td.second:hover {\n  background: #eeeeee;\n  cursor: pointer;\n}\n.bootstrap-datetimepicker-widget table td.old,\n.bootstrap-datetimepicker-widget table td.new {\n  color: #777777;\n}\n.bootstrap-datetimepicker-widget table td.today {\n  position: relative;\n}\n.bootstrap-datetimepicker-widget table td.today:before {\n  content: '';\n  display: inline-block;\n  border: solid transparent;\n  border-width: 0 0 7px 7px;\n  border-bottom-color: #337ab7;\n  border-top-color: rgba(0, 0, 0, 0.2);\n  position: absolute;\n  bottom: 4px;\n  right: 4px;\n}\n.bootstrap-datetimepicker-widget table td.active,\n.bootstrap-datetimepicker-widget table td.active:hover {\n  background-color: #337ab7;\n  color: #fff;\n  
text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.25);\n}\n.bootstrap-datetimepicker-widget table td.active.today:before {\n  border-bottom-color: #fff;\n}\n.bootstrap-datetimepicker-widget table td.disabled,\n.bootstrap-datetimepicker-widget table td.disabled:hover {\n  background: none;\n  color: #777777;\n  cursor: not-allowed;\n}\n.bootstrap-datetimepicker-widget table td span {\n  display: inline-block;\n  width: 54px;\n  height: 54px;\n  line-height: 54px;\n  margin: 2px 1.5px;\n  cursor: pointer;\n  border-radius: 4px;\n}\n.bootstrap-datetimepicker-widget table td span:hover {\n  background: #eeeeee;\n}\n.bootstrap-datetimepicker-widget table td span.active {\n  background-color: #337ab7;\n  color: #fff;\n  text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.25);\n}\n.bootstrap-datetimepicker-widget table td span.old {\n  color: #777777;\n}\n.bootstrap-datetimepicker-widget table td span.disabled,\n.bootstrap-datetimepicker-widget table td span.disabled:hover {\n  background: none;\n  color: #777777;\n  cursor: not-allowed;\n}\n.bootstrap-datetimepicker-widget.usetwentyfour td.hour {\n  height: 27px;\n  line-height: 27px;\n}\n.bootstrap-datetimepicker-widget.wider {\n  width: 21em;\n}\n.bootstrap-datetimepicker-widget .datepicker-decades .decade {\n  line-height: 1.8em !important;\n}\n.input-group.date .input-group-addon {\n  cursor: pointer;\n}\n.sr-only {\n  position: absolute;\n  width: 1px;\n  height: 1px;\n  margin: -1px;\n  padding: 0;\n  overflow: hidden;\n  clip: rect(0, 0, 0, 0);\n  border: 0;\n}\n"
  },
  {
    "path": "web_gui/gui_v3/css/bootstrap-slider.css",
    "content": "/*! =======================================================\n                      VERSION  9.7.1              \n========================================================= */\n/*! =========================================================\n * bootstrap-slider.js\n *\n * Maintainers:\n *\t\tKyle Kemp\n *\t\t\t- Twitter: @seiyria\n *\t\t\t- Github:  seiyria\n *\t\tRohit Kalkur\n *\t\t\t- Twitter: @Rovolutionary\n *\t\t\t- Github:  rovolution\n *\n * =========================================================\n  *\n * bootstrap-slider is released under the MIT License\n * Copyright (c) 2017 Kyle Kemp, Rohit Kalkur, and contributors\n * \n * Permission is hereby granted, free of charge, to any person\n * obtaining a copy of this software and associated documentation\n * files (the \"Software\"), to deal in the Software without\n * restriction, including without limitation the rights to use,\n * copy, modify, merge, publish, distribute, sublicense, and/or sell\n * copies of the Software, and to permit persons to whom the\n * Software is furnished to do so, subject to the following\n * conditions:\n * \n * The above copyright notice and this permission notice shall be\n * included in all copies or substantial portions of the Software.\n * \n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES\n * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\n * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR\n * OTHER DEALINGS IN THE SOFTWARE.\n *\n * ========================================================= */\n.slider {\n  display: inline-block;\n  vertical-align: middle;\n  position: relative;\n}\n.slider.slider-horizontal {\n  width: 210px;\n  height: 20px;\n}\n.slider.slider-horizontal .slider-track {\n  height: 10px;\n  width: 100%;\n  margin-top: -5px;\n  top: 50%;\n  left: 0;\n}\n.slider.slider-horizontal .slider-selection,\n.slider.slider-horizontal .slider-track-low,\n.slider.slider-horizontal .slider-track-high {\n  height: 100%;\n  top: 0;\n  bottom: 0;\n}\n.slider.slider-horizontal .slider-tick,\n.slider.slider-horizontal .slider-handle {\n  margin-left: -10px;\n}\n.slider.slider-horizontal .slider-tick.triangle,\n.slider.slider-horizontal .slider-handle.triangle {\n  position: relative;\n  top: 50%;\n  transform: translateY(-50%);\n  border-width: 0 10px 10px 10px;\n  width: 0;\n  height: 0;\n  border-bottom-color: #0480be;\n  margin-top: 0;\n}\n.slider.slider-horizontal .slider-tick-container {\n  white-space: nowrap;\n  position: absolute;\n  top: 0;\n  left: 0;\n  width: 100%;\n}\n.slider.slider-horizontal .slider-tick-label-container {\n  white-space: nowrap;\n  margin-top: 20px;\n}\n.slider.slider-horizontal .slider-tick-label-container .slider-tick-label {\n  padding-top: 4px;\n  display: inline-block;\n  text-align: center;\n}\n.slider.slider-horizontal.slider-rtl .slider-track {\n  left: initial;\n  right: 0;\n}\n.slider.slider-horizontal.slider-rtl .slider-tick,\n.slider.slider-horizontal.slider-rtl .slider-handle {\n  margin-left: initial;\n  margin-right: -10px;\n}\n.slider.slider-horizontal.slider-rtl .slider-tick-container {\n  left: initial;\n  right: 0;\n}\n.slider.slider-vertical {\n  height: 210px;\n  
width: 20px;\n}\n.slider.slider-vertical .slider-track {\n  width: 10px;\n  height: 100%;\n  left: 25%;\n  top: 0;\n}\n.slider.slider-vertical .slider-selection {\n  width: 100%;\n  left: 0;\n  top: 0;\n  bottom: 0;\n}\n.slider.slider-vertical .slider-track-low,\n.slider.slider-vertical .slider-track-high {\n  width: 100%;\n  left: 0;\n  right: 0;\n}\n.slider.slider-vertical .slider-tick,\n.slider.slider-vertical .slider-handle {\n  margin-top: -10px;\n}\n.slider.slider-vertical .slider-tick.triangle,\n.slider.slider-vertical .slider-handle.triangle {\n  border-width: 10px 0 10px 10px;\n  width: 1px;\n  height: 1px;\n  border-left-color: #0480be;\n  border-right-color: #0480be;\n  margin-left: 0;\n  margin-right: 0;\n}\n.slider.slider-vertical .slider-tick-label-container {\n  white-space: nowrap;\n}\n.slider.slider-vertical .slider-tick-label-container .slider-tick-label {\n  padding-left: 4px;\n}\n.slider.slider-vertical.slider-rtl .slider-track {\n  left: initial;\n  right: 25%;\n}\n.slider.slider-vertical.slider-rtl .slider-selection {\n  left: initial;\n  right: 0;\n}\n.slider.slider-vertical.slider-rtl .slider-tick.triangle,\n.slider.slider-vertical.slider-rtl .slider-handle.triangle {\n  border-width: 10px 10px 10px 0;\n}\n.slider.slider-vertical.slider-rtl .slider-tick-label-container .slider-tick-label {\n  padding-left: initial;\n  padding-right: 4px;\n}\n.slider.slider-disabled .slider-handle {\n  background-image: -webkit-linear-gradient(top, #dfdfdf 0%, #bebebe 100%);\n  background-image: -o-linear-gradient(top, #dfdfdf 0%, #bebebe 100%);\n  background-image: linear-gradient(to bottom, #dfdfdf 0%, #bebebe 100%);\n  background-repeat: repeat-x;\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdfdfdf', endColorstr='#ffbebebe', GradientType=0);\n}\n.slider.slider-disabled .slider-track {\n  background-image: -webkit-linear-gradient(top, #e5e5e5 0%, #e9e9e9 100%);\n  background-image: -o-linear-gradient(top, #e5e5e5 0%, #e9e9e9 
100%);\n  background-image: linear-gradient(to bottom, #e5e5e5 0%, #e9e9e9 100%);\n  background-repeat: repeat-x;\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffe5e5e5', endColorstr='#ffe9e9e9', GradientType=0);\n  cursor: not-allowed;\n}\n.slider input {\n  display: none;\n}\n.slider .tooltip.top {\n  margin-top: -36px;\n}\n.slider .tooltip-inner {\n  white-space: nowrap;\n  max-width: none;\n}\n.slider .hide {\n  display: none;\n}\n.slider-track {\n  position: absolute;\n  cursor: pointer;\n  background-image: -webkit-linear-gradient(top, #f5f5f5 0%, #f9f9f9 100%);\n  background-image: -o-linear-gradient(top, #f5f5f5 0%, #f9f9f9 100%);\n  background-image: linear-gradient(to bottom, #f5f5f5 0%, #f9f9f9 100%);\n  background-repeat: repeat-x;\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5', endColorstr='#fff9f9f9', GradientType=0);\n  -webkit-box-shadow: inset 0 1px 2px rgba(0, 0, 0, 0.1);\n  box-shadow: inset 0 1px 2px rgba(0, 0, 0, 0.1);\n  border-radius: 4px;\n}\n.slider-selection {\n  position: absolute;\n  background-image: -webkit-linear-gradient(top, #f9f9f9 0%, #f5f5f5 100%);\n  background-image: -o-linear-gradient(top, #f9f9f9 0%, #f5f5f5 100%);\n  background-image: linear-gradient(to bottom, #f9f9f9 0%, #f5f5f5 100%);\n  background-repeat: repeat-x;\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff9f9f9', endColorstr='#fff5f5f5', GradientType=0);\n  -webkit-box-shadow: inset 0 -1px 0 rgba(0, 0, 0, 0.15);\n  box-shadow: inset 0 -1px 0 rgba(0, 0, 0, 0.15);\n  -webkit-box-sizing: border-box;\n  -moz-box-sizing: border-box;\n  box-sizing: border-box;\n  border-radius: 4px;\n}\n.slider-selection.tick-slider-selection {\n  background-image: -webkit-linear-gradient(top, #89cdef 0%, #81bfde 100%);\n  background-image: -o-linear-gradient(top, #89cdef 0%, #81bfde 100%);\n  background-image: linear-gradient(to bottom, #89cdef 0%, #81bfde 100%);\n  background-repeat: repeat-x;\n  filter: 
progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff89cdef', endColorstr='#ff81bfde', GradientType=0);\n}\n.slider-track-low,\n.slider-track-high {\n  position: absolute;\n  background: transparent;\n  -webkit-box-sizing: border-box;\n  -moz-box-sizing: border-box;\n  box-sizing: border-box;\n  border-radius: 4px;\n}\n.slider-handle {\n  position: absolute;\n  top: 0;\n  width: 20px;\n  height: 20px;\n  background-color: #337ab7;\n  background-image: -webkit-linear-gradient(top, #149bdf 0%, #0480be 100%);\n  background-image: -o-linear-gradient(top, #149bdf 0%, #0480be 100%);\n  background-image: linear-gradient(to bottom, #149bdf 0%, #0480be 100%);\n  background-repeat: repeat-x;\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff149bdf', endColorstr='#ff0480be', GradientType=0);\n  filter: none;\n  -webkit-box-shadow: inset 0 1px 0 rgba(255,255,255,.2), 0 1px 2px rgba(0,0,0,.05);\n  box-shadow: inset 0 1px 0 rgba(255,255,255,.2), 0 1px 2px rgba(0,0,0,.05);\n  border: 0px solid transparent;\n}\n.slider-handle.round {\n  border-radius: 50%;\n}\n.slider-handle.triangle {\n  background: transparent none;\n}\n.slider-handle.custom {\n  background: transparent none;\n}\n.slider-handle.custom::before {\n  line-height: 20px;\n  font-size: 20px;\n  content: '\\2605';\n  color: #726204;\n}\n.slider-tick {\n  position: absolute;\n  width: 20px;\n  height: 20px;\n  background-image: -webkit-linear-gradient(top, #f9f9f9 0%, #f5f5f5 100%);\n  background-image: -o-linear-gradient(top, #f9f9f9 0%, #f5f5f5 100%);\n  background-image: linear-gradient(to bottom, #f9f9f9 0%, #f5f5f5 100%);\n  background-repeat: repeat-x;\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff9f9f9', endColorstr='#fff5f5f5', GradientType=0);\n  -webkit-box-shadow: inset 0 -1px 0 rgba(0, 0, 0, 0.15);\n  box-shadow: inset 0 -1px 0 rgba(0, 0, 0, 0.15);\n  -webkit-box-sizing: border-box;\n  -moz-box-sizing: border-box;\n  box-sizing: border-box;\n  filter: 
none;\n  opacity: 0.8;\n  border: 0px solid transparent;\n}\n.slider-tick.round {\n  border-radius: 50%;\n}\n.slider-tick.triangle {\n  background: transparent none;\n}\n.slider-tick.custom {\n  background: transparent none;\n}\n.slider-tick.custom::before {\n  line-height: 20px;\n  font-size: 20px;\n  content: '\\2605';\n  color: #726204;\n}\n.slider-tick.in-selection {\n  background-image: -webkit-linear-gradient(top, #89cdef 0%, #81bfde 100%);\n  background-image: -o-linear-gradient(top, #89cdef 0%, #81bfde 100%);\n  background-image: linear-gradient(to bottom, #89cdef 0%, #81bfde 100%);\n  background-repeat: repeat-x;\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff89cdef', endColorstr='#ff81bfde', GradientType=0);\n  opacity: 1;\n}\n"
  },
  {
    "path": "web_gui/gui_v3/css/bootstrap-theme.css",
    "content": "/*!\n * Bootstrap v3.3.6 (http://getbootstrap.com)\n * Copyright 2011-2015 Twitter, Inc.\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)\n */\n.btn-default,\n.btn-primary,\n.btn-success,\n.btn-info,\n.btn-warning,\n.btn-danger {\n  text-shadow: 0 -1px 0 rgba(0, 0, 0, .2);\n  -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, .15), 0 1px 1px rgba(0, 0, 0, .075);\n          box-shadow: inset 0 1px 0 rgba(255, 255, 255, .15), 0 1px 1px rgba(0, 0, 0, .075);\n}\n.btn-default:active,\n.btn-primary:active,\n.btn-success:active,\n.btn-info:active,\n.btn-warning:active,\n.btn-danger:active,\n.btn-default.active,\n.btn-primary.active,\n.btn-success.active,\n.btn-info.active,\n.btn-warning.active,\n.btn-danger.active {\n  -webkit-box-shadow: inset 0 3px 5px rgba(0, 0, 0, .125);\n          box-shadow: inset 0 3px 5px rgba(0, 0, 0, .125);\n}\n.btn-default.disabled,\n.btn-primary.disabled,\n.btn-success.disabled,\n.btn-info.disabled,\n.btn-warning.disabled,\n.btn-danger.disabled,\n.btn-default[disabled],\n.btn-primary[disabled],\n.btn-success[disabled],\n.btn-info[disabled],\n.btn-warning[disabled],\n.btn-danger[disabled],\nfieldset[disabled] .btn-default,\nfieldset[disabled] .btn-primary,\nfieldset[disabled] .btn-success,\nfieldset[disabled] .btn-info,\nfieldset[disabled] .btn-warning,\nfieldset[disabled] .btn-danger {\n  -webkit-box-shadow: none;\n          box-shadow: none;\n}\n.btn-default .badge,\n.btn-primary .badge,\n.btn-success .badge,\n.btn-info .badge,\n.btn-warning .badge,\n.btn-danger .badge {\n  text-shadow: none;\n}\n.btn:active,\n.btn.active {\n  background-image: none;\n}\n.btn-default {\n  text-shadow: 0 1px 0 #fff;\n  background-image: -webkit-linear-gradient(top, #fff 0%, #e0e0e0 100%);\n  background-image:      -o-linear-gradient(top, #fff 0%, #e0e0e0 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#fff), to(#e0e0e0));\n  background-image:         linear-gradient(to bottom, 
#fff 0%, #e0e0e0 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff', endColorstr='#ffe0e0e0', GradientType=0);\n  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n  background-repeat: repeat-x;\n  border-color: #dbdbdb;\n  border-color: #ccc;\n}\n.btn-default:hover,\n.btn-default:focus {\n  background-color: #e0e0e0;\n  background-position: 0 -15px;\n}\n.btn-default:active,\n.btn-default.active {\n  background-color: #e0e0e0;\n  border-color: #dbdbdb;\n}\n.btn-default.disabled,\n.btn-default[disabled],\nfieldset[disabled] .btn-default,\n.btn-default.disabled:hover,\n.btn-default[disabled]:hover,\nfieldset[disabled] .btn-default:hover,\n.btn-default.disabled:focus,\n.btn-default[disabled]:focus,\nfieldset[disabled] .btn-default:focus,\n.btn-default.disabled.focus,\n.btn-default[disabled].focus,\nfieldset[disabled] .btn-default.focus,\n.btn-default.disabled:active,\n.btn-default[disabled]:active,\nfieldset[disabled] .btn-default:active,\n.btn-default.disabled.active,\n.btn-default[disabled].active,\nfieldset[disabled] .btn-default.active {\n  background-color: #e0e0e0;\n  background-image: none;\n}\n.btn-primary {\n  background-image: -webkit-linear-gradient(top, #337ab7 0%, #265a88 100%);\n  background-image:      -o-linear-gradient(top, #337ab7 0%, #265a88 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#337ab7), to(#265a88));\n  background-image:         linear-gradient(to bottom, #337ab7 0%, #265a88 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff265a88', GradientType=0);\n  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n  background-repeat: repeat-x;\n  border-color: #245580;\n}\n.btn-primary:hover,\n.btn-primary:focus {\n  background-color: #265a88;\n  background-position: 0 -15px;\n}\n.btn-primary:active,\n.btn-primary.active {\n  background-color: #265a88;\n  border-color: 
#245580;\n}\n.btn-primary.disabled,\n.btn-primary[disabled],\nfieldset[disabled] .btn-primary,\n.btn-primary.disabled:hover,\n.btn-primary[disabled]:hover,\nfieldset[disabled] .btn-primary:hover,\n.btn-primary.disabled:focus,\n.btn-primary[disabled]:focus,\nfieldset[disabled] .btn-primary:focus,\n.btn-primary.disabled.focus,\n.btn-primary[disabled].focus,\nfieldset[disabled] .btn-primary.focus,\n.btn-primary.disabled:active,\n.btn-primary[disabled]:active,\nfieldset[disabled] .btn-primary:active,\n.btn-primary.disabled.active,\n.btn-primary[disabled].active,\nfieldset[disabled] .btn-primary.active {\n  background-color: #265a88;\n  background-image: none;\n}\n.btn-success {\n  background-image: -webkit-linear-gradient(top, #5cb85c 0%, #419641 100%);\n  background-image:      -o-linear-gradient(top, #5cb85c 0%, #419641 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#5cb85c), to(#419641));\n  background-image:         linear-gradient(to bottom, #5cb85c 0%, #419641 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c', endColorstr='#ff419641', GradientType=0);\n  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n  background-repeat: repeat-x;\n  border-color: #3e8f3e;\n}\n.btn-success:hover,\n.btn-success:focus {\n  background-color: #419641;\n  background-position: 0 -15px;\n}\n.btn-success:active,\n.btn-success.active {\n  background-color: #419641;\n  border-color: #3e8f3e;\n}\n.btn-success.disabled,\n.btn-success[disabled],\nfieldset[disabled] .btn-success,\n.btn-success.disabled:hover,\n.btn-success[disabled]:hover,\nfieldset[disabled] .btn-success:hover,\n.btn-success.disabled:focus,\n.btn-success[disabled]:focus,\nfieldset[disabled] .btn-success:focus,\n.btn-success.disabled.focus,\n.btn-success[disabled].focus,\nfieldset[disabled] .btn-success.focus,\n.btn-success.disabled:active,\n.btn-success[disabled]:active,\nfieldset[disabled] 
.btn-success:active,\n.btn-success.disabled.active,\n.btn-success[disabled].active,\nfieldset[disabled] .btn-success.active {\n  background-color: #419641;\n  background-image: none;\n}\n.btn-info {\n  background-image: -webkit-linear-gradient(top, #5bc0de 0%, #2aabd2 100%);\n  background-image:      -o-linear-gradient(top, #5bc0de 0%, #2aabd2 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#5bc0de), to(#2aabd2));\n  background-image:         linear-gradient(to bottom, #5bc0de 0%, #2aabd2 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de', endColorstr='#ff2aabd2', GradientType=0);\n  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n  background-repeat: repeat-x;\n  border-color: #28a4c9;\n}\n.btn-info:hover,\n.btn-info:focus {\n  background-color: #2aabd2;\n  background-position: 0 -15px;\n}\n.btn-info:active,\n.btn-info.active {\n  background-color: #2aabd2;\n  border-color: #28a4c9;\n}\n.btn-info.disabled,\n.btn-info[disabled],\nfieldset[disabled] .btn-info,\n.btn-info.disabled:hover,\n.btn-info[disabled]:hover,\nfieldset[disabled] .btn-info:hover,\n.btn-info.disabled:focus,\n.btn-info[disabled]:focus,\nfieldset[disabled] .btn-info:focus,\n.btn-info.disabled.focus,\n.btn-info[disabled].focus,\nfieldset[disabled] .btn-info.focus,\n.btn-info.disabled:active,\n.btn-info[disabled]:active,\nfieldset[disabled] .btn-info:active,\n.btn-info.disabled.active,\n.btn-info[disabled].active,\nfieldset[disabled] .btn-info.active {\n  background-color: #2aabd2;\n  background-image: none;\n}\n.btn-warning {\n  background-image: -webkit-linear-gradient(top, #f0ad4e 0%, #eb9316 100%);\n  background-image:      -o-linear-gradient(top, #f0ad4e 0%, #eb9316 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#f0ad4e), to(#eb9316));\n  background-image:         linear-gradient(to bottom, #f0ad4e 0%, #eb9316 100%);\n  filter: 
progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e', endColorstr='#ffeb9316', GradientType=0);\n  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n  background-repeat: repeat-x;\n  border-color: #e38d13;\n}\n.btn-warning:hover,\n.btn-warning:focus {\n  background-color: #eb9316;\n  background-position: 0 -15px;\n}\n.btn-warning:active,\n.btn-warning.active {\n  background-color: #eb9316;\n  border-color: #e38d13;\n}\n.btn-warning.disabled,\n.btn-warning[disabled],\nfieldset[disabled] .btn-warning,\n.btn-warning.disabled:hover,\n.btn-warning[disabled]:hover,\nfieldset[disabled] .btn-warning:hover,\n.btn-warning.disabled:focus,\n.btn-warning[disabled]:focus,\nfieldset[disabled] .btn-warning:focus,\n.btn-warning.disabled.focus,\n.btn-warning[disabled].focus,\nfieldset[disabled] .btn-warning.focus,\n.btn-warning.disabled:active,\n.btn-warning[disabled]:active,\nfieldset[disabled] .btn-warning:active,\n.btn-warning.disabled.active,\n.btn-warning[disabled].active,\nfieldset[disabled] .btn-warning.active {\n  background-color: #eb9316;\n  background-image: none;\n}\n.btn-danger {\n  background-image: -webkit-linear-gradient(top, #d9534f 0%, #c12e2a 100%);\n  background-image:      -o-linear-gradient(top, #d9534f 0%, #c12e2a 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#d9534f), to(#c12e2a));\n  background-image:         linear-gradient(to bottom, #d9534f 0%, #c12e2a 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f', endColorstr='#ffc12e2a', GradientType=0);\n  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n  background-repeat: repeat-x;\n  border-color: #b92c28;\n}\n.btn-danger:hover,\n.btn-danger:focus {\n  background-color: #c12e2a;\n  background-position: 0 -15px;\n}\n.btn-danger:active,\n.btn-danger.active {\n  background-color: #c12e2a;\n  border-color: #b92c28;\n}\n.btn-danger.disabled,\n.btn-danger[disabled],\nfieldset[disabled] 
.btn-danger,\n.btn-danger.disabled:hover,\n.btn-danger[disabled]:hover,\nfieldset[disabled] .btn-danger:hover,\n.btn-danger.disabled:focus,\n.btn-danger[disabled]:focus,\nfieldset[disabled] .btn-danger:focus,\n.btn-danger.disabled.focus,\n.btn-danger[disabled].focus,\nfieldset[disabled] .btn-danger.focus,\n.btn-danger.disabled:active,\n.btn-danger[disabled]:active,\nfieldset[disabled] .btn-danger:active,\n.btn-danger.disabled.active,\n.btn-danger[disabled].active,\nfieldset[disabled] .btn-danger.active {\n  background-color: #c12e2a;\n  background-image: none;\n}\n.thumbnail,\n.img-thumbnail {\n  -webkit-box-shadow: 0 1px 2px rgba(0, 0, 0, .075);\n          box-shadow: 0 1px 2px rgba(0, 0, 0, .075);\n}\n.dropdown-menu > li > a:hover,\n.dropdown-menu > li > a:focus {\n  background-color: #e8e8e8;\n  background-image: -webkit-linear-gradient(top, #f5f5f5 0%, #e8e8e8 100%);\n  background-image:      -o-linear-gradient(top, #f5f5f5 0%, #e8e8e8 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#f5f5f5), to(#e8e8e8));\n  background-image:         linear-gradient(to bottom, #f5f5f5 0%, #e8e8e8 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5', endColorstr='#ffe8e8e8', GradientType=0);\n  background-repeat: repeat-x;\n}\n.dropdown-menu > .active > a,\n.dropdown-menu > .active > a:hover,\n.dropdown-menu > .active > a:focus {\n  background-color: #2e6da4;\n  background-image: -webkit-linear-gradient(top, #337ab7 0%, #2e6da4 100%);\n  background-image:      -o-linear-gradient(top, #337ab7 0%, #2e6da4 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#337ab7), to(#2e6da4));\n  background-image:         linear-gradient(to bottom, #337ab7 0%, #2e6da4 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2e6da4', GradientType=0);\n  background-repeat: repeat-x;\n}\n.navbar-default {\n  background-image: -webkit-linear-gradient(top, 
#fff 0%, #f8f8f8 100%);\n  background-image:      -o-linear-gradient(top, #fff 0%, #f8f8f8 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#fff), to(#f8f8f8));\n  background-image:         linear-gradient(to bottom, #fff 0%, #f8f8f8 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff', endColorstr='#fff8f8f8', GradientType=0);\n  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n  background-repeat: repeat-x;\n  border-radius: 4px;\n  -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, .15), 0 1px 5px rgba(0, 0, 0, .075);\n          box-shadow: inset 0 1px 0 rgba(255, 255, 255, .15), 0 1px 5px rgba(0, 0, 0, .075);\n}\n.navbar-default .navbar-nav > .open > a,\n.navbar-default .navbar-nav > .active > a {\n  background-image: -webkit-linear-gradient(top, #dbdbdb 0%, #e2e2e2 100%);\n  background-image:      -o-linear-gradient(top, #dbdbdb 0%, #e2e2e2 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#dbdbdb), to(#e2e2e2));\n  background-image:         linear-gradient(to bottom, #dbdbdb 0%, #e2e2e2 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdbdbdb', endColorstr='#ffe2e2e2', GradientType=0);\n  background-repeat: repeat-x;\n  -webkit-box-shadow: inset 0 3px 9px rgba(0, 0, 0, .075);\n          box-shadow: inset 0 3px 9px rgba(0, 0, 0, .075);\n}\n.navbar-brand,\n.navbar-nav > li > a {\n  text-shadow: 0 1px 0 rgba(255, 255, 255, .25);\n}\n.navbar-inverse {\n  background-image: -webkit-linear-gradient(top, #3c3c3c 0%, #222 100%);\n  background-image:      -o-linear-gradient(top, #3c3c3c 0%, #222 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#3c3c3c), to(#222));\n  background-image:         linear-gradient(to bottom, #3c3c3c 0%, #222 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff3c3c3c', endColorstr='#ff222222', GradientType=0);\n  filter: 
progid:DXImageTransform.Microsoft.gradient(enabled = false);\n  background-repeat: repeat-x;\n  border-radius: 4px;\n}\n.navbar-inverse .navbar-nav > .open > a,\n.navbar-inverse .navbar-nav > .active > a {\n  background-image: -webkit-linear-gradient(top, #080808 0%, #0f0f0f 100%);\n  background-image:      -o-linear-gradient(top, #080808 0%, #0f0f0f 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#080808), to(#0f0f0f));\n  background-image:         linear-gradient(to bottom, #080808 0%, #0f0f0f 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff080808', endColorstr='#ff0f0f0f', GradientType=0);\n  background-repeat: repeat-x;\n  -webkit-box-shadow: inset 0 3px 9px rgba(0, 0, 0, .25);\n          box-shadow: inset 0 3px 9px rgba(0, 0, 0, .25);\n}\n.navbar-inverse .navbar-brand,\n.navbar-inverse .navbar-nav > li > a {\n  text-shadow: 0 -1px 0 rgba(0, 0, 0, .25);\n}\n.navbar-static-top,\n.navbar-fixed-top,\n.navbar-fixed-bottom {\n  border-radius: 0;\n}\n@media (max-width: 767px) {\n  .navbar .navbar-nav .open .dropdown-menu > .active > a,\n  .navbar .navbar-nav .open .dropdown-menu > .active > a:hover,\n  .navbar .navbar-nav .open .dropdown-menu > .active > a:focus {\n    color: #fff;\n    background-image: -webkit-linear-gradient(top, #337ab7 0%, #2e6da4 100%);\n    background-image:      -o-linear-gradient(top, #337ab7 0%, #2e6da4 100%);\n    background-image: -webkit-gradient(linear, left top, left bottom, from(#337ab7), to(#2e6da4));\n    background-image:         linear-gradient(to bottom, #337ab7 0%, #2e6da4 100%);\n    filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2e6da4', GradientType=0);\n    background-repeat: repeat-x;\n  }\n}\n.alert {\n  text-shadow: 0 1px 0 rgba(255, 255, 255, .2);\n  -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, .25), 0 1px 2px rgba(0, 0, 0, .05);\n          box-shadow: inset 0 1px 0 rgba(255, 255, 255, .25), 0 1px 2px 
rgba(0, 0, 0, .05);\n}\n.alert-success {\n  background-image: -webkit-linear-gradient(top, #dff0d8 0%, #c8e5bc 100%);\n  background-image:      -o-linear-gradient(top, #dff0d8 0%, #c8e5bc 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#dff0d8), to(#c8e5bc));\n  background-image:         linear-gradient(to bottom, #dff0d8 0%, #c8e5bc 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8', endColorstr='#ffc8e5bc', GradientType=0);\n  background-repeat: repeat-x;\n  border-color: #b2dba1;\n}\n.alert-info {\n  background-image: -webkit-linear-gradient(top, #d9edf7 0%, #b9def0 100%);\n  background-image:      -o-linear-gradient(top, #d9edf7 0%, #b9def0 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#d9edf7), to(#b9def0));\n  background-image:         linear-gradient(to bottom, #d9edf7 0%, #b9def0 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7', endColorstr='#ffb9def0', GradientType=0);\n  background-repeat: repeat-x;\n  border-color: #9acfea;\n}\n.alert-warning {\n  background-image: -webkit-linear-gradient(top, #fcf8e3 0%, #f8efc0 100%);\n  background-image:      -o-linear-gradient(top, #fcf8e3 0%, #f8efc0 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#fcf8e3), to(#f8efc0));\n  background-image:         linear-gradient(to bottom, #fcf8e3 0%, #f8efc0 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3', endColorstr='#fff8efc0', GradientType=0);\n  background-repeat: repeat-x;\n  border-color: #f5e79e;\n}\n.alert-danger {\n  background-image: -webkit-linear-gradient(top, #f2dede 0%, #e7c3c3 100%);\n  background-image:      -o-linear-gradient(top, #f2dede 0%, #e7c3c3 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#f2dede), to(#e7c3c3));\n  background-image:         linear-gradient(to bottom, #f2dede 0%, #e7c3c3 100%);\n  filter: 
progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede', endColorstr='#ffe7c3c3', GradientType=0);\n  background-repeat: repeat-x;\n  border-color: #dca7a7;\n}\n.progress {\n  background-image: -webkit-linear-gradient(top, #ebebeb 0%, #f5f5f5 100%);\n  background-image:      -o-linear-gradient(top, #ebebeb 0%, #f5f5f5 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#ebebeb), to(#f5f5f5));\n  background-image:         linear-gradient(to bottom, #ebebeb 0%, #f5f5f5 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffebebeb', endColorstr='#fff5f5f5', GradientType=0);\n  background-repeat: repeat-x;\n}\n.progress-bar {\n  background-image: -webkit-linear-gradient(top, #337ab7 0%, #286090 100%);\n  background-image:      -o-linear-gradient(top, #337ab7 0%, #286090 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#337ab7), to(#286090));\n  background-image:         linear-gradient(to bottom, #337ab7 0%, #286090 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff286090', GradientType=0);\n  background-repeat: repeat-x;\n}\n.progress-bar-success {\n  background-image: -webkit-linear-gradient(top, #5cb85c 0%, #449d44 100%);\n  background-image:      -o-linear-gradient(top, #5cb85c 0%, #449d44 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#5cb85c), to(#449d44));\n  background-image:         linear-gradient(to bottom, #5cb85c 0%, #449d44 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c', endColorstr='#ff449d44', GradientType=0);\n  background-repeat: repeat-x;\n}\n.progress-bar-info {\n  background-image: -webkit-linear-gradient(top, #5bc0de 0%, #31b0d5 100%);\n  background-image:      -o-linear-gradient(top, #5bc0de 0%, #31b0d5 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#5bc0de), to(#31b0d5));\n  
background-image:         linear-gradient(to bottom, #5bc0de 0%, #31b0d5 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de', endColorstr='#ff31b0d5', GradientType=0);\n  background-repeat: repeat-x;\n}\n.progress-bar-warning {\n  background-image: -webkit-linear-gradient(top, #f0ad4e 0%, #ec971f 100%);\n  background-image:      -o-linear-gradient(top, #f0ad4e 0%, #ec971f 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#f0ad4e), to(#ec971f));\n  background-image:         linear-gradient(to bottom, #f0ad4e 0%, #ec971f 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e', endColorstr='#ffec971f', GradientType=0);\n  background-repeat: repeat-x;\n}\n.progress-bar-danger {\n  background-image: -webkit-linear-gradient(top, #d9534f 0%, #c9302c 100%);\n  background-image:      -o-linear-gradient(top, #d9534f 0%, #c9302c 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#d9534f), to(#c9302c));\n  background-image:         linear-gradient(to bottom, #d9534f 0%, #c9302c 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f', endColorstr='#ffc9302c', GradientType=0);\n  background-repeat: repeat-x;\n}\n.progress-bar-striped {\n  background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);\n  background-image:      -o-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);\n  background-image:         linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);\n}\n.list-group {\n  border-radius: 4px;\n  -webkit-box-shadow: 0 1px 2px 
rgba(0, 0, 0, .075);\n          box-shadow: 0 1px 2px rgba(0, 0, 0, .075);\n}\n.list-group-item.active,\n.list-group-item.active:hover,\n.list-group-item.active:focus {\n  text-shadow: 0 -1px 0 #286090;\n  background-image: -webkit-linear-gradient(top, #337ab7 0%, #2b669a 100%);\n  background-image:      -o-linear-gradient(top, #337ab7 0%, #2b669a 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#337ab7), to(#2b669a));\n  background-image:         linear-gradient(to bottom, #337ab7 0%, #2b669a 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2b669a', GradientType=0);\n  background-repeat: repeat-x;\n  border-color: #2b669a;\n}\n.list-group-item.active .badge,\n.list-group-item.active:hover .badge,\n.list-group-item.active:focus .badge {\n  text-shadow: none;\n}\n.panel {\n  -webkit-box-shadow: 0 1px 2px rgba(0, 0, 0, .05);\n          box-shadow: 0 1px 2px rgba(0, 0, 0, .05);\n}\n.panel-default > .panel-heading {\n  background-image: -webkit-linear-gradient(top, #f5f5f5 0%, #e8e8e8 100%);\n  background-image:      -o-linear-gradient(top, #f5f5f5 0%, #e8e8e8 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#f5f5f5), to(#e8e8e8));\n  background-image:         linear-gradient(to bottom, #f5f5f5 0%, #e8e8e8 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5', endColorstr='#ffe8e8e8', GradientType=0);\n  background-repeat: repeat-x;\n}\n.panel-primary > .panel-heading {\n  background-image: -webkit-linear-gradient(top, #337ab7 0%, #2e6da4 100%);\n  background-image:      -o-linear-gradient(top, #337ab7 0%, #2e6da4 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#337ab7), to(#2e6da4));\n  background-image:         linear-gradient(to bottom, #337ab7 0%, #2e6da4 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2e6da4', 
GradientType=0);\n  background-repeat: repeat-x;\n}\n.panel-success > .panel-heading {\n  background-image: -webkit-linear-gradient(top, #dff0d8 0%, #d0e9c6 100%);\n  background-image:      -o-linear-gradient(top, #dff0d8 0%, #d0e9c6 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#dff0d8), to(#d0e9c6));\n  background-image:         linear-gradient(to bottom, #dff0d8 0%, #d0e9c6 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8', endColorstr='#ffd0e9c6', GradientType=0);\n  background-repeat: repeat-x;\n}\n.panel-info > .panel-heading {\n  background-image: -webkit-linear-gradient(top, #d9edf7 0%, #c4e3f3 100%);\n  background-image:      -o-linear-gradient(top, #d9edf7 0%, #c4e3f3 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#d9edf7), to(#c4e3f3));\n  background-image:         linear-gradient(to bottom, #d9edf7 0%, #c4e3f3 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7', endColorstr='#ffc4e3f3', GradientType=0);\n  background-repeat: repeat-x;\n}\n.panel-warning > .panel-heading {\n  background-image: -webkit-linear-gradient(top, #fcf8e3 0%, #faf2cc 100%);\n  background-image:      -o-linear-gradient(top, #fcf8e3 0%, #faf2cc 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#fcf8e3), to(#faf2cc));\n  background-image:         linear-gradient(to bottom, #fcf8e3 0%, #faf2cc 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3', endColorstr='#fffaf2cc', GradientType=0);\n  background-repeat: repeat-x;\n}\n.panel-danger > .panel-heading {\n  background-image: -webkit-linear-gradient(top, #f2dede 0%, #ebcccc 100%);\n  background-image:      -o-linear-gradient(top, #f2dede 0%, #ebcccc 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#f2dede), to(#ebcccc));\n  background-image:         linear-gradient(to bottom, #f2dede 0%, #ebcccc 
100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede', endColorstr='#ffebcccc', GradientType=0);\n  background-repeat: repeat-x;\n}\n.well {\n  background-image: -webkit-linear-gradient(top, #e8e8e8 0%, #f5f5f5 100%);\n  background-image:      -o-linear-gradient(top, #e8e8e8 0%, #f5f5f5 100%);\n  background-image: -webkit-gradient(linear, left top, left bottom, from(#e8e8e8), to(#f5f5f5));\n  background-image:         linear-gradient(to bottom, #e8e8e8 0%, #f5f5f5 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffe8e8e8', endColorstr='#fff5f5f5', GradientType=0);\n  background-repeat: repeat-x;\n  border-color: #dcdcdc;\n  -webkit-box-shadow: inset 0 1px 3px rgba(0, 0, 0, .05), 0 1px 0 rgba(255, 255, 255, .1);\n          box-shadow: inset 0 1px 3px rgba(0, 0, 0, .05), 0 1px 0 rgba(255, 255, 255, .1);\n}\n/*# sourceMappingURL=bootstrap-theme.css.map */\n"
  },
  {
    "path": "web_gui/gui_v3/css/bootstrap.css",
    "content": "/*!\n * Bootstrap v3.3.6 (http://getbootstrap.com)\n * Copyright 2011-2015 Twitter, Inc.\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)\n */\n/*! normalize.css v3.0.3 | MIT License | github.com/necolas/normalize.css */\nhtml {\n  font-family: sans-serif;\n  -webkit-text-size-adjust: 100%;\n      -ms-text-size-adjust: 100%;\n}\nbody {\n  margin: 0;\n}\narticle,\naside,\ndetails,\nfigcaption,\nfigure,\nfooter,\nheader,\nhgroup,\nmain,\nmenu,\nnav,\nsection,\nsummary {\n  display: block;\n}\naudio,\ncanvas,\nprogress,\nvideo {\n  display: inline-block;\n  vertical-align: baseline;\n}\naudio:not([controls]) {\n  display: none;\n  height: 0;\n}\n[hidden],\ntemplate {\n  display: none;\n}\na {\n  background-color: transparent;\n}\na:active,\na:hover {\n  outline: 0;\n}\nabbr[title] {\n  border-bottom: 1px dotted;\n}\nb,\nstrong {\n  font-weight: bold;\n}\ndfn {\n  font-style: italic;\n}\nh1 {\n  margin: .67em 0;\n  font-size: 2em;\n}\nmark {\n  color: #000;\n  background: #ff0;\n}\nsmall {\n  font-size: 80%;\n}\nsub,\nsup {\n  position: relative;\n  font-size: 75%;\n  line-height: 0;\n  vertical-align: baseline;\n}\nsup {\n  top: -.5em;\n}\nsub {\n  bottom: -.25em;\n}\nimg {\n  border: 0;\n}\nsvg:not(:root) {\n  overflow: hidden;\n}\nfigure {\n  margin: 1em 40px;\n}\nhr {\n  height: 0;\n  -webkit-box-sizing: content-box;\n     -moz-box-sizing: content-box;\n          box-sizing: content-box;\n}\npre {\n  overflow: auto;\n}\ncode,\nkbd,\npre,\nsamp {\n  font-family: monospace, monospace;\n  font-size: 1em;\n}\nbutton,\ninput,\noptgroup,\nselect,\ntextarea {\n  margin: 0;\n  font: inherit;\n  color: inherit;\n}\nbutton {\n  overflow: visible;\n}\nbutton,\nselect {\n  text-transform: none;\n}\nbutton,\nhtml input[type=\"button\"],\ninput[type=\"reset\"],\ninput[type=\"submit\"] {\n  -webkit-appearance: button;\n  cursor: pointer;\n}\nbutton[disabled],\nhtml input[disabled] {\n  cursor: 
default;\n}\nbutton::-moz-focus-inner,\ninput::-moz-focus-inner {\n  padding: 0;\n  border: 0;\n}\ninput {\n  line-height: normal;\n}\ninput[type=\"checkbox\"],\ninput[type=\"radio\"] {\n  -webkit-box-sizing: border-box;\n     -moz-box-sizing: border-box;\n          box-sizing: border-box;\n  padding: 0;\n}\ninput[type=\"number\"]::-webkit-inner-spin-button,\ninput[type=\"number\"]::-webkit-outer-spin-button {\n  height: auto;\n}\ninput[type=\"search\"] {\n  -webkit-box-sizing: content-box;\n     -moz-box-sizing: content-box;\n          box-sizing: content-box;\n  -webkit-appearance: textfield;\n}\ninput[type=\"search\"]::-webkit-search-cancel-button,\ninput[type=\"search\"]::-webkit-search-decoration {\n  -webkit-appearance: none;\n}\nfieldset {\n  padding: .35em .625em .75em;\n  margin: 0 2px;\n  border: 1px solid #c0c0c0;\n}\nlegend {\n  padding: 0;\n  border: 0;\n}\ntextarea {\n  overflow: auto;\n}\noptgroup {\n  font-weight: bold;\n}\ntable {\n  border-spacing: 0;\n  border-collapse: collapse;\n}\ntd,\nth {\n  padding: 0;\n}\n/*! 
Source: https://github.com/h5bp/html5-boilerplate/blob/master/src/css/main.css */\n@media print {\n  *,\n  *:before,\n  *:after {\n    color: #000 !important;\n    text-shadow: none !important;\n    background: transparent !important;\n    -webkit-box-shadow: none !important;\n            box-shadow: none !important;\n  }\n  a,\n  a:visited {\n    text-decoration: underline;\n  }\n  a[href]:after {\n    content: \" (\" attr(href) \")\";\n  }\n  abbr[title]:after {\n    content: \" (\" attr(title) \")\";\n  }\n  a[href^=\"#\"]:after,\n  a[href^=\"javascript:\"]:after {\n    content: \"\";\n  }\n  pre,\n  blockquote {\n    border: 1px solid #999;\n\n    page-break-inside: avoid;\n  }\n  thead {\n    display: table-header-group;\n  }\n  tr,\n  img {\n    page-break-inside: avoid;\n  }\n  img {\n    max-width: 100% !important;\n  }\n  p,\n  h2,\n  h3 {\n    orphans: 3;\n    widows: 3;\n  }\n  h2,\n  h3 {\n    page-break-after: avoid;\n  }\n  .navbar {\n    display: none;\n  }\n  .btn > .caret,\n  .dropup > .btn > .caret {\n    border-top-color: #000 !important;\n  }\n  .label {\n    border: 1px solid #000;\n  }\n  .table {\n    border-collapse: collapse !important;\n  }\n  .table td,\n  .table th {\n    background-color: #fff !important;\n  }\n  .table-bordered th,\n  .table-bordered td {\n    border: 1px solid #ddd !important;\n  }\n}\n@font-face {\n  font-family: 'Glyphicons Halflings';\n\n  src: url('../fonts/glyphicons-halflings-regular.eot');\n  src: url('../fonts/glyphicons-halflings-regular.eot?#iefix') format('embedded-opentype'), url('../fonts/glyphicons-halflings-regular.woff2') format('woff2'), url('../fonts/glyphicons-halflings-regular.woff') format('woff'), url('../fonts/glyphicons-halflings-regular.ttf') format('truetype'), url('../fonts/glyphicons-halflings-regular.svg#glyphicons_halflingsregular') format('svg');\n}\n.glyphicon {\n  position: relative;\n  top: 1px;\n  display: inline-block;\n  font-family: 'Glyphicons Halflings';\n  font-style: normal;\n 
 font-weight: normal;\n  line-height: 1;\n\n  -webkit-font-smoothing: antialiased;\n  -moz-osx-font-smoothing: grayscale;\n}\n.glyphicon-asterisk:before {\n  content: \"\\002a\";\n}\n.glyphicon-plus:before {\n  content: \"\\002b\";\n}\n.glyphicon-euro:before,\n.glyphicon-eur:before {\n  content: \"\\20ac\";\n}\n.glyphicon-minus:before {\n  content: \"\\2212\";\n}\n.glyphicon-cloud:before {\n  content: \"\\2601\";\n}\n.glyphicon-envelope:before {\n  content: \"\\2709\";\n}\n.glyphicon-pencil:before {\n  content: \"\\270f\";\n}\n.glyphicon-glass:before {\n  content: \"\\e001\";\n}\n.glyphicon-music:before {\n  content: \"\\e002\";\n}\n.glyphicon-search:before {\n  content: \"\\e003\";\n}\n.glyphicon-heart:before {\n  content: \"\\e005\";\n}\n.glyphicon-star:before {\n  content: \"\\e006\";\n}\n.glyphicon-star-empty:before {\n  content: \"\\e007\";\n}\n.glyphicon-user:before {\n  content: \"\\e008\";\n}\n.glyphicon-film:before {\n  content: \"\\e009\";\n}\n.glyphicon-th-large:before {\n  content: \"\\e010\";\n}\n.glyphicon-th:before {\n  content: \"\\e011\";\n}\n.glyphicon-th-list:before {\n  content: \"\\e012\";\n}\n.glyphicon-ok:before {\n  content: \"\\e013\";\n}\n.glyphicon-remove:before {\n  content: \"\\e014\";\n}\n.glyphicon-zoom-in:before {\n  content: \"\\e015\";\n}\n.glyphicon-zoom-out:before {\n  content: \"\\e016\";\n}\n.glyphicon-off:before {\n  content: \"\\e017\";\n}\n.glyphicon-signal:before {\n  content: \"\\e018\";\n}\n.glyphicon-cog:before {\n  content: \"\\e019\";\n}\n.glyphicon-trash:before {\n  content: \"\\e020\";\n}\n.glyphicon-home:before {\n  content: \"\\e021\";\n}\n.glyphicon-file:before {\n  content: \"\\e022\";\n}\n.glyphicon-time:before {\n  content: \"\\e023\";\n}\n.glyphicon-road:before {\n  content: \"\\e024\";\n}\n.glyphicon-download-alt:before {\n  content: \"\\e025\";\n}\n.glyphicon-download:before {\n  content: \"\\e026\";\n}\n.glyphicon-upload:before {\n  content: \"\\e027\";\n}\n.glyphicon-inbox:before {\n  content: 
\"\\e028\";\n}\n.glyphicon-play-circle:before {\n  content: \"\\e029\";\n}\n.glyphicon-repeat:before {\n  content: \"\\e030\";\n}\n.glyphicon-refresh:before {\n  content: \"\\e031\";\n}\n.glyphicon-list-alt:before {\n  content: \"\\e032\";\n}\n.glyphicon-lock:before {\n  content: \"\\e033\";\n}\n.glyphicon-flag:before {\n  content: \"\\e034\";\n}\n.glyphicon-headphones:before {\n  content: \"\\e035\";\n}\n.glyphicon-volume-off:before {\n  content: \"\\e036\";\n}\n.glyphicon-volume-down:before {\n  content: \"\\e037\";\n}\n.glyphicon-volume-up:before {\n  content: \"\\e038\";\n}\n.glyphicon-qrcode:before {\n  content: \"\\e039\";\n}\n.glyphicon-barcode:before {\n  content: \"\\e040\";\n}\n.glyphicon-tag:before {\n  content: \"\\e041\";\n}\n.glyphicon-tags:before {\n  content: \"\\e042\";\n}\n.glyphicon-book:before {\n  content: \"\\e043\";\n}\n.glyphicon-bookmark:before {\n  content: \"\\e044\";\n}\n.glyphicon-print:before {\n  content: \"\\e045\";\n}\n.glyphicon-camera:before {\n  content: \"\\e046\";\n}\n.glyphicon-font:before {\n  content: \"\\e047\";\n}\n.glyphicon-bold:before {\n  content: \"\\e048\";\n}\n.glyphicon-italic:before {\n  content: \"\\e049\";\n}\n.glyphicon-text-height:before {\n  content: \"\\e050\";\n}\n.glyphicon-text-width:before {\n  content: \"\\e051\";\n}\n.glyphicon-align-left:before {\n  content: \"\\e052\";\n}\n.glyphicon-align-center:before {\n  content: \"\\e053\";\n}\n.glyphicon-align-right:before {\n  content: \"\\e054\";\n}\n.glyphicon-align-justify:before {\n  content: \"\\e055\";\n}\n.glyphicon-list:before {\n  content: \"\\e056\";\n}\n.glyphicon-indent-left:before {\n  content: \"\\e057\";\n}\n.glyphicon-indent-right:before {\n  content: \"\\e058\";\n}\n.glyphicon-facetime-video:before {\n  content: \"\\e059\";\n}\n.glyphicon-picture:before {\n  content: \"\\e060\";\n}\n.glyphicon-map-marker:before {\n  content: \"\\e062\";\n}\n.glyphicon-adjust:before {\n  content: \"\\e063\";\n}\n.glyphicon-tint:before {\n  content: 
\"\\e064\";\n}\n.glyphicon-edit:before {\n  content: \"\\e065\";\n}\n.glyphicon-share:before {\n  content: \"\\e066\";\n}\n.glyphicon-check:before {\n  content: \"\\e067\";\n}\n.glyphicon-move:before {\n  content: \"\\e068\";\n}\n.glyphicon-step-backward:before {\n  content: \"\\e069\";\n}\n.glyphicon-fast-backward:before {\n  content: \"\\e070\";\n}\n.glyphicon-backward:before {\n  content: \"\\e071\";\n}\n.glyphicon-play:before {\n  content: \"\\e072\";\n}\n.glyphicon-pause:before {\n  content: \"\\e073\";\n}\n.glyphicon-stop:before {\n  content: \"\\e074\";\n}\n.glyphicon-forward:before {\n  content: \"\\e075\";\n}\n.glyphicon-fast-forward:before {\n  content: \"\\e076\";\n}\n.glyphicon-step-forward:before {\n  content: \"\\e077\";\n}\n.glyphicon-eject:before {\n  content: \"\\e078\";\n}\n.glyphicon-chevron-left:before {\n  content: \"\\e079\";\n}\n.glyphicon-chevron-right:before {\n  content: \"\\e080\";\n}\n.glyphicon-plus-sign:before {\n  content: \"\\e081\";\n}\n.glyphicon-minus-sign:before {\n  content: \"\\e082\";\n}\n.glyphicon-remove-sign:before {\n  content: \"\\e083\";\n}\n.glyphicon-ok-sign:before {\n  content: \"\\e084\";\n}\n.glyphicon-question-sign:before {\n  content: \"\\e085\";\n}\n.glyphicon-info-sign:before {\n  content: \"\\e086\";\n}\n.glyphicon-screenshot:before {\n  content: \"\\e087\";\n}\n.glyphicon-remove-circle:before {\n  content: \"\\e088\";\n}\n.glyphicon-ok-circle:before {\n  content: \"\\e089\";\n}\n.glyphicon-ban-circle:before {\n  content: \"\\e090\";\n}\n.glyphicon-arrow-left:before {\n  content: \"\\e091\";\n}\n.glyphicon-arrow-right:before {\n  content: \"\\e092\";\n}\n.glyphicon-arrow-up:before {\n  content: \"\\e093\";\n}\n.glyphicon-arrow-down:before {\n  content: \"\\e094\";\n}\n.glyphicon-share-alt:before {\n  content: \"\\e095\";\n}\n.glyphicon-resize-full:before {\n  content: \"\\e096\";\n}\n.glyphicon-resize-small:before {\n  content: \"\\e097\";\n}\n.glyphicon-exclamation-sign:before {\n  content: 
\"\\e101\";\n}\n.glyphicon-gift:before {\n  content: \"\\e102\";\n}\n.glyphicon-leaf:before {\n  content: \"\\e103\";\n}\n.glyphicon-fire:before {\n  content: \"\\e104\";\n}\n.glyphicon-eye-open:before {\n  content: \"\\e105\";\n}\n.glyphicon-eye-close:before {\n  content: \"\\e106\";\n}\n.glyphicon-warning-sign:before {\n  content: \"\\e107\";\n}\n.glyphicon-plane:before {\n  content: \"\\e108\";\n}\n.glyphicon-calendar:before {\n  content: \"\\e109\";\n}\n.glyphicon-random:before {\n  content: \"\\e110\";\n}\n.glyphicon-comment:before {\n  content: \"\\e111\";\n}\n.glyphicon-magnet:before {\n  content: \"\\e112\";\n}\n.glyphicon-chevron-up:before {\n  content: \"\\e113\";\n}\n.glyphicon-chevron-down:before {\n  content: \"\\e114\";\n}\n.glyphicon-retweet:before {\n  content: \"\\e115\";\n}\n.glyphicon-shopping-cart:before {\n  content: \"\\e116\";\n}\n.glyphicon-folder-close:before {\n  content: \"\\e117\";\n}\n.glyphicon-folder-open:before {\n  content: \"\\e118\";\n}\n.glyphicon-resize-vertical:before {\n  content: \"\\e119\";\n}\n.glyphicon-resize-horizontal:before {\n  content: \"\\e120\";\n}\n.glyphicon-hdd:before {\n  content: \"\\e121\";\n}\n.glyphicon-bullhorn:before {\n  content: \"\\e122\";\n}\n.glyphicon-bell:before {\n  content: \"\\e123\";\n}\n.glyphicon-certificate:before {\n  content: \"\\e124\";\n}\n.glyphicon-thumbs-up:before {\n  content: \"\\e125\";\n}\n.glyphicon-thumbs-down:before {\n  content: \"\\e126\";\n}\n.glyphicon-hand-right:before {\n  content: \"\\e127\";\n}\n.glyphicon-hand-left:before {\n  content: \"\\e128\";\n}\n.glyphicon-hand-up:before {\n  content: \"\\e129\";\n}\n.glyphicon-hand-down:before {\n  content: \"\\e130\";\n}\n.glyphicon-circle-arrow-right:before {\n  content: \"\\e131\";\n}\n.glyphicon-circle-arrow-left:before {\n  content: \"\\e132\";\n}\n.glyphicon-circle-arrow-up:before {\n  content: \"\\e133\";\n}\n.glyphicon-circle-arrow-down:before {\n  content: \"\\e134\";\n}\n.glyphicon-globe:before {\n  content: 
\"\\e135\";\n}\n.glyphicon-wrench:before {\n  content: \"\\e136\";\n}\n.glyphicon-tasks:before {\n  content: \"\\e137\";\n}\n.glyphicon-filter:before {\n  content: \"\\e138\";\n}\n.glyphicon-briefcase:before {\n  content: \"\\e139\";\n}\n.glyphicon-fullscreen:before {\n  content: \"\\e140\";\n}\n.glyphicon-dashboard:before {\n  content: \"\\e141\";\n}\n.glyphicon-paperclip:before {\n  content: \"\\e142\";\n}\n.glyphicon-heart-empty:before {\n  content: \"\\e143\";\n}\n.glyphicon-link:before {\n  content: \"\\e144\";\n}\n.glyphicon-phone:before {\n  content: \"\\e145\";\n}\n.glyphicon-pushpin:before {\n  content: \"\\e146\";\n}\n.glyphicon-usd:before {\n  content: \"\\e148\";\n}\n.glyphicon-gbp:before {\n  content: \"\\e149\";\n}\n.glyphicon-sort:before {\n  content: \"\\e150\";\n}\n.glyphicon-sort-by-alphabet:before {\n  content: \"\\e151\";\n}\n.glyphicon-sort-by-alphabet-alt:before {\n  content: \"\\e152\";\n}\n.glyphicon-sort-by-order:before {\n  content: \"\\e153\";\n}\n.glyphicon-sort-by-order-alt:before {\n  content: \"\\e154\";\n}\n.glyphicon-sort-by-attributes:before {\n  content: \"\\e155\";\n}\n.glyphicon-sort-by-attributes-alt:before {\n  content: \"\\e156\";\n}\n.glyphicon-unchecked:before {\n  content: \"\\e157\";\n}\n.glyphicon-expand:before {\n  content: \"\\e158\";\n}\n.glyphicon-collapse-down:before {\n  content: \"\\e159\";\n}\n.glyphicon-collapse-up:before {\n  content: \"\\e160\";\n}\n.glyphicon-log-in:before {\n  content: \"\\e161\";\n}\n.glyphicon-flash:before {\n  content: \"\\e162\";\n}\n.glyphicon-log-out:before {\n  content: \"\\e163\";\n}\n.glyphicon-new-window:before {\n  content: \"\\e164\";\n}\n.glyphicon-record:before {\n  content: \"\\e165\";\n}\n.glyphicon-save:before {\n  content: \"\\e166\";\n}\n.glyphicon-open:before {\n  content: \"\\e167\";\n}\n.glyphicon-saved:before {\n  content: \"\\e168\";\n}\n.glyphicon-import:before {\n  content: \"\\e169\";\n}\n.glyphicon-export:before {\n  content: \"\\e170\";\n}\n.glyphicon-send:before 
{\n  content: \"\\e171\";\n}\n.glyphicon-floppy-disk:before {\n  content: \"\\e172\";\n}\n.glyphicon-floppy-saved:before {\n  content: \"\\e173\";\n}\n.glyphicon-floppy-remove:before {\n  content: \"\\e174\";\n}\n.glyphicon-floppy-save:before {\n  content: \"\\e175\";\n}\n.glyphicon-floppy-open:before {\n  content: \"\\e176\";\n}\n.glyphicon-credit-card:before {\n  content: \"\\e177\";\n}\n.glyphicon-transfer:before {\n  content: \"\\e178\";\n}\n.glyphicon-cutlery:before {\n  content: \"\\e179\";\n}\n.glyphicon-header:before {\n  content: \"\\e180\";\n}\n.glyphicon-compressed:before {\n  content: \"\\e181\";\n}\n.glyphicon-earphone:before {\n  content: \"\\e182\";\n}\n.glyphicon-phone-alt:before {\n  content: \"\\e183\";\n}\n.glyphicon-tower:before {\n  content: \"\\e184\";\n}\n.glyphicon-stats:before {\n  content: \"\\e185\";\n}\n.glyphicon-sd-video:before {\n  content: \"\\e186\";\n}\n.glyphicon-hd-video:before {\n  content: \"\\e187\";\n}\n.glyphicon-subtitles:before {\n  content: \"\\e188\";\n}\n.glyphicon-sound-stereo:before {\n  content: \"\\e189\";\n}\n.glyphicon-sound-dolby:before {\n  content: \"\\e190\";\n}\n.glyphicon-sound-5-1:before {\n  content: \"\\e191\";\n}\n.glyphicon-sound-6-1:before {\n  content: \"\\e192\";\n}\n.glyphicon-sound-7-1:before {\n  content: \"\\e193\";\n}\n.glyphicon-copyright-mark:before {\n  content: \"\\e194\";\n}\n.glyphicon-registration-mark:before {\n  content: \"\\e195\";\n}\n.glyphicon-cloud-download:before {\n  content: \"\\e197\";\n}\n.glyphicon-cloud-upload:before {\n  content: \"\\e198\";\n}\n.glyphicon-tree-conifer:before {\n  content: \"\\e199\";\n}\n.glyphicon-tree-deciduous:before {\n  content: \"\\e200\";\n}\n.glyphicon-cd:before {\n  content: \"\\e201\";\n}\n.glyphicon-save-file:before {\n  content: \"\\e202\";\n}\n.glyphicon-open-file:before {\n  content: \"\\e203\";\n}\n.glyphicon-level-up:before {\n  content: \"\\e204\";\n}\n.glyphicon-copy:before {\n  content: \"\\e205\";\n}\n.glyphicon-paste:before {\n  
content: \"\\e206\";\n}\n.glyphicon-alert:before {\n  content: \"\\e209\";\n}\n.glyphicon-equalizer:before {\n  content: \"\\e210\";\n}\n.glyphicon-king:before {\n  content: \"\\e211\";\n}\n.glyphicon-queen:before {\n  content: \"\\e212\";\n}\n.glyphicon-pawn:before {\n  content: \"\\e213\";\n}\n.glyphicon-bishop:before {\n  content: \"\\e214\";\n}\n.glyphicon-knight:before {\n  content: \"\\e215\";\n}\n.glyphicon-baby-formula:before {\n  content: \"\\e216\";\n}\n.glyphicon-tent:before {\n  content: \"\\26fa\";\n}\n.glyphicon-blackboard:before {\n  content: \"\\e218\";\n}\n.glyphicon-bed:before {\n  content: \"\\e219\";\n}\n.glyphicon-apple:before {\n  content: \"\\f8ff\";\n}\n.glyphicon-erase:before {\n  content: \"\\e221\";\n}\n.glyphicon-hourglass:before {\n  content: \"\\231b\";\n}\n.glyphicon-lamp:before {\n  content: \"\\e223\";\n}\n.glyphicon-duplicate:before {\n  content: \"\\e224\";\n}\n.glyphicon-piggy-bank:before {\n  content: \"\\e225\";\n}\n.glyphicon-scissors:before {\n  content: \"\\e226\";\n}\n.glyphicon-bitcoin:before {\n  content: \"\\e227\";\n}\n.glyphicon-btc:before {\n  content: \"\\e227\";\n}\n.glyphicon-xbt:before {\n  content: \"\\e227\";\n}\n.glyphicon-yen:before {\n  content: \"\\00a5\";\n}\n.glyphicon-jpy:before {\n  content: \"\\00a5\";\n}\n.glyphicon-ruble:before {\n  content: \"\\20bd\";\n}\n.glyphicon-rub:before {\n  content: \"\\20bd\";\n}\n.glyphicon-scale:before {\n  content: \"\\e230\";\n}\n.glyphicon-ice-lolly:before {\n  content: \"\\e231\";\n}\n.glyphicon-ice-lolly-tasted:before {\n  content: \"\\e232\";\n}\n.glyphicon-education:before {\n  content: \"\\e233\";\n}\n.glyphicon-option-horizontal:before {\n  content: \"\\e234\";\n}\n.glyphicon-option-vertical:before {\n  content: \"\\e235\";\n}\n.glyphicon-menu-hamburger:before {\n  content: \"\\e236\";\n}\n.glyphicon-modal-window:before {\n  content: \"\\e237\";\n}\n.glyphicon-oil:before {\n  content: \"\\e238\";\n}\n.glyphicon-grain:before {\n  content: 
\"\\e239\";\n}\n.glyphicon-sunglasses:before {\n  content: \"\\e240\";\n}\n.glyphicon-text-size:before {\n  content: \"\\e241\";\n}\n.glyphicon-text-color:before {\n  content: \"\\e242\";\n}\n.glyphicon-text-background:before {\n  content: \"\\e243\";\n}\n.glyphicon-object-align-top:before {\n  content: \"\\e244\";\n}\n.glyphicon-object-align-bottom:before {\n  content: \"\\e245\";\n}\n.glyphicon-object-align-horizontal:before {\n  content: \"\\e246\";\n}\n.glyphicon-object-align-left:before {\n  content: \"\\e247\";\n}\n.glyphicon-object-align-vertical:before {\n  content: \"\\e248\";\n}\n.glyphicon-object-align-right:before {\n  content: \"\\e249\";\n}\n.glyphicon-triangle-right:before {\n  content: \"\\e250\";\n}\n.glyphicon-triangle-left:before {\n  content: \"\\e251\";\n}\n.glyphicon-triangle-bottom:before {\n  content: \"\\e252\";\n}\n.glyphicon-triangle-top:before {\n  content: \"\\e253\";\n}\n.glyphicon-console:before {\n  content: \"\\e254\";\n}\n.glyphicon-superscript:before {\n  content: \"\\e255\";\n}\n.glyphicon-subscript:before {\n  content: \"\\e256\";\n}\n.glyphicon-menu-left:before {\n  content: \"\\e257\";\n}\n.glyphicon-menu-right:before {\n  content: \"\\e258\";\n}\n.glyphicon-menu-down:before {\n  content: \"\\e259\";\n}\n.glyphicon-menu-up:before {\n  content: \"\\e260\";\n}\n* {\n  -webkit-box-sizing: border-box;\n     -moz-box-sizing: border-box;\n          box-sizing: border-box;\n}\n*:before,\n*:after {\n  -webkit-box-sizing: border-box;\n     -moz-box-sizing: border-box;\n          box-sizing: border-box;\n}\nhtml {\n  font-size: 10px;\n\n  -webkit-tap-highlight-color: rgba(0, 0, 0, 0);\n}\nbody {\n  font-family: \"Helvetica Neue\", Helvetica, Arial, sans-serif;\n  font-size: 14px;\n  line-height: 1.42857143;\n  color: #333;\n  background-color: #fff;\n}\ninput,\nbutton,\nselect,\ntextarea {\n  font-family: inherit;\n  font-size: inherit;\n  line-height: inherit;\n}\na {\n  color: #337ab7;\n  text-decoration: none;\n}\na:hover,\na:focus 
{\n  color: #23527c;\n  text-decoration: underline;\n}\na:focus {\n  outline: thin dotted;\n  outline: 5px auto -webkit-focus-ring-color;\n  outline-offset: -2px;\n}\nfigure {\n  margin: 0;\n}\nimg {\n  vertical-align: middle;\n}\n.img-responsive,\n.thumbnail > img,\n.thumbnail a > img,\n.carousel-inner > .item > img,\n.carousel-inner > .item > a > img {\n  display: block;\n  max-width: 100%;\n  height: auto;\n}\n.img-rounded {\n  border-radius: 6px;\n}\n.img-thumbnail {\n  display: inline-block;\n  max-width: 100%;\n  height: auto;\n  padding: 4px;\n  line-height: 1.42857143;\n  background-color: #fff;\n  border: 1px solid #ddd;\n  border-radius: 4px;\n  -webkit-transition: all .2s ease-in-out;\n       -o-transition: all .2s ease-in-out;\n          transition: all .2s ease-in-out;\n}\n.img-circle {\n  border-radius: 50%;\n}\nhr {\n  margin-top: 20px;\n  margin-bottom: 20px;\n  border: 0;\n  border-top: 1px solid #eee;\n}\n.sr-only {\n  position: absolute;\n  width: 1px;\n  height: 1px;\n  padding: 0;\n  margin: -1px;\n  overflow: hidden;\n  clip: rect(0, 0, 0, 0);\n  border: 0;\n}\n.sr-only-focusable:active,\n.sr-only-focusable:focus {\n  position: static;\n  width: auto;\n  height: auto;\n  margin: 0;\n  overflow: visible;\n  clip: auto;\n}\n[role=\"button\"] {\n  cursor: pointer;\n}\nh1,\nh2,\nh3,\nh4,\nh5,\nh6,\n.h1,\n.h2,\n.h3,\n.h4,\n.h5,\n.h6 {\n  font-family: inherit;\n  font-weight: 500;\n  line-height: 1.1;\n  color: inherit;\n}\nh1 small,\nh2 small,\nh3 small,\nh4 small,\nh5 small,\nh6 small,\n.h1 small,\n.h2 small,\n.h3 small,\n.h4 small,\n.h5 small,\n.h6 small,\nh1 .small,\nh2 .small,\nh3 .small,\nh4 .small,\nh5 .small,\nh6 .small,\n.h1 .small,\n.h2 .small,\n.h3 .small,\n.h4 .small,\n.h5 .small,\n.h6 .small {\n  font-weight: normal;\n  line-height: 1;\n  color: #777;\n}\nh1,\n.h1,\nh2,\n.h2,\nh3,\n.h3 {\n  margin-top: 20px;\n  margin-bottom: 10px;\n}\nh1 small,\n.h1 small,\nh2 small,\n.h2 small,\nh3 small,\n.h3 small,\nh1 .small,\n.h1 .small,\nh2 
.small,\n.h2 .small,\nh3 .small,\n.h3 .small {\n  font-size: 65%;\n}\nh4,\n.h4,\nh5,\n.h5,\nh6,\n.h6 {\n  margin-top: 10px;\n  margin-bottom: 10px;\n}\nh4 small,\n.h4 small,\nh5 small,\n.h5 small,\nh6 small,\n.h6 small,\nh4 .small,\n.h4 .small,\nh5 .small,\n.h5 .small,\nh6 .small,\n.h6 .small {\n  font-size: 75%;\n}\nh1,\n.h1 {\n  font-size: 36px;\n}\nh2,\n.h2 {\n  font-size: 30px;\n}\nh3,\n.h3 {\n  font-size: 24px;\n}\nh4,\n.h4 {\n  font-size: 18px;\n}\nh5,\n.h5 {\n  font-size: 14px;\n}\nh6,\n.h6 {\n  font-size: 12px;\n}\np {\n  margin: 0 0 10px;\n}\n.lead {\n  margin-bottom: 20px;\n  font-size: 16px;\n  font-weight: 300;\n  line-height: 1.4;\n}\n@media (min-width: 768px) {\n  .lead {\n    font-size: 21px;\n  }\n}\nsmall,\n.small {\n  font-size: 85%;\n}\nmark,\n.mark {\n  padding: .2em;\n  background-color: #fcf8e3;\n}\n.text-left {\n  text-align: left;\n}\n.text-right {\n  text-align: right;\n}\n.text-center {\n  text-align: center;\n}\n.text-justify {\n  text-align: justify;\n}\n.text-nowrap {\n  white-space: nowrap;\n}\n.text-lowercase {\n  text-transform: lowercase;\n}\n.text-uppercase {\n  text-transform: uppercase;\n}\n.text-capitalize {\n  text-transform: capitalize;\n}\n.text-muted {\n  color: #777;\n}\n.text-primary {\n  color: #337ab7;\n}\na.text-primary:hover,\na.text-primary:focus {\n  color: #286090;\n}\n.text-success {\n  color: #3c763d;\n}\na.text-success:hover,\na.text-success:focus {\n  color: #2b542c;\n}\n.text-info {\n  color: #31708f;\n}\na.text-info:hover,\na.text-info:focus {\n  color: #245269;\n}\n.text-warning {\n  color: #8a6d3b;\n}\na.text-warning:hover,\na.text-warning:focus {\n  color: #66512c;\n}\n.text-danger {\n  color: #a94442;\n}\na.text-danger:hover,\na.text-danger:focus {\n  color: #843534;\n}\n.bg-primary {\n  color: #fff;\n  background-color: #337ab7;\n}\na.bg-primary:hover,\na.bg-primary:focus {\n  background-color: #286090;\n}\n.bg-success {\n  background-color: #dff0d8;\n}\na.bg-success:hover,\na.bg-success:focus {\n  
background-color: #c1e2b3;\n}\n.bg-info {\n  background-color: #d9edf7;\n}\na.bg-info:hover,\na.bg-info:focus {\n  background-color: #afd9ee;\n}\n.bg-warning {\n  background-color: #fcf8e3;\n}\na.bg-warning:hover,\na.bg-warning:focus {\n  background-color: #f7ecb5;\n}\n.bg-danger {\n  background-color: #f2dede;\n}\na.bg-danger:hover,\na.bg-danger:focus {\n  background-color: #e4b9b9;\n}\n.page-header {\n  padding-bottom: 9px;\n  margin: 40px 0 20px;\n  border-bottom: 1px solid #eee;\n}\nul,\nol {\n  margin-top: 0;\n  margin-bottom: 10px;\n}\nul ul,\nol ul,\nul ol,\nol ol {\n  margin-bottom: 0;\n}\n.list-unstyled {\n  padding-left: 0;\n  list-style: none;\n}\n.list-inline {\n  padding-left: 0;\n  margin-left: -5px;\n  list-style: none;\n}\n.list-inline > li {\n  display: inline-block;\n  padding-right: 5px;\n  padding-left: 5px;\n}\ndl {\n  margin-top: 0;\n  margin-bottom: 20px;\n}\ndt,\ndd {\n  line-height: 1.42857143;\n}\ndt {\n  font-weight: bold;\n}\ndd {\n  margin-left: 0;\n}\n@media (min-width: 768px) {\n  .dl-horizontal dt {\n    float: left;\n    width: 160px;\n    overflow: hidden;\n    clear: left;\n    text-align: right;\n    text-overflow: ellipsis;\n    white-space: nowrap;\n  }\n  .dl-horizontal dd {\n    margin-left: 180px;\n  }\n}\nabbr[title],\nabbr[data-original-title] {\n  cursor: help;\n  border-bottom: 1px dotted #777;\n}\n.initialism {\n  font-size: 90%;\n  text-transform: uppercase;\n}\nblockquote {\n  padding: 10px 20px;\n  margin: 0 0 20px;\n  font-size: 17.5px;\n  border-left: 5px solid #eee;\n}\nblockquote p:last-child,\nblockquote ul:last-child,\nblockquote ol:last-child {\n  margin-bottom: 0;\n}\nblockquote footer,\nblockquote small,\nblockquote .small {\n  display: block;\n  font-size: 80%;\n  line-height: 1.42857143;\n  color: #777;\n}\nblockquote footer:before,\nblockquote small:before,\nblockquote .small:before {\n  content: '\\2014 \\00A0';\n}\n.blockquote-reverse,\nblockquote.pull-right {\n  padding-right: 15px;\n  padding-left: 
0;\n  text-align: right;\n  border-right: 5px solid #eee;\n  border-left: 0;\n}\n.blockquote-reverse footer:before,\nblockquote.pull-right footer:before,\n.blockquote-reverse small:before,\nblockquote.pull-right small:before,\n.blockquote-reverse .small:before,\nblockquote.pull-right .small:before {\n  content: '';\n}\n.blockquote-reverse footer:after,\nblockquote.pull-right footer:after,\n.blockquote-reverse small:after,\nblockquote.pull-right small:after,\n.blockquote-reverse .small:after,\nblockquote.pull-right .small:after {\n  content: '\\00A0 \\2014';\n}\naddress {\n  margin-bottom: 20px;\n  font-style: normal;\n  line-height: 1.42857143;\n}\ncode,\nkbd,\npre,\nsamp {\n  font-family: Menlo, Monaco, Consolas, \"Courier New\", monospace;\n}\ncode {\n  padding: 2px 4px;\n  font-size: 90%;\n  color: #c7254e;\n  background-color: #f9f2f4;\n  border-radius: 4px;\n}\nkbd {\n  padding: 2px 4px;\n  font-size: 90%;\n  color: #fff;\n  background-color: #333;\n  border-radius: 3px;\n  -webkit-box-shadow: inset 0 -1px 0 rgba(0, 0, 0, .25);\n          box-shadow: inset 0 -1px 0 rgba(0, 0, 0, .25);\n}\nkbd kbd {\n  padding: 0;\n  font-size: 100%;\n  font-weight: bold;\n  -webkit-box-shadow: none;\n          box-shadow: none;\n}\npre {\n  display: block;\n  padding: 9.5px;\n  margin: 0 0 10px;\n  font-size: 13px;\n  line-height: 1.42857143;\n  color: #333;\n  word-break: break-all;\n  word-wrap: break-word;\n  background-color: #f5f5f5;\n  border: 1px solid #ccc;\n  border-radius: 4px;\n}\npre code {\n  padding: 0;\n  font-size: inherit;\n  color: inherit;\n  white-space: pre-wrap;\n  background-color: transparent;\n  border-radius: 0;\n}\n.pre-scrollable {\n  max-height: 340px;\n  overflow-y: scroll;\n}\n.container {\n  padding-right: 15px;\n  padding-left: 15px;\n  margin-right: auto;\n  margin-left: auto;\n}\n@media (min-width: 768px) {\n  .container {\n    width: 750px;\n  }\n}\n@media (min-width: 992px) {\n  .container {\n    width: 970px;\n  }\n}\n@media (min-width: 
1200px) {\n  .container {\n    width: 1170px;\n  }\n}\n.container-fluid {\n  padding-right: 15px;\n  padding-left: 15px;\n  margin-right: auto;\n  margin-left: auto;\n}\n.row {\n  margin-right: -15px;\n  margin-left: -15px;\n}\n.col-xs-1, .col-sm-1, .col-md-1, .col-lg-1, .col-xs-2, .col-sm-2, .col-md-2, .col-lg-2, .col-xs-3, .col-sm-3, .col-md-3, .col-lg-3, .col-xs-4, .col-sm-4, .col-md-4, .col-lg-4, .col-xs-5, .col-sm-5, .col-md-5, .col-lg-5, .col-xs-6, .col-sm-6, .col-md-6, .col-lg-6, .col-xs-7, .col-sm-7, .col-md-7, .col-lg-7, .col-xs-8, .col-sm-8, .col-md-8, .col-lg-8, .col-xs-9, .col-sm-9, .col-md-9, .col-lg-9, .col-xs-10, .col-sm-10, .col-md-10, .col-lg-10, .col-xs-11, .col-sm-11, .col-md-11, .col-lg-11, .col-xs-12, .col-sm-12, .col-md-12, .col-lg-12 {\n  position: relative;\n  min-height: 1px;\n  padding-right: 15px;\n  padding-left: 15px;\n}\n.col-xs-1, .col-xs-2, .col-xs-3, .col-xs-4, .col-xs-5, .col-xs-6, .col-xs-7, .col-xs-8, .col-xs-9, .col-xs-10, .col-xs-11, .col-xs-12 {\n  float: left;\n}\n.col-xs-12 {\n  width: 100%;\n}\n.col-xs-11 {\n  width: 91.66666667%;\n}\n.col-xs-10 {\n  width: 83.33333333%;\n}\n.col-xs-9 {\n  width: 75%;\n}\n.col-xs-8 {\n  width: 66.66666667%;\n}\n.col-xs-7 {\n  width: 58.33333333%;\n}\n.col-xs-6 {\n  width: 50%;\n}\n.col-xs-5 {\n  width: 41.66666667%;\n}\n.col-xs-4 {\n  width: 33.33333333%;\n}\n.col-xs-3 {\n  width: 25%;\n}\n.col-xs-2 {\n  width: 16.66666667%;\n}\n.col-xs-1 {\n  width: 8.33333333%;\n}\n.col-xs-pull-12 {\n  right: 100%;\n}\n.col-xs-pull-11 {\n  right: 91.66666667%;\n}\n.col-xs-pull-10 {\n  right: 83.33333333%;\n}\n.col-xs-pull-9 {\n  right: 75%;\n}\n.col-xs-pull-8 {\n  right: 66.66666667%;\n}\n.col-xs-pull-7 {\n  right: 58.33333333%;\n}\n.col-xs-pull-6 {\n  right: 50%;\n}\n.col-xs-pull-5 {\n  right: 41.66666667%;\n}\n.col-xs-pull-4 {\n  right: 33.33333333%;\n}\n.col-xs-pull-3 {\n  right: 25%;\n}\n.col-xs-pull-2 {\n  right: 16.66666667%;\n}\n.col-xs-pull-1 {\n  right: 8.33333333%;\n}\n.col-xs-pull-0 {\n  right: 
auto;\n}\n.col-xs-push-12 {\n  left: 100%;\n}\n.col-xs-push-11 {\n  left: 91.66666667%;\n}\n.col-xs-push-10 {\n  left: 83.33333333%;\n}\n.col-xs-push-9 {\n  left: 75%;\n}\n.col-xs-push-8 {\n  left: 66.66666667%;\n}\n.col-xs-push-7 {\n  left: 58.33333333%;\n}\n.col-xs-push-6 {\n  left: 50%;\n}\n.col-xs-push-5 {\n  left: 41.66666667%;\n}\n.col-xs-push-4 {\n  left: 33.33333333%;\n}\n.col-xs-push-3 {\n  left: 25%;\n}\n.col-xs-push-2 {\n  left: 16.66666667%;\n}\n.col-xs-push-1 {\n  left: 8.33333333%;\n}\n.col-xs-push-0 {\n  left: auto;\n}\n.col-xs-offset-12 {\n  margin-left: 100%;\n}\n.col-xs-offset-11 {\n  margin-left: 91.66666667%;\n}\n.col-xs-offset-10 {\n  margin-left: 83.33333333%;\n}\n.col-xs-offset-9 {\n  margin-left: 75%;\n}\n.col-xs-offset-8 {\n  margin-left: 66.66666667%;\n}\n.col-xs-offset-7 {\n  margin-left: 58.33333333%;\n}\n.col-xs-offset-6 {\n  margin-left: 50%;\n}\n.col-xs-offset-5 {\n  margin-left: 41.66666667%;\n}\n.col-xs-offset-4 {\n  margin-left: 33.33333333%;\n}\n.col-xs-offset-3 {\n  margin-left: 25%;\n}\n.col-xs-offset-2 {\n  margin-left: 16.66666667%;\n}\n.col-xs-offset-1 {\n  margin-left: 8.33333333%;\n}\n.col-xs-offset-0 {\n  margin-left: 0;\n}\n@media (min-width: 768px) {\n  .col-sm-1, .col-sm-2, .col-sm-3, .col-sm-4, .col-sm-5, .col-sm-6, .col-sm-7, .col-sm-8, .col-sm-9, .col-sm-10, .col-sm-11, .col-sm-12 {\n    float: left;\n  }\n  .col-sm-12 {\n    width: 100%;\n  }\n  .col-sm-11 {\n    width: 91.66666667%;\n  }\n  .col-sm-10 {\n    width: 83.33333333%;\n  }\n  .col-sm-9 {\n    width: 75%;\n  }\n  .col-sm-8 {\n    width: 66.66666667%;\n  }\n  .col-sm-7 {\n    width: 58.33333333%;\n  }\n  .col-sm-6 {\n    width: 50%;\n  }\n  .col-sm-5 {\n    width: 41.66666667%;\n  }\n  .col-sm-4 {\n    width: 33.33333333%;\n  }\n  .col-sm-3 {\n    width: 25%;\n  }\n  .col-sm-2 {\n    width: 16.66666667%;\n  }\n  .col-sm-1 {\n    width: 8.33333333%;\n  }\n  .col-sm-pull-12 {\n    right: 100%;\n  }\n  .col-sm-pull-11 {\n    right: 91.66666667%;\n  }\n  
.col-sm-pull-10 {\n    right: 83.33333333%;\n  }\n  .col-sm-pull-9 {\n    right: 75%;\n  }\n  .col-sm-pull-8 {\n    right: 66.66666667%;\n  }\n  .col-sm-pull-7 {\n    right: 58.33333333%;\n  }\n  .col-sm-pull-6 {\n    right: 50%;\n  }\n  .col-sm-pull-5 {\n    right: 41.66666667%;\n  }\n  .col-sm-pull-4 {\n    right: 33.33333333%;\n  }\n  .col-sm-pull-3 {\n    right: 25%;\n  }\n  .col-sm-pull-2 {\n    right: 16.66666667%;\n  }\n  .col-sm-pull-1 {\n    right: 8.33333333%;\n  }\n  .col-sm-pull-0 {\n    right: auto;\n  }\n  .col-sm-push-12 {\n    left: 100%;\n  }\n  .col-sm-push-11 {\n    left: 91.66666667%;\n  }\n  .col-sm-push-10 {\n    left: 83.33333333%;\n  }\n  .col-sm-push-9 {\n    left: 75%;\n  }\n  .col-sm-push-8 {\n    left: 66.66666667%;\n  }\n  .col-sm-push-7 {\n    left: 58.33333333%;\n  }\n  .col-sm-push-6 {\n    left: 50%;\n  }\n  .col-sm-push-5 {\n    left: 41.66666667%;\n  }\n  .col-sm-push-4 {\n    left: 33.33333333%;\n  }\n  .col-sm-push-3 {\n    left: 25%;\n  }\n  .col-sm-push-2 {\n    left: 16.66666667%;\n  }\n  .col-sm-push-1 {\n    left: 8.33333333%;\n  }\n  .col-sm-push-0 {\n    left: auto;\n  }\n  .col-sm-offset-12 {\n    margin-left: 100%;\n  }\n  .col-sm-offset-11 {\n    margin-left: 91.66666667%;\n  }\n  .col-sm-offset-10 {\n    margin-left: 83.33333333%;\n  }\n  .col-sm-offset-9 {\n    margin-left: 75%;\n  }\n  .col-sm-offset-8 {\n    margin-left: 66.66666667%;\n  }\n  .col-sm-offset-7 {\n    margin-left: 58.33333333%;\n  }\n  .col-sm-offset-6 {\n    margin-left: 50%;\n  }\n  .col-sm-offset-5 {\n    margin-left: 41.66666667%;\n  }\n  .col-sm-offset-4 {\n    margin-left: 33.33333333%;\n  }\n  .col-sm-offset-3 {\n    margin-left: 25%;\n  }\n  .col-sm-offset-2 {\n    margin-left: 16.66666667%;\n  }\n  .col-sm-offset-1 {\n    margin-left: 8.33333333%;\n  }\n  .col-sm-offset-0 {\n    margin-left: 0;\n  }\n}\n@media (min-width: 992px) {\n  .col-md-1, .col-md-2, .col-md-3, .col-md-4, .col-md-5, .col-md-6, .col-md-7, .col-md-8, .col-md-9, 
.col-md-10, .col-md-11, .col-md-12 {\n    float: left;\n  }\n  .col-md-12 {\n    width: 100%;\n  }\n  .col-md-11 {\n    width: 91.66666667%;\n  }\n  .col-md-10 {\n    width: 83.33333333%;\n  }\n  .col-md-9 {\n    width: 75%;\n  }\n  .col-md-8 {\n    width: 66.66666667%;\n  }\n  .col-md-7 {\n    width: 58.33333333%;\n  }\n  .col-md-6 {\n    width: 50%;\n  }\n  .col-md-5 {\n    width: 41.66666667%;\n  }\n  .col-md-4 {\n    width: 33.33333333%;\n  }\n  .col-md-3 {\n    width: 25%;\n  }\n  .col-md-2 {\n    width: 16.66666667%;\n  }\n  .col-md-1 {\n    width: 8.33333333%;\n  }\n  .col-md-pull-12 {\n    right: 100%;\n  }\n  .col-md-pull-11 {\n    right: 91.66666667%;\n  }\n  .col-md-pull-10 {\n    right: 83.33333333%;\n  }\n  .col-md-pull-9 {\n    right: 75%;\n  }\n  .col-md-pull-8 {\n    right: 66.66666667%;\n  }\n  .col-md-pull-7 {\n    right: 58.33333333%;\n  }\n  .col-md-pull-6 {\n    right: 50%;\n  }\n  .col-md-pull-5 {\n    right: 41.66666667%;\n  }\n  .col-md-pull-4 {\n    right: 33.33333333%;\n  }\n  .col-md-pull-3 {\n    right: 25%;\n  }\n  .col-md-pull-2 {\n    right: 16.66666667%;\n  }\n  .col-md-pull-1 {\n    right: 8.33333333%;\n  }\n  .col-md-pull-0 {\n    right: auto;\n  }\n  .col-md-push-12 {\n    left: 100%;\n  }\n  .col-md-push-11 {\n    left: 91.66666667%;\n  }\n  .col-md-push-10 {\n    left: 83.33333333%;\n  }\n  .col-md-push-9 {\n    left: 75%;\n  }\n  .col-md-push-8 {\n    left: 66.66666667%;\n  }\n  .col-md-push-7 {\n    left: 58.33333333%;\n  }\n  .col-md-push-6 {\n    left: 50%;\n  }\n  .col-md-push-5 {\n    left: 41.66666667%;\n  }\n  .col-md-push-4 {\n    left: 33.33333333%;\n  }\n  .col-md-push-3 {\n    left: 25%;\n  }\n  .col-md-push-2 {\n    left: 16.66666667%;\n  }\n  .col-md-push-1 {\n    left: 8.33333333%;\n  }\n  .col-md-push-0 {\n    left: auto;\n  }\n  .col-md-offset-12 {\n    margin-left: 100%;\n  }\n  .col-md-offset-11 {\n    margin-left: 91.66666667%;\n  }\n  .col-md-offset-10 {\n    margin-left: 83.33333333%;\n  }\n  
.col-md-offset-9 {\n    margin-left: 75%;\n  }\n  .col-md-offset-8 {\n    margin-left: 66.66666667%;\n  }\n  .col-md-offset-7 {\n    margin-left: 58.33333333%;\n  }\n  .col-md-offset-6 {\n    margin-left: 50%;\n  }\n  .col-md-offset-5 {\n    margin-left: 41.66666667%;\n  }\n  .col-md-offset-4 {\n    margin-left: 33.33333333%;\n  }\n  .col-md-offset-3 {\n    margin-left: 25%;\n  }\n  .col-md-offset-2 {\n    margin-left: 16.66666667%;\n  }\n  .col-md-offset-1 {\n    margin-left: 8.33333333%;\n  }\n  .col-md-offset-0 {\n    margin-left: 0;\n  }\n}\n@media (min-width: 1200px) {\n  .col-lg-1, .col-lg-2, .col-lg-3, .col-lg-4, .col-lg-5, .col-lg-6, .col-lg-7, .col-lg-8, .col-lg-9, .col-lg-10, .col-lg-11, .col-lg-12 {\n    float: left;\n  }\n  .col-lg-12 {\n    width: 100%;\n  }\n  .col-lg-11 {\n    width: 91.66666667%;\n  }\n  .col-lg-10 {\n    width: 83.33333333%;\n  }\n  .col-lg-9 {\n    width: 75%;\n  }\n  .col-lg-8 {\n    width: 66.66666667%;\n  }\n  .col-lg-7 {\n    width: 58.33333333%;\n  }\n  .col-lg-6 {\n    width: 50%;\n  }\n  .col-lg-5 {\n    width: 41.66666667%;\n  }\n  .col-lg-4 {\n    width: 33.33333333%;\n  }\n  .col-lg-3 {\n    width: 25%;\n  }\n  .col-lg-2 {\n    width: 16.66666667%;\n  }\n  .col-lg-1 {\n    width: 8.33333333%;\n  }\n  .col-lg-pull-12 {\n    right: 100%;\n  }\n  .col-lg-pull-11 {\n    right: 91.66666667%;\n  }\n  .col-lg-pull-10 {\n    right: 83.33333333%;\n  }\n  .col-lg-pull-9 {\n    right: 75%;\n  }\n  .col-lg-pull-8 {\n    right: 66.66666667%;\n  }\n  .col-lg-pull-7 {\n    right: 58.33333333%;\n  }\n  .col-lg-pull-6 {\n    right: 50%;\n  }\n  .col-lg-pull-5 {\n    right: 41.66666667%;\n  }\n  .col-lg-pull-4 {\n    right: 33.33333333%;\n  }\n  .col-lg-pull-3 {\n    right: 25%;\n  }\n  .col-lg-pull-2 {\n    right: 16.66666667%;\n  }\n  .col-lg-pull-1 {\n    right: 8.33333333%;\n  }\n  .col-lg-pull-0 {\n    right: auto;\n  }\n  .col-lg-push-12 {\n    left: 100%;\n  }\n  .col-lg-push-11 {\n    left: 91.66666667%;\n  }\n  .col-lg-push-10 
{\n    left: 83.33333333%;\n  }\n  .col-lg-push-9 {\n    left: 75%;\n  }\n  .col-lg-push-8 {\n    left: 66.66666667%;\n  }\n  .col-lg-push-7 {\n    left: 58.33333333%;\n  }\n  .col-lg-push-6 {\n    left: 50%;\n  }\n  .col-lg-push-5 {\n    left: 41.66666667%;\n  }\n  .col-lg-push-4 {\n    left: 33.33333333%;\n  }\n  .col-lg-push-3 {\n    left: 25%;\n  }\n  .col-lg-push-2 {\n    left: 16.66666667%;\n  }\n  .col-lg-push-1 {\n    left: 8.33333333%;\n  }\n  .col-lg-push-0 {\n    left: auto;\n  }\n  .col-lg-offset-12 {\n    margin-left: 100%;\n  }\n  .col-lg-offset-11 {\n    margin-left: 91.66666667%;\n  }\n  .col-lg-offset-10 {\n    margin-left: 83.33333333%;\n  }\n  .col-lg-offset-9 {\n    margin-left: 75%;\n  }\n  .col-lg-offset-8 {\n    margin-left: 66.66666667%;\n  }\n  .col-lg-offset-7 {\n    margin-left: 58.33333333%;\n  }\n  .col-lg-offset-6 {\n    margin-left: 50%;\n  }\n  .col-lg-offset-5 {\n    margin-left: 41.66666667%;\n  }\n  .col-lg-offset-4 {\n    margin-left: 33.33333333%;\n  }\n  .col-lg-offset-3 {\n    margin-left: 25%;\n  }\n  .col-lg-offset-2 {\n    margin-left: 16.66666667%;\n  }\n  .col-lg-offset-1 {\n    margin-left: 8.33333333%;\n  }\n  .col-lg-offset-0 {\n    margin-left: 0;\n  }\n}\ntable {\n  background-color: transparent;\n}\ncaption {\n  padding-top: 8px;\n  padding-bottom: 8px;\n  color: #777;\n  text-align: left;\n}\nth {\n  text-align: left;\n}\n.table {\n  width: 100%;\n  max-width: 100%;\n  margin-bottom: 20px;\n}\n.table > thead > tr > th,\n.table > tbody > tr > th,\n.table > tfoot > tr > th,\n.table > thead > tr > td,\n.table > tbody > tr > td,\n.table > tfoot > tr > td {\n  padding: 8px;\n  line-height: 1.42857143;\n  vertical-align: top;\n  border-top: 1px solid #ddd;\n}\n.table > thead > tr > th {\n  vertical-align: bottom;\n  border-bottom: 2px solid #ddd;\n}\n.table > caption + thead > tr:first-child > th,\n.table > colgroup + thead > tr:first-child > th,\n.table > thead:first-child > tr:first-child > th,\n.table > caption + 
thead > tr:first-child > td,\n.table > colgroup + thead > tr:first-child > td,\n.table > thead:first-child > tr:first-child > td {\n  border-top: 0;\n}\n.table > tbody + tbody {\n  border-top: 2px solid #ddd;\n}\n.table .table {\n  background-color: #fff;\n}\n.table-condensed > thead > tr > th,\n.table-condensed > tbody > tr > th,\n.table-condensed > tfoot > tr > th,\n.table-condensed > thead > tr > td,\n.table-condensed > tbody > tr > td,\n.table-condensed > tfoot > tr > td {\n  padding: 5px;\n}\n.table-bordered {\n  border: 1px solid #ddd;\n}\n.table-bordered > thead > tr > th,\n.table-bordered > tbody > tr > th,\n.table-bordered > tfoot > tr > th,\n.table-bordered > thead > tr > td,\n.table-bordered > tbody > tr > td,\n.table-bordered > tfoot > tr > td {\n  border: 1px solid #ddd;\n}\n.table-bordered > thead > tr > th,\n.table-bordered > thead > tr > td {\n  border-bottom-width: 2px;\n}\n.table-striped > tbody > tr:nth-of-type(odd) {\n  background-color: #f9f9f9;\n}\n.table-hover > tbody > tr:hover {\n  background-color: #f5f5f5;\n}\ntable col[class*=\"col-\"] {\n  position: static;\n  display: table-column;\n  float: none;\n}\ntable td[class*=\"col-\"],\ntable th[class*=\"col-\"] {\n  position: static;\n  display: table-cell;\n  float: none;\n}\n.table > thead > tr > td.active,\n.table > tbody > tr > td.active,\n.table > tfoot > tr > td.active,\n.table > thead > tr > th.active,\n.table > tbody > tr > th.active,\n.table > tfoot > tr > th.active,\n.table > thead > tr.active > td,\n.table > tbody > tr.active > td,\n.table > tfoot > tr.active > td,\n.table > thead > tr.active > th,\n.table > tbody > tr.active > th,\n.table > tfoot > tr.active > th {\n  background-color: #f5f5f5;\n}\n.table-hover > tbody > tr > td.active:hover,\n.table-hover > tbody > tr > th.active:hover,\n.table-hover > tbody > tr.active:hover > td,\n.table-hover > tbody > tr:hover > .active,\n.table-hover > tbody > tr.active:hover > th {\n  background-color: #e8e8e8;\n}\n.table > thead > tr > 
td.success,\n.table > tbody > tr > td.success,\n.table > tfoot > tr > td.success,\n.table > thead > tr > th.success,\n.table > tbody > tr > th.success,\n.table > tfoot > tr > th.success,\n.table > thead > tr.success > td,\n.table > tbody > tr.success > td,\n.table > tfoot > tr.success > td,\n.table > thead > tr.success > th,\n.table > tbody > tr.success > th,\n.table > tfoot > tr.success > th {\n  background-color: #dff0d8;\n}\n.table-hover > tbody > tr > td.success:hover,\n.table-hover > tbody > tr > th.success:hover,\n.table-hover > tbody > tr.success:hover > td,\n.table-hover > tbody > tr:hover > .success,\n.table-hover > tbody > tr.success:hover > th {\n  background-color: #d0e9c6;\n}\n.table > thead > tr > td.info,\n.table > tbody > tr > td.info,\n.table > tfoot > tr > td.info,\n.table > thead > tr > th.info,\n.table > tbody > tr > th.info,\n.table > tfoot > tr > th.info,\n.table > thead > tr.info > td,\n.table > tbody > tr.info > td,\n.table > tfoot > tr.info > td,\n.table > thead > tr.info > th,\n.table > tbody > tr.info > th,\n.table > tfoot > tr.info > th {\n  background-color: #d9edf7;\n}\n.table-hover > tbody > tr > td.info:hover,\n.table-hover > tbody > tr > th.info:hover,\n.table-hover > tbody > tr.info:hover > td,\n.table-hover > tbody > tr:hover > .info,\n.table-hover > tbody > tr.info:hover > th {\n  background-color: #c4e3f3;\n}\n.table > thead > tr > td.warning,\n.table > tbody > tr > td.warning,\n.table > tfoot > tr > td.warning,\n.table > thead > tr > th.warning,\n.table > tbody > tr > th.warning,\n.table > tfoot > tr > th.warning,\n.table > thead > tr.warning > td,\n.table > tbody > tr.warning > td,\n.table > tfoot > tr.warning > td,\n.table > thead > tr.warning > th,\n.table > tbody > tr.warning > th,\n.table > tfoot > tr.warning > th {\n  background-color: #fcf8e3;\n}\n.table-hover > tbody > tr > td.warning:hover,\n.table-hover > tbody > tr > th.warning:hover,\n.table-hover > tbody > tr.warning:hover > td,\n.table-hover > tbody > tr:hover > 
.warning,\n.table-hover > tbody > tr.warning:hover > th {\n  background-color: #faf2cc;\n}\n.table > thead > tr > td.danger,\n.table > tbody > tr > td.danger,\n.table > tfoot > tr > td.danger,\n.table > thead > tr > th.danger,\n.table > tbody > tr > th.danger,\n.table > tfoot > tr > th.danger,\n.table > thead > tr.danger > td,\n.table > tbody > tr.danger > td,\n.table > tfoot > tr.danger > td,\n.table > thead > tr.danger > th,\n.table > tbody > tr.danger > th,\n.table > tfoot > tr.danger > th {\n  background-color: #f2dede;\n}\n.table-hover > tbody > tr > td.danger:hover,\n.table-hover > tbody > tr > th.danger:hover,\n.table-hover > tbody > tr.danger:hover > td,\n.table-hover > tbody > tr:hover > .danger,\n.table-hover > tbody > tr.danger:hover > th {\n  background-color: #ebcccc;\n}\n.table-responsive {\n  min-height: .01%;\n  overflow-x: auto;\n}\n@media screen and (max-width: 767px) {\n  .table-responsive {\n    width: 100%;\n    margin-bottom: 15px;\n    overflow-y: hidden;\n    -ms-overflow-style: -ms-autohiding-scrollbar;\n    border: 1px solid #ddd;\n  }\n  .table-responsive > .table {\n    margin-bottom: 0;\n  }\n  .table-responsive > .table > thead > tr > th,\n  .table-responsive > .table > tbody > tr > th,\n  .table-responsive > .table > tfoot > tr > th,\n  .table-responsive > .table > thead > tr > td,\n  .table-responsive > .table > tbody > tr > td,\n  .table-responsive > .table > tfoot > tr > td {\n    white-space: nowrap;\n  }\n  .table-responsive > .table-bordered {\n    border: 0;\n  }\n  .table-responsive > .table-bordered > thead > tr > th:first-child,\n  .table-responsive > .table-bordered > tbody > tr > th:first-child,\n  .table-responsive > .table-bordered > tfoot > tr > th:first-child,\n  .table-responsive > .table-bordered > thead > tr > td:first-child,\n  .table-responsive > .table-bordered > tbody > tr > td:first-child,\n  .table-responsive > .table-bordered > tfoot > tr > td:first-child {\n    border-left: 0;\n  }\n  .table-responsive > 
.table-bordered > thead > tr > th:last-child,\n  .table-responsive > .table-bordered > tbody > tr > th:last-child,\n  .table-responsive > .table-bordered > tfoot > tr > th:last-child,\n  .table-responsive > .table-bordered > thead > tr > td:last-child,\n  .table-responsive > .table-bordered > tbody > tr > td:last-child,\n  .table-responsive > .table-bordered > tfoot > tr > td:last-child {\n    border-right: 0;\n  }\n  .table-responsive > .table-bordered > tbody > tr:last-child > th,\n  .table-responsive > .table-bordered > tfoot > tr:last-child > th,\n  .table-responsive > .table-bordered > tbody > tr:last-child > td,\n  .table-responsive > .table-bordered > tfoot > tr:last-child > td {\n    border-bottom: 0;\n  }\n}\nfieldset {\n  min-width: 0;\n  padding: 0;\n  margin: 0;\n  border: 0;\n}\nlegend {\n  display: block;\n  width: 100%;\n  padding: 0;\n  margin-bottom: 20px;\n  font-size: 21px;\n  line-height: inherit;\n  color: #333;\n  border: 0;\n  border-bottom: 1px solid #e5e5e5;\n}\nlabel {\n  display: inline-block;\n  max-width: 100%;\n  margin-bottom: 5px;\n  font-weight: bold;\n}\ninput[type=\"search\"] {\n  -webkit-box-sizing: border-box;\n     -moz-box-sizing: border-box;\n          box-sizing: border-box;\n}\ninput[type=\"radio\"],\ninput[type=\"checkbox\"] {\n  margin: 4px 0 0;\n  margin-top: 1px \\9;\n  line-height: normal;\n}\ninput[type=\"file\"] {\n  display: block;\n}\ninput[type=\"range\"] {\n  display: block;\n  width: 100%;\n}\nselect[multiple],\nselect[size] {\n  height: auto;\n}\ninput[type=\"file\"]:focus,\ninput[type=\"radio\"]:focus,\ninput[type=\"checkbox\"]:focus {\n  outline: thin dotted;\n  outline: 5px auto -webkit-focus-ring-color;\n  outline-offset: -2px;\n}\noutput {\n  display: block;\n  padding-top: 7px;\n  font-size: 14px;\n  line-height: 1.42857143;\n  color: #555;\n}\n.form-control {\n  display: block;\n  width: 100%;\n  height: 34px;\n  padding: 6px 12px;\n  font-size: 14px;\n  line-height: 1.42857143;\n  color: #555;\n  
background-color: #fff;\n  background-image: none;\n  border: 1px solid #ccc;\n  border-radius: 4px;\n  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075);\n          box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075);\n  -webkit-transition: border-color ease-in-out .15s, -webkit-box-shadow ease-in-out .15s;\n       -o-transition: border-color ease-in-out .15s, box-shadow ease-in-out .15s;\n          transition: border-color ease-in-out .15s, box-shadow ease-in-out .15s;\n}\n.form-control:focus {\n  border-color: #66afe9;\n  outline: 0;\n  -webkit-box-shadow: inset 0 1px 1px rgba(0,0,0,.075), 0 0 8px rgba(102, 175, 233, .6);\n          box-shadow: inset 0 1px 1px rgba(0,0,0,.075), 0 0 8px rgba(102, 175, 233, .6);\n}\n.form-control::-moz-placeholder {\n  color: #999;\n  opacity: 1;\n}\n.form-control:-ms-input-placeholder {\n  color: #999;\n}\n.form-control::-webkit-input-placeholder {\n  color: #999;\n}\n.form-control::-ms-expand {\n  background-color: transparent;\n  border: 0;\n}\n.form-control[disabled],\n.form-control[readonly],\nfieldset[disabled] .form-control {\n  background-color: #eee;\n  opacity: 1;\n}\n.form-control[disabled],\nfieldset[disabled] .form-control {\n  cursor: not-allowed;\n}\ntextarea.form-control {\n  height: auto;\n}\ninput[type=\"search\"] {\n  -webkit-appearance: none;\n}\n@media screen and (-webkit-min-device-pixel-ratio: 0) {\n  input[type=\"date\"].form-control,\n  input[type=\"time\"].form-control,\n  input[type=\"datetime-local\"].form-control,\n  input[type=\"month\"].form-control {\n    line-height: 34px;\n  }\n  input[type=\"date\"].input-sm,\n  input[type=\"time\"].input-sm,\n  input[type=\"datetime-local\"].input-sm,\n  input[type=\"month\"].input-sm,\n  .input-group-sm input[type=\"date\"],\n  .input-group-sm input[type=\"time\"],\n  .input-group-sm input[type=\"datetime-local\"],\n  .input-group-sm input[type=\"month\"] {\n    line-height: 30px;\n  }\n  input[type=\"date\"].input-lg,\n  input[type=\"time\"].input-lg,\n  
input[type=\"datetime-local\"].input-lg,\n  input[type=\"month\"].input-lg,\n  .input-group-lg input[type=\"date\"],\n  .input-group-lg input[type=\"time\"],\n  .input-group-lg input[type=\"datetime-local\"],\n  .input-group-lg input[type=\"month\"] {\n    line-height: 46px;\n  }\n}\n.form-group {\n  margin-bottom: 15px;\n}\n.radio,\n.checkbox {\n  position: relative;\n  display: block;\n  margin-top: 10px;\n  margin-bottom: 10px;\n}\n.radio label,\n.checkbox label {\n  min-height: 20px;\n  padding-left: 20px;\n  margin-bottom: 0;\n  font-weight: normal;\n  cursor: pointer;\n}\n.radio input[type=\"radio\"],\n.radio-inline input[type=\"radio\"],\n.checkbox input[type=\"checkbox\"],\n.checkbox-inline input[type=\"checkbox\"] {\n  position: absolute;\n  margin-top: 4px \\9;\n  margin-left: -20px;\n}\n.radio + .radio,\n.checkbox + .checkbox {\n  margin-top: -5px;\n}\n.radio-inline,\n.checkbox-inline {\n  position: relative;\n  display: inline-block;\n  padding-left: 20px;\n  margin-bottom: 0;\n  font-weight: normal;\n  vertical-align: middle;\n  cursor: pointer;\n}\n.radio-inline + .radio-inline,\n.checkbox-inline + .checkbox-inline {\n  margin-top: 0;\n  margin-left: 10px;\n}\ninput[type=\"radio\"][disabled],\ninput[type=\"checkbox\"][disabled],\ninput[type=\"radio\"].disabled,\ninput[type=\"checkbox\"].disabled,\nfieldset[disabled] input[type=\"radio\"],\nfieldset[disabled] input[type=\"checkbox\"] {\n  cursor: not-allowed;\n}\n.radio-inline.disabled,\n.checkbox-inline.disabled,\nfieldset[disabled] .radio-inline,\nfieldset[disabled] .checkbox-inline {\n  cursor: not-allowed;\n}\n.radio.disabled label,\n.checkbox.disabled label,\nfieldset[disabled] .radio label,\nfieldset[disabled] .checkbox label {\n  cursor: not-allowed;\n}\n.form-control-static {\n  min-height: 34px;\n  padding-top: 7px;\n  padding-bottom: 7px;\n  margin-bottom: 0;\n}\n.form-control-static.input-lg,\n.form-control-static.input-sm {\n  padding-right: 0;\n  padding-left: 0;\n}\n.input-sm {\n  height: 
30px;\n  padding: 5px 10px;\n  font-size: 12px;\n  line-height: 1.5;\n  border-radius: 3px;\n}\nselect.input-sm {\n  height: 30px;\n  line-height: 30px;\n}\ntextarea.input-sm,\nselect[multiple].input-sm {\n  height: auto;\n}\n.form-group-sm .form-control {\n  height: 30px;\n  padding: 5px 10px;\n  font-size: 12px;\n  line-height: 1.5;\n  border-radius: 3px;\n}\n.form-group-sm select.form-control {\n  height: 30px;\n  line-height: 30px;\n}\n.form-group-sm textarea.form-control,\n.form-group-sm select[multiple].form-control {\n  height: auto;\n}\n.form-group-sm .form-control-static {\n  height: 30px;\n  min-height: 32px;\n  padding: 6px 10px;\n  font-size: 12px;\n  line-height: 1.5;\n}\n.input-lg {\n  height: 46px;\n  padding: 10px 16px;\n  font-size: 18px;\n  line-height: 1.3333333;\n  border-radius: 6px;\n}\nselect.input-lg {\n  height: 46px;\n  line-height: 46px;\n}\ntextarea.input-lg,\nselect[multiple].input-lg {\n  height: auto;\n}\n.form-group-lg .form-control {\n  height: 46px;\n  padding: 10px 16px;\n  font-size: 18px;\n  line-height: 1.3333333;\n  border-radius: 6px;\n}\n.form-group-lg select.form-control {\n  height: 46px;\n  line-height: 46px;\n}\n.form-group-lg textarea.form-control,\n.form-group-lg select[multiple].form-control {\n  height: auto;\n}\n.form-group-lg .form-control-static {\n  height: 46px;\n  min-height: 38px;\n  padding: 11px 16px;\n  font-size: 18px;\n  line-height: 1.3333333;\n}\n.has-feedback {\n  position: relative;\n}\n.has-feedback .form-control {\n  padding-right: 42.5px;\n}\n.form-control-feedback {\n  position: absolute;\n  top: 0;\n  right: 0;\n  z-index: 2;\n  display: block;\n  width: 34px;\n  height: 34px;\n  line-height: 34px;\n  text-align: center;\n  pointer-events: none;\n}\n.input-lg + .form-control-feedback,\n.input-group-lg + .form-control-feedback,\n.form-group-lg .form-control + .form-control-feedback {\n  width: 46px;\n  height: 46px;\n  line-height: 46px;\n}\n.input-sm + .form-control-feedback,\n.input-group-sm + 
.form-control-feedback,\n.form-group-sm .form-control + .form-control-feedback {\n  width: 30px;\n  height: 30px;\n  line-height: 30px;\n}\n.has-success .help-block,\n.has-success .control-label,\n.has-success .radio,\n.has-success .checkbox,\n.has-success .radio-inline,\n.has-success .checkbox-inline,\n.has-success.radio label,\n.has-success.checkbox label,\n.has-success.radio-inline label,\n.has-success.checkbox-inline label {\n  color: #3c763d;\n}\n.has-success .form-control {\n  border-color: #3c763d;\n  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075);\n          box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075);\n}\n.has-success .form-control:focus {\n  border-color: #2b542c;\n  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 6px #67b168;\n          box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 6px #67b168;\n}\n.has-success .input-group-addon {\n  color: #3c763d;\n  background-color: #dff0d8;\n  border-color: #3c763d;\n}\n.has-success .form-control-feedback {\n  color: #3c763d;\n}\n.has-warning .help-block,\n.has-warning .control-label,\n.has-warning .radio,\n.has-warning .checkbox,\n.has-warning .radio-inline,\n.has-warning .checkbox-inline,\n.has-warning.radio label,\n.has-warning.checkbox label,\n.has-warning.radio-inline label,\n.has-warning.checkbox-inline label {\n  color: #8a6d3b;\n}\n.has-warning .form-control {\n  border-color: #8a6d3b;\n  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075);\n          box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075);\n}\n.has-warning .form-control:focus {\n  border-color: #66512c;\n  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 6px #c0a16b;\n          box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 6px #c0a16b;\n}\n.has-warning .input-group-addon {\n  color: #8a6d3b;\n  background-color: #fcf8e3;\n  border-color: #8a6d3b;\n}\n.has-warning .form-control-feedback {\n  color: #8a6d3b;\n}\n.has-error .help-block,\n.has-error .control-label,\n.has-error .radio,\n.has-error 
.checkbox,\n.has-error .radio-inline,\n.has-error .checkbox-inline,\n.has-error.radio label,\n.has-error.checkbox label,\n.has-error.radio-inline label,\n.has-error.checkbox-inline label {\n  color: #a94442;\n}\n.has-error .form-control {\n  border-color: #a94442;\n  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075);\n          box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075);\n}\n.has-error .form-control:focus {\n  border-color: #843534;\n  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 6px #ce8483;\n          box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 6px #ce8483;\n}\n.has-error .input-group-addon {\n  color: #a94442;\n  background-color: #f2dede;\n  border-color: #a94442;\n}\n.has-error .form-control-feedback {\n  color: #a94442;\n}\n.has-feedback label ~ .form-control-feedback {\n  top: 25px;\n}\n.has-feedback label.sr-only ~ .form-control-feedback {\n  top: 0;\n}\n.help-block {\n  display: block;\n  margin-top: 5px;\n  margin-bottom: 10px;\n  color: #737373;\n}\n@media (min-width: 768px) {\n  .form-inline .form-group {\n    display: inline-block;\n    margin-bottom: 0;\n    vertical-align: middle;\n  }\n  .form-inline .form-control {\n    display: inline-block;\n    width: auto;\n    vertical-align: middle;\n  }\n  .form-inline .form-control-static {\n    display: inline-block;\n  }\n  .form-inline .input-group {\n    display: inline-table;\n    vertical-align: middle;\n  }\n  .form-inline .input-group .input-group-addon,\n  .form-inline .input-group .input-group-btn,\n  .form-inline .input-group .form-control {\n    width: auto;\n  }\n  .form-inline .input-group > .form-control {\n    width: 100%;\n  }\n  .form-inline .control-label {\n    margin-bottom: 0;\n    vertical-align: middle;\n  }\n  .form-inline .radio,\n  .form-inline .checkbox {\n    display: inline-block;\n    margin-top: 0;\n    margin-bottom: 0;\n    vertical-align: middle;\n  }\n  .form-inline .radio label,\n  .form-inline .checkbox label {\n    padding-left: 0;\n 
 }\n  .form-inline .radio input[type=\"radio\"],\n  .form-inline .checkbox input[type=\"checkbox\"] {\n    position: relative;\n    margin-left: 0;\n  }\n  .form-inline .has-feedback .form-control-feedback {\n    top: 0;\n  }\n}\n.form-horizontal .radio,\n.form-horizontal .checkbox,\n.form-horizontal .radio-inline,\n.form-horizontal .checkbox-inline {\n  padding-top: 7px;\n  margin-top: 0;\n  margin-bottom: 0;\n}\n.form-horizontal .radio,\n.form-horizontal .checkbox {\n  min-height: 27px;\n}\n.form-horizontal .form-group {\n  margin-right: -15px;\n  margin-left: -15px;\n}\n@media (min-width: 768px) {\n  .form-horizontal .control-label {\n    padding-top: 7px;\n    margin-bottom: 0;\n    text-align: right;\n  }\n}\n.form-horizontal .has-feedback .form-control-feedback {\n  right: 15px;\n}\n@media (min-width: 768px) {\n  .form-horizontal .form-group-lg .control-label {\n    padding-top: 11px;\n    font-size: 18px;\n  }\n}\n@media (min-width: 768px) {\n  .form-horizontal .form-group-sm .control-label {\n    padding-top: 6px;\n    font-size: 12px;\n  }\n}\n.btn {\n  display: inline-block;\n  padding: 6px 12px;\n  margin-bottom: 0;\n  font-size: 14px;\n  font-weight: normal;\n  line-height: 1.42857143;\n  text-align: center;\n  white-space: nowrap;\n  vertical-align: middle;\n  -ms-touch-action: manipulation;\n      touch-action: manipulation;\n  cursor: pointer;\n  -webkit-user-select: none;\n     -moz-user-select: none;\n      -ms-user-select: none;\n          user-select: none;\n  background-image: none;\n  border: 1px solid transparent;\n  border-radius: 4px;\n}\n.btn:focus,\n.btn:active:focus,\n.btn.active:focus,\n.btn.focus,\n.btn:active.focus,\n.btn.active.focus {\n  outline: thin dotted;\n  outline: 5px auto -webkit-focus-ring-color;\n  outline-offset: -2px;\n}\n.btn:hover,\n.btn:focus,\n.btn.focus {\n  color: #333;\n  text-decoration: none;\n}\n.btn:active,\n.btn.active {\n  background-image: none;\n  outline: 0;\n  -webkit-box-shadow: inset 0 3px 5px rgba(0, 
0, 0, .125);\n          box-shadow: inset 0 3px 5px rgba(0, 0, 0, .125);\n}\n.btn.disabled,\n.btn[disabled],\nfieldset[disabled] .btn {\n  cursor: not-allowed;\n  filter: alpha(opacity=65);\n  -webkit-box-shadow: none;\n          box-shadow: none;\n  opacity: .65;\n}\na.btn.disabled,\nfieldset[disabled] a.btn {\n  pointer-events: none;\n}\n.btn-default {\n  color: #333;\n  background-color: #fff;\n  border-color: #ccc;\n}\n.btn-default:focus,\n.btn-default.focus {\n  color: #333;\n  background-color: #e6e6e6;\n  border-color: #8c8c8c;\n}\n.btn-default:hover {\n  color: #333;\n  background-color: #e6e6e6;\n  border-color: #adadad;\n}\n.btn-default:active,\n.btn-default.active,\n.open > .dropdown-toggle.btn-default {\n  color: #333;\n  background-color: #e6e6e6;\n  border-color: #adadad;\n}\n.btn-default:active:hover,\n.btn-default.active:hover,\n.open > .dropdown-toggle.btn-default:hover,\n.btn-default:active:focus,\n.btn-default.active:focus,\n.open > .dropdown-toggle.btn-default:focus,\n.btn-default:active.focus,\n.btn-default.active.focus,\n.open > .dropdown-toggle.btn-default.focus {\n  color: #333;\n  background-color: #d4d4d4;\n  border-color: #8c8c8c;\n}\n.btn-default:active,\n.btn-default.active,\n.open > .dropdown-toggle.btn-default {\n  background-image: none;\n}\n.btn-default.disabled:hover,\n.btn-default[disabled]:hover,\nfieldset[disabled] .btn-default:hover,\n.btn-default.disabled:focus,\n.btn-default[disabled]:focus,\nfieldset[disabled] .btn-default:focus,\n.btn-default.disabled.focus,\n.btn-default[disabled].focus,\nfieldset[disabled] .btn-default.focus {\n  background-color: #fff;\n  border-color: #ccc;\n}\n.btn-default .badge {\n  color: #fff;\n  background-color: #333;\n}\n.btn-primary {\n  color: #fff;\n  background-color: #337ab7;\n  border-color: #2e6da4;\n}\n.btn-primary:focus,\n.btn-primary.focus {\n  color: #fff;\n  background-color: #286090;\n  border-color: #122b40;\n}\n.btn-primary:hover {\n  color: #fff;\n  background-color: #286090;\n  
border-color: #204d74;\n}\n.btn-primary:active,\n.btn-primary.active,\n.open > .dropdown-toggle.btn-primary {\n  color: #fff;\n  background-color: #286090;\n  border-color: #204d74;\n}\n.btn-primary:active:hover,\n.btn-primary.active:hover,\n.open > .dropdown-toggle.btn-primary:hover,\n.btn-primary:active:focus,\n.btn-primary.active:focus,\n.open > .dropdown-toggle.btn-primary:focus,\n.btn-primary:active.focus,\n.btn-primary.active.focus,\n.open > .dropdown-toggle.btn-primary.focus {\n  color: #fff;\n  background-color: #204d74;\n  border-color: #122b40;\n}\n.btn-primary:active,\n.btn-primary.active,\n.open > .dropdown-toggle.btn-primary {\n  background-image: none;\n}\n.btn-primary.disabled:hover,\n.btn-primary[disabled]:hover,\nfieldset[disabled] .btn-primary:hover,\n.btn-primary.disabled:focus,\n.btn-primary[disabled]:focus,\nfieldset[disabled] .btn-primary:focus,\n.btn-primary.disabled.focus,\n.btn-primary[disabled].focus,\nfieldset[disabled] .btn-primary.focus {\n  background-color: #337ab7;\n  border-color: #2e6da4;\n}\n.btn-primary .badge {\n  color: #337ab7;\n  background-color: #fff;\n}\n.btn-success {\n  color: #fff;\n  background-color: #5cb85c;\n  border-color: #4cae4c;\n}\n.btn-success:focus,\n.btn-success.focus {\n  color: #fff;\n  background-color: #449d44;\n  border-color: #255625;\n}\n.btn-success:hover {\n  color: #fff;\n  background-color: #449d44;\n  border-color: #398439;\n}\n.btn-success:active,\n.btn-success.active,\n.open > .dropdown-toggle.btn-success {\n  color: #fff;\n  background-color: #449d44;\n  border-color: #398439;\n}\n.btn-success:active:hover,\n.btn-success.active:hover,\n.open > .dropdown-toggle.btn-success:hover,\n.btn-success:active:focus,\n.btn-success.active:focus,\n.open > .dropdown-toggle.btn-success:focus,\n.btn-success:active.focus,\n.btn-success.active.focus,\n.open > .dropdown-toggle.btn-success.focus {\n  color: #fff;\n  background-color: #398439;\n  border-color: 
#255625;\n}\n.btn-success:active,\n.btn-success.active,\n.open > .dropdown-toggle.btn-success {\n  background-image: none;\n}\n.btn-success.disabled:hover,\n.btn-success[disabled]:hover,\nfieldset[disabled] .btn-success:hover,\n.btn-success.disabled:focus,\n.btn-success[disabled]:focus,\nfieldset[disabled] .btn-success:focus,\n.btn-success.disabled.focus,\n.btn-success[disabled].focus,\nfieldset[disabled] .btn-success.focus {\n  background-color: #5cb85c;\n  border-color: #4cae4c;\n}\n.btn-success .badge {\n  color: #5cb85c;\n  background-color: #fff;\n}\n.btn-info {\n  color: #fff;\n  background-color: #5bc0de;\n  border-color: #46b8da;\n}\n.btn-info:focus,\n.btn-info.focus {\n  color: #fff;\n  background-color: #31b0d5;\n  border-color: #1b6d85;\n}\n.btn-info:hover {\n  color: #fff;\n  background-color: #31b0d5;\n  border-color: #269abc;\n}\n.btn-info:active,\n.btn-info.active,\n.open > .dropdown-toggle.btn-info {\n  color: #fff;\n  background-color: #31b0d5;\n  border-color: #269abc;\n}\n.btn-info:active:hover,\n.btn-info.active:hover,\n.open > .dropdown-toggle.btn-info:hover,\n.btn-info:active:focus,\n.btn-info.active:focus,\n.open > .dropdown-toggle.btn-info:focus,\n.btn-info:active.focus,\n.btn-info.active.focus,\n.open > .dropdown-toggle.btn-info.focus {\n  color: #fff;\n  background-color: #269abc;\n  border-color: #1b6d85;\n}\n.btn-info:active,\n.btn-info.active,\n.open > .dropdown-toggle.btn-info {\n  background-image: none;\n}\n.btn-info.disabled:hover,\n.btn-info[disabled]:hover,\nfieldset[disabled] .btn-info:hover,\n.btn-info.disabled:focus,\n.btn-info[disabled]:focus,\nfieldset[disabled] .btn-info:focus,\n.btn-info.disabled.focus,\n.btn-info[disabled].focus,\nfieldset[disabled] .btn-info.focus {\n  background-color: #5bc0de;\n  border-color: #46b8da;\n}\n.btn-info .badge {\n  color: #5bc0de;\n  background-color: #fff;\n}\n.btn-warning {\n  color: #fff;\n  background-color: #f0ad4e;\n  border-color: #eea236;\n}\n.btn-warning:focus,\n.btn-warning.focus 
{\n  color: #fff;\n  background-color: #ec971f;\n  border-color: #985f0d;\n}\n.btn-warning:hover {\n  color: #fff;\n  background-color: #ec971f;\n  border-color: #d58512;\n}\n.btn-warning:active,\n.btn-warning.active,\n.open > .dropdown-toggle.btn-warning {\n  color: #fff;\n  background-color: #ec971f;\n  border-color: #d58512;\n}\n.btn-warning:active:hover,\n.btn-warning.active:hover,\n.open > .dropdown-toggle.btn-warning:hover,\n.btn-warning:active:focus,\n.btn-warning.active:focus,\n.open > .dropdown-toggle.btn-warning:focus,\n.btn-warning:active.focus,\n.btn-warning.active.focus,\n.open > .dropdown-toggle.btn-warning.focus {\n  color: #fff;\n  background-color: #d58512;\n  border-color: #985f0d;\n}\n.btn-warning:active,\n.btn-warning.active,\n.open > .dropdown-toggle.btn-warning {\n  background-image: none;\n}\n.btn-warning.disabled:hover,\n.btn-warning[disabled]:hover,\nfieldset[disabled] .btn-warning:hover,\n.btn-warning.disabled:focus,\n.btn-warning[disabled]:focus,\nfieldset[disabled] .btn-warning:focus,\n.btn-warning.disabled.focus,\n.btn-warning[disabled].focus,\nfieldset[disabled] .btn-warning.focus {\n  background-color: #f0ad4e;\n  border-color: #eea236;\n}\n.btn-warning .badge {\n  color: #f0ad4e;\n  background-color: #fff;\n}\n.btn-danger {\n  color: #fff;\n  background-color: #d9534f;\n  border-color: #d43f3a;\n}\n.btn-danger:focus,\n.btn-danger.focus {\n  color: #fff;\n  background-color: #c9302c;\n  border-color: #761c19;\n}\n.btn-danger:hover {\n  color: #fff;\n  background-color: #c9302c;\n  border-color: #ac2925;\n}\n.btn-danger:active,\n.btn-danger.active,\n.open > .dropdown-toggle.btn-danger {\n  color: #fff;\n  background-color: #c9302c;\n  border-color: #ac2925;\n}\n.btn-danger:active:hover,\n.btn-danger.active:hover,\n.open > .dropdown-toggle.btn-danger:hover,\n.btn-danger:active:focus,\n.btn-danger.active:focus,\n.open > .dropdown-toggle.btn-danger:focus,\n.btn-danger:active.focus,\n.btn-danger.active.focus,\n.open > 
.dropdown-toggle.btn-danger.focus {\n  color: #fff;\n  background-color: #ac2925;\n  border-color: #761c19;\n}\n.btn-danger:active,\n.btn-danger.active,\n.open > .dropdown-toggle.btn-danger {\n  background-image: none;\n}\n.btn-danger.disabled:hover,\n.btn-danger[disabled]:hover,\nfieldset[disabled] .btn-danger:hover,\n.btn-danger.disabled:focus,\n.btn-danger[disabled]:focus,\nfieldset[disabled] .btn-danger:focus,\n.btn-danger.disabled.focus,\n.btn-danger[disabled].focus,\nfieldset[disabled] .btn-danger.focus {\n  background-color: #d9534f;\n  border-color: #d43f3a;\n}\n.btn-danger .badge {\n  color: #d9534f;\n  background-color: #fff;\n}\n.btn-link {\n  font-weight: normal;\n  color: #337ab7;\n  border-radius: 0;\n}\n.btn-link,\n.btn-link:active,\n.btn-link.active,\n.btn-link[disabled],\nfieldset[disabled] .btn-link {\n  background-color: transparent;\n  -webkit-box-shadow: none;\n          box-shadow: none;\n}\n.btn-link,\n.btn-link:hover,\n.btn-link:focus,\n.btn-link:active {\n  border-color: transparent;\n}\n.btn-link:hover,\n.btn-link:focus {\n  color: #23527c;\n  text-decoration: underline;\n  background-color: transparent;\n}\n.btn-link[disabled]:hover,\nfieldset[disabled] .btn-link:hover,\n.btn-link[disabled]:focus,\nfieldset[disabled] .btn-link:focus {\n  color: #777;\n  text-decoration: none;\n}\n.btn-lg,\n.btn-group-lg > .btn {\n  padding: 10px 16px;\n  font-size: 18px;\n  line-height: 1.3333333;\n  border-radius: 6px;\n}\n.btn-sm,\n.btn-group-sm > .btn {\n  padding: 5px 10px;\n  font-size: 12px;\n  line-height: 1.5;\n  border-radius: 3px;\n}\n.btn-xs,\n.btn-group-xs > .btn {\n  padding: 1px 5px;\n  font-size: 12px;\n  line-height: 1.5;\n  border-radius: 3px;\n}\n.btn-block {\n  display: block;\n  width: 100%;\n}\n.btn-block + .btn-block {\n  margin-top: 5px;\n}\ninput[type=\"submit\"].btn-block,\ninput[type=\"reset\"].btn-block,\ninput[type=\"button\"].btn-block {\n  width: 100%;\n}\n.fade {\n  opacity: 0;\n  -webkit-transition: opacity .15s linear;\n   
    -o-transition: opacity .15s linear;\n          transition: opacity .15s linear;\n}\n.fade.in {\n  opacity: 1;\n}\n.collapse {\n  display: none;\n}\n.collapse.in {\n  display: block;\n}\ntr.collapse.in {\n  display: table-row;\n}\ntbody.collapse.in {\n  display: table-row-group;\n}\n.collapsing {\n  position: relative;\n  height: 0;\n  overflow: hidden;\n  -webkit-transition-timing-function: ease;\n       -o-transition-timing-function: ease;\n          transition-timing-function: ease;\n  -webkit-transition-duration: .35s;\n       -o-transition-duration: .35s;\n          transition-duration: .35s;\n  -webkit-transition-property: height, visibility;\n       -o-transition-property: height, visibility;\n          transition-property: height, visibility;\n}\n.caret {\n  display: inline-block;\n  width: 0;\n  height: 0;\n  margin-left: 2px;\n  vertical-align: middle;\n  border-top: 4px dashed;\n  border-top: 4px solid \\9;\n  border-right: 4px solid transparent;\n  border-left: 4px solid transparent;\n}\n.dropup,\n.dropdown {\n  position: relative;\n}\n.dropdown-toggle:focus {\n  outline: 0;\n}\n.dropdown-menu {\n  position: absolute;\n  top: 100%;\n  left: 0;\n  z-index: 1000;\n  display: none;\n  float: left;\n  min-width: 160px;\n  padding: 5px 0;\n  margin: 2px 0 0;\n  font-size: 14px;\n  text-align: left;\n  list-style: none;\n  background-color: #fff;\n  -webkit-background-clip: padding-box;\n          background-clip: padding-box;\n  border: 1px solid #ccc;\n  border: 1px solid rgba(0, 0, 0, .15);\n  border-radius: 4px;\n  -webkit-box-shadow: 0 6px 12px rgba(0, 0, 0, .175);\n          box-shadow: 0 6px 12px rgba(0, 0, 0, .175);\n}\n.dropdown-menu.pull-right {\n  right: 0;\n  left: auto;\n}\n.dropdown-menu .divider {\n  height: 1px;\n  margin: 9px 0;\n  overflow: hidden;\n  background-color: #e5e5e5;\n}\n.dropdown-menu > li > a {\n  display: block;\n  padding: 3px 20px;\n  clear: both;\n  font-weight: normal;\n  line-height: 1.42857143;\n  color: #333;\n  
white-space: nowrap;\n}\n.dropdown-menu > li > a:hover,\n.dropdown-menu > li > a:focus {\n  color: #262626;\n  text-decoration: none;\n  background-color: #f5f5f5;\n}\n.dropdown-menu > .active > a,\n.dropdown-menu > .active > a:hover,\n.dropdown-menu > .active > a:focus {\n  color: #fff;\n  text-decoration: none;\n  background-color: #337ab7;\n  outline: 0;\n}\n.dropdown-menu > .disabled > a,\n.dropdown-menu > .disabled > a:hover,\n.dropdown-menu > .disabled > a:focus {\n  color: #777;\n}\n.dropdown-menu > .disabled > a:hover,\n.dropdown-menu > .disabled > a:focus {\n  text-decoration: none;\n  cursor: not-allowed;\n  background-color: transparent;\n  background-image: none;\n  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n}\n.open > .dropdown-menu {\n  display: block;\n}\n.open > a {\n  outline: 0;\n}\n.dropdown-menu-right {\n  right: 0;\n  left: auto;\n}\n.dropdown-menu-left {\n  right: auto;\n  left: 0;\n}\n.dropdown-header {\n  display: block;\n  padding: 3px 20px;\n  font-size: 12px;\n  line-height: 1.42857143;\n  color: #777;\n  white-space: nowrap;\n}\n.dropdown-backdrop {\n  position: fixed;\n  top: 0;\n  right: 0;\n  bottom: 0;\n  left: 0;\n  z-index: 990;\n}\n.pull-right > .dropdown-menu {\n  right: 0;\n  left: auto;\n}\n.dropup .caret,\n.navbar-fixed-bottom .dropdown .caret {\n  content: \"\";\n  border-top: 0;\n  border-bottom: 4px dashed;\n  border-bottom: 4px solid \\9;\n}\n.dropup .dropdown-menu,\n.navbar-fixed-bottom .dropdown .dropdown-menu {\n  top: auto;\n  bottom: 100%;\n  margin-bottom: 2px;\n}\n@media (min-width: 768px) {\n  .navbar-right .dropdown-menu {\n    right: 0;\n    left: auto;\n  }\n  .navbar-right .dropdown-menu-left {\n    right: auto;\n    left: 0;\n  }\n}\n.btn-group,\n.btn-group-vertical {\n  position: relative;\n  display: inline-block;\n  vertical-align: middle;\n}\n.btn-group > .btn,\n.btn-group-vertical > .btn {\n  position: relative;\n  float: left;\n}\n.btn-group > .btn:hover,\n.btn-group-vertical > 
.btn:hover,\n.btn-group > .btn:focus,\n.btn-group-vertical > .btn:focus,\n.btn-group > .btn:active,\n.btn-group-vertical > .btn:active,\n.btn-group > .btn.active,\n.btn-group-vertical > .btn.active {\n  z-index: 2;\n}\n.btn-group .btn + .btn,\n.btn-group .btn + .btn-group,\n.btn-group .btn-group + .btn,\n.btn-group .btn-group + .btn-group {\n  margin-left: -1px;\n}\n.btn-toolbar {\n  margin-left: -5px;\n}\n.btn-toolbar .btn,\n.btn-toolbar .btn-group,\n.btn-toolbar .input-group {\n  float: left;\n}\n.btn-toolbar > .btn,\n.btn-toolbar > .btn-group,\n.btn-toolbar > .input-group {\n  margin-left: 5px;\n}\n.btn-group > .btn:not(:first-child):not(:last-child):not(.dropdown-toggle) {\n  border-radius: 0;\n}\n.btn-group > .btn:first-child {\n  margin-left: 0;\n}\n.btn-group > .btn:first-child:not(:last-child):not(.dropdown-toggle) {\n  border-top-right-radius: 0;\n  border-bottom-right-radius: 0;\n}\n.btn-group > .btn:last-child:not(:first-child),\n.btn-group > .dropdown-toggle:not(:first-child) {\n  border-top-left-radius: 0;\n  border-bottom-left-radius: 0;\n}\n.btn-group > .btn-group {\n  float: left;\n}\n.btn-group > .btn-group:not(:first-child):not(:last-child) > .btn {\n  border-radius: 0;\n}\n.btn-group > .btn-group:first-child:not(:last-child) > .btn:last-child,\n.btn-group > .btn-group:first-child:not(:last-child) > .dropdown-toggle {\n  border-top-right-radius: 0;\n  border-bottom-right-radius: 0;\n}\n.btn-group > .btn-group:last-child:not(:first-child) > .btn:first-child {\n  border-top-left-radius: 0;\n  border-bottom-left-radius: 0;\n}\n.btn-group .dropdown-toggle:active,\n.btn-group.open .dropdown-toggle {\n  outline: 0;\n}\n.btn-group > .btn + .dropdown-toggle {\n  padding-right: 8px;\n  padding-left: 8px;\n}\n.btn-group > .btn-lg + .dropdown-toggle {\n  padding-right: 12px;\n  padding-left: 12px;\n}\n.btn-group.open .dropdown-toggle {\n  -webkit-box-shadow: inset 0 3px 5px rgba(0, 0, 0, .125);\n          box-shadow: inset 0 3px 5px rgba(0, 0, 0, 
.125);\n}\n.btn-group.open .dropdown-toggle.btn-link {\n  -webkit-box-shadow: none;\n          box-shadow: none;\n}\n.btn .caret {\n  margin-left: 0;\n}\n.btn-lg .caret {\n  border-width: 5px 5px 0;\n  border-bottom-width: 0;\n}\n.dropup .btn-lg .caret {\n  border-width: 0 5px 5px;\n}\n.btn-group-vertical > .btn,\n.btn-group-vertical > .btn-group,\n.btn-group-vertical > .btn-group > .btn {\n  display: block;\n  float: none;\n  width: 100%;\n  max-width: 100%;\n}\n.btn-group-vertical > .btn-group > .btn {\n  float: none;\n}\n.btn-group-vertical > .btn + .btn,\n.btn-group-vertical > .btn + .btn-group,\n.btn-group-vertical > .btn-group + .btn,\n.btn-group-vertical > .btn-group + .btn-group {\n  margin-top: -1px;\n  margin-left: 0;\n}\n.btn-group-vertical > .btn:not(:first-child):not(:last-child) {\n  border-radius: 0;\n}\n.btn-group-vertical > .btn:first-child:not(:last-child) {\n  border-top-left-radius: 4px;\n  border-top-right-radius: 4px;\n  border-bottom-right-radius: 0;\n  border-bottom-left-radius: 0;\n}\n.btn-group-vertical > .btn:last-child:not(:first-child) {\n  border-top-left-radius: 0;\n  border-top-right-radius: 0;\n  border-bottom-right-radius: 4px;\n  border-bottom-left-radius: 4px;\n}\n.btn-group-vertical > .btn-group:not(:first-child):not(:last-child) > .btn {\n  border-radius: 0;\n}\n.btn-group-vertical > .btn-group:first-child:not(:last-child) > .btn:last-child,\n.btn-group-vertical > .btn-group:first-child:not(:last-child) > .dropdown-toggle {\n  border-bottom-right-radius: 0;\n  border-bottom-left-radius: 0;\n}\n.btn-group-vertical > .btn-group:last-child:not(:first-child) > .btn:first-child {\n  border-top-left-radius: 0;\n  border-top-right-radius: 0;\n}\n.btn-group-justified {\n  display: table;\n  width: 100%;\n  table-layout: fixed;\n  border-collapse: separate;\n}\n.btn-group-justified > .btn,\n.btn-group-justified > .btn-group {\n  display: table-cell;\n  float: none;\n  width: 1%;\n}\n.btn-group-justified > .btn-group .btn {\n  width: 
100%;\n}\n.btn-group-justified > .btn-group .dropdown-menu {\n  left: auto;\n}\n[data-toggle=\"buttons\"] > .btn input[type=\"radio\"],\n[data-toggle=\"buttons\"] > .btn-group > .btn input[type=\"radio\"],\n[data-toggle=\"buttons\"] > .btn input[type=\"checkbox\"],\n[data-toggle=\"buttons\"] > .btn-group > .btn input[type=\"checkbox\"] {\n  position: absolute;\n  clip: rect(0, 0, 0, 0);\n  pointer-events: none;\n}\n.input-group {\n  position: relative;\n  display: table;\n  border-collapse: separate;\n}\n.input-group[class*=\"col-\"] {\n  float: none;\n  padding-right: 0;\n  padding-left: 0;\n}\n.input-group .form-control {\n  position: relative;\n  z-index: 2;\n  float: left;\n  width: 100%;\n  margin-bottom: 0;\n}\n.input-group .form-control:focus {\n  z-index: 3;\n}\n.input-group-lg > .form-control,\n.input-group-lg > .input-group-addon,\n.input-group-lg > .input-group-btn > .btn {\n  height: 46px;\n  padding: 10px 16px;\n  font-size: 18px;\n  line-height: 1.3333333;\n  border-radius: 6px;\n}\nselect.input-group-lg > .form-control,\nselect.input-group-lg > .input-group-addon,\nselect.input-group-lg > .input-group-btn > .btn {\n  height: 46px;\n  line-height: 46px;\n}\ntextarea.input-group-lg > .form-control,\ntextarea.input-group-lg > .input-group-addon,\ntextarea.input-group-lg > .input-group-btn > .btn,\nselect[multiple].input-group-lg > .form-control,\nselect[multiple].input-group-lg > .input-group-addon,\nselect[multiple].input-group-lg > .input-group-btn > .btn {\n  height: auto;\n}\n.input-group-sm > .form-control,\n.input-group-sm > .input-group-addon,\n.input-group-sm > .input-group-btn > .btn {\n  height: 30px;\n  padding: 5px 10px;\n  font-size: 12px;\n  line-height: 1.5;\n  border-radius: 3px;\n}\nselect.input-group-sm > .form-control,\nselect.input-group-sm > .input-group-addon,\nselect.input-group-sm > .input-group-btn > .btn {\n  height: 30px;\n  line-height: 30px;\n}\ntextarea.input-group-sm > .form-control,\ntextarea.input-group-sm > 
.input-group-addon,\ntextarea.input-group-sm > .input-group-btn > .btn,\nselect[multiple].input-group-sm > .form-control,\nselect[multiple].input-group-sm > .input-group-addon,\nselect[multiple].input-group-sm > .input-group-btn > .btn {\n  height: auto;\n}\n.input-group-addon,\n.input-group-btn,\n.input-group .form-control {\n  display: table-cell;\n}\n.input-group-addon:not(:first-child):not(:last-child),\n.input-group-btn:not(:first-child):not(:last-child),\n.input-group .form-control:not(:first-child):not(:last-child) {\n  border-radius: 0;\n}\n.input-group-addon,\n.input-group-btn {\n  width: 1%;\n  white-space: nowrap;\n  vertical-align: middle;\n}\n.input-group-addon {\n  padding: 6px 12px;\n  font-size: 14px;\n  font-weight: normal;\n  line-height: 1;\n  color: #555;\n  text-align: center;\n  background-color: #eee;\n  border: 1px solid #ccc;\n  border-radius: 4px;\n}\n.input-group-addon.input-sm {\n  padding: 5px 10px;\n  font-size: 12px;\n  border-radius: 3px;\n}\n.input-group-addon.input-lg {\n  padding: 10px 16px;\n  font-size: 18px;\n  border-radius: 6px;\n}\n.input-group-addon input[type=\"radio\"],\n.input-group-addon input[type=\"checkbox\"] {\n  margin-top: 0;\n}\n.input-group .form-control:first-child,\n.input-group-addon:first-child,\n.input-group-btn:first-child > .btn,\n.input-group-btn:first-child > .btn-group > .btn,\n.input-group-btn:first-child > .dropdown-toggle,\n.input-group-btn:last-child > .btn:not(:last-child):not(.dropdown-toggle),\n.input-group-btn:last-child > .btn-group:not(:last-child) > .btn {\n  border-top-right-radius: 0;\n  border-bottom-right-radius: 0;\n}\n.input-group-addon:first-child {\n  border-right: 0;\n}\n.input-group .form-control:last-child,\n.input-group-addon:last-child,\n.input-group-btn:last-child > .btn,\n.input-group-btn:last-child > .btn-group > .btn,\n.input-group-btn:last-child > .dropdown-toggle,\n.input-group-btn:first-child > .btn:not(:first-child),\n.input-group-btn:first-child > 
.btn-group:not(:first-child) > .btn {\n  border-top-left-radius: 0;\n  border-bottom-left-radius: 0;\n}\n.input-group-addon:last-child {\n  border-left: 0;\n}\n.input-group-btn {\n  position: relative;\n  font-size: 0;\n  white-space: nowrap;\n}\n.input-group-btn > .btn {\n  position: relative;\n}\n.input-group-btn > .btn + .btn {\n  margin-left: -1px;\n}\n.input-group-btn > .btn:hover,\n.input-group-btn > .btn:focus,\n.input-group-btn > .btn:active {\n  z-index: 2;\n}\n.input-group-btn:first-child > .btn,\n.input-group-btn:first-child > .btn-group {\n  margin-right: -1px;\n}\n.input-group-btn:last-child > .btn,\n.input-group-btn:last-child > .btn-group {\n  z-index: 2;\n  margin-left: -1px;\n}\n.nav {\n  padding-left: 0;\n  margin-bottom: 0;\n  list-style: none;\n}\n.nav > li {\n  position: relative;\n  display: block;\n}\n.nav > li > a {\n  position: relative;\n  display: block;\n  padding: 10px 15px;\n}\n.nav > li > a:hover,\n.nav > li > a:focus {\n  text-decoration: none;\n  background-color: #eee;\n}\n.nav > li.disabled > a {\n  color: #777;\n}\n.nav > li.disabled > a:hover,\n.nav > li.disabled > a:focus {\n  color: #777;\n  text-decoration: none;\n  cursor: not-allowed;\n  background-color: transparent;\n}\n.nav .open > a,\n.nav .open > a:hover,\n.nav .open > a:focus {\n  background-color: #eee;\n  border-color: #337ab7;\n}\n.nav .nav-divider {\n  height: 1px;\n  margin: 9px 0;\n  overflow: hidden;\n  background-color: #e5e5e5;\n}\n.nav > li > a > img {\n  max-width: none;\n}\n.nav-tabs {\n  border-bottom: 1px solid #ddd;\n}\n.nav-tabs > li {\n  float: left;\n  margin-bottom: -1px;\n}\n.nav-tabs > li > a {\n  margin-right: 2px;\n  line-height: 1.42857143;\n  border: 1px solid transparent;\n  border-radius: 4px 4px 0 0;\n}\n.nav-tabs > li > a:hover {\n  border-color: #eee #eee #ddd;\n}\n.nav-tabs > li.active > a,\n.nav-tabs > li.active > a:hover,\n.nav-tabs > li.active > a:focus {\n  color: #555;\n  cursor: default;\n  background-color: #fff;\n  border: 1px 
solid #ddd;\n  border-bottom-color: transparent;\n}\n.nav-tabs.nav-justified {\n  width: 100%;\n  border-bottom: 0;\n}\n.nav-tabs.nav-justified > li {\n  float: none;\n}\n.nav-tabs.nav-justified > li > a {\n  margin-bottom: 5px;\n  text-align: center;\n}\n.nav-tabs.nav-justified > .dropdown .dropdown-menu {\n  top: auto;\n  left: auto;\n}\n@media (min-width: 768px) {\n  .nav-tabs.nav-justified > li {\n    display: table-cell;\n    width: 1%;\n  }\n  .nav-tabs.nav-justified > li > a {\n    margin-bottom: 0;\n  }\n}\n.nav-tabs.nav-justified > li > a {\n  margin-right: 0;\n  border-radius: 4px;\n}\n.nav-tabs.nav-justified > .active > a,\n.nav-tabs.nav-justified > .active > a:hover,\n.nav-tabs.nav-justified > .active > a:focus {\n  border: 1px solid #ddd;\n}\n@media (min-width: 768px) {\n  .nav-tabs.nav-justified > li > a {\n    border-bottom: 1px solid #ddd;\n    border-radius: 4px 4px 0 0;\n  }\n  .nav-tabs.nav-justified > .active > a,\n  .nav-tabs.nav-justified > .active > a:hover,\n  .nav-tabs.nav-justified > .active > a:focus {\n    border-bottom-color: #fff;\n  }\n}\n.nav-pills > li {\n  float: left;\n}\n.nav-pills > li > a {\n  border-radius: 4px;\n}\n.nav-pills > li + li {\n  margin-left: 2px;\n}\n.nav-pills > li.active > a,\n.nav-pills > li.active > a:hover,\n.nav-pills > li.active > a:focus {\n  color: #fff;\n  background-color: #337ab7;\n}\n.nav-stacked > li {\n  float: none;\n}\n.nav-stacked > li + li {\n  margin-top: 2px;\n  margin-left: 0;\n}\n.nav-justified {\n  width: 100%;\n}\n.nav-justified > li {\n  float: none;\n}\n.nav-justified > li > a {\n  margin-bottom: 5px;\n  text-align: center;\n}\n.nav-justified > .dropdown .dropdown-menu {\n  top: auto;\n  left: auto;\n}\n@media (min-width: 768px) {\n  .nav-justified > li {\n    display: table-cell;\n    width: 1%;\n  }\n  .nav-justified > li > a {\n    margin-bottom: 0;\n  }\n}\n.nav-tabs-justified {\n  border-bottom: 0;\n}\n.nav-tabs-justified > li > a {\n  margin-right: 0;\n  border-radius: 
4px;\n}\n.nav-tabs-justified > .active > a,\n.nav-tabs-justified > .active > a:hover,\n.nav-tabs-justified > .active > a:focus {\n  border: 1px solid #ddd;\n}\n@media (min-width: 768px) {\n  .nav-tabs-justified > li > a {\n    border-bottom: 1px solid #ddd;\n    border-radius: 4px 4px 0 0;\n  }\n  .nav-tabs-justified > .active > a,\n  .nav-tabs-justified > .active > a:hover,\n  .nav-tabs-justified > .active > a:focus {\n    border-bottom-color: #fff;\n  }\n}\n.tab-content > .tab-pane {\n  display: none;\n}\n.tab-content > .active {\n  display: block;\n}\n.nav-tabs .dropdown-menu {\n  margin-top: -1px;\n  border-top-left-radius: 0;\n  border-top-right-radius: 0;\n}\n.navbar {\n  position: relative;\n  min-height: 50px;\n  margin-bottom: 20px;\n  border: 1px solid transparent;\n}\n@media (min-width: 768px) {\n  .navbar {\n    border-radius: 4px;\n  }\n}\n@media (min-width: 768px) {\n  .navbar-header {\n    float: left;\n  }\n}\n.navbar-collapse {\n  padding-right: 15px;\n  padding-left: 15px;\n  overflow-x: visible;\n  -webkit-overflow-scrolling: touch;\n  border-top: 1px solid transparent;\n  -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, .1);\n          box-shadow: inset 0 1px 0 rgba(255, 255, 255, .1);\n}\n.navbar-collapse.in {\n  overflow-y: auto;\n}\n@media (min-width: 768px) {\n  .navbar-collapse {\n    width: auto;\n    border-top: 0;\n    -webkit-box-shadow: none;\n            box-shadow: none;\n  }\n  .navbar-collapse.collapse {\n    display: block !important;\n    height: auto !important;\n    padding-bottom: 0;\n    overflow: visible !important;\n  }\n  .navbar-collapse.in {\n    overflow-y: visible;\n  }\n  .navbar-fixed-top .navbar-collapse,\n  .navbar-static-top .navbar-collapse,\n  .navbar-fixed-bottom .navbar-collapse {\n    padding-right: 0;\n    padding-left: 0;\n  }\n}\n.navbar-fixed-top .navbar-collapse,\n.navbar-fixed-bottom .navbar-collapse {\n  max-height: 340px;\n}\n@media (max-device-width: 480px) and (orientation: landscape) {\n  
.navbar-fixed-top .navbar-collapse,\n  .navbar-fixed-bottom .navbar-collapse {\n    max-height: 200px;\n  }\n}\n.container > .navbar-header,\n.container-fluid > .navbar-header,\n.container > .navbar-collapse,\n.container-fluid > .navbar-collapse {\n  margin-right: -15px;\n  margin-left: -15px;\n}\n@media (min-width: 768px) {\n  .container > .navbar-header,\n  .container-fluid > .navbar-header,\n  .container > .navbar-collapse,\n  .container-fluid > .navbar-collapse {\n    margin-right: 0;\n    margin-left: 0;\n  }\n}\n.navbar-static-top {\n  z-index: 1000;\n  border-width: 0 0 1px;\n}\n@media (min-width: 768px) {\n  .navbar-static-top {\n    border-radius: 0;\n  }\n}\n.navbar-fixed-top,\n.navbar-fixed-bottom {\n  position: fixed;\n  right: 0;\n  left: 0;\n  z-index: 1030;\n}\n@media (min-width: 768px) {\n  .navbar-fixed-top,\n  .navbar-fixed-bottom {\n    border-radius: 0;\n  }\n}\n.navbar-fixed-top {\n  top: 0;\n  border-width: 0 0 1px;\n}\n.navbar-fixed-bottom {\n  bottom: 0;\n  margin-bottom: 0;\n  border-width: 1px 0 0;\n}\n.navbar-brand {\n  float: left;\n  height: 50px;\n  padding: 15px 15px;\n  font-size: 18px;\n  line-height: 20px;\n}\n.navbar-brand:hover,\n.navbar-brand:focus {\n  text-decoration: none;\n}\n.navbar-brand > img {\n  display: block;\n}\n@media (min-width: 768px) {\n  .navbar > .container .navbar-brand,\n  .navbar > .container-fluid .navbar-brand {\n    margin-left: -15px;\n  }\n}\n.navbar-toggle {\n  position: relative;\n  float: right;\n  padding: 9px 10px;\n  margin-top: 8px;\n  margin-right: 15px;\n  margin-bottom: 8px;\n  background-color: transparent;\n  background-image: none;\n  border: 1px solid transparent;\n  border-radius: 4px;\n}\n.navbar-toggle:focus {\n  outline: 0;\n}\n.navbar-toggle .icon-bar {\n  display: block;\n  width: 22px;\n  height: 2px;\n  border-radius: 1px;\n}\n.navbar-toggle .icon-bar + .icon-bar {\n  margin-top: 4px;\n}\n@media (min-width: 768px) {\n  .navbar-toggle {\n    display: none;\n  }\n}\n.navbar-nav {\n  
margin: 7.5px -15px;\n}\n.navbar-nav > li > a {\n  padding-top: 10px;\n  padding-bottom: 10px;\n  line-height: 20px;\n}\n@media (max-width: 767px) {\n  .navbar-nav .open .dropdown-menu {\n    position: static;\n    float: none;\n    width: auto;\n    margin-top: 0;\n    background-color: transparent;\n    border: 0;\n    -webkit-box-shadow: none;\n            box-shadow: none;\n  }\n  .navbar-nav .open .dropdown-menu > li > a,\n  .navbar-nav .open .dropdown-menu .dropdown-header {\n    padding: 5px 15px 5px 25px;\n  }\n  .navbar-nav .open .dropdown-menu > li > a {\n    line-height: 20px;\n  }\n  .navbar-nav .open .dropdown-menu > li > a:hover,\n  .navbar-nav .open .dropdown-menu > li > a:focus {\n    background-image: none;\n  }\n}\n@media (min-width: 768px) {\n  .navbar-nav {\n    float: left;\n    margin: 0;\n  }\n  .navbar-nav > li {\n    float: left;\n  }\n  .navbar-nav > li > a {\n    padding-top: 15px;\n    padding-bottom: 15px;\n  }\n}\n.navbar-form {\n  padding: 10px 15px;\n  margin-top: 8px;\n  margin-right: -15px;\n  margin-bottom: 8px;\n  margin-left: -15px;\n  border-top: 1px solid transparent;\n  border-bottom: 1px solid transparent;\n  -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, .1), 0 1px 0 rgba(255, 255, 255, .1);\n          box-shadow: inset 0 1px 0 rgba(255, 255, 255, .1), 0 1px 0 rgba(255, 255, 255, .1);\n}\n@media (min-width: 768px) {\n  .navbar-form .form-group {\n    display: inline-block;\n    margin-bottom: 0;\n    vertical-align: middle;\n  }\n  .navbar-form .form-control {\n    display: inline-block;\n    width: auto;\n    vertical-align: middle;\n  }\n  .navbar-form .form-control-static {\n    display: inline-block;\n  }\n  .navbar-form .input-group {\n    display: inline-table;\n    vertical-align: middle;\n  }\n  .navbar-form .input-group .input-group-addon,\n  .navbar-form .input-group .input-group-btn,\n  .navbar-form .input-group .form-control {\n    width: auto;\n  }\n  .navbar-form .input-group > .form-control {\n    
width: 100%;\n  }\n  .navbar-form .control-label {\n    margin-bottom: 0;\n    vertical-align: middle;\n  }\n  .navbar-form .radio,\n  .navbar-form .checkbox {\n    display: inline-block;\n    margin-top: 0;\n    margin-bottom: 0;\n    vertical-align: middle;\n  }\n  .navbar-form .radio label,\n  .navbar-form .checkbox label {\n    padding-left: 0;\n  }\n  .navbar-form .radio input[type=\"radio\"],\n  .navbar-form .checkbox input[type=\"checkbox\"] {\n    position: relative;\n    margin-left: 0;\n  }\n  .navbar-form .has-feedback .form-control-feedback {\n    top: 0;\n  }\n}\n@media (max-width: 767px) {\n  .navbar-form .form-group {\n    margin-bottom: 5px;\n  }\n  .navbar-form .form-group:last-child {\n    margin-bottom: 0;\n  }\n}\n@media (min-width: 768px) {\n  .navbar-form {\n    width: auto;\n    padding-top: 0;\n    padding-bottom: 0;\n    margin-right: 0;\n    margin-left: 0;\n    border: 0;\n    -webkit-box-shadow: none;\n            box-shadow: none;\n  }\n}\n.navbar-nav > li > .dropdown-menu {\n  margin-top: 0;\n  border-top-left-radius: 0;\n  border-top-right-radius: 0;\n}\n.navbar-fixed-bottom .navbar-nav > li > .dropdown-menu {\n  margin-bottom: 0;\n  border-top-left-radius: 4px;\n  border-top-right-radius: 4px;\n  border-bottom-right-radius: 0;\n  border-bottom-left-radius: 0;\n}\n.navbar-btn {\n  margin-top: 8px;\n  margin-bottom: 8px;\n}\n.navbar-btn.btn-sm {\n  margin-top: 10px;\n  margin-bottom: 10px;\n}\n.navbar-btn.btn-xs {\n  margin-top: 14px;\n  margin-bottom: 14px;\n}\n.navbar-text {\n  margin-top: 15px;\n  margin-bottom: 15px;\n}\n@media (min-width: 768px) {\n  .navbar-text {\n    float: left;\n    margin-right: 15px;\n    margin-left: 15px;\n  }\n}\n@media (min-width: 768px) {\n  .navbar-left {\n    float: left !important;\n  }\n  .navbar-right {\n    float: right !important;\n    margin-right: -15px;\n  }\n  .navbar-right ~ .navbar-right {\n    margin-right: 0;\n  }\n}\n.navbar-default {\n  background-color: #f8f8f8;\n  border-color: 
#e7e7e7;\n}\n.navbar-default .navbar-brand {\n  color: #777;\n}\n.navbar-default .navbar-brand:hover,\n.navbar-default .navbar-brand:focus {\n  color: #5e5e5e;\n  background-color: transparent;\n}\n.navbar-default .navbar-text {\n  color: #777;\n}\n.navbar-default .navbar-nav > li > a {\n  color: #777;\n}\n.navbar-default .navbar-nav > li > a:hover,\n.navbar-default .navbar-nav > li > a:focus {\n  color: #333;\n  background-color: transparent;\n}\n.navbar-default .navbar-nav > .active > a,\n.navbar-default .navbar-nav > .active > a:hover,\n.navbar-default .navbar-nav > .active > a:focus {\n  color: #555;\n  background-color: #e7e7e7;\n}\n.navbar-default .navbar-nav > .disabled > a,\n.navbar-default .navbar-nav > .disabled > a:hover,\n.navbar-default .navbar-nav > .disabled > a:focus {\n  color: #ccc;\n  background-color: transparent;\n}\n.navbar-default .navbar-toggle {\n  border-color: #ddd;\n}\n.navbar-default .navbar-toggle:hover,\n.navbar-default .navbar-toggle:focus {\n  background-color: #ddd;\n}\n.navbar-default .navbar-toggle .icon-bar {\n  background-color: #888;\n}\n.navbar-default .navbar-collapse,\n.navbar-default .navbar-form {\n  border-color: #e7e7e7;\n}\n.navbar-default .navbar-nav > .open > a,\n.navbar-default .navbar-nav > .open > a:hover,\n.navbar-default .navbar-nav > .open > a:focus {\n  color: #555;\n  background-color: #e7e7e7;\n}\n@media (max-width: 767px) {\n  .navbar-default .navbar-nav .open .dropdown-menu > li > a {\n    color: #777;\n  }\n  .navbar-default .navbar-nav .open .dropdown-menu > li > a:hover,\n  .navbar-default .navbar-nav .open .dropdown-menu > li > a:focus {\n    color: #333;\n    background-color: transparent;\n  }\n  .navbar-default .navbar-nav .open .dropdown-menu > .active > a,\n  .navbar-default .navbar-nav .open .dropdown-menu > .active > a:hover,\n  .navbar-default .navbar-nav .open .dropdown-menu > .active > a:focus {\n    color: #555;\n    background-color: #e7e7e7;\n  }\n  .navbar-default .navbar-nav .open 
.dropdown-menu > .disabled > a,\n  .navbar-default .navbar-nav .open .dropdown-menu > .disabled > a:hover,\n  .navbar-default .navbar-nav .open .dropdown-menu > .disabled > a:focus {\n    color: #ccc;\n    background-color: transparent;\n  }\n}\n.navbar-default .navbar-link {\n  color: #777;\n}\n.navbar-default .navbar-link:hover {\n  color: #333;\n}\n.navbar-default .btn-link {\n  color: #777;\n}\n.navbar-default .btn-link:hover,\n.navbar-default .btn-link:focus {\n  color: #333;\n}\n.navbar-default .btn-link[disabled]:hover,\nfieldset[disabled] .navbar-default .btn-link:hover,\n.navbar-default .btn-link[disabled]:focus,\nfieldset[disabled] .navbar-default .btn-link:focus {\n  color: #ccc;\n}\n.navbar-inverse {\n  background-color: #222;\n  border-color: #080808;\n}\n.navbar-inverse .navbar-brand {\n  color: #9d9d9d;\n}\n.navbar-inverse .navbar-brand:hover,\n.navbar-inverse .navbar-brand:focus {\n  color: #fff;\n  background-color: transparent;\n}\n.navbar-inverse .navbar-text {\n  color: #9d9d9d;\n}\n.navbar-inverse .navbar-nav > li > a {\n  color: #9d9d9d;\n}\n.navbar-inverse .navbar-nav > li > a:hover,\n.navbar-inverse .navbar-nav > li > a:focus {\n  color: #fff;\n  background-color: transparent;\n}\n.navbar-inverse .navbar-nav > .active > a,\n.navbar-inverse .navbar-nav > .active > a:hover,\n.navbar-inverse .navbar-nav > .active > a:focus {\n  color: #fff;\n  background-color: #080808;\n}\n.navbar-inverse .navbar-nav > .disabled > a,\n.navbar-inverse .navbar-nav > .disabled > a:hover,\n.navbar-inverse .navbar-nav > .disabled > a:focus {\n  color: #444;\n  background-color: transparent;\n}\n.navbar-inverse .navbar-toggle {\n  border-color: #333;\n}\n.navbar-inverse .navbar-toggle:hover,\n.navbar-inverse .navbar-toggle:focus {\n  background-color: #333;\n}\n.navbar-inverse .navbar-toggle .icon-bar {\n  background-color: #fff;\n}\n.navbar-inverse .navbar-collapse,\n.navbar-inverse .navbar-form {\n  border-color: #101010;\n}\n.navbar-inverse .navbar-nav > .open > 
a,\n.navbar-inverse .navbar-nav > .open > a:hover,\n.navbar-inverse .navbar-nav > .open > a:focus {\n  color: #fff;\n  background-color: #080808;\n}\n@media (max-width: 767px) {\n  .navbar-inverse .navbar-nav .open .dropdown-menu > .dropdown-header {\n    border-color: #080808;\n  }\n  .navbar-inverse .navbar-nav .open .dropdown-menu .divider {\n    background-color: #080808;\n  }\n  .navbar-inverse .navbar-nav .open .dropdown-menu > li > a {\n    color: #9d9d9d;\n  }\n  .navbar-inverse .navbar-nav .open .dropdown-menu > li > a:hover,\n  .navbar-inverse .navbar-nav .open .dropdown-menu > li > a:focus {\n    color: #fff;\n    background-color: transparent;\n  }\n  .navbar-inverse .navbar-nav .open .dropdown-menu > .active > a,\n  .navbar-inverse .navbar-nav .open .dropdown-menu > .active > a:hover,\n  .navbar-inverse .navbar-nav .open .dropdown-menu > .active > a:focus {\n    color: #fff;\n    background-color: #080808;\n  }\n  .navbar-inverse .navbar-nav .open .dropdown-menu > .disabled > a,\n  .navbar-inverse .navbar-nav .open .dropdown-menu > .disabled > a:hover,\n  .navbar-inverse .navbar-nav .open .dropdown-menu > .disabled > a:focus {\n    color: #444;\n    background-color: transparent;\n  }\n}\n.navbar-inverse .navbar-link {\n  color: #9d9d9d;\n}\n.navbar-inverse .navbar-link:hover {\n  color: #fff;\n}\n.navbar-inverse .btn-link {\n  color: #9d9d9d;\n}\n.navbar-inverse .btn-link:hover,\n.navbar-inverse .btn-link:focus {\n  color: #fff;\n}\n.navbar-inverse .btn-link[disabled]:hover,\nfieldset[disabled] .navbar-inverse .btn-link:hover,\n.navbar-inverse .btn-link[disabled]:focus,\nfieldset[disabled] .navbar-inverse .btn-link:focus {\n  color: #444;\n}\n.breadcrumb {\n  padding: 8px 15px;\n  margin-bottom: 20px;\n  list-style: none;\n  background-color: #f5f5f5;\n  border-radius: 4px;\n}\n.breadcrumb > li {\n  display: inline-block;\n}\n.breadcrumb > li + li:before {\n  padding: 0 5px;\n  color: #ccc;\n  content: \"/\\00a0\";\n}\n.breadcrumb > .active {\n  
color: #777;\n}\n.pagination {\n  display: inline-block;\n  padding-left: 0;\n  margin: 20px 0;\n  border-radius: 4px;\n}\n.pagination > li {\n  display: inline;\n}\n.pagination > li > a,\n.pagination > li > span {\n  position: relative;\n  float: left;\n  padding: 6px 12px;\n  margin-left: -1px;\n  line-height: 1.42857143;\n  color: #337ab7;\n  text-decoration: none;\n  background-color: #fff;\n  border: 1px solid #ddd;\n}\n.pagination > li:first-child > a,\n.pagination > li:first-child > span {\n  margin-left: 0;\n  border-top-left-radius: 4px;\n  border-bottom-left-radius: 4px;\n}\n.pagination > li:last-child > a,\n.pagination > li:last-child > span {\n  border-top-right-radius: 4px;\n  border-bottom-right-radius: 4px;\n}\n.pagination > li > a:hover,\n.pagination > li > span:hover,\n.pagination > li > a:focus,\n.pagination > li > span:focus {\n  z-index: 2;\n  color: #23527c;\n  background-color: #eee;\n  border-color: #ddd;\n}\n.pagination > .active > a,\n.pagination > .active > span,\n.pagination > .active > a:hover,\n.pagination > .active > span:hover,\n.pagination > .active > a:focus,\n.pagination > .active > span:focus {\n  z-index: 3;\n  color: #fff;\n  cursor: default;\n  background-color: #337ab7;\n  border-color: #337ab7;\n}\n.pagination > .disabled > span,\n.pagination > .disabled > span:hover,\n.pagination > .disabled > span:focus,\n.pagination > .disabled > a,\n.pagination > .disabled > a:hover,\n.pagination > .disabled > a:focus {\n  color: #777;\n  cursor: not-allowed;\n  background-color: #fff;\n  border-color: #ddd;\n}\n.pagination-lg > li > a,\n.pagination-lg > li > span {\n  padding: 10px 16px;\n  font-size: 18px;\n  line-height: 1.3333333;\n}\n.pagination-lg > li:first-child > a,\n.pagination-lg > li:first-child > span {\n  border-top-left-radius: 6px;\n  border-bottom-left-radius: 6px;\n}\n.pagination-lg > li:last-child > a,\n.pagination-lg > li:last-child > span {\n  border-top-right-radius: 6px;\n  border-bottom-right-radius: 
6px;\n}\n.pagination-sm > li > a,\n.pagination-sm > li > span {\n  padding: 5px 10px;\n  font-size: 12px;\n  line-height: 1.5;\n}\n.pagination-sm > li:first-child > a,\n.pagination-sm > li:first-child > span {\n  border-top-left-radius: 3px;\n  border-bottom-left-radius: 3px;\n}\n.pagination-sm > li:last-child > a,\n.pagination-sm > li:last-child > span {\n  border-top-right-radius: 3px;\n  border-bottom-right-radius: 3px;\n}\n.pager {\n  padding-left: 0;\n  margin: 20px 0;\n  text-align: center;\n  list-style: none;\n}\n.pager li {\n  display: inline;\n}\n.pager li > a,\n.pager li > span {\n  display: inline-block;\n  padding: 5px 14px;\n  background-color: #fff;\n  border: 1px solid #ddd;\n  border-radius: 15px;\n}\n.pager li > a:hover,\n.pager li > a:focus {\n  text-decoration: none;\n  background-color: #eee;\n}\n.pager .next > a,\n.pager .next > span {\n  float: right;\n}\n.pager .previous > a,\n.pager .previous > span {\n  float: left;\n}\n.pager .disabled > a,\n.pager .disabled > a:hover,\n.pager .disabled > a:focus,\n.pager .disabled > span {\n  color: #777;\n  cursor: not-allowed;\n  background-color: #fff;\n}\n.label {\n  display: inline;\n  padding: .2em .6em .3em;\n  font-size: 75%;\n  font-weight: bold;\n  line-height: 1;\n  color: #fff;\n  text-align: center;\n  white-space: nowrap;\n  vertical-align: baseline;\n  border-radius: .25em;\n}\na.label:hover,\na.label:focus {\n  color: #fff;\n  text-decoration: none;\n  cursor: pointer;\n}\n.label:empty {\n  display: none;\n}\n.btn .label {\n  position: relative;\n  top: -1px;\n}\n.label-default {\n  background-color: #777;\n}\n.label-default[href]:hover,\n.label-default[href]:focus {\n  background-color: #5e5e5e;\n}\n.label-primary {\n  background-color: #337ab7;\n}\n.label-primary[href]:hover,\n.label-primary[href]:focus {\n  background-color: #286090;\n}\n.label-success {\n  background-color: #5cb85c;\n}\n.label-success[href]:hover,\n.label-success[href]:focus {\n  background-color: 
#449d44;\n}\n.label-info {\n  background-color: #5bc0de;\n}\n.label-info[href]:hover,\n.label-info[href]:focus {\n  background-color: #31b0d5;\n}\n.label-warning {\n  background-color: #f0ad4e;\n}\n.label-warning[href]:hover,\n.label-warning[href]:focus {\n  background-color: #ec971f;\n}\n.label-danger {\n  background-color: #d9534f;\n}\n.label-danger[href]:hover,\n.label-danger[href]:focus {\n  background-color: #c9302c;\n}\n.badge {\n  display: inline-block;\n  min-width: 10px;\n  padding: 3px 7px;\n  font-size: 12px;\n  font-weight: bold;\n  line-height: 1;\n  color: #fff;\n  text-align: center;\n  white-space: nowrap;\n  vertical-align: middle;\n  background-color: #777;\n  border-radius: 10px;\n}\n.badge:empty {\n  display: none;\n}\n.btn .badge {\n  position: relative;\n  top: -1px;\n}\n.btn-xs .badge,\n.btn-group-xs > .btn .badge {\n  top: 0;\n  padding: 1px 5px;\n}\na.badge:hover,\na.badge:focus {\n  color: #fff;\n  text-decoration: none;\n  cursor: pointer;\n}\n.list-group-item.active > .badge,\n.nav-pills > .active > a > .badge {\n  color: #337ab7;\n  background-color: #fff;\n}\n.list-group-item > .badge {\n  float: right;\n}\n.list-group-item > .badge + .badge {\n  margin-right: 5px;\n}\n.nav-pills > li > a > .badge {\n  margin-left: 3px;\n}\n.jumbotron {\n  padding-top: 30px;\n  padding-bottom: 30px;\n  margin-bottom: 30px;\n  color: inherit;\n  background-color: #eee;\n}\n.jumbotron h1,\n.jumbotron .h1 {\n  color: inherit;\n}\n.jumbotron p {\n  margin-bottom: 15px;\n  font-size: 21px;\n  font-weight: 200;\n}\n.jumbotron > hr {\n  border-top-color: #d5d5d5;\n}\n.container .jumbotron,\n.container-fluid .jumbotron {\n  padding-right: 15px;\n  padding-left: 15px;\n  border-radius: 6px;\n}\n.jumbotron .container {\n  max-width: 100%;\n}\n@media screen and (min-width: 768px) {\n  .jumbotron {\n    padding-top: 48px;\n    padding-bottom: 48px;\n  }\n  .container .jumbotron,\n  .container-fluid .jumbotron {\n    padding-right: 60px;\n    padding-left: 60px;\n  
}\n  .jumbotron h1,\n  .jumbotron .h1 {\n    font-size: 63px;\n  }\n}\n.thumbnail {\n  display: block;\n  padding: 4px;\n  margin-bottom: 20px;\n  line-height: 1.42857143;\n  background-color: #fff;\n  border: 1px solid #ddd;\n  border-radius: 4px;\n  -webkit-transition: border .2s ease-in-out;\n       -o-transition: border .2s ease-in-out;\n          transition: border .2s ease-in-out;\n}\n.thumbnail > img,\n.thumbnail a > img {\n  margin-right: auto;\n  margin-left: auto;\n}\na.thumbnail:hover,\na.thumbnail:focus,\na.thumbnail.active {\n  border-color: #337ab7;\n}\n.thumbnail .caption {\n  padding: 9px;\n  color: #333;\n}\n.alert {\n  padding: 15px;\n  margin-bottom: 20px;\n  border: 1px solid transparent;\n  border-radius: 4px;\n}\n.alert h4 {\n  margin-top: 0;\n  color: inherit;\n}\n.alert .alert-link {\n  font-weight: bold;\n}\n.alert > p,\n.alert > ul {\n  margin-bottom: 0;\n}\n.alert > p + p {\n  margin-top: 5px;\n}\n.alert-dismissable,\n.alert-dismissible {\n  padding-right: 35px;\n}\n.alert-dismissable .close,\n.alert-dismissible .close {\n  position: relative;\n  top: -2px;\n  right: -21px;\n  color: inherit;\n}\n.alert-success {\n  color: #3c763d;\n  background-color: #dff0d8;\n  border-color: #d6e9c6;\n}\n.alert-success hr {\n  border-top-color: #c9e2b3;\n}\n.alert-success .alert-link {\n  color: #2b542c;\n}\n.alert-info {\n  color: #31708f;\n  background-color: #d9edf7;\n  border-color: #bce8f1;\n}\n.alert-info hr {\n  border-top-color: #a6e1ec;\n}\n.alert-info .alert-link {\n  color: #245269;\n}\n.alert-warning {\n  color: #8a6d3b;\n  background-color: #fcf8e3;\n  border-color: #faebcc;\n}\n.alert-warning hr {\n  border-top-color: #f7e1b5;\n}\n.alert-warning .alert-link {\n  color: #66512c;\n}\n.alert-danger {\n  color: #a94442;\n  background-color: #f2dede;\n  border-color: #ebccd1;\n}\n.alert-danger hr {\n  border-top-color: #e4b9c0;\n}\n.alert-danger .alert-link {\n  color: #843534;\n}\n@-webkit-keyframes progress-bar-stripes {\n  from {\n    
background-position: 40px 0;\n  }\n  to {\n    background-position: 0 0;\n  }\n}\n@-o-keyframes progress-bar-stripes {\n  from {\n    background-position: 40px 0;\n  }\n  to {\n    background-position: 0 0;\n  }\n}\n@keyframes progress-bar-stripes {\n  from {\n    background-position: 40px 0;\n  }\n  to {\n    background-position: 0 0;\n  }\n}\n.progress {\n  height: 20px;\n  margin-bottom: 20px;\n  overflow: hidden;\n  background-color: #f5f5f5;\n  border-radius: 4px;\n  -webkit-box-shadow: inset 0 1px 2px rgba(0, 0, 0, .1);\n          box-shadow: inset 0 1px 2px rgba(0, 0, 0, .1);\n}\n.progress-bar {\n  float: left;\n  width: 0;\n  height: 100%;\n  font-size: 12px;\n  line-height: 20px;\n  color: #fff;\n  text-align: center;\n  background-color: #337ab7;\n  -webkit-box-shadow: inset 0 -1px 0 rgba(0, 0, 0, .15);\n          box-shadow: inset 0 -1px 0 rgba(0, 0, 0, .15);\n  -webkit-transition: width .6s ease;\n       -o-transition: width .6s ease;\n          transition: width .6s ease;\n}\n.progress-striped .progress-bar,\n.progress-bar-striped {\n  background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);\n  background-image:      -o-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);\n  background-image:         linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);\n  -webkit-background-size: 40px 40px;\n          background-size: 40px 40px;\n}\n.progress.active .progress-bar,\n.progress-bar.active {\n  -webkit-animation: progress-bar-stripes 2s linear infinite;\n       -o-animation: progress-bar-stripes 2s linear infinite;\n          animation: progress-bar-stripes 2s linear 
infinite;\n}\n.progress-bar-success {\n  background-color: #5cb85c;\n}\n.progress-striped .progress-bar-success {\n  background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);\n  background-image:      -o-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);\n  background-image:         linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);\n}\n.progress-bar-info {\n  background-color: #5bc0de;\n}\n.progress-striped .progress-bar-info {\n  background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);\n  background-image:      -o-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);\n  background-image:         linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);\n}\n.progress-bar-warning {\n  background-color: #f0ad4e;\n}\n.progress-striped .progress-bar-warning {\n  background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);\n  background-image:      -o-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);\n  background-image:        
 linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);\n}\n.progress-bar-danger {\n  background-color: #d9534f;\n}\n.progress-striped .progress-bar-danger {\n  background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);\n  background-image:      -o-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);\n  background-image:         linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);\n}\n.media {\n  margin-top: 15px;\n}\n.media:first-child {\n  margin-top: 0;\n}\n.media,\n.media-body {\n  overflow: hidden;\n  zoom: 1;\n}\n.media-body {\n  width: 10000px;\n}\n.media-object {\n  display: block;\n}\n.media-object.img-thumbnail {\n  max-width: none;\n}\n.media-right,\n.media > .pull-right {\n  padding-left: 10px;\n}\n.media-left,\n.media > .pull-left {\n  padding-right: 10px;\n}\n.media-left,\n.media-right,\n.media-body {\n  display: table-cell;\n  vertical-align: top;\n}\n.media-middle {\n  vertical-align: middle;\n}\n.media-bottom {\n  vertical-align: bottom;\n}\n.media-heading {\n  margin-top: 0;\n  margin-bottom: 5px;\n}\n.media-list {\n  padding-left: 0;\n  list-style: none;\n}\n.list-group {\n  padding-left: 0;\n  margin-bottom: 20px;\n}\n.list-group-item {\n  position: relative;\n  display: block;\n  padding: 10px 15px;\n  margin-bottom: -1px;\n  background-color: #fff;\n  border: 1px solid #ddd;\n}\n.list-group-item:first-child {\n  border-top-left-radius: 4px;\n  border-top-right-radius: 4px;\n}\n.list-group-item:last-child {\n  margin-bottom: 0;\n  
border-bottom-right-radius: 4px;\n  border-bottom-left-radius: 4px;\n}\na.list-group-item,\nbutton.list-group-item {\n  color: #555;\n}\na.list-group-item .list-group-item-heading,\nbutton.list-group-item .list-group-item-heading {\n  color: #333;\n}\na.list-group-item:hover,\nbutton.list-group-item:hover,\na.list-group-item:focus,\nbutton.list-group-item:focus {\n  color: #555;\n  text-decoration: none;\n  background-color: #f5f5f5;\n}\nbutton.list-group-item {\n  width: 100%;\n  text-align: left;\n}\n.list-group-item.disabled,\n.list-group-item.disabled:hover,\n.list-group-item.disabled:focus {\n  color: #777;\n  cursor: not-allowed;\n  background-color: #eee;\n}\n.list-group-item.disabled .list-group-item-heading,\n.list-group-item.disabled:hover .list-group-item-heading,\n.list-group-item.disabled:focus .list-group-item-heading {\n  color: inherit;\n}\n.list-group-item.disabled .list-group-item-text,\n.list-group-item.disabled:hover .list-group-item-text,\n.list-group-item.disabled:focus .list-group-item-text {\n  color: #777;\n}\n.list-group-item.active,\n.list-group-item.active:hover,\n.list-group-item.active:focus {\n  z-index: 2;\n  color: #fff;\n  background-color: #337ab7;\n  border-color: #337ab7;\n}\n.list-group-item.active .list-group-item-heading,\n.list-group-item.active:hover .list-group-item-heading,\n.list-group-item.active:focus .list-group-item-heading,\n.list-group-item.active .list-group-item-heading > small,\n.list-group-item.active:hover .list-group-item-heading > small,\n.list-group-item.active:focus .list-group-item-heading > small,\n.list-group-item.active .list-group-item-heading > .small,\n.list-group-item.active:hover .list-group-item-heading > .small,\n.list-group-item.active:focus .list-group-item-heading > .small {\n  color: inherit;\n}\n.list-group-item.active .list-group-item-text,\n.list-group-item.active:hover .list-group-item-text,\n.list-group-item.active:focus .list-group-item-text {\n  color: 
#c7ddef;\n}\n.list-group-item-success {\n  color: #3c763d;\n  background-color: #dff0d8;\n}\na.list-group-item-success,\nbutton.list-group-item-success {\n  color: #3c763d;\n}\na.list-group-item-success .list-group-item-heading,\nbutton.list-group-item-success .list-group-item-heading {\n  color: inherit;\n}\na.list-group-item-success:hover,\nbutton.list-group-item-success:hover,\na.list-group-item-success:focus,\nbutton.list-group-item-success:focus {\n  color: #3c763d;\n  background-color: #d0e9c6;\n}\na.list-group-item-success.active,\nbutton.list-group-item-success.active,\na.list-group-item-success.active:hover,\nbutton.list-group-item-success.active:hover,\na.list-group-item-success.active:focus,\nbutton.list-group-item-success.active:focus {\n  color: #fff;\n  background-color: #3c763d;\n  border-color: #3c763d;\n}\n.list-group-item-info {\n  color: #31708f;\n  background-color: #d9edf7;\n}\na.list-group-item-info,\nbutton.list-group-item-info {\n  color: #31708f;\n}\na.list-group-item-info .list-group-item-heading,\nbutton.list-group-item-info .list-group-item-heading {\n  color: inherit;\n}\na.list-group-item-info:hover,\nbutton.list-group-item-info:hover,\na.list-group-item-info:focus,\nbutton.list-group-item-info:focus {\n  color: #31708f;\n  background-color: #c4e3f3;\n}\na.list-group-item-info.active,\nbutton.list-group-item-info.active,\na.list-group-item-info.active:hover,\nbutton.list-group-item-info.active:hover,\na.list-group-item-info.active:focus,\nbutton.list-group-item-info.active:focus {\n  color: #fff;\n  background-color: #31708f;\n  border-color: #31708f;\n}\n.list-group-item-warning {\n  color: #8a6d3b;\n  background-color: #fcf8e3;\n}\na.list-group-item-warning,\nbutton.list-group-item-warning {\n  color: #8a6d3b;\n}\na.list-group-item-warning .list-group-item-heading,\nbutton.list-group-item-warning .list-group-item-heading {\n  color: 
inherit;\n}\na.list-group-item-warning:hover,\nbutton.list-group-item-warning:hover,\na.list-group-item-warning:focus,\nbutton.list-group-item-warning:focus {\n  color: #8a6d3b;\n  background-color: #faf2cc;\n}\na.list-group-item-warning.active,\nbutton.list-group-item-warning.active,\na.list-group-item-warning.active:hover,\nbutton.list-group-item-warning.active:hover,\na.list-group-item-warning.active:focus,\nbutton.list-group-item-warning.active:focus {\n  color: #fff;\n  background-color: #8a6d3b;\n  border-color: #8a6d3b;\n}\n.list-group-item-danger {\n  color: #a94442;\n  background-color: #f2dede;\n}\na.list-group-item-danger,\nbutton.list-group-item-danger {\n  color: #a94442;\n}\na.list-group-item-danger .list-group-item-heading,\nbutton.list-group-item-danger .list-group-item-heading {\n  color: inherit;\n}\na.list-group-item-danger:hover,\nbutton.list-group-item-danger:hover,\na.list-group-item-danger:focus,\nbutton.list-group-item-danger:focus {\n  color: #a94442;\n  background-color: #ebcccc;\n}\na.list-group-item-danger.active,\nbutton.list-group-item-danger.active,\na.list-group-item-danger.active:hover,\nbutton.list-group-item-danger.active:hover,\na.list-group-item-danger.active:focus,\nbutton.list-group-item-danger.active:focus {\n  color: #fff;\n  background-color: #a94442;\n  border-color: #a94442;\n}\n.list-group-item-heading {\n  margin-top: 0;\n  margin-bottom: 5px;\n}\n.list-group-item-text {\n  margin-bottom: 0;\n  line-height: 1.3;\n}\n.panel {\n  margin-bottom: 20px;\n  background-color: #fff;\n  border: 1px solid transparent;\n  border-radius: 4px;\n  -webkit-box-shadow: 0 1px 1px rgba(0, 0, 0, .05);\n          box-shadow: 0 1px 1px rgba(0, 0, 0, .05);\n}\n.panel-body {\n  padding: 15px;\n}\n.panel-heading {\n  padding: 10px 15px;\n  border-bottom: 1px solid transparent;\n  border-top-left-radius: 3px;\n  border-top-right-radius: 3px;\n}\n.panel-heading > .dropdown .dropdown-toggle {\n  color: inherit;\n}\n.panel-title {\n  margin-top: 
0;\n  margin-bottom: 0;\n  font-size: 16px;\n  color: inherit;\n}\n.panel-title > a,\n.panel-title > small,\n.panel-title > .small,\n.panel-title > small > a,\n.panel-title > .small > a {\n  color: inherit;\n}\n.panel-footer {\n  padding: 10px 15px;\n  background-color: #f5f5f5;\n  border-top: 1px solid #ddd;\n  border-bottom-right-radius: 3px;\n  border-bottom-left-radius: 3px;\n}\n.panel > .list-group,\n.panel > .panel-collapse > .list-group {\n  margin-bottom: 0;\n}\n.panel > .list-group .list-group-item,\n.panel > .panel-collapse > .list-group .list-group-item {\n  border-width: 1px 0;\n  border-radius: 0;\n}\n.panel > .list-group:first-child .list-group-item:first-child,\n.panel > .panel-collapse > .list-group:first-child .list-group-item:first-child {\n  border-top: 0;\n  border-top-left-radius: 3px;\n  border-top-right-radius: 3px;\n}\n.panel > .list-group:last-child .list-group-item:last-child,\n.panel > .panel-collapse > .list-group:last-child .list-group-item:last-child {\n  border-bottom: 0;\n  border-bottom-right-radius: 3px;\n  border-bottom-left-radius: 3px;\n}\n.panel > .panel-heading + .panel-collapse > .list-group .list-group-item:first-child {\n  border-top-left-radius: 0;\n  border-top-right-radius: 0;\n}\n.panel-heading + .list-group .list-group-item:first-child {\n  border-top-width: 0;\n}\n.list-group + .panel-footer {\n  border-top-width: 0;\n}\n.panel > .table,\n.panel > .table-responsive > .table,\n.panel > .panel-collapse > .table {\n  margin-bottom: 0;\n}\n.panel > .table caption,\n.panel > .table-responsive > .table caption,\n.panel > .panel-collapse > .table caption {\n  padding-right: 15px;\n  padding-left: 15px;\n}\n.panel > .table:first-child,\n.panel > .table-responsive:first-child > .table:first-child {\n  border-top-left-radius: 3px;\n  border-top-right-radius: 3px;\n}\n.panel > .table:first-child > thead:first-child > tr:first-child,\n.panel > .table-responsive:first-child > .table:first-child > thead:first-child > 
tr:first-child,\n.panel > .table:first-child > tbody:first-child > tr:first-child,\n.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child {\n  border-top-left-radius: 3px;\n  border-top-right-radius: 3px;\n}\n.panel > .table:first-child > thead:first-child > tr:first-child td:first-child,\n.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child td:first-child,\n.panel > .table:first-child > tbody:first-child > tr:first-child td:first-child,\n.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child td:first-child,\n.panel > .table:first-child > thead:first-child > tr:first-child th:first-child,\n.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child th:first-child,\n.panel > .table:first-child > tbody:first-child > tr:first-child th:first-child,\n.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child th:first-child {\n  border-top-left-radius: 3px;\n}\n.panel > .table:first-child > thead:first-child > tr:first-child td:last-child,\n.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child td:last-child,\n.panel > .table:first-child > tbody:first-child > tr:first-child td:last-child,\n.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child td:last-child,\n.panel > .table:first-child > thead:first-child > tr:first-child th:last-child,\n.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child th:last-child,\n.panel > .table:first-child > tbody:first-child > tr:first-child th:last-child,\n.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child th:last-child {\n  border-top-right-radius: 3px;\n}\n.panel > .table:last-child,\n.panel > .table-responsive:last-child > .table:last-child {\n  
border-bottom-right-radius: 3px;\n  border-bottom-left-radius: 3px;\n}\n.panel > .table:last-child > tbody:last-child > tr:last-child,\n.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child,\n.panel > .table:last-child > tfoot:last-child > tr:last-child,\n.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child {\n  border-bottom-right-radius: 3px;\n  border-bottom-left-radius: 3px;\n}\n.panel > .table:last-child > tbody:last-child > tr:last-child td:first-child,\n.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child td:first-child,\n.panel > .table:last-child > tfoot:last-child > tr:last-child td:first-child,\n.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child td:first-child,\n.panel > .table:last-child > tbody:last-child > tr:last-child th:first-child,\n.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child th:first-child,\n.panel > .table:last-child > tfoot:last-child > tr:last-child th:first-child,\n.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child th:first-child {\n  border-bottom-left-radius: 3px;\n}\n.panel > .table:last-child > tbody:last-child > tr:last-child td:last-child,\n.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child td:last-child,\n.panel > .table:last-child > tfoot:last-child > tr:last-child td:last-child,\n.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child td:last-child,\n.panel > .table:last-child > tbody:last-child > tr:last-child th:last-child,\n.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child th:last-child,\n.panel > .table:last-child > tfoot:last-child > tr:last-child th:last-child,\n.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child 
th:last-child {\n  border-bottom-right-radius: 3px;\n}\n.panel > .panel-body + .table,\n.panel > .panel-body + .table-responsive,\n.panel > .table + .panel-body,\n.panel > .table-responsive + .panel-body {\n  border-top: 1px solid #ddd;\n}\n.panel > .table > tbody:first-child > tr:first-child th,\n.panel > .table > tbody:first-child > tr:first-child td {\n  border-top: 0;\n}\n.panel > .table-bordered,\n.panel > .table-responsive > .table-bordered {\n  border: 0;\n}\n.panel > .table-bordered > thead > tr > th:first-child,\n.panel > .table-responsive > .table-bordered > thead > tr > th:first-child,\n.panel > .table-bordered > tbody > tr > th:first-child,\n.panel > .table-responsive > .table-bordered > tbody > tr > th:first-child,\n.panel > .table-bordered > tfoot > tr > th:first-child,\n.panel > .table-responsive > .table-bordered > tfoot > tr > th:first-child,\n.panel > .table-bordered > thead > tr > td:first-child,\n.panel > .table-responsive > .table-bordered > thead > tr > td:first-child,\n.panel > .table-bordered > tbody > tr > td:first-child,\n.panel > .table-responsive > .table-bordered > tbody > tr > td:first-child,\n.panel > .table-bordered > tfoot > tr > td:first-child,\n.panel > .table-responsive > .table-bordered > tfoot > tr > td:first-child {\n  border-left: 0;\n}\n.panel > .table-bordered > thead > tr > th:last-child,\n.panel > .table-responsive > .table-bordered > thead > tr > th:last-child,\n.panel > .table-bordered > tbody > tr > th:last-child,\n.panel > .table-responsive > .table-bordered > tbody > tr > th:last-child,\n.panel > .table-bordered > tfoot > tr > th:last-child,\n.panel > .table-responsive > .table-bordered > tfoot > tr > th:last-child,\n.panel > .table-bordered > thead > tr > td:last-child,\n.panel > .table-responsive > .table-bordered > thead > tr > td:last-child,\n.panel > .table-bordered > tbody > tr > td:last-child,\n.panel > .table-responsive > .table-bordered > tbody > tr > td:last-child,\n.panel > .table-bordered > tfoot > tr > 
td:last-child,\n.panel > .table-responsive > .table-bordered > tfoot > tr > td:last-child {\n  border-right: 0;\n}\n.panel > .table-bordered > thead > tr:first-child > td,\n.panel > .table-responsive > .table-bordered > thead > tr:first-child > td,\n.panel > .table-bordered > tbody > tr:first-child > td,\n.panel > .table-responsive > .table-bordered > tbody > tr:first-child > td,\n.panel > .table-bordered > thead > tr:first-child > th,\n.panel > .table-responsive > .table-bordered > thead > tr:first-child > th,\n.panel > .table-bordered > tbody > tr:first-child > th,\n.panel > .table-responsive > .table-bordered > tbody > tr:first-child > th {\n  border-bottom: 0;\n}\n.panel > .table-bordered > tbody > tr:last-child > td,\n.panel > .table-responsive > .table-bordered > tbody > tr:last-child > td,\n.panel > .table-bordered > tfoot > tr:last-child > td,\n.panel > .table-responsive > .table-bordered > tfoot > tr:last-child > td,\n.panel > .table-bordered > tbody > tr:last-child > th,\n.panel > .table-responsive > .table-bordered > tbody > tr:last-child > th,\n.panel > .table-bordered > tfoot > tr:last-child > th,\n.panel > .table-responsive > .table-bordered > tfoot > tr:last-child > th {\n  border-bottom: 0;\n}\n.panel > .table-responsive {\n  margin-bottom: 0;\n  border: 0;\n}\n.panel-group {\n  margin-bottom: 20px;\n}\n.panel-group .panel {\n  margin-bottom: 0;\n  border-radius: 4px;\n}\n.panel-group .panel + .panel {\n  margin-top: 5px;\n}\n.panel-group .panel-heading {\n  border-bottom: 0;\n}\n.panel-group .panel-heading + .panel-collapse > .panel-body,\n.panel-group .panel-heading + .panel-collapse > .list-group {\n  border-top: 1px solid #ddd;\n}\n.panel-group .panel-footer {\n  border-top: 0;\n}\n.panel-group .panel-footer + .panel-collapse .panel-body {\n  border-bottom: 1px solid #ddd;\n}\n.panel-default {\n  border-color: #ddd;\n}\n.panel-default > .panel-heading {\n  color: #333;\n  background-color: #f5f5f5;\n  border-color: #ddd;\n}\n.panel-default > 
.panel-heading + .panel-collapse > .panel-body {\n  border-top-color: #ddd;\n}\n.panel-default > .panel-heading .badge {\n  color: #f5f5f5;\n  background-color: #333;\n}\n.panel-default > .panel-footer + .panel-collapse > .panel-body {\n  border-bottom-color: #ddd;\n}\n.panel-primary {\n  border-color: #337ab7;\n}\n.panel-primary > .panel-heading {\n  color: #fff;\n  background-color: #337ab7;\n  border-color: #337ab7;\n}\n.panel-primary > .panel-heading + .panel-collapse > .panel-body {\n  border-top-color: #337ab7;\n}\n.panel-primary > .panel-heading .badge {\n  color: #337ab7;\n  background-color: #fff;\n}\n.panel-primary > .panel-footer + .panel-collapse > .panel-body {\n  border-bottom-color: #337ab7;\n}\n.panel-success {\n  border-color: #d6e9c6;\n}\n.panel-success > .panel-heading {\n  color: #3c763d;\n  background-color: #dff0d8;\n  border-color: #d6e9c6;\n}\n.panel-success > .panel-heading + .panel-collapse > .panel-body {\n  border-top-color: #d6e9c6;\n}\n.panel-success > .panel-heading .badge {\n  color: #dff0d8;\n  background-color: #3c763d;\n}\n.panel-success > .panel-footer + .panel-collapse > .panel-body {\n  border-bottom-color: #d6e9c6;\n}\n.panel-info {\n  border-color: #bce8f1;\n}\n.panel-info > .panel-heading {\n  color: #31708f;\n  background-color: #d9edf7;\n  border-color: #bce8f1;\n}\n.panel-info > .panel-heading + .panel-collapse > .panel-body {\n  border-top-color: #bce8f1;\n}\n.panel-info > .panel-heading .badge {\n  color: #d9edf7;\n  background-color: #31708f;\n}\n.panel-info > .panel-footer + .panel-collapse > .panel-body {\n  border-bottom-color: #bce8f1;\n}\n.panel-warning {\n  border-color: #faebcc;\n}\n.panel-warning > .panel-heading {\n  color: #8a6d3b;\n  background-color: #fcf8e3;\n  border-color: #faebcc;\n}\n.panel-warning > .panel-heading + .panel-collapse > .panel-body {\n  border-top-color: #faebcc;\n}\n.panel-warning > .panel-heading .badge {\n  color: #fcf8e3;\n  background-color: #8a6d3b;\n}\n.panel-warning > 
.panel-footer + .panel-collapse > .panel-body {\n  border-bottom-color: #faebcc;\n}\n.panel-danger {\n  border-color: #ebccd1;\n}\n.panel-danger > .panel-heading {\n  color: #a94442;\n  background-color: #f2dede;\n  border-color: #ebccd1;\n}\n.panel-danger > .panel-heading + .panel-collapse > .panel-body {\n  border-top-color: #ebccd1;\n}\n.panel-danger > .panel-heading .badge {\n  color: #f2dede;\n  background-color: #a94442;\n}\n.panel-danger > .panel-footer + .panel-collapse > .panel-body {\n  border-bottom-color: #ebccd1;\n}\n.embed-responsive {\n  position: relative;\n  display: block;\n  height: 0;\n  padding: 0;\n  overflow: hidden;\n}\n.embed-responsive .embed-responsive-item,\n.embed-responsive iframe,\n.embed-responsive embed,\n.embed-responsive object,\n.embed-responsive video {\n  position: absolute;\n  top: 0;\n  bottom: 0;\n  left: 0;\n  width: 100%;\n  height: 100%;\n  border: 0;\n}\n.embed-responsive-16by9 {\n  padding-bottom: 56.25%;\n}\n.embed-responsive-4by3 {\n  padding-bottom: 75%;\n}\n.well {\n  min-height: 20px;\n  padding: 19px;\n  margin-bottom: 20px;\n  background-color: #f5f5f5;\n  border: 1px solid #e3e3e3;\n  border-radius: 4px;\n  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .05);\n          box-shadow: inset 0 1px 1px rgba(0, 0, 0, .05);\n}\n.well blockquote {\n  border-color: #ddd;\n  border-color: rgba(0, 0, 0, .15);\n}\n.well-lg {\n  padding: 24px;\n  border-radius: 6px;\n}\n.well-sm {\n  padding: 9px;\n  border-radius: 3px;\n}\n.close {\n  float: right;\n  font-size: 21px;\n  font-weight: bold;\n  line-height: 1;\n  color: #000;\n  text-shadow: 0 1px 0 #fff;\n  filter: alpha(opacity=20);\n  opacity: .2;\n}\n.close:hover,\n.close:focus {\n  color: #000;\n  text-decoration: none;\n  cursor: pointer;\n  filter: alpha(opacity=50);\n  opacity: .5;\n}\nbutton.close {\n  -webkit-appearance: none;\n  padding: 0;\n  cursor: pointer;\n  background: transparent;\n  border: 0;\n}\n.modal-open {\n  overflow: hidden;\n}\n.modal {\n  
position: fixed;\n  top: 0;\n  right: 0;\n  bottom: 0;\n  left: 0;\n  z-index: 1050;\n  display: none;\n  overflow: hidden;\n  -webkit-overflow-scrolling: touch;\n  outline: 0;\n}\n.modal.fade .modal-dialog {\n  -webkit-transition: -webkit-transform .3s ease-out;\n       -o-transition:      -o-transform .3s ease-out;\n          transition:         transform .3s ease-out;\n  -webkit-transform: translate(0, -25%);\n      -ms-transform: translate(0, -25%);\n       -o-transform: translate(0, -25%);\n          transform: translate(0, -25%);\n}\n.modal.in .modal-dialog {\n  -webkit-transform: translate(0, 0);\n      -ms-transform: translate(0, 0);\n       -o-transform: translate(0, 0);\n          transform: translate(0, 0);\n}\n.modal-open .modal {\n  overflow-x: hidden;\n  overflow-y: auto;\n}\n.modal-dialog {\n  position: relative;\n  width: auto;\n  margin: 10px;\n}\n.modal-content {\n  position: relative;\n  background-color: #fff;\n  -webkit-background-clip: padding-box;\n          background-clip: padding-box;\n  border: 1px solid #999;\n  border: 1px solid rgba(0, 0, 0, .2);\n  border-radius: 6px;\n  outline: 0;\n  -webkit-box-shadow: 0 3px 9px rgba(0, 0, 0, .5);\n          box-shadow: 0 3px 9px rgba(0, 0, 0, .5);\n}\n.modal-backdrop {\n  position: fixed;\n  top: 0;\n  right: 0;\n  bottom: 0;\n  left: 0;\n  z-index: 1040;\n  background-color: #000;\n}\n.modal-backdrop.fade {\n  filter: alpha(opacity=0);\n  opacity: 0;\n}\n.modal-backdrop.in {\n  filter: alpha(opacity=50);\n  opacity: .5;\n}\n.modal-header {\n  padding: 15px;\n  border-bottom: 1px solid #e5e5e5;\n}\n.modal-header .close {\n  margin-top: -2px;\n}\n.modal-title {\n  margin: 0;\n  line-height: 1.42857143;\n}\n.modal-body {\n  position: relative;\n  padding: 15px;\n}\n.modal-footer {\n  padding: 15px;\n  text-align: right;\n  border-top: 1px solid #e5e5e5;\n}\n.modal-footer .btn + .btn {\n  margin-bottom: 0;\n  margin-left: 5px;\n}\n.modal-footer .btn-group .btn + .btn {\n  margin-left: 
-1px;\n}\n.modal-footer .btn-block + .btn-block {\n  margin-left: 0;\n}\n.modal-scrollbar-measure {\n  position: absolute;\n  top: -9999px;\n  width: 50px;\n  height: 50px;\n  overflow: scroll;\n}\n@media (min-width: 768px) {\n  .modal-dialog {\n    width: 600px;\n    margin: 30px auto;\n  }\n  .modal-content {\n    -webkit-box-shadow: 0 5px 15px rgba(0, 0, 0, .5);\n            box-shadow: 0 5px 15px rgba(0, 0, 0, .5);\n  }\n  .modal-sm {\n    width: 300px;\n  }\n}\n@media (min-width: 992px) {\n  .modal-lg {\n    width: 900px;\n  }\n}\n.tooltip {\n  position: absolute;\n  z-index: 1070;\n  display: block;\n  font-family: \"Helvetica Neue\", Helvetica, Arial, sans-serif;\n  font-size: 12px;\n  font-style: normal;\n  font-weight: normal;\n  line-height: 1.42857143;\n  text-align: left;\n  text-align: start;\n  text-decoration: none;\n  text-shadow: none;\n  text-transform: none;\n  letter-spacing: normal;\n  word-break: normal;\n  word-spacing: normal;\n  word-wrap: normal;\n  white-space: normal;\n  filter: alpha(opacity=0);\n  opacity: 0;\n\n  line-break: auto;\n}\n.tooltip.in {\n  filter: alpha(opacity=90);\n  opacity: .9;\n}\n.tooltip.top {\n  padding: 5px 0;\n  margin-top: -3px;\n}\n.tooltip.right {\n  padding: 0 5px;\n  margin-left: 3px;\n}\n.tooltip.bottom {\n  padding: 5px 0;\n  margin-top: 3px;\n}\n.tooltip.left {\n  padding: 0 5px;\n  margin-left: -3px;\n}\n.tooltip-inner {\n  max-width: 200px;\n  padding: 3px 8px;\n  color: #fff;\n  text-align: center;\n  background-color: #000;\n  border-radius: 4px;\n}\n.tooltip-arrow {\n  position: absolute;\n  width: 0;\n  height: 0;\n  border-color: transparent;\n  border-style: solid;\n}\n.tooltip.top .tooltip-arrow {\n  bottom: 0;\n  left: 50%;\n  margin-left: -5px;\n  border-width: 5px 5px 0;\n  border-top-color: #000;\n}\n.tooltip.top-left .tooltip-arrow {\n  right: 5px;\n  bottom: 0;\n  margin-bottom: -5px;\n  border-width: 5px 5px 0;\n  border-top-color: #000;\n}\n.tooltip.top-right .tooltip-arrow {\n  bottom: 
0;\n  left: 5px;\n  margin-bottom: -5px;\n  border-width: 5px 5px 0;\n  border-top-color: #000;\n}\n.tooltip.right .tooltip-arrow {\n  top: 50%;\n  left: 0;\n  margin-top: -5px;\n  border-width: 5px 5px 5px 0;\n  border-right-color: #000;\n}\n.tooltip.left .tooltip-arrow {\n  top: 50%;\n  right: 0;\n  margin-top: -5px;\n  border-width: 5px 0 5px 5px;\n  border-left-color: #000;\n}\n.tooltip.bottom .tooltip-arrow {\n  top: 0;\n  left: 50%;\n  margin-left: -5px;\n  border-width: 0 5px 5px;\n  border-bottom-color: #000;\n}\n.tooltip.bottom-left .tooltip-arrow {\n  top: 0;\n  right: 5px;\n  margin-top: -5px;\n  border-width: 0 5px 5px;\n  border-bottom-color: #000;\n}\n.tooltip.bottom-right .tooltip-arrow {\n  top: 0;\n  left: 5px;\n  margin-top: -5px;\n  border-width: 0 5px 5px;\n  border-bottom-color: #000;\n}\n.popover {\n  position: absolute;\n  top: 0;\n  left: 0;\n  z-index: 1060;\n  display: none;\n  max-width: 276px;\n  padding: 1px;\n  font-family: \"Helvetica Neue\", Helvetica, Arial, sans-serif;\n  font-size: 14px;\n  font-style: normal;\n  font-weight: normal;\n  line-height: 1.42857143;\n  text-align: left;\n  text-align: start;\n  text-decoration: none;\n  text-shadow: none;\n  text-transform: none;\n  letter-spacing: normal;\n  word-break: normal;\n  word-spacing: normal;\n  word-wrap: normal;\n  white-space: normal;\n  background-color: #fff;\n  -webkit-background-clip: padding-box;\n          background-clip: padding-box;\n  border: 1px solid #ccc;\n  border: 1px solid rgba(0, 0, 0, .2);\n  border-radius: 6px;\n  -webkit-box-shadow: 0 5px 10px rgba(0, 0, 0, .2);\n          box-shadow: 0 5px 10px rgba(0, 0, 0, .2);\n\n  line-break: auto;\n}\n.popover.top {\n  margin-top: -10px;\n}\n.popover.right {\n  margin-left: 10px;\n}\n.popover.bottom {\n  margin-top: 10px;\n}\n.popover.left {\n  margin-left: -10px;\n}\n.popover-title {\n  padding: 8px 14px;\n  margin: 0;\n  font-size: 14px;\n  background-color: #f7f7f7;\n  border-bottom: 1px solid #ebebeb;\n  
border-radius: 5px 5px 0 0;\n}\n.popover-content {\n  padding: 9px 14px;\n}\n.popover > .arrow,\n.popover > .arrow:after {\n  position: absolute;\n  display: block;\n  width: 0;\n  height: 0;\n  border-color: transparent;\n  border-style: solid;\n}\n.popover > .arrow {\n  border-width: 11px;\n}\n.popover > .arrow:after {\n  content: \"\";\n  border-width: 10px;\n}\n.popover.top > .arrow {\n  bottom: -11px;\n  left: 50%;\n  margin-left: -11px;\n  border-top-color: #999;\n  border-top-color: rgba(0, 0, 0, .25);\n  border-bottom-width: 0;\n}\n.popover.top > .arrow:after {\n  bottom: 1px;\n  margin-left: -10px;\n  content: \" \";\n  border-top-color: #fff;\n  border-bottom-width: 0;\n}\n.popover.right > .arrow {\n  top: 50%;\n  left: -11px;\n  margin-top: -11px;\n  border-right-color: #999;\n  border-right-color: rgba(0, 0, 0, .25);\n  border-left-width: 0;\n}\n.popover.right > .arrow:after {\n  bottom: -10px;\n  left: 1px;\n  content: \" \";\n  border-right-color: #fff;\n  border-left-width: 0;\n}\n.popover.bottom > .arrow {\n  top: -11px;\n  left: 50%;\n  margin-left: -11px;\n  border-top-width: 0;\n  border-bottom-color: #999;\n  border-bottom-color: rgba(0, 0, 0, .25);\n}\n.popover.bottom > .arrow:after {\n  top: 1px;\n  margin-left: -10px;\n  content: \" \";\n  border-top-width: 0;\n  border-bottom-color: #fff;\n}\n.popover.left > .arrow {\n  top: 50%;\n  right: -11px;\n  margin-top: -11px;\n  border-right-width: 0;\n  border-left-color: #999;\n  border-left-color: rgba(0, 0, 0, .25);\n}\n.popover.left > .arrow:after {\n  right: 1px;\n  bottom: -10px;\n  content: \" \";\n  border-right-width: 0;\n  border-left-color: #fff;\n}\n.carousel {\n  position: relative;\n}\n.carousel-inner {\n  position: relative;\n  width: 100%;\n  overflow: hidden;\n}\n.carousel-inner > .item {\n  position: relative;\n  display: none;\n  -webkit-transition: .6s ease-in-out left;\n       -o-transition: .6s ease-in-out left;\n          transition: .6s ease-in-out left;\n}\n.carousel-inner 
> .item > img,\n.carousel-inner > .item > a > img {\n  line-height: 1;\n}\n@media all and (transform-3d), (-webkit-transform-3d) {\n  .carousel-inner > .item {\n    -webkit-transition: -webkit-transform .6s ease-in-out;\n         -o-transition:      -o-transform .6s ease-in-out;\n            transition:         transform .6s ease-in-out;\n\n    -webkit-backface-visibility: hidden;\n            backface-visibility: hidden;\n    -webkit-perspective: 1000px;\n            perspective: 1000px;\n  }\n  .carousel-inner > .item.next,\n  .carousel-inner > .item.active.right {\n    left: 0;\n    -webkit-transform: translate3d(100%, 0, 0);\n            transform: translate3d(100%, 0, 0);\n  }\n  .carousel-inner > .item.prev,\n  .carousel-inner > .item.active.left {\n    left: 0;\n    -webkit-transform: translate3d(-100%, 0, 0);\n            transform: translate3d(-100%, 0, 0);\n  }\n  .carousel-inner > .item.next.left,\n  .carousel-inner > .item.prev.right,\n  .carousel-inner > .item.active {\n    left: 0;\n    -webkit-transform: translate3d(0, 0, 0);\n            transform: translate3d(0, 0, 0);\n  }\n}\n.carousel-inner > .active,\n.carousel-inner > .next,\n.carousel-inner > .prev {\n  display: block;\n}\n.carousel-inner > .active {\n  left: 0;\n}\n.carousel-inner > .next,\n.carousel-inner > .prev {\n  position: absolute;\n  top: 0;\n  width: 100%;\n}\n.carousel-inner > .next {\n  left: 100%;\n}\n.carousel-inner > .prev {\n  left: -100%;\n}\n.carousel-inner > .next.left,\n.carousel-inner > .prev.right {\n  left: 0;\n}\n.carousel-inner > .active.left {\n  left: -100%;\n}\n.carousel-inner > .active.right {\n  left: 100%;\n}\n.carousel-control {\n  position: absolute;\n  top: 0;\n  bottom: 0;\n  left: 0;\n  width: 15%;\n  font-size: 20px;\n  color: #fff;\n  text-align: center;\n  text-shadow: 0 1px 2px rgba(0, 0, 0, .6);\n  background-color: rgba(0, 0, 0, 0);\n  filter: alpha(opacity=50);\n  opacity: .5;\n}\n.carousel-control.left {\n  background-image: 
-webkit-linear-gradient(left, rgba(0, 0, 0, .5) 0%, rgba(0, 0, 0, .0001) 100%);\n  background-image:      -o-linear-gradient(left, rgba(0, 0, 0, .5) 0%, rgba(0, 0, 0, .0001) 100%);\n  background-image: -webkit-gradient(linear, left top, right top, from(rgba(0, 0, 0, .5)), to(rgba(0, 0, 0, .0001)));\n  background-image:         linear-gradient(to right, rgba(0, 0, 0, .5) 0%, rgba(0, 0, 0, .0001) 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#80000000', endColorstr='#00000000', GradientType=1);\n  background-repeat: repeat-x;\n}\n.carousel-control.right {\n  right: 0;\n  left: auto;\n  background-image: -webkit-linear-gradient(left, rgba(0, 0, 0, .0001) 0%, rgba(0, 0, 0, .5) 100%);\n  background-image:      -o-linear-gradient(left, rgba(0, 0, 0, .0001) 0%, rgba(0, 0, 0, .5) 100%);\n  background-image: -webkit-gradient(linear, left top, right top, from(rgba(0, 0, 0, .0001)), to(rgba(0, 0, 0, .5)));\n  background-image:         linear-gradient(to right, rgba(0, 0, 0, .0001) 0%, rgba(0, 0, 0, .5) 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#00000000', endColorstr='#80000000', GradientType=1);\n  background-repeat: repeat-x;\n}\n.carousel-control:hover,\n.carousel-control:focus {\n  color: #fff;\n  text-decoration: none;\n  filter: alpha(opacity=90);\n  outline: 0;\n  opacity: .9;\n}\n.carousel-control .icon-prev,\n.carousel-control .icon-next,\n.carousel-control .glyphicon-chevron-left,\n.carousel-control .glyphicon-chevron-right {\n  position: absolute;\n  top: 50%;\n  z-index: 5;\n  display: inline-block;\n  margin-top: -10px;\n}\n.carousel-control .icon-prev,\n.carousel-control .glyphicon-chevron-left {\n  left: 50%;\n  margin-left: -10px;\n}\n.carousel-control .icon-next,\n.carousel-control .glyphicon-chevron-right {\n  right: 50%;\n  margin-right: -10px;\n}\n.carousel-control .icon-prev,\n.carousel-control .icon-next {\n  width: 20px;\n  height: 20px;\n  font-family: serif;\n  line-height: 
1;\n}\n.carousel-control .icon-prev:before {\n  content: '\\2039';\n}\n.carousel-control .icon-next:before {\n  content: '\\203a';\n}\n.carousel-indicators {\n  position: absolute;\n  bottom: 10px;\n  left: 50%;\n  z-index: 15;\n  width: 60%;\n  padding-left: 0;\n  margin-left: -30%;\n  text-align: center;\n  list-style: none;\n}\n.carousel-indicators li {\n  display: inline-block;\n  width: 10px;\n  height: 10px;\n  margin: 1px;\n  text-indent: -999px;\n  cursor: pointer;\n  background-color: #000 \\9;\n  background-color: rgba(0, 0, 0, 0);\n  border: 1px solid #fff;\n  border-radius: 10px;\n}\n.carousel-indicators .active {\n  width: 12px;\n  height: 12px;\n  margin: 0;\n  background-color: #fff;\n}\n.carousel-caption {\n  position: absolute;\n  right: 15%;\n  bottom: 20px;\n  left: 15%;\n  z-index: 10;\n  padding-top: 20px;\n  padding-bottom: 20px;\n  color: #fff;\n  text-align: center;\n  text-shadow: 0 1px 2px rgba(0, 0, 0, .6);\n}\n.carousel-caption .btn {\n  text-shadow: none;\n}\n@media screen and (min-width: 768px) {\n  .carousel-control .glyphicon-chevron-left,\n  .carousel-control .glyphicon-chevron-right,\n  .carousel-control .icon-prev,\n  .carousel-control .icon-next {\n    width: 30px;\n    height: 30px;\n    margin-top: -10px;\n    font-size: 30px;\n  }\n  .carousel-control .glyphicon-chevron-left,\n  .carousel-control .icon-prev {\n    margin-left: -10px;\n  }\n  .carousel-control .glyphicon-chevron-right,\n  .carousel-control .icon-next {\n    margin-right: -10px;\n  }\n  .carousel-caption {\n    right: 20%;\n    left: 20%;\n    padding-bottom: 30px;\n  }\n  .carousel-indicators {\n    bottom: 20px;\n  }\n}\n.clearfix:before,\n.clearfix:after,\n.dl-horizontal dd:before,\n.dl-horizontal dd:after,\n.container:before,\n.container:after,\n.container-fluid:before,\n.container-fluid:after,\n.row:before,\n.row:after,\n.form-horizontal .form-group:before,\n.form-horizontal .form-group:after,\n.btn-toolbar:before,\n.btn-toolbar:after,\n.btn-group-vertical 
> .btn-group:before,\n.btn-group-vertical > .btn-group:after,\n.nav:before,\n.nav:after,\n.navbar:before,\n.navbar:after,\n.navbar-header:before,\n.navbar-header:after,\n.navbar-collapse:before,\n.navbar-collapse:after,\n.pager:before,\n.pager:after,\n.panel-body:before,\n.panel-body:after,\n.modal-header:before,\n.modal-header:after,\n.modal-footer:before,\n.modal-footer:after {\n  display: table;\n  content: \" \";\n}\n.clearfix:after,\n.dl-horizontal dd:after,\n.container:after,\n.container-fluid:after,\n.row:after,\n.form-horizontal .form-group:after,\n.btn-toolbar:after,\n.btn-group-vertical > .btn-group:after,\n.nav:after,\n.navbar:after,\n.navbar-header:after,\n.navbar-collapse:after,\n.pager:after,\n.panel-body:after,\n.modal-header:after,\n.modal-footer:after {\n  clear: both;\n}\n.center-block {\n  display: block;\n  margin-right: auto;\n  margin-left: auto;\n}\n.pull-right {\n  float: right !important;\n}\n.pull-left {\n  float: left !important;\n}\n.hide {\n  display: none !important;\n}\n.show {\n  display: block !important;\n}\n.invisible {\n  visibility: hidden;\n}\n.text-hide {\n  font: 0/0 a;\n  color: transparent;\n  text-shadow: none;\n  background-color: transparent;\n  border: 0;\n}\n.hidden {\n  display: none !important;\n}\n.affix {\n  position: fixed;\n}\n@-ms-viewport {\n  width: device-width;\n}\n.visible-xs,\n.visible-sm,\n.visible-md,\n.visible-lg {\n  display: none !important;\n}\n.visible-xs-block,\n.visible-xs-inline,\n.visible-xs-inline-block,\n.visible-sm-block,\n.visible-sm-inline,\n.visible-sm-inline-block,\n.visible-md-block,\n.visible-md-inline,\n.visible-md-inline-block,\n.visible-lg-block,\n.visible-lg-inline,\n.visible-lg-inline-block {\n  display: none !important;\n}\n@media (max-width: 767px) {\n  .visible-xs {\n    display: block !important;\n  }\n  table.visible-xs {\n    display: table !important;\n  }\n  tr.visible-xs {\n    display: table-row !important;\n  }\n  th.visible-xs,\n  td.visible-xs {\n    display: 
table-cell !important;\n  }\n}\n@media (max-width: 767px) {\n  .visible-xs-block {\n    display: block !important;\n  }\n}\n@media (max-width: 767px) {\n  .visible-xs-inline {\n    display: inline !important;\n  }\n}\n@media (max-width: 767px) {\n  .visible-xs-inline-block {\n    display: inline-block !important;\n  }\n}\n@media (min-width: 768px) and (max-width: 991px) {\n  .visible-sm {\n    display: block !important;\n  }\n  table.visible-sm {\n    display: table !important;\n  }\n  tr.visible-sm {\n    display: table-row !important;\n  }\n  th.visible-sm,\n  td.visible-sm {\n    display: table-cell !important;\n  }\n}\n@media (min-width: 768px) and (max-width: 991px) {\n  .visible-sm-block {\n    display: block !important;\n  }\n}\n@media (min-width: 768px) and (max-width: 991px) {\n  .visible-sm-inline {\n    display: inline !important;\n  }\n}\n@media (min-width: 768px) and (max-width: 991px) {\n  .visible-sm-inline-block {\n    display: inline-block !important;\n  }\n}\n@media (min-width: 992px) and (max-width: 1199px) {\n  .visible-md {\n    display: block !important;\n  }\n  table.visible-md {\n    display: table !important;\n  }\n  tr.visible-md {\n    display: table-row !important;\n  }\n  th.visible-md,\n  td.visible-md {\n    display: table-cell !important;\n  }\n}\n@media (min-width: 992px) and (max-width: 1199px) {\n  .visible-md-block {\n    display: block !important;\n  }\n}\n@media (min-width: 992px) and (max-width: 1199px) {\n  .visible-md-inline {\n    display: inline !important;\n  }\n}\n@media (min-width: 992px) and (max-width: 1199px) {\n  .visible-md-inline-block {\n    display: inline-block !important;\n  }\n}\n@media (min-width: 1200px) {\n  .visible-lg {\n    display: block !important;\n  }\n  table.visible-lg {\n    display: table !important;\n  }\n  tr.visible-lg {\n    display: table-row !important;\n  }\n  th.visible-lg,\n  td.visible-lg {\n    display: table-cell !important;\n  }\n}\n@media (min-width: 1200px) {\n  .visible-lg-block 
{\n    display: block !important;\n  }\n}\n@media (min-width: 1200px) {\n  .visible-lg-inline {\n    display: inline !important;\n  }\n}\n@media (min-width: 1200px) {\n  .visible-lg-inline-block {\n    display: inline-block !important;\n  }\n}\n@media (max-width: 767px) {\n  .hidden-xs {\n    display: none !important;\n  }\n}\n@media (min-width: 768px) and (max-width: 991px) {\n  .hidden-sm {\n    display: none !important;\n  }\n}\n@media (min-width: 992px) and (max-width: 1199px) {\n  .hidden-md {\n    display: none !important;\n  }\n}\n@media (min-width: 1200px) {\n  .hidden-lg {\n    display: none !important;\n  }\n}\n.visible-print {\n  display: none !important;\n}\n@media print {\n  .visible-print {\n    display: block !important;\n  }\n  table.visible-print {\n    display: table !important;\n  }\n  tr.visible-print {\n    display: table-row !important;\n  }\n  th.visible-print,\n  td.visible-print {\n    display: table-cell !important;\n  }\n}\n.visible-print-block {\n  display: none !important;\n}\n@media print {\n  .visible-print-block {\n    display: block !important;\n  }\n}\n.visible-print-inline {\n  display: none !important;\n}\n@media print {\n  .visible-print-inline {\n    display: inline !important;\n  }\n}\n.visible-print-inline-block {\n  display: none !important;\n}\n@media print {\n  .visible-print-inline-block {\n    display: inline-block !important;\n  }\n}\n@media print {\n  .hidden-print {\n    display: none !important;\n  }\n}\n/*# sourceMappingURL=bootstrap.css.map */\n"
  },
  {
    "path": "web_gui/gui_v3/css/dashboard.css",
    "content": "/*\n *  * Base structure\n *   */\n\n/* Move down content because we have a fixed navbar that is 50px tall */\nbody {\n  padding-top: 50px;\n}\n\n\n/*\n *  * Global add-ons\n *   */\n\n.sub-header {\n  padding-bottom: 10px;\n  border-bottom: 1px solid #eee;\n}\n\n/*\n *  * Top navigation\n *   * Hide default border to remove 1px line.\n *    */\n.navbar-fixed-top {\n  border: 0;\n}\n\n/*\n *  * Sidebar\n *   */\n\n/* Hide for mobile, show later */\n.sidebar {\n  display: none;\n}\n@media (min-width: 768px) {\n  .sidebar {\n    position: fixed;\n    top: 51px;\n    bottom: 0;\n    left: 0;\n    z-index: 1000;\n    display: block;\n    padding: 20px;\n    overflow-x: hidden;\n    overflow-y: auto; /* Scrollable contents if viewport is shorter than content. */\n    background-color: #f5f5f5;\n    border-right: 1px solid #eee;\n  }\n}\n\n/* Sidebar navigation */\n.nav-sidebar {\n  margin-right: -21px; /* 20px padding + 1px border */\n  margin-bottom: 20px;\n  margin-left: -20px;\n}\n.nav-sidebar > li > a {\n  padding-right: 20px;\n  padding-left: 20px;\n}\n.nav-sidebar > .active > a,\n.nav-sidebar > .active > a:hover,\n.nav-sidebar > .active > a:focus {\n  color: #fff;\n  background-color: #428bca;\n}\n\n\n/*\n *  * Main content\n *   */\n\n.main {\n  padding: 20px;\n}\n@media (min-width: 768px) {\n  .main {\n    padding-right: 40px;\n    padding-left: 40px;\n  }\n}\n.main .page-header {\n  margin-top: 0;\n}\n\n\n/*\n *  * Placeholder dashboard ideas\n *   */\n\n.placeholders {\n  margin-bottom: 30px;\n  text-align: center;\n}\n.placeholders h4 {\n  margin-bottom: 0;\n}\n.placeholder {\n  margin-bottom: 20px;\n}\n.placeholder img {\n  display: inline-block;\n  border-radius: 50%;\n}\n\n.profile-logopic img {\n  float: none;\n  margin: 0 auto;\n  width: 50%;\n  height: 50%;\n  -webkit-border-radius: 50% !important;\n  -moz-border-radius: 50% !important;\n  border-radius: 50% !important;\n}\n"
  },
  {
    "path": "web_gui/gui_v3/css/dataTables.bootstrap.css",
    "content": "table.dataTable {\n  clear: both;\n  margin-top: 6px !important;\n  margin-bottom: 6px !important;\n  max-width: none !important;\n  border-collapse: separate !important;\n}\ntable.dataTable td,\ntable.dataTable th {\n  -webkit-box-sizing: content-box;\n  -moz-box-sizing: content-box;\n  box-sizing: content-box;\n}\ntable.dataTable td.dataTables_empty,\ntable.dataTable th.dataTables_empty {\n  text-align: center;\n}\ntable.dataTable.nowrap th,\ntable.dataTable.nowrap td {\n  white-space: nowrap;\n}\n\ndiv.dataTables_wrapper div.dataTables_length label {\n  font-weight: normal;\n  text-align: left;\n  white-space: nowrap;\n}\ndiv.dataTables_wrapper div.dataTables_length select {\n  width: 75px;\n  display: inline-block;\n}\ndiv.dataTables_wrapper div.dataTables_filter {\n  text-align: right;\n}\ndiv.dataTables_wrapper div.dataTables_filter label {\n  font-weight: normal;\n  white-space: nowrap;\n  text-align: left;\n}\ndiv.dataTables_wrapper div.dataTables_filter input {\n  margin-left: 0.5em;\n  display: inline-block;\n  width: auto;\n}\ndiv.dataTables_wrapper div.dataTables_info {\n  padding-top: 8px;\n  white-space: nowrap;\n}\ndiv.dataTables_wrapper div.dataTables_paginate {\n  margin: 0;\n  white-space: nowrap;\n  text-align: right;\n}\ndiv.dataTables_wrapper div.dataTables_paginate ul.pagination {\n  margin: 2px 0;\n  white-space: nowrap;\n}\ndiv.dataTables_wrapper div.dataTables_processing {\n  position: absolute;\n  top: 50%;\n  left: 50%;\n  width: 200px;\n  margin-left: -100px;\n  margin-top: -26px;\n  text-align: center;\n  padding: 1em 0;\n}\n\ntable.dataTable thead > tr > th.sorting_asc, table.dataTable thead > tr > th.sorting_desc, table.dataTable thead > tr > th.sorting,\ntable.dataTable thead > tr > td.sorting_asc,\ntable.dataTable thead > tr > td.sorting_desc,\ntable.dataTable thead > tr > td.sorting {\n  padding-right: 30px;\n}\ntable.dataTable thead > tr > th:active,\ntable.dataTable thead > tr > td:active {\n  outline: 
none;\n}\ntable.dataTable thead .sorting,\ntable.dataTable thead .sorting_asc,\ntable.dataTable thead .sorting_desc,\ntable.dataTable thead .sorting_asc_disabled,\ntable.dataTable thead .sorting_desc_disabled {\n  cursor: pointer;\n  position: relative;\n}\ntable.dataTable thead .sorting:after,\ntable.dataTable thead .sorting_asc:after,\ntable.dataTable thead .sorting_desc:after,\ntable.dataTable thead .sorting_asc_disabled:after,\ntable.dataTable thead .sorting_desc_disabled:after {\n  position: absolute;\n  bottom: 8px;\n  right: 8px;\n  display: block;\n  font-family: 'Glyphicons Halflings';\n  opacity: 0.5;\n}\ntable.dataTable thead .sorting:after {\n  opacity: 0.2;\n  content: \"\\e150\";\n  /* sort */\n}\ntable.dataTable thead .sorting_asc:after {\n  content: \"\\e155\";\n  /* sort-by-attributes */\n}\ntable.dataTable thead .sorting_desc:after {\n  content: \"\\e156\";\n  /* sort-by-attributes-alt */\n}\ntable.dataTable thead .sorting_asc_disabled:after,\ntable.dataTable thead .sorting_desc_disabled:after {\n  color: #eee;\n}\n\ndiv.dataTables_scrollHead table.dataTable {\n  margin-bottom: 0 !important;\n}\n\ndiv.dataTables_scrollBody table {\n  border-top: none;\n  margin-top: 0 !important;\n  margin-bottom: 0 !important;\n}\ndiv.dataTables_scrollBody table thead .sorting:after,\ndiv.dataTables_scrollBody table thead .sorting_asc:after,\ndiv.dataTables_scrollBody table thead .sorting_desc:after {\n  display: none;\n}\ndiv.dataTables_scrollBody table tbody tr:first-child th,\ndiv.dataTables_scrollBody table tbody tr:first-child td {\n  border-top: none;\n}\n\ndiv.dataTables_scrollFoot table {\n  margin-top: 0 !important;\n  border-top: none;\n}\n\n@media screen and (max-width: 767px) {\n  div.dataTables_wrapper div.dataTables_length,\n  div.dataTables_wrapper div.dataTables_filter,\n  div.dataTables_wrapper div.dataTables_info,\n  div.dataTables_wrapper div.dataTables_paginate {\n    text-align: center;\n  }\n}\ntable.dataTable.table-condensed > thead > tr > 
th {\n  padding-right: 20px;\n}\ntable.dataTable.table-condensed .sorting:after,\ntable.dataTable.table-condensed .sorting_asc:after,\ntable.dataTable.table-condensed .sorting_desc:after {\n  top: 6px;\n  right: 6px;\n}\n\ntable.table-bordered.dataTable th,\ntable.table-bordered.dataTable td {\n  border-left-width: 0;\n}\ntable.table-bordered.dataTable th:last-child, table.table-bordered.dataTable th:last-child,\ntable.table-bordered.dataTable td:last-child,\ntable.table-bordered.dataTable td:last-child {\n  border-right-width: 0;\n}\ntable.table-bordered.dataTable tbody th,\ntable.table-bordered.dataTable tbody td {\n  border-bottom-width: 0;\n}\n\ndiv.dataTables_scrollHead table.table-bordered {\n  border-bottom-width: 0;\n}\n\ndiv.table-responsive > div.dataTables_wrapper > div.row {\n  margin: 0;\n}\ndiv.table-responsive > div.dataTables_wrapper > div.row > div[class^=\"col-\"]:first-child {\n  padding-left: 0;\n}\ndiv.table-responsive > div.dataTables_wrapper > div.row > div[class^=\"col-\"]:last-child {\n  padding-right: 0;\n}\n"
  },
  {
    "path": "web_gui/gui_v3/css/dataTables.bootstrap4.css",
    "content": "table.dataTable {\n  clear: both;\n  margin-top: 6px !important;\n  margin-bottom: 6px !important;\n  max-width: none !important;\n  border-collapse: separate !important;\n}\ntable.dataTable td,\ntable.dataTable th {\n  -webkit-box-sizing: content-box;\n  box-sizing: content-box;\n}\ntable.dataTable td.dataTables_empty,\ntable.dataTable th.dataTables_empty {\n  text-align: center;\n}\ntable.dataTable.nowrap th,\ntable.dataTable.nowrap td {\n  white-space: nowrap;\n}\n\ndiv.dataTables_wrapper div.dataTables_length label {\n  font-weight: normal;\n  text-align: left;\n  white-space: nowrap;\n}\ndiv.dataTables_wrapper div.dataTables_length select {\n  width: 75px;\n  display: inline-block;\n}\ndiv.dataTables_wrapper div.dataTables_filter {\n  text-align: right;\n}\ndiv.dataTables_wrapper div.dataTables_filter label {\n  font-weight: normal;\n  white-space: nowrap;\n  text-align: left;\n}\ndiv.dataTables_wrapper div.dataTables_filter input {\n  margin-left: 0.5em;\n  display: inline-block;\n  width: auto;\n}\ndiv.dataTables_wrapper div.dataTables_info {\n  padding-top: 0.85em;\n  white-space: nowrap;\n}\ndiv.dataTables_wrapper div.dataTables_paginate {\n  margin: 0;\n  white-space: nowrap;\n  text-align: right;\n}\ndiv.dataTables_wrapper div.dataTables_paginate ul.pagination {\n  margin: 2px 0;\n  white-space: nowrap;\n}\ndiv.dataTables_wrapper div.dataTables_processing {\n  position: absolute;\n  top: 50%;\n  left: 50%;\n  width: 200px;\n  margin-left: -100px;\n  margin-top: -26px;\n  text-align: center;\n  padding: 1em 0;\n}\n\ntable.dataTable thead > tr > th.sorting_asc, table.dataTable thead > tr > th.sorting_desc, table.dataTable thead > tr > th.sorting,\ntable.dataTable thead > tr > td.sorting_asc,\ntable.dataTable thead > tr > td.sorting_desc,\ntable.dataTable thead > tr > td.sorting {\n  padding-right: 30px;\n}\ntable.dataTable thead > tr > th:active,\ntable.dataTable thead > tr > td:active {\n  outline: none;\n}\ntable.dataTable thead 
.sorting,\ntable.dataTable thead .sorting_asc,\ntable.dataTable thead .sorting_desc,\ntable.dataTable thead .sorting_asc_disabled,\ntable.dataTable thead .sorting_desc_disabled {\n  cursor: pointer;\n  position: relative;\n}\ntable.dataTable thead .sorting:before, table.dataTable thead .sorting:after,\ntable.dataTable thead .sorting_asc:before,\ntable.dataTable thead .sorting_asc:after,\ntable.dataTable thead .sorting_desc:before,\ntable.dataTable thead .sorting_desc:after,\ntable.dataTable thead .sorting_asc_disabled:before,\ntable.dataTable thead .sorting_asc_disabled:after,\ntable.dataTable thead .sorting_desc_disabled:before,\ntable.dataTable thead .sorting_desc_disabled:after {\n  position: absolute;\n  bottom: 0.9em;\n  display: block;\n  opacity: 0.3;\n}\ntable.dataTable thead .sorting:before,\ntable.dataTable thead .sorting_asc:before,\ntable.dataTable thead .sorting_desc:before,\ntable.dataTable thead .sorting_asc_disabled:before,\ntable.dataTable thead .sorting_desc_disabled:before {\n  right: 1em;\n  content: \"\\2191\";\n}\ntable.dataTable thead .sorting:after,\ntable.dataTable thead .sorting_asc:after,\ntable.dataTable thead .sorting_desc:after,\ntable.dataTable thead .sorting_asc_disabled:after,\ntable.dataTable thead .sorting_desc_disabled:after {\n  right: 0.5em;\n  content: \"\\2193\";\n}\ntable.dataTable thead .sorting_asc:before,\ntable.dataTable thead .sorting_desc:after {\n  opacity: 1;\n}\ntable.dataTable thead .sorting_asc_disabled:before,\ntable.dataTable thead .sorting_desc_disabled:after {\n  opacity: 0;\n}\n\ndiv.dataTables_scrollHead table.dataTable {\n  margin-bottom: 0 !important;\n}\n\ndiv.dataTables_scrollBody table {\n  border-top: none;\n  margin-top: 0 !important;\n  margin-bottom: 0 !important;\n}\ndiv.dataTables_scrollBody table thead .sorting:after,\ndiv.dataTables_scrollBody table thead .sorting_asc:after,\ndiv.dataTables_scrollBody table thead .sorting_desc:after {\n  display: none;\n}\ndiv.dataTables_scrollBody table tbody 
tr:first-child th,\ndiv.dataTables_scrollBody table tbody tr:first-child td {\n  border-top: none;\n}\n\ndiv.dataTables_scrollFoot table {\n  margin-top: 0 !important;\n  border-top: none;\n}\n\n@media screen and (max-width: 767px) {\n  div.dataTables_wrapper div.dataTables_length,\n  div.dataTables_wrapper div.dataTables_filter,\n  div.dataTables_wrapper div.dataTables_info,\n  div.dataTables_wrapper div.dataTables_paginate {\n    text-align: center;\n  }\n}\ntable.dataTable.table-condensed > thead > tr > th {\n  padding-right: 20px;\n}\ntable.dataTable.table-condensed .sorting:after,\ntable.dataTable.table-condensed .sorting_asc:after,\ntable.dataTable.table-condensed .sorting_desc:after {\n  top: 6px;\n  right: 6px;\n}\n\ntable.table-bordered.dataTable th,\ntable.table-bordered.dataTable td {\n  border-left-width: 0;\n}\ntable.table-bordered.dataTable th:last-child, table.table-bordered.dataTable th:last-child,\ntable.table-bordered.dataTable td:last-child,\ntable.table-bordered.dataTable td:last-child {\n  border-right-width: 0;\n}\ntable.table-bordered.dataTable tbody th,\ntable.table-bordered.dataTable tbody td {\n  border-bottom-width: 0;\n}\n\ndiv.dataTables_scrollHead table.table-bordered {\n  border-bottom-width: 0;\n}\n\ndiv.table-responsive > div.dataTables_wrapper > div.row {\n  margin: 0;\n}\ndiv.table-responsive > div.dataTables_wrapper > div.row > div[class^=\"col-\"]:first-child {\n  padding-left: 0;\n}\ndiv.table-responsive > div.dataTables_wrapper > div.row > div[class^=\"col-\"]:last-child {\n  padding-right: 0;\n}\n"
  },
  {
    "path": "web_gui/gui_v3/css/dataTables.foundation.css",
    "content": "table.dataTable {\n  clear: both;\n  margin: 0.5em 0 !important;\n  max-width: none !important;\n  width: 100%;\n}\ntable.dataTable td,\ntable.dataTable th {\n  -webkit-box-sizing: content-box;\n  box-sizing: content-box;\n}\ntable.dataTable td.dataTables_empty,\ntable.dataTable th.dataTables_empty {\n  text-align: center;\n}\ntable.dataTable.nowrap th, table.dataTable.nowrap td {\n  white-space: nowrap;\n}\n\ndiv.dataTables_wrapper {\n  position: relative;\n}\ndiv.dataTables_wrapper div.dataTables_length label {\n  float: left;\n  text-align: left;\n  margin-bottom: 0;\n}\ndiv.dataTables_wrapper div.dataTables_length select {\n  width: 75px;\n  margin-bottom: 0;\n}\ndiv.dataTables_wrapper div.dataTables_filter label {\n  float: right;\n  margin-bottom: 0;\n}\ndiv.dataTables_wrapper div.dataTables_filter input {\n  display: inline-block !important;\n  width: auto !important;\n  margin-bottom: 0;\n  margin-left: 0.5em;\n}\ndiv.dataTables_wrapper div.dataTables_info {\n  padding-top: 2px;\n}\ndiv.dataTables_wrapper div.dataTables_paginate {\n  float: right;\n  margin: 0;\n}\ndiv.dataTables_wrapper div.dataTables_processing {\n  position: absolute;\n  top: 50%;\n  left: 50%;\n  width: 200px;\n  margin-left: -100px;\n  margin-top: -26px;\n  text-align: center;\n  padding: 1rem 0;\n}\n\ntable.dataTable thead > tr > th.sorting_asc, table.dataTable thead > tr > th.sorting_desc, table.dataTable thead > tr > th.sorting,\ntable.dataTable thead > tr > td.sorting_asc,\ntable.dataTable thead > tr > td.sorting_desc,\ntable.dataTable thead > tr > td.sorting {\n  padding-right: 1.5rem;\n}\ntable.dataTable thead > tr > th:active,\ntable.dataTable thead > tr > td:active {\n  outline: none;\n}\ntable.dataTable thead .sorting,\ntable.dataTable thead .sorting_asc,\ntable.dataTable thead .sorting_desc {\n  cursor: pointer;\n}\ntable.dataTable thead .sorting,\ntable.dataTable thead .sorting_asc,\ntable.dataTable thead .sorting_desc,\ntable.dataTable thead 
.sorting_asc_disabled,\ntable.dataTable thead .sorting_desc_disabled {\n  background-repeat: no-repeat;\n  background-position: center right;\n}\ntable.dataTable thead .sorting {\n  background-image: url(\"../images/sort_both.png\");\n}\ntable.dataTable thead .sorting_asc {\n  background-image: url(\"../images/sort_asc.png\");\n}\ntable.dataTable thead .sorting_desc {\n  background-image: url(\"../images/sort_desc.png\");\n}\ntable.dataTable thead .sorting_asc_disabled {\n  background-image: url(\"../images/sort_asc_disabled.png\");\n}\ntable.dataTable thead .sorting_desc_disabled {\n  background-image: url(\"../images/sort_desc_disabled.png\");\n}\n\ndiv.dataTables_scrollHead table {\n  margin-bottom: 0 !important;\n}\n\ndiv.dataTables_scrollBody table {\n  border-top: none;\n  margin-top: 0 !important;\n  margin-bottom: 0 !important;\n}\ndiv.dataTables_scrollBody table tbody tr:first-child th,\ndiv.dataTables_scrollBody table tbody tr:first-child td {\n  border-top: none;\n}\n\ndiv.dataTables_scrollFoot table {\n  margin-top: 0 !important;\n  border-top: none;\n}\n"
  },
  {
    "path": "web_gui/gui_v3/css/dataTables.jqueryui.css",
    "content": "/*\n * Table styles\n */\ntable.dataTable {\n  width: 100%;\n  margin: 0 auto;\n  clear: both;\n  border-collapse: separate;\n  border-spacing: 0;\n  /*\n   * Header and footer styles\n   */\n  /*\n   * Body styles\n   */\n}\ntable.dataTable thead th,\ntable.dataTable tfoot th {\n  font-weight: bold;\n}\ntable.dataTable thead th,\ntable.dataTable thead td {\n  padding: 10px 18px;\n}\ntable.dataTable thead th:active,\ntable.dataTable thead td:active {\n  outline: none;\n}\ntable.dataTable tfoot th,\ntable.dataTable tfoot td {\n  padding: 10px 18px 6px 18px;\n}\ntable.dataTable tbody tr {\n  background-color: #ffffff;\n}\ntable.dataTable tbody tr.selected {\n  background-color: #B0BED9;\n}\ntable.dataTable tbody th,\ntable.dataTable tbody td {\n  padding: 8px 10px;\n}\ntable.dataTable.row-border tbody th, table.dataTable.row-border tbody td, table.dataTable.display tbody th, table.dataTable.display tbody td {\n  border-top: 1px solid #ddd;\n}\ntable.dataTable.row-border tbody tr:first-child th,\ntable.dataTable.row-border tbody tr:first-child td, table.dataTable.display tbody tr:first-child th,\ntable.dataTable.display tbody tr:first-child td {\n  border-top: none;\n}\ntable.dataTable.cell-border tbody th, table.dataTable.cell-border tbody td {\n  border-top: 1px solid #ddd;\n  border-right: 1px solid #ddd;\n}\ntable.dataTable.cell-border tbody tr th:first-child,\ntable.dataTable.cell-border tbody tr td:first-child {\n  border-left: 1px solid #ddd;\n}\ntable.dataTable.cell-border tbody tr:first-child th,\ntable.dataTable.cell-border tbody tr:first-child td {\n  border-top: none;\n}\ntable.dataTable.stripe tbody tr.odd, table.dataTable.display tbody tr.odd {\n  background-color: #f9f9f9;\n}\ntable.dataTable.stripe tbody tr.odd.selected, table.dataTable.display tbody tr.odd.selected {\n  background-color: #acbad4;\n}\ntable.dataTable.hover tbody tr:hover, table.dataTable.display tbody tr:hover {\n  background-color: #f6f6f6;\n}\ntable.dataTable.hover 
tbody tr:hover.selected, table.dataTable.display tbody tr:hover.selected {\n  background-color: #aab7d1;\n}\ntable.dataTable.order-column tbody tr > .sorting_1,\ntable.dataTable.order-column tbody tr > .sorting_2,\ntable.dataTable.order-column tbody tr > .sorting_3, table.dataTable.display tbody tr > .sorting_1,\ntable.dataTable.display tbody tr > .sorting_2,\ntable.dataTable.display tbody tr > .sorting_3 {\n  background-color: #fafafa;\n}\ntable.dataTable.order-column tbody tr.selected > .sorting_1,\ntable.dataTable.order-column tbody tr.selected > .sorting_2,\ntable.dataTable.order-column tbody tr.selected > .sorting_3, table.dataTable.display tbody tr.selected > .sorting_1,\ntable.dataTable.display tbody tr.selected > .sorting_2,\ntable.dataTable.display tbody tr.selected > .sorting_3 {\n  background-color: #acbad5;\n}\ntable.dataTable.display tbody tr.odd > .sorting_1, table.dataTable.order-column.stripe tbody tr.odd > .sorting_1 {\n  background-color: #f1f1f1;\n}\ntable.dataTable.display tbody tr.odd > .sorting_2, table.dataTable.order-column.stripe tbody tr.odd > .sorting_2 {\n  background-color: #f3f3f3;\n}\ntable.dataTable.display tbody tr.odd > .sorting_3, table.dataTable.order-column.stripe tbody tr.odd > .sorting_3 {\n  background-color: whitesmoke;\n}\ntable.dataTable.display tbody tr.odd.selected > .sorting_1, table.dataTable.order-column.stripe tbody tr.odd.selected > .sorting_1 {\n  background-color: #a6b4cd;\n}\ntable.dataTable.display tbody tr.odd.selected > .sorting_2, table.dataTable.order-column.stripe tbody tr.odd.selected > .sorting_2 {\n  background-color: #a8b5cf;\n}\ntable.dataTable.display tbody tr.odd.selected > .sorting_3, table.dataTable.order-column.stripe tbody tr.odd.selected > .sorting_3 {\n  background-color: #a9b7d1;\n}\ntable.dataTable.display tbody tr.even > .sorting_1, table.dataTable.order-column.stripe tbody tr.even > .sorting_1 {\n  background-color: #fafafa;\n}\ntable.dataTable.display tbody tr.even > .sorting_2, 
table.dataTable.order-column.stripe tbody tr.even > .sorting_2 {\n  background-color: #fcfcfc;\n}\ntable.dataTable.display tbody tr.even > .sorting_3, table.dataTable.order-column.stripe tbody tr.even > .sorting_3 {\n  background-color: #fefefe;\n}\ntable.dataTable.display tbody tr.even.selected > .sorting_1, table.dataTable.order-column.stripe tbody tr.even.selected > .sorting_1 {\n  background-color: #acbad5;\n}\ntable.dataTable.display tbody tr.even.selected > .sorting_2, table.dataTable.order-column.stripe tbody tr.even.selected > .sorting_2 {\n  background-color: #aebcd6;\n}\ntable.dataTable.display tbody tr.even.selected > .sorting_3, table.dataTable.order-column.stripe tbody tr.even.selected > .sorting_3 {\n  background-color: #afbdd8;\n}\ntable.dataTable.display tbody tr:hover > .sorting_1, table.dataTable.order-column.hover tbody tr:hover > .sorting_1 {\n  background-color: #eaeaea;\n}\ntable.dataTable.display tbody tr:hover > .sorting_2, table.dataTable.order-column.hover tbody tr:hover > .sorting_2 {\n  background-color: #ececec;\n}\ntable.dataTable.display tbody tr:hover > .sorting_3, table.dataTable.order-column.hover tbody tr:hover > .sorting_3 {\n  background-color: #efefef;\n}\ntable.dataTable.display tbody tr:hover.selected > .sorting_1, table.dataTable.order-column.hover tbody tr:hover.selected > .sorting_1 {\n  background-color: #a2aec7;\n}\ntable.dataTable.display tbody tr:hover.selected > .sorting_2, table.dataTable.order-column.hover tbody tr:hover.selected > .sorting_2 {\n  background-color: #a3b0c9;\n}\ntable.dataTable.display tbody tr:hover.selected > .sorting_3, table.dataTable.order-column.hover tbody tr:hover.selected > .sorting_3 {\n  background-color: #a5b2cb;\n}\ntable.dataTable.no-footer {\n  border-bottom: 1px solid #111;\n}\ntable.dataTable.nowrap th, table.dataTable.nowrap td {\n  white-space: nowrap;\n}\ntable.dataTable.compact thead th,\ntable.dataTable.compact thead td {\n  padding: 4px 17px 4px 4px;\n}\ntable.dataTable.compact 
tfoot th,\ntable.dataTable.compact tfoot td {\n  padding: 4px;\n}\ntable.dataTable.compact tbody th,\ntable.dataTable.compact tbody td {\n  padding: 4px;\n}\ntable.dataTable th.dt-left,\ntable.dataTable td.dt-left {\n  text-align: left;\n}\ntable.dataTable th.dt-center,\ntable.dataTable td.dt-center,\ntable.dataTable td.dataTables_empty {\n  text-align: center;\n}\ntable.dataTable th.dt-right,\ntable.dataTable td.dt-right {\n  text-align: right;\n}\ntable.dataTable th.dt-justify,\ntable.dataTable td.dt-justify {\n  text-align: justify;\n}\ntable.dataTable th.dt-nowrap,\ntable.dataTable td.dt-nowrap {\n  white-space: nowrap;\n}\ntable.dataTable thead th.dt-head-left,\ntable.dataTable thead td.dt-head-left,\ntable.dataTable tfoot th.dt-head-left,\ntable.dataTable tfoot td.dt-head-left {\n  text-align: left;\n}\ntable.dataTable thead th.dt-head-center,\ntable.dataTable thead td.dt-head-center,\ntable.dataTable tfoot th.dt-head-center,\ntable.dataTable tfoot td.dt-head-center {\n  text-align: center;\n}\ntable.dataTable thead th.dt-head-right,\ntable.dataTable thead td.dt-head-right,\ntable.dataTable tfoot th.dt-head-right,\ntable.dataTable tfoot td.dt-head-right {\n  text-align: right;\n}\ntable.dataTable thead th.dt-head-justify,\ntable.dataTable thead td.dt-head-justify,\ntable.dataTable tfoot th.dt-head-justify,\ntable.dataTable tfoot td.dt-head-justify {\n  text-align: justify;\n}\ntable.dataTable thead th.dt-head-nowrap,\ntable.dataTable thead td.dt-head-nowrap,\ntable.dataTable tfoot th.dt-head-nowrap,\ntable.dataTable tfoot td.dt-head-nowrap {\n  white-space: nowrap;\n}\ntable.dataTable tbody th.dt-body-left,\ntable.dataTable tbody td.dt-body-left {\n  text-align: left;\n}\ntable.dataTable tbody th.dt-body-center,\ntable.dataTable tbody td.dt-body-center {\n  text-align: center;\n}\ntable.dataTable tbody th.dt-body-right,\ntable.dataTable tbody td.dt-body-right {\n  text-align: right;\n}\ntable.dataTable tbody th.dt-body-justify,\ntable.dataTable tbody 
td.dt-body-justify {\n  text-align: justify;\n}\ntable.dataTable tbody th.dt-body-nowrap,\ntable.dataTable tbody td.dt-body-nowrap {\n  white-space: nowrap;\n}\n\ntable.dataTable,\ntable.dataTable th,\ntable.dataTable td {\n  -webkit-box-sizing: content-box;\n  box-sizing: content-box;\n}\n\n/*\n * Control feature layout\n */\n.dataTables_wrapper {\n  position: relative;\n  clear: both;\n  *zoom: 1;\n  zoom: 1;\n}\n.dataTables_wrapper .dataTables_length {\n  float: left;\n}\n.dataTables_wrapper .dataTables_filter {\n  float: right;\n  text-align: right;\n}\n.dataTables_wrapper .dataTables_filter input {\n  margin-left: 0.5em;\n}\n.dataTables_wrapper .dataTables_info {\n  clear: both;\n  float: left;\n  padding-top: 0.755em;\n}\n.dataTables_wrapper .dataTables_paginate {\n  float: right;\n  text-align: right;\n  padding-top: 0.25em;\n}\n.dataTables_wrapper .dataTables_paginate .paginate_button {\n  box-sizing: border-box;\n  display: inline-block;\n  min-width: 1.5em;\n  padding: 0.5em 1em;\n  margin-left: 2px;\n  text-align: center;\n  text-decoration: none !important;\n  cursor: pointer;\n  *cursor: hand;\n  color: #333 !important;\n  border: 1px solid transparent;\n  border-radius: 2px;\n}\n.dataTables_wrapper .dataTables_paginate .paginate_button.current, .dataTables_wrapper .dataTables_paginate .paginate_button.current:hover {\n  color: #333 !important;\n  border: 1px solid #979797;\n  background-color: white;\n  background: -webkit-gradient(linear, left top, left bottom, color-stop(0%, white), color-stop(100%, #dcdcdc));\n  /* Chrome,Safari4+ */\n  background: -webkit-linear-gradient(top, white 0%, #dcdcdc 100%);\n  /* Chrome10+,Safari5.1+ */\n  background: -moz-linear-gradient(top, white 0%, #dcdcdc 100%);\n  /* FF3.6+ */\n  background: -ms-linear-gradient(top, white 0%, #dcdcdc 100%);\n  /* IE10+ */\n  background: -o-linear-gradient(top, white 0%, #dcdcdc 100%);\n  /* Opera 11.10+ */\n  background: linear-gradient(to bottom, white 0%, #dcdcdc 100%);\n  /* 
W3C */\n}\n.dataTables_wrapper .dataTables_paginate .paginate_button.disabled, .dataTables_wrapper .dataTables_paginate .paginate_button.disabled:hover, .dataTables_wrapper .dataTables_paginate .paginate_button.disabled:active {\n  cursor: default;\n  color: #666 !important;\n  border: 1px solid transparent;\n  background: transparent;\n  box-shadow: none;\n}\n.dataTables_wrapper .dataTables_paginate .paginate_button:hover {\n  color: white !important;\n  border: 1px solid #111;\n  background-color: #585858;\n  background: -webkit-gradient(linear, left top, left bottom, color-stop(0%, #585858), color-stop(100%, #111));\n  /* Chrome,Safari4+ */\n  background: -webkit-linear-gradient(top, #585858 0%, #111 100%);\n  /* Chrome10+,Safari5.1+ */\n  background: -moz-linear-gradient(top, #585858 0%, #111 100%);\n  /* FF3.6+ */\n  background: -ms-linear-gradient(top, #585858 0%, #111 100%);\n  /* IE10+ */\n  background: -o-linear-gradient(top, #585858 0%, #111 100%);\n  /* Opera 11.10+ */\n  background: linear-gradient(to bottom, #585858 0%, #111 100%);\n  /* W3C */\n}\n.dataTables_wrapper .dataTables_paginate .paginate_button:active {\n  outline: none;\n  background-color: #2b2b2b;\n  background: -webkit-gradient(linear, left top, left bottom, color-stop(0%, #2b2b2b), color-stop(100%, #0c0c0c));\n  /* Chrome,Safari4+ */\n  background: -webkit-linear-gradient(top, #2b2b2b 0%, #0c0c0c 100%);\n  /* Chrome10+,Safari5.1+ */\n  background: -moz-linear-gradient(top, #2b2b2b 0%, #0c0c0c 100%);\n  /* FF3.6+ */\n  background: -ms-linear-gradient(top, #2b2b2b 0%, #0c0c0c 100%);\n  /* IE10+ */\n  background: -o-linear-gradient(top, #2b2b2b 0%, #0c0c0c 100%);\n  /* Opera 11.10+ */\n  background: linear-gradient(to bottom, #2b2b2b 0%, #0c0c0c 100%);\n  /* W3C */\n  box-shadow: inset 0 0 3px #111;\n}\n.dataTables_wrapper .dataTables_paginate .ellipsis {\n  padding: 0 1em;\n}\n.dataTables_wrapper .dataTables_processing {\n  position: absolute;\n  top: 50%;\n  left: 50%;\n  width: 100%;\n  
height: 40px;\n  margin-left: -50%;\n  margin-top: -25px;\n  padding-top: 20px;\n  text-align: center;\n  font-size: 1.2em;\n  background-color: white;\n  background: -webkit-gradient(linear, left top, right top, color-stop(0%, rgba(255, 255, 255, 0)), color-stop(25%, rgba(255, 255, 255, 0.9)), color-stop(75%, rgba(255, 255, 255, 0.9)), color-stop(100%, rgba(255, 255, 255, 0)));\n  background: -webkit-linear-gradient(left, rgba(255, 255, 255, 0) 0%, rgba(255, 255, 255, 0.9) 25%, rgba(255, 255, 255, 0.9) 75%, rgba(255, 255, 255, 0) 100%);\n  background: -moz-linear-gradient(left, rgba(255, 255, 255, 0) 0%, rgba(255, 255, 255, 0.9) 25%, rgba(255, 255, 255, 0.9) 75%, rgba(255, 255, 255, 0) 100%);\n  background: -ms-linear-gradient(left, rgba(255, 255, 255, 0) 0%, rgba(255, 255, 255, 0.9) 25%, rgba(255, 255, 255, 0.9) 75%, rgba(255, 255, 255, 0) 100%);\n  background: -o-linear-gradient(left, rgba(255, 255, 255, 0) 0%, rgba(255, 255, 255, 0.9) 25%, rgba(255, 255, 255, 0.9) 75%, rgba(255, 255, 255, 0) 100%);\n  background: linear-gradient(to right, rgba(255, 255, 255, 0) 0%, rgba(255, 255, 255, 0.9) 25%, rgba(255, 255, 255, 0.9) 75%, rgba(255, 255, 255, 0) 100%);\n}\n.dataTables_wrapper .dataTables_length,\n.dataTables_wrapper .dataTables_filter,\n.dataTables_wrapper .dataTables_info,\n.dataTables_wrapper .dataTables_processing,\n.dataTables_wrapper .dataTables_paginate {\n  color: #333;\n}\n.dataTables_wrapper .dataTables_scroll {\n  clear: both;\n}\n.dataTables_wrapper .dataTables_scroll div.dataTables_scrollBody {\n  *margin-top: -1px;\n  -webkit-overflow-scrolling: touch;\n}\n.dataTables_wrapper .dataTables_scroll div.dataTables_scrollBody th, .dataTables_wrapper .dataTables_scroll div.dataTables_scrollBody td {\n  vertical-align: middle;\n}\n.dataTables_wrapper .dataTables_scroll div.dataTables_scrollBody th > div.dataTables_sizing,\n.dataTables_wrapper .dataTables_scroll div.dataTables_scrollBody td > div.dataTables_sizing {\n  height: 0;\n  overflow: hidden;\n  
margin: 0 !important;\n  padding: 0 !important;\n}\n.dataTables_wrapper.no-footer .dataTables_scrollBody {\n  border-bottom: 1px solid #111;\n}\n.dataTables_wrapper.no-footer div.dataTables_scrollHead table,\n.dataTables_wrapper.no-footer div.dataTables_scrollBody table {\n  border-bottom: none;\n}\n.dataTables_wrapper:after {\n  visibility: hidden;\n  display: block;\n  content: \"\";\n  clear: both;\n  height: 0;\n}\n\n@media screen and (max-width: 767px) {\n  .dataTables_wrapper .dataTables_info,\n  .dataTables_wrapper .dataTables_paginate {\n    float: none;\n    text-align: center;\n  }\n  .dataTables_wrapper .dataTables_paginate {\n    margin-top: 0.5em;\n  }\n}\n@media screen and (max-width: 640px) {\n  .dataTables_wrapper .dataTables_length,\n  .dataTables_wrapper .dataTables_filter {\n    float: none;\n    text-align: center;\n  }\n  .dataTables_wrapper .dataTables_filter {\n    margin-top: 0.5em;\n  }\n}\ntable.dataTable thead th div.DataTables_sort_wrapper {\n  position: relative;\n}\ntable.dataTable thead th div.DataTables_sort_wrapper span {\n  position: absolute;\n  top: 50%;\n  margin-top: -8px;\n  right: -18px;\n}\ntable.dataTable thead th.ui-state-default,\ntable.dataTable tfoot th.ui-state-default {\n  border-left-width: 0;\n}\ntable.dataTable thead th.ui-state-default:first-child,\ntable.dataTable tfoot th.ui-state-default:first-child {\n  border-left-width: 1px;\n}\n\n/*\n * Control feature layout\n */\n.dataTables_wrapper .dataTables_paginate .fg-button {\n  box-sizing: border-box;\n  display: inline-block;\n  min-width: 1.5em;\n  padding: 0.5em;\n  margin-left: 2px;\n  text-align: center;\n  text-decoration: none !important;\n  cursor: pointer;\n  *cursor: hand;\n  border: 1px solid transparent;\n}\n.dataTables_wrapper .dataTables_paginate .fg-button:active {\n  outline: none;\n}\n.dataTables_wrapper .dataTables_paginate .fg-button:first-child {\n  border-top-left-radius: 3px;\n  border-bottom-left-radius: 3px;\n}\n.dataTables_wrapper 
.dataTables_paginate .fg-button:last-child {\n  border-top-right-radius: 3px;\n  border-bottom-right-radius: 3px;\n}\n.dataTables_wrapper .ui-widget-header {\n  font-weight: normal;\n}\n.dataTables_wrapper .ui-toolbar {\n  padding: 8px;\n}\n.dataTables_wrapper.no-footer .dataTables_scrollBody {\n  border-bottom: none;\n}\n.dataTables_wrapper .dataTables_length,\n.dataTables_wrapper .dataTables_filter,\n.dataTables_wrapper .dataTables_info,\n.dataTables_wrapper .dataTables_processing,\n.dataTables_wrapper .dataTables_paginate {\n  color: inherit;\n}\n"
  },
  {
    "path": "web_gui/gui_v3/css/dataTables.material.css",
    "content": "div.dataTables_wrapper div.dataTables_filter {\n  text-align: right;\n}\ndiv.dataTables_wrapper div.dataTables_filter input {\n  margin-left: 0.5em;\n}\ndiv.dataTables_wrapper div.dataTables_info {\n  padding-top: 10px;\n  white-space: nowrap;\n}\ndiv.dataTables_wrapper div.dataTables_processing {\n  position: absolute;\n  top: 50%;\n  left: 50%;\n  width: 200px;\n  margin-left: -100px;\n  text-align: center;\n}\ndiv.dataTables_wrapper div.dataTables_paginate {\n  text-align: right;\n}\ndiv.dataTables_wrapper div.mdl-grid.dt-table {\n  padding-top: 0;\n  padding-bottom: 0;\n}\ndiv.dataTables_wrapper div.mdl-grid.dt-table > div.mdl-cell {\n  margin-top: 0;\n  margin-bottom: 0;\n}\n\ntable.dataTable thead > tr > th.sorting_asc, table.dataTable thead > tr > th.sorting_desc, table.dataTable thead > tr > th.sorting,\ntable.dataTable thead > tr > td.sorting_asc,\ntable.dataTable thead > tr > td.sorting_desc,\ntable.dataTable thead > tr > td.sorting {\n  padding-right: 30px;\n}\ntable.dataTable thead > tr > th:active,\ntable.dataTable thead > tr > td:active {\n  outline: none;\n}\ntable.dataTable thead .sorting,\ntable.dataTable thead .sorting_asc,\ntable.dataTable thead .sorting_desc,\ntable.dataTable thead .sorting_asc_disabled,\ntable.dataTable thead .sorting_desc_disabled {\n  cursor: pointer;\n  position: relative;\n}\ntable.dataTable thead .sorting:before, table.dataTable thead .sorting:after,\ntable.dataTable thead .sorting_asc:before,\ntable.dataTable thead .sorting_asc:after,\ntable.dataTable thead .sorting_desc:before,\ntable.dataTable thead .sorting_desc:after,\ntable.dataTable thead .sorting_asc_disabled:before,\ntable.dataTable thead .sorting_asc_disabled:after,\ntable.dataTable thead .sorting_desc_disabled:before,\ntable.dataTable thead .sorting_desc_disabled:after {\n  position: absolute;\n  bottom: 11px;\n  display: block;\n  opacity: 0.3;\n  font-size: 1.3em;\n}\ntable.dataTable thead .sorting:before,\ntable.dataTable thead 
.sorting_asc:before,\ntable.dataTable thead .sorting_desc:before,\ntable.dataTable thead .sorting_asc_disabled:before,\ntable.dataTable thead .sorting_desc_disabled:before {\n  right: 1em;\n  content: \"\\2191\";\n}\ntable.dataTable thead .sorting:after,\ntable.dataTable thead .sorting_asc:after,\ntable.dataTable thead .sorting_desc:after,\ntable.dataTable thead .sorting_asc_disabled:after,\ntable.dataTable thead .sorting_desc_disabled:after {\n  right: 0.5em;\n  content: \"\\2193\";\n}\ntable.dataTable thead .sorting_asc:before,\ntable.dataTable thead .sorting_desc:after {\n  opacity: 1;\n}\ntable.dataTable thead .sorting_asc_disabled:before,\ntable.dataTable thead .sorting_desc_disabled:after {\n  opacity: 0;\n}\n"
  },
  {
    "path": "web_gui/gui_v3/css/dataTables.semanticui.css",
    "content": "/*\n * Styling for DataTables with Semantic UI\n */\ntable.dataTable.table {\n  margin: 0;\n}\ntable.dataTable.table thead th,\ntable.dataTable.table thead td {\n  position: relative;\n}\ntable.dataTable.table thead th.sorting, table.dataTable.table thead th.sorting_asc, table.dataTable.table thead th.sorting_desc,\ntable.dataTable.table thead td.sorting,\ntable.dataTable.table thead td.sorting_asc,\ntable.dataTable.table thead td.sorting_desc {\n  padding-right: 20px;\n}\ntable.dataTable.table thead th.sorting:after, table.dataTable.table thead th.sorting_asc:after, table.dataTable.table thead th.sorting_desc:after,\ntable.dataTable.table thead td.sorting:after,\ntable.dataTable.table thead td.sorting_asc:after,\ntable.dataTable.table thead td.sorting_desc:after {\n  position: absolute;\n  top: 12px;\n  right: 8px;\n  display: block;\n  font-family: Icons;\n}\ntable.dataTable.table thead th.sorting:after,\ntable.dataTable.table thead td.sorting:after {\n  content: \"\\f0dc\";\n  color: #ddd;\n  font-size: 0.8em;\n}\ntable.dataTable.table thead th.sorting_asc:after,\ntable.dataTable.table thead td.sorting_asc:after {\n  content: \"\\f0de\";\n}\ntable.dataTable.table thead th.sorting_desc:after,\ntable.dataTable.table thead td.sorting_desc:after {\n  content: \"\\f0dd\";\n}\ntable.dataTable.table td,\ntable.dataTable.table th {\n  -webkit-box-sizing: content-box;\n  -moz-box-sizing: content-box;\n  box-sizing: content-box;\n}\ntable.dataTable.table td.dataTables_empty,\ntable.dataTable.table th.dataTables_empty {\n  text-align: center;\n}\ntable.dataTable.table.nowrap th,\ntable.dataTable.table.nowrap td {\n  white-space: nowrap;\n}\n\ndiv.dataTables_wrapper div.dataTables_length select {\n  vertical-align: middle;\n  min-height: 2.7142em;\n}\ndiv.dataTables_wrapper div.dataTables_length .ui.selection.dropdown {\n  min-width: 0;\n}\ndiv.dataTables_wrapper div.dataTables_filter input {\n  margin-left: 0.5em;\n}\ndiv.dataTables_wrapper 
div.dataTables_info {\n  padding-top: 13px;\n  white-space: nowrap;\n}\ndiv.dataTables_wrapper div.dataTables_processing {\n  position: absolute;\n  top: 50%;\n  left: 50%;\n  width: 200px;\n  margin-left: -100px;\n  text-align: center;\n}\ndiv.dataTables_wrapper div.row.dt-table {\n  padding: 0;\n}\ndiv.dataTables_wrapper div.dataTables_scrollHead table.dataTable {\n  border-bottom-right-radius: 0;\n  border-bottom-left-radius: 0;\n  border-bottom: none;\n}\ndiv.dataTables_wrapper div.dataTables_scrollBody thead .sorting:after,\ndiv.dataTables_wrapper div.dataTables_scrollBody thead .sorting_asc:after,\ndiv.dataTables_wrapper div.dataTables_scrollBody thead .sorting_desc:after {\n  display: none;\n}\ndiv.dataTables_wrapper div.dataTables_scrollBody table.dataTable {\n  border-radius: 0;\n  border-top: none;\n  border-bottom-width: 0;\n}\ndiv.dataTables_wrapper div.dataTables_scrollBody table.dataTable.no-footer {\n  border-bottom-width: 1px;\n}\ndiv.dataTables_wrapper div.dataTables_scrollFoot table.dataTable {\n  border-top-right-radius: 0;\n  border-top-left-radius: 0;\n  border-top: none;\n}\n"
  },
  {
    "path": "web_gui/gui_v3/css/dataTables.uikit.css",
    "content": "table.dataTable {\n  clear: both;\n  margin-top: 6px !important;\n  margin-bottom: 6px !important;\n  max-width: none !important;\n}\ntable.dataTable td,\ntable.dataTable th {\n  -webkit-box-sizing: content-box;\n  box-sizing: content-box;\n}\ntable.dataTable td.dataTables_empty,\ntable.dataTable th.dataTables_empty {\n  text-align: center;\n}\ntable.dataTable.nowrap th,\ntable.dataTable.nowrap td {\n  white-space: nowrap;\n}\n\ndiv.dataTables_wrapper div.row.uk-grid.dt-merge-grid {\n  margin-top: 5px;\n}\ndiv.dataTables_wrapper div.dataTables_length label {\n  font-weight: normal;\n  text-align: left;\n  white-space: nowrap;\n}\ndiv.dataTables_wrapper div.dataTables_length select {\n  width: 75px;\n  display: inline-block;\n}\ndiv.dataTables_wrapper div.dataTables_filter {\n  text-align: right;\n}\ndiv.dataTables_wrapper div.dataTables_filter label {\n  font-weight: normal;\n  white-space: nowrap;\n  text-align: left;\n}\ndiv.dataTables_wrapper div.dataTables_filter input {\n  margin-left: 0.5em;\n  display: inline-block;\n  width: auto;\n}\ndiv.dataTables_wrapper div.dataTables_info {\n  padding-top: 8px;\n  white-space: nowrap;\n}\ndiv.dataTables_wrapper div.dataTables_paginate {\n  margin: 0;\n  white-space: nowrap;\n  text-align: right;\n}\ndiv.dataTables_wrapper div.dataTables_paginate ul.pagination {\n  margin: 2px 0;\n  white-space: nowrap;\n}\ndiv.dataTables_wrapper div.dataTables_processing {\n  position: absolute;\n  top: 50%;\n  left: 50%;\n  width: 200px;\n  margin-left: -100px;\n  margin-top: -26px;\n  text-align: center;\n  padding: 1em 0;\n}\n\ntable.dataTable thead > tr > th,\ntable.dataTable thead > tr > td {\n  position: relative;\n}\ntable.dataTable thead > tr > th.sorting_asc, table.dataTable thead > tr > th.sorting_desc, table.dataTable thead > tr > th.sorting,\ntable.dataTable thead > tr > td.sorting_asc,\ntable.dataTable thead > tr > td.sorting_desc,\ntable.dataTable thead > tr > td.sorting {\n  padding-right: 
30px;\n}\ntable.dataTable thead > tr > th.sorting:after, table.dataTable thead > tr > th.sorting_asc:after, table.dataTable thead > tr > th.sorting_desc:after,\ntable.dataTable thead > tr > td.sorting:after,\ntable.dataTable thead > tr > td.sorting_asc:after,\ntable.dataTable thead > tr > td.sorting_desc:after {\n  position: absolute;\n  top: 7px;\n  right: 8px;\n  display: block;\n  font-family: 'FontAwesome';\n}\ntable.dataTable thead > tr > th.sorting:after,\ntable.dataTable thead > tr > td.sorting:after {\n  content: \"\\f0dc\";\n  color: #ddd;\n  font-size: 0.8em;\n  padding-top: 0.12em;\n}\ntable.dataTable thead > tr > th.sorting_asc:after,\ntable.dataTable thead > tr > td.sorting_asc:after {\n  content: \"\\f0de\";\n}\ntable.dataTable thead > tr > th.sorting_desc:after,\ntable.dataTable thead > tr > td.sorting_desc:after {\n  content: \"\\f0dd\";\n}\n\ndiv.dataTables_scrollHead table.dataTable {\n  margin-bottom: 0 !important;\n}\n\ndiv.dataTables_scrollBody table {\n  border-top: none;\n  margin-top: 0 !important;\n  margin-bottom: 0 !important;\n}\ndiv.dataTables_scrollBody table thead .sorting:after,\ndiv.dataTables_scrollBody table thead .sorting_asc:after,\ndiv.dataTables_scrollBody table thead .sorting_desc:after {\n  display: none;\n}\ndiv.dataTables_scrollBody table tbody tr:first-child th,\ndiv.dataTables_scrollBody table tbody tr:first-child td {\n  border-top: none;\n}\n\ndiv.dataTables_scrollFoot table {\n  margin-top: 0 !important;\n  border-top: none;\n}\n\n@media screen and (max-width: 767px) {\n  div.dataTables_wrapper div.dataTables_length,\n  div.dataTables_wrapper div.dataTables_filter,\n  div.dataTables_wrapper div.dataTables_info,\n  div.dataTables_wrapper div.dataTables_paginate {\n    text-align: center;\n  }\n}\ntable.dataTable.uk-table-condensed > thead > tr > th {\n  padding-right: 20px;\n}\ntable.dataTable.uk-table-condensed .sorting:after,\ntable.dataTable.uk-table-condensed .sorting_asc:after,\ntable.dataTable.uk-table-condensed 
.sorting_desc:after {\n  top: 6px;\n  right: 6px;\n}\n"
  },
  {
    "path": "web_gui/gui_v3/css/jquery.dataTables.css",
    "content": "/*\n * Table styles\n */\ntable.dataTable {\n  width: 100%;\n  margin: 0 auto;\n  clear: both;\n  border-collapse: separate;\n  border-spacing: 0;\n  /*\n   * Header and footer styles\n   */\n  /*\n   * Body styles\n   */\n}\ntable.dataTable thead th,\ntable.dataTable tfoot th {\n  font-weight: bold;\n}\ntable.dataTable thead th,\ntable.dataTable thead td {\n  padding: 10px 18px;\n  border-bottom: 1px solid #111;\n}\ntable.dataTable thead th:active,\ntable.dataTable thead td:active {\n  outline: none;\n}\ntable.dataTable tfoot th,\ntable.dataTable tfoot td {\n  padding: 10px 18px 6px 18px;\n  border-top: 1px solid #111;\n}\ntable.dataTable thead .sorting,\ntable.dataTable thead .sorting_asc,\ntable.dataTable thead .sorting_desc {\n  cursor: pointer;\n  *cursor: hand;\n}\ntable.dataTable thead .sorting,\ntable.dataTable thead .sorting_asc,\ntable.dataTable thead .sorting_desc,\ntable.dataTable thead .sorting_asc_disabled,\ntable.dataTable thead .sorting_desc_disabled {\n  background-repeat: no-repeat;\n  background-position: center right;\n}\ntable.dataTable thead .sorting {\n  background-image: url(\"../images/sort_both.png\");\n}\ntable.dataTable thead .sorting_asc {\n  background-image: url(\"../images/sort_asc.png\");\n}\ntable.dataTable thead .sorting_desc {\n  background-image: url(\"../images/sort_desc.png\");\n}\ntable.dataTable thead .sorting_asc_disabled {\n  background-image: url(\"../images/sort_asc_disabled.png\");\n}\ntable.dataTable thead .sorting_desc_disabled {\n  background-image: url(\"../images/sort_desc_disabled.png\");\n}\ntable.dataTable tbody tr {\n  background-color: #ffffff;\n}\ntable.dataTable tbody tr.selected {\n  background-color: #B0BED9;\n}\ntable.dataTable tbody th,\ntable.dataTable tbody td {\n  padding: 8px 10px;\n}\ntable.dataTable.row-border tbody th, table.dataTable.row-border tbody td, table.dataTable.display tbody th, table.dataTable.display tbody td {\n  border-top: 1px solid 
#ddd;\n}\ntable.dataTable.row-border tbody tr:first-child th,\ntable.dataTable.row-border tbody tr:first-child td, table.dataTable.display tbody tr:first-child th,\ntable.dataTable.display tbody tr:first-child td {\n  border-top: none;\n}\ntable.dataTable.cell-border tbody th, table.dataTable.cell-border tbody td {\n  border-top: 1px solid #ddd;\n  border-right: 1px solid #ddd;\n}\ntable.dataTable.cell-border tbody tr th:first-child,\ntable.dataTable.cell-border tbody tr td:first-child {\n  border-left: 1px solid #ddd;\n}\ntable.dataTable.cell-border tbody tr:first-child th,\ntable.dataTable.cell-border tbody tr:first-child td {\n  border-top: none;\n}\ntable.dataTable.stripe tbody tr.odd, table.dataTable.display tbody tr.odd {\n  background-color: #f9f9f9;\n}\ntable.dataTable.stripe tbody tr.odd.selected, table.dataTable.display tbody tr.odd.selected {\n  background-color: #acbad4;\n}\ntable.dataTable.hover tbody tr:hover, table.dataTable.display tbody tr:hover {\n  background-color: #f6f6f6;\n}\ntable.dataTable.hover tbody tr:hover.selected, table.dataTable.display tbody tr:hover.selected {\n  background-color: #aab7d1;\n}\ntable.dataTable.order-column tbody tr > .sorting_1,\ntable.dataTable.order-column tbody tr > .sorting_2,\ntable.dataTable.order-column tbody tr > .sorting_3, table.dataTable.display tbody tr > .sorting_1,\ntable.dataTable.display tbody tr > .sorting_2,\ntable.dataTable.display tbody tr > .sorting_3 {\n  background-color: #fafafa;\n}\ntable.dataTable.order-column tbody tr.selected > .sorting_1,\ntable.dataTable.order-column tbody tr.selected > .sorting_2,\ntable.dataTable.order-column tbody tr.selected > .sorting_3, table.dataTable.display tbody tr.selected > .sorting_1,\ntable.dataTable.display tbody tr.selected > .sorting_2,\ntable.dataTable.display tbody tr.selected > .sorting_3 {\n  background-color: #acbad5;\n}\ntable.dataTable.display tbody tr.odd > .sorting_1, table.dataTable.order-column.stripe tbody tr.odd > .sorting_1 {\n  
background-color: #f1f1f1;\n}\ntable.dataTable.display tbody tr.odd > .sorting_2, table.dataTable.order-column.stripe tbody tr.odd > .sorting_2 {\n  background-color: #f3f3f3;\n}\ntable.dataTable.display tbody tr.odd > .sorting_3, table.dataTable.order-column.stripe tbody tr.odd > .sorting_3 {\n  background-color: whitesmoke;\n}\ntable.dataTable.display tbody tr.odd.selected > .sorting_1, table.dataTable.order-column.stripe tbody tr.odd.selected > .sorting_1 {\n  background-color: #a6b4cd;\n}\ntable.dataTable.display tbody tr.odd.selected > .sorting_2, table.dataTable.order-column.stripe tbody tr.odd.selected > .sorting_2 {\n  background-color: #a8b5cf;\n}\ntable.dataTable.display tbody tr.odd.selected > .sorting_3, table.dataTable.order-column.stripe tbody tr.odd.selected > .sorting_3 {\n  background-color: #a9b7d1;\n}\ntable.dataTable.display tbody tr.even > .sorting_1, table.dataTable.order-column.stripe tbody tr.even > .sorting_1 {\n  background-color: #fafafa;\n}\ntable.dataTable.display tbody tr.even > .sorting_2, table.dataTable.order-column.stripe tbody tr.even > .sorting_2 {\n  background-color: #fcfcfc;\n}\ntable.dataTable.display tbody tr.even > .sorting_3, table.dataTable.order-column.stripe tbody tr.even > .sorting_3 {\n  background-color: #fefefe;\n}\ntable.dataTable.display tbody tr.even.selected > .sorting_1, table.dataTable.order-column.stripe tbody tr.even.selected > .sorting_1 {\n  background-color: #acbad5;\n}\ntable.dataTable.display tbody tr.even.selected > .sorting_2, table.dataTable.order-column.stripe tbody tr.even.selected > .sorting_2 {\n  background-color: #aebcd6;\n}\ntable.dataTable.display tbody tr.even.selected > .sorting_3, table.dataTable.order-column.stripe tbody tr.even.selected > .sorting_3 {\n  background-color: #afbdd8;\n}\ntable.dataTable.display tbody tr:hover > .sorting_1, table.dataTable.order-column.hover tbody tr:hover > .sorting_1 {\n  background-color: #eaeaea;\n}\ntable.dataTable.display tbody tr:hover > .sorting_2, 
table.dataTable.order-column.hover tbody tr:hover > .sorting_2 {\n  background-color: #ececec;\n}\ntable.dataTable.display tbody tr:hover > .sorting_3, table.dataTable.order-column.hover tbody tr:hover > .sorting_3 {\n  background-color: #efefef;\n}\ntable.dataTable.display tbody tr:hover.selected > .sorting_1, table.dataTable.order-column.hover tbody tr:hover.selected > .sorting_1 {\n  background-color: #a2aec7;\n}\ntable.dataTable.display tbody tr:hover.selected > .sorting_2, table.dataTable.order-column.hover tbody tr:hover.selected > .sorting_2 {\n  background-color: #a3b0c9;\n}\ntable.dataTable.display tbody tr:hover.selected > .sorting_3, table.dataTable.order-column.hover tbody tr:hover.selected > .sorting_3 {\n  background-color: #a5b2cb;\n}\ntable.dataTable.no-footer {\n  border-bottom: 1px solid #111;\n}\ntable.dataTable.nowrap th, table.dataTable.nowrap td {\n  white-space: nowrap;\n}\ntable.dataTable.compact thead th,\ntable.dataTable.compact thead td {\n  padding: 4px 17px 4px 4px;\n}\ntable.dataTable.compact tfoot th,\ntable.dataTable.compact tfoot td {\n  padding: 4px;\n}\ntable.dataTable.compact tbody th,\ntable.dataTable.compact tbody td {\n  padding: 4px;\n}\ntable.dataTable th.dt-left,\ntable.dataTable td.dt-left {\n  text-align: left;\n}\ntable.dataTable th.dt-center,\ntable.dataTable td.dt-center,\ntable.dataTable td.dataTables_empty {\n  text-align: center;\n}\ntable.dataTable th.dt-right,\ntable.dataTable td.dt-right {\n  text-align: right;\n}\ntable.dataTable th.dt-justify,\ntable.dataTable td.dt-justify {\n  text-align: justify;\n}\ntable.dataTable th.dt-nowrap,\ntable.dataTable td.dt-nowrap {\n  white-space: nowrap;\n}\ntable.dataTable thead th.dt-head-left,\ntable.dataTable thead td.dt-head-left,\ntable.dataTable tfoot th.dt-head-left,\ntable.dataTable tfoot td.dt-head-left {\n  text-align: left;\n}\ntable.dataTable thead th.dt-head-center,\ntable.dataTable thead td.dt-head-center,\ntable.dataTable tfoot 
th.dt-head-center,\ntable.dataTable tfoot td.dt-head-center {\n  text-align: center;\n}\ntable.dataTable thead th.dt-head-right,\ntable.dataTable thead td.dt-head-right,\ntable.dataTable tfoot th.dt-head-right,\ntable.dataTable tfoot td.dt-head-right {\n  text-align: right;\n}\ntable.dataTable thead th.dt-head-justify,\ntable.dataTable thead td.dt-head-justify,\ntable.dataTable tfoot th.dt-head-justify,\ntable.dataTable tfoot td.dt-head-justify {\n  text-align: justify;\n}\ntable.dataTable thead th.dt-head-nowrap,\ntable.dataTable thead td.dt-head-nowrap,\ntable.dataTable tfoot th.dt-head-nowrap,\ntable.dataTable tfoot td.dt-head-nowrap {\n  white-space: nowrap;\n}\ntable.dataTable tbody th.dt-body-left,\ntable.dataTable tbody td.dt-body-left {\n  text-align: left;\n}\ntable.dataTable tbody th.dt-body-center,\ntable.dataTable tbody td.dt-body-center {\n  text-align: center;\n}\ntable.dataTable tbody th.dt-body-right,\ntable.dataTable tbody td.dt-body-right {\n  text-align: right;\n}\ntable.dataTable tbody th.dt-body-justify,\ntable.dataTable tbody td.dt-body-justify {\n  text-align: justify;\n}\ntable.dataTable tbody th.dt-body-nowrap,\ntable.dataTable tbody td.dt-body-nowrap {\n  white-space: nowrap;\n}\n\ntable.dataTable,\ntable.dataTable th,\ntable.dataTable td {\n  -webkit-box-sizing: content-box;\n  box-sizing: content-box;\n}\n\n/*\n * Control feature layout\n */\n.dataTables_wrapper {\n  position: relative;\n  clear: both;\n  *zoom: 1;\n  zoom: 1;\n}\n.dataTables_wrapper .dataTables_length {\n  float: left;\n}\n.dataTables_wrapper .dataTables_filter {\n  float: right;\n  text-align: right;\n}\n.dataTables_wrapper .dataTables_filter input {\n  margin-left: 0.5em;\n}\n.dataTables_wrapper .dataTables_info {\n  clear: both;\n  float: left;\n  padding-top: 0.755em;\n}\n.dataTables_wrapper .dataTables_paginate {\n  float: right;\n  text-align: right;\n  padding-top: 0.25em;\n}\n.dataTables_wrapper .dataTables_paginate .paginate_button {\n  box-sizing: 
border-box;\n  display: inline-block;\n  min-width: 1.5em;\n  padding: 0.5em 1em;\n  margin-left: 2px;\n  text-align: center;\n  text-decoration: none !important;\n  cursor: pointer;\n  *cursor: hand;\n  color: #333 !important;\n  border: 1px solid transparent;\n  border-radius: 2px;\n}\n.dataTables_wrapper .dataTables_paginate .paginate_button.current, .dataTables_wrapper .dataTables_paginate .paginate_button.current:hover {\n  color: #333 !important;\n  border: 1px solid #979797;\n  background-color: white;\n  background: -webkit-gradient(linear, left top, left bottom, color-stop(0%, white), color-stop(100%, #dcdcdc));\n  /* Chrome,Safari4+ */\n  background: -webkit-linear-gradient(top, white 0%, #dcdcdc 100%);\n  /* Chrome10+,Safari5.1+ */\n  background: -moz-linear-gradient(top, white 0%, #dcdcdc 100%);\n  /* FF3.6+ */\n  background: -ms-linear-gradient(top, white 0%, #dcdcdc 100%);\n  /* IE10+ */\n  background: -o-linear-gradient(top, white 0%, #dcdcdc 100%);\n  /* Opera 11.10+ */\n  background: linear-gradient(to bottom, white 0%, #dcdcdc 100%);\n  /* W3C */\n}\n.dataTables_wrapper .dataTables_paginate .paginate_button.disabled, .dataTables_wrapper .dataTables_paginate .paginate_button.disabled:hover, .dataTables_wrapper .dataTables_paginate .paginate_button.disabled:active {\n  cursor: default;\n  color: #666 !important;\n  border: 1px solid transparent;\n  background: transparent;\n  box-shadow: none;\n}\n.dataTables_wrapper .dataTables_paginate .paginate_button:hover {\n  color: white !important;\n  border: 1px solid #111;\n  background-color: #585858;\n  background: -webkit-gradient(linear, left top, left bottom, color-stop(0%, #585858), color-stop(100%, #111));\n  /* Chrome,Safari4+ */\n  background: -webkit-linear-gradient(top, #585858 0%, #111 100%);\n  /* Chrome10+,Safari5.1+ */\n  background: -moz-linear-gradient(top, #585858 0%, #111 100%);\n  /* FF3.6+ */\n  background: -ms-linear-gradient(top, #585858 0%, #111 100%);\n  /* IE10+ */\n  background: 
-o-linear-gradient(top, #585858 0%, #111 100%);\n  /* Opera 11.10+ */\n  background: linear-gradient(to bottom, #585858 0%, #111 100%);\n  /* W3C */\n}\n.dataTables_wrapper .dataTables_paginate .paginate_button:active {\n  outline: none;\n  background-color: #2b2b2b;\n  background: -webkit-gradient(linear, left top, left bottom, color-stop(0%, #2b2b2b), color-stop(100%, #0c0c0c));\n  /* Chrome,Safari4+ */\n  background: -webkit-linear-gradient(top, #2b2b2b 0%, #0c0c0c 100%);\n  /* Chrome10+,Safari5.1+ */\n  background: -moz-linear-gradient(top, #2b2b2b 0%, #0c0c0c 100%);\n  /* FF3.6+ */\n  background: -ms-linear-gradient(top, #2b2b2b 0%, #0c0c0c 100%);\n  /* IE10+ */\n  background: -o-linear-gradient(top, #2b2b2b 0%, #0c0c0c 100%);\n  /* Opera 11.10+ */\n  background: linear-gradient(to bottom, #2b2b2b 0%, #0c0c0c 100%);\n  /* W3C */\n  box-shadow: inset 0 0 3px #111;\n}\n.dataTables_wrapper .dataTables_paginate .ellipsis {\n  padding: 0 1em;\n}\n.dataTables_wrapper .dataTables_processing {\n  position: absolute;\n  top: 50%;\n  left: 50%;\n  width: 100%;\n  height: 40px;\n  margin-left: -50%;\n  margin-top: -25px;\n  padding-top: 20px;\n  text-align: center;\n  font-size: 1.2em;\n  background-color: white;\n  background: -webkit-gradient(linear, left top, right top, color-stop(0%, rgba(255, 255, 255, 0)), color-stop(25%, rgba(255, 255, 255, 0.9)), color-stop(75%, rgba(255, 255, 255, 0.9)), color-stop(100%, rgba(255, 255, 255, 0)));\n  background: -webkit-linear-gradient(left, rgba(255, 255, 255, 0) 0%, rgba(255, 255, 255, 0.9) 25%, rgba(255, 255, 255, 0.9) 75%, rgba(255, 255, 255, 0) 100%);\n  background: -moz-linear-gradient(left, rgba(255, 255, 255, 0) 0%, rgba(255, 255, 255, 0.9) 25%, rgba(255, 255, 255, 0.9) 75%, rgba(255, 255, 255, 0) 100%);\n  background: -ms-linear-gradient(left, rgba(255, 255, 255, 0) 0%, rgba(255, 255, 255, 0.9) 25%, rgba(255, 255, 255, 0.9) 75%, rgba(255, 255, 255, 0) 100%);\n  background: -o-linear-gradient(left, rgba(255, 255, 255, 0) 
0%, rgba(255, 255, 255, 0.9) 25%, rgba(255, 255, 255, 0.9) 75%, rgba(255, 255, 255, 0) 100%);\n  background: linear-gradient(to right, rgba(255, 255, 255, 0) 0%, rgba(255, 255, 255, 0.9) 25%, rgba(255, 255, 255, 0.9) 75%, rgba(255, 255, 255, 0) 100%);\n}\n.dataTables_wrapper .dataTables_length,\n.dataTables_wrapper .dataTables_filter,\n.dataTables_wrapper .dataTables_info,\n.dataTables_wrapper .dataTables_processing,\n.dataTables_wrapper .dataTables_paginate {\n  color: #333;\n}\n.dataTables_wrapper .dataTables_scroll {\n  clear: both;\n}\n.dataTables_wrapper .dataTables_scroll div.dataTables_scrollBody {\n  *margin-top: -1px;\n  -webkit-overflow-scrolling: touch;\n}\n.dataTables_wrapper .dataTables_scroll div.dataTables_scrollBody th, .dataTables_wrapper .dataTables_scroll div.dataTables_scrollBody td {\n  vertical-align: middle;\n}\n.dataTables_wrapper .dataTables_scroll div.dataTables_scrollBody th > div.dataTables_sizing,\n.dataTables_wrapper .dataTables_scroll div.dataTables_scrollBody td > div.dataTables_sizing {\n  height: 0;\n  overflow: hidden;\n  margin: 0 !important;\n  padding: 0 !important;\n}\n.dataTables_wrapper.no-footer .dataTables_scrollBody {\n  border-bottom: 1px solid #111;\n}\n.dataTables_wrapper.no-footer div.dataTables_scrollHead table,\n.dataTables_wrapper.no-footer div.dataTables_scrollBody table {\n  border-bottom: none;\n}\n.dataTables_wrapper:after {\n  visibility: hidden;\n  display: block;\n  content: \"\";\n  clear: both;\n  height: 0;\n}\n\n@media screen and (max-width: 767px) {\n  .dataTables_wrapper .dataTables_info,\n  .dataTables_wrapper .dataTables_paginate {\n    float: none;\n    text-align: center;\n  }\n  .dataTables_wrapper .dataTables_paginate {\n    margin-top: 0.5em;\n  }\n}\n@media screen and (max-width: 640px) {\n  .dataTables_wrapper .dataTables_length,\n  .dataTables_wrapper .dataTables_filter {\n    float: none;\n    text-align: center;\n  }\n  .dataTables_wrapper .dataTables_filter {\n    margin-top: 0.5em;\n  
}\n}\n"
  },
  {
    "path": "web_gui/gui_v3/css/jquery.dataTables_themeroller.css",
    "content": "/*\n * Table styles\n */\ntable.dataTable {\n  width: 100%;\n  margin: 0 auto;\n  clear: both;\n  border-collapse: separate;\n  border-spacing: 0;\n  /*\n   * Header and footer styles\n   */\n  /*\n   * Body styles\n   */\n}\ntable.dataTable thead th,\ntable.dataTable thead td,\ntable.dataTable tfoot th,\ntable.dataTable tfoot td {\n  padding: 4px 10px;\n}\ntable.dataTable thead th,\ntable.dataTable tfoot th {\n  font-weight: bold;\n}\ntable.dataTable thead th:active,\ntable.dataTable thead td:active {\n  outline: none;\n}\ntable.dataTable thead .sorting_asc,\ntable.dataTable thead .sorting_desc,\ntable.dataTable thead .sorting {\n  cursor: pointer;\n  *cursor: hand;\n}\ntable.dataTable thead th div.DataTables_sort_wrapper {\n  position: relative;\n  padding-right: 10px;\n}\ntable.dataTable thead th div.DataTables_sort_wrapper span {\n  position: absolute;\n  top: 50%;\n  margin-top: -8px;\n  right: -5px;\n}\ntable.dataTable thead th.ui-state-default {\n  border-right-width: 0;\n}\ntable.dataTable thead th.ui-state-default:last-child {\n  border-right-width: 1px;\n}\ntable.dataTable tbody tr {\n  background-color: #ffffff;\n}\ntable.dataTable tbody tr.selected {\n  background-color: #B0BED9;\n}\ntable.dataTable tbody th,\ntable.dataTable tbody td {\n  padding: 8px 10px;\n}\ntable.dataTable th.center,\ntable.dataTable td.center,\ntable.dataTable td.dataTables_empty {\n  text-align: center;\n}\ntable.dataTable th.right,\ntable.dataTable td.right {\n  text-align: right;\n}\ntable.dataTable.row-border tbody th, table.dataTable.row-border tbody td, table.dataTable.display tbody th, table.dataTable.display tbody td {\n  border-top: 1px solid #ddd;\n}\ntable.dataTable.row-border tbody tr:first-child th,\ntable.dataTable.row-border tbody tr:first-child td, table.dataTable.display tbody tr:first-child th,\ntable.dataTable.display tbody tr:first-child td {\n  border-top: none;\n}\ntable.dataTable.cell-border tbody th, table.dataTable.cell-border tbody td {\n  
border-top: 1px solid #ddd;\n  border-right: 1px solid #ddd;\n}\ntable.dataTable.cell-border tbody tr th:first-child,\ntable.dataTable.cell-border tbody tr td:first-child {\n  border-left: 1px solid #ddd;\n}\ntable.dataTable.cell-border tbody tr:first-child th,\ntable.dataTable.cell-border tbody tr:first-child td {\n  border-top: none;\n}\ntable.dataTable.stripe tbody tr.odd, table.dataTable.display tbody tr.odd {\n  background-color: #f9f9f9;\n}\ntable.dataTable.stripe tbody tr.odd.selected, table.dataTable.display tbody tr.odd.selected {\n  background-color: #abb9d3;\n}\ntable.dataTable.hover tbody tr:hover,\ntable.dataTable.hover tbody tr.odd:hover,\ntable.dataTable.hover tbody tr.even:hover, table.dataTable.display tbody tr:hover,\ntable.dataTable.display tbody tr.odd:hover,\ntable.dataTable.display tbody tr.even:hover {\n  background-color: whitesmoke;\n}\ntable.dataTable.hover tbody tr:hover.selected,\ntable.dataTable.hover tbody tr.odd:hover.selected,\ntable.dataTable.hover tbody tr.even:hover.selected, table.dataTable.display tbody tr:hover.selected,\ntable.dataTable.display tbody tr.odd:hover.selected,\ntable.dataTable.display tbody tr.even:hover.selected {\n  background-color: #a9b7d1;\n}\ntable.dataTable.order-column tbody tr > .sorting_1,\ntable.dataTable.order-column tbody tr > .sorting_2,\ntable.dataTable.order-column tbody tr > .sorting_3, table.dataTable.display tbody tr > .sorting_1,\ntable.dataTable.display tbody tr > .sorting_2,\ntable.dataTable.display tbody tr > .sorting_3 {\n  background-color: #f9f9f9;\n}\ntable.dataTable.order-column tbody tr.selected > .sorting_1,\ntable.dataTable.order-column tbody tr.selected > .sorting_2,\ntable.dataTable.order-column tbody tr.selected > .sorting_3, table.dataTable.display tbody tr.selected > .sorting_1,\ntable.dataTable.display tbody tr.selected > .sorting_2,\ntable.dataTable.display tbody tr.selected > .sorting_3 {\n  background-color: #acbad4;\n}\ntable.dataTable.display tbody tr.odd > .sorting_1, 
table.dataTable.order-column.stripe tbody tr.odd > .sorting_1 {\n  background-color: #f1f1f1;\n}\ntable.dataTable.display tbody tr.odd > .sorting_2, table.dataTable.order-column.stripe tbody tr.odd > .sorting_2 {\n  background-color: #f3f3f3;\n}\ntable.dataTable.display tbody tr.odd > .sorting_3, table.dataTable.order-column.stripe tbody tr.odd > .sorting_3 {\n  background-color: whitesmoke;\n}\ntable.dataTable.display tbody tr.odd.selected > .sorting_1, table.dataTable.order-column.stripe tbody tr.odd.selected > .sorting_1 {\n  background-color: #a6b3cd;\n}\ntable.dataTable.display tbody tr.odd.selected > .sorting_2, table.dataTable.order-column.stripe tbody tr.odd.selected > .sorting_2 {\n  background-color: #a7b5ce;\n}\ntable.dataTable.display tbody tr.odd.selected > .sorting_3, table.dataTable.order-column.stripe tbody tr.odd.selected > .sorting_3 {\n  background-color: #a9b6d0;\n}\ntable.dataTable.display tbody tr.even > .sorting_1, table.dataTable.order-column.stripe tbody tr.even > .sorting_1 {\n  background-color: #f9f9f9;\n}\ntable.dataTable.display tbody tr.even > .sorting_2, table.dataTable.order-column.stripe tbody tr.even > .sorting_2 {\n  background-color: #fbfbfb;\n}\ntable.dataTable.display tbody tr.even > .sorting_3, table.dataTable.order-column.stripe tbody tr.even > .sorting_3 {\n  background-color: #fdfdfd;\n}\ntable.dataTable.display tbody tr.even.selected > .sorting_1, table.dataTable.order-column.stripe tbody tr.even.selected > .sorting_1 {\n  background-color: #acbad4;\n}\ntable.dataTable.display tbody tr.even.selected > .sorting_2, table.dataTable.order-column.stripe tbody tr.even.selected > .sorting_2 {\n  background-color: #adbbd6;\n}\ntable.dataTable.display tbody tr.even.selected > .sorting_3, table.dataTable.order-column.stripe tbody tr.even.selected > .sorting_3 {\n  background-color: #afbdd8;\n}\ntable.dataTable.display tbody tr:hover > .sorting_1,\ntable.dataTable.display tbody tr.odd:hover > .sorting_1,\ntable.dataTable.display 
tbody tr.even:hover > .sorting_1, table.dataTable.order-column.hover tbody tr:hover > .sorting_1,\ntable.dataTable.order-column.hover tbody tr.odd:hover > .sorting_1,\ntable.dataTable.order-column.hover tbody tr.even:hover > .sorting_1 {\n  background-color: #eaeaea;\n}\ntable.dataTable.display tbody tr:hover > .sorting_2,\ntable.dataTable.display tbody tr.odd:hover > .sorting_2,\ntable.dataTable.display tbody tr.even:hover > .sorting_2, table.dataTable.order-column.hover tbody tr:hover > .sorting_2,\ntable.dataTable.order-column.hover tbody tr.odd:hover > .sorting_2,\ntable.dataTable.order-column.hover tbody tr.even:hover > .sorting_2 {\n  background-color: #ebebeb;\n}\ntable.dataTable.display tbody tr:hover > .sorting_3,\ntable.dataTable.display tbody tr.odd:hover > .sorting_3,\ntable.dataTable.display tbody tr.even:hover > .sorting_3, table.dataTable.order-column.hover tbody tr:hover > .sorting_3,\ntable.dataTable.order-column.hover tbody tr.odd:hover > .sorting_3,\ntable.dataTable.order-column.hover tbody tr.even:hover > .sorting_3 {\n  background-color: #eeeeee;\n}\ntable.dataTable.display tbody tr:hover.selected > .sorting_1,\ntable.dataTable.display tbody tr.odd:hover.selected > .sorting_1,\ntable.dataTable.display tbody tr.even:hover.selected > .sorting_1, table.dataTable.order-column.hover tbody tr:hover.selected > .sorting_1,\ntable.dataTable.order-column.hover tbody tr.odd:hover.selected > .sorting_1,\ntable.dataTable.order-column.hover tbody tr.even:hover.selected > .sorting_1 {\n  background-color: #a1aec7;\n}\ntable.dataTable.display tbody tr:hover.selected > .sorting_2,\ntable.dataTable.display tbody tr.odd:hover.selected > .sorting_2,\ntable.dataTable.display tbody tr.even:hover.selected > .sorting_2, table.dataTable.order-column.hover tbody tr:hover.selected > .sorting_2,\ntable.dataTable.order-column.hover tbody tr.odd:hover.selected > .sorting_2,\ntable.dataTable.order-column.hover tbody tr.even:hover.selected > .sorting_2 {\n  background-color: 
#a2afc8;\n}\ntable.dataTable.display tbody tr:hover.selected > .sorting_3,\ntable.dataTable.display tbody tr.odd:hover.selected > .sorting_3,\ntable.dataTable.display tbody tr.even:hover.selected > .sorting_3, table.dataTable.order-column.hover tbody tr:hover.selected > .sorting_3,\ntable.dataTable.order-column.hover tbody tr.odd:hover.selected > .sorting_3,\ntable.dataTable.order-column.hover tbody tr.even:hover.selected > .sorting_3 {\n  background-color: #a4b2cb;\n}\ntable.dataTable.nowrap th, table.dataTable.nowrap td {\n  white-space: nowrap;\n}\ntable.dataTable.compact thead th,\ntable.dataTable.compact thead td {\n  padding: 5px 9px;\n}\ntable.dataTable.compact tfoot th,\ntable.dataTable.compact tfoot td {\n  padding: 5px 9px 3px 9px;\n}\ntable.dataTable.compact tbody th,\ntable.dataTable.compact tbody td {\n  padding: 4px 5px;\n}\ntable.dataTable th.dt-left,\ntable.dataTable td.dt-left {\n  text-align: left;\n}\ntable.dataTable th.dt-center,\ntable.dataTable td.dt-center,\ntable.dataTable td.dataTables_empty {\n  text-align: center;\n}\ntable.dataTable th.dt-right,\ntable.dataTable td.dt-right {\n  text-align: right;\n}\ntable.dataTable th.dt-justify,\ntable.dataTable td.dt-justify {\n  text-align: justify;\n}\ntable.dataTable th.dt-nowrap,\ntable.dataTable td.dt-nowrap {\n  white-space: nowrap;\n}\ntable.dataTable thead th.dt-head-left,\ntable.dataTable thead td.dt-head-left,\ntable.dataTable tfoot th.dt-head-left,\ntable.dataTable tfoot td.dt-head-left {\n  text-align: left;\n}\ntable.dataTable thead th.dt-head-center,\ntable.dataTable thead td.dt-head-center,\ntable.dataTable tfoot th.dt-head-center,\ntable.dataTable tfoot td.dt-head-center {\n  text-align: center;\n}\ntable.dataTable thead th.dt-head-right,\ntable.dataTable thead td.dt-head-right,\ntable.dataTable tfoot th.dt-head-right,\ntable.dataTable tfoot td.dt-head-right {\n  text-align: right;\n}\ntable.dataTable thead th.dt-head-justify,\ntable.dataTable thead 
td.dt-head-justify,\ntable.dataTable tfoot th.dt-head-justify,\ntable.dataTable tfoot td.dt-head-justify {\n  text-align: justify;\n}\ntable.dataTable thead th.dt-head-nowrap,\ntable.dataTable thead td.dt-head-nowrap,\ntable.dataTable tfoot th.dt-head-nowrap,\ntable.dataTable tfoot td.dt-head-nowrap {\n  white-space: nowrap;\n}\ntable.dataTable tbody th.dt-body-left,\ntable.dataTable tbody td.dt-body-left {\n  text-align: left;\n}\ntable.dataTable tbody th.dt-body-center,\ntable.dataTable tbody td.dt-body-center {\n  text-align: center;\n}\ntable.dataTable tbody th.dt-body-right,\ntable.dataTable tbody td.dt-body-right {\n  text-align: right;\n}\ntable.dataTable tbody th.dt-body-justify,\ntable.dataTable tbody td.dt-body-justify {\n  text-align: justify;\n}\ntable.dataTable tbody th.dt-body-nowrap,\ntable.dataTable tbody td.dt-body-nowrap {\n  white-space: nowrap;\n}\n\ntable.dataTable,\ntable.dataTable th,\ntable.dataTable td {\n  -webkit-box-sizing: content-box;\n  -moz-box-sizing: content-box;\n  box-sizing: content-box;\n}\n\n/*\n * Control feature layout\n */\n.dataTables_wrapper {\n  position: relative;\n  clear: both;\n  *zoom: 1;\n  zoom: 1;\n}\n.dataTables_wrapper .dataTables_length {\n  float: left;\n}\n.dataTables_wrapper .dataTables_filter {\n  float: right;\n  text-align: right;\n}\n.dataTables_wrapper .dataTables_filter input {\n  margin-left: 0.5em;\n}\n.dataTables_wrapper .dataTables_info {\n  clear: both;\n  float: left;\n  padding-top: 0.55em;\n}\n.dataTables_wrapper .dataTables_paginate {\n  float: right;\n  text-align: right;\n}\n.dataTables_wrapper .dataTables_paginate .fg-button {\n  box-sizing: border-box;\n  display: inline-block;\n  min-width: 1.5em;\n  padding: 0.5em;\n  margin-left: 2px;\n  text-align: center;\n  text-decoration: none !important;\n  cursor: pointer;\n  *cursor: hand;\n  color: #333 !important;\n  border: 1px solid transparent;\n}\n.dataTables_wrapper .dataTables_paginate .fg-button:active {\n  outline: 
none;\n}\n.dataTables_wrapper .dataTables_paginate .fg-button:first-child {\n  border-top-left-radius: 3px;\n  border-bottom-left-radius: 3px;\n}\n.dataTables_wrapper .dataTables_paginate .fg-button:last-child {\n  border-top-right-radius: 3px;\n  border-bottom-right-radius: 3px;\n}\n.dataTables_wrapper .dataTables_processing {\n  position: absolute;\n  top: 50%;\n  left: 50%;\n  width: 100%;\n  height: 40px;\n  margin-left: -50%;\n  margin-top: -25px;\n  padding-top: 20px;\n  text-align: center;\n  font-size: 1.2em;\n  background-color: white;\n  background: -webkit-gradient(linear, left top, right top, color-stop(0%, rgba(255, 255, 255, 0)), color-stop(25%, rgba(255, 255, 255, 0.9)), color-stop(75%, rgba(255, 255, 255, 0.9)), color-stop(100%, rgba(255, 255, 255, 0)));\n  /* Chrome,Safari4+ */\n  background: -webkit-linear-gradient(left, rgba(255, 255, 255, 0) 0%, rgba(255, 255, 255, 0.9) 25%, rgba(255, 255, 255, 0.9) 75%, rgba(255, 255, 255, 0) 100%);\n  /* Chrome10+,Safari5.1+ */\n  background: -moz-linear-gradient(left, rgba(255, 255, 255, 0) 0%, rgba(255, 255, 255, 0.9) 25%, rgba(255, 255, 255, 0.9) 75%, rgba(255, 255, 255, 0) 100%);\n  /* FF3.6+ */\n  background: -ms-linear-gradient(left, rgba(255, 255, 255, 0) 0%, rgba(255, 255, 255, 0.9) 25%, rgba(255, 255, 255, 0.9) 75%, rgba(255, 255, 255, 0) 100%);\n  /* IE10+ */\n  background: -o-linear-gradient(left, rgba(255, 255, 255, 0) 0%, rgba(255, 255, 255, 0.9) 25%, rgba(255, 255, 255, 0.9) 75%, rgba(255, 255, 255, 0) 100%);\n  /* Opera 11.10+ */\n  background: linear-gradient(to right, rgba(255, 255, 255, 0) 0%, rgba(255, 255, 255, 0.9) 25%, rgba(255, 255, 255, 0.9) 75%, rgba(255, 255, 255, 0) 100%);\n  /* W3C */\n}\n.dataTables_wrapper .dataTables_length,\n.dataTables_wrapper .dataTables_filter,\n.dataTables_wrapper .dataTables_info,\n.dataTables_wrapper .dataTables_processing,\n.dataTables_wrapper .dataTables_paginate {\n  color: #333;\n}\n.dataTables_wrapper .dataTables_scroll {\n  clear: 
both;\n}\n.dataTables_wrapper .dataTables_scrollBody {\n  *margin-top: -1px;\n  -webkit-overflow-scrolling: touch;\n}\n.dataTables_wrapper .ui-widget-header {\n  font-weight: normal;\n}\n.dataTables_wrapper .ui-toolbar {\n  padding: 8px;\n}\n.dataTables_wrapper:after {\n  visibility: hidden;\n  display: block;\n  content: \"\";\n  clear: both;\n  height: 0;\n}\n\n@media screen and (max-width: 767px) {\n  .dataTables_wrapper .dataTables_length,\n  .dataTables_wrapper .dataTables_filter,\n  .dataTables_wrapper .dataTables_info,\n  .dataTables_wrapper .dataTables_paginate {\n    float: none;\n    text-align: center;\n  }\n  .dataTables_wrapper .dataTables_filter,\n  .dataTables_wrapper .dataTables_paginate {\n    margin-top: 0.5em;\n  }\n}\n"
  },
  {
    "path": "web_gui/gui_v3/customjs/newgui.js",
    "content": "/*\n * Copyright (C) 2016-2017 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n/*************************\n * Graph VAR\n ************************/\nvar GraphState=\"empty\";\nvar TableState=\"empty\";\nvar myChart;\nvar GraphCTX;\nvar lastGet=\"empty\";\n\n//Used for background data loading\nvar tableData;\nvar tableDataCount;\nvar tableDataTaskId=0;\nvar graphData;\nvar graphDataCount;\nvar graphDataTaskId=0;\n\nChart.pluginService.register({\n    beforeRender: function (chart) {\n        if (chart.config.options.showAllTooltips && (chart.config.type == \"doughnut\" || chart.config.type == \"bar\" || chart.config.type == \"pie\" )) {\n            // create an array of tooltips\n            // we can't use the chart tooltip because there is only one tooltip per chart\n            chart.pluginTooltips = [];\n            chart.config.data.datasets.forEach(function (dataset, i) {\n                chart.getDatasetMeta(i).data.forEach(function (sector, j) {\n                    chart.pluginTooltips.push(new Chart.Tooltip({\n                        _chart: chart.chart,\n                        _chartInstance: chart,\n                        _data: chart.data,\n                        _options: chart.options.tooltips,\n                        _active: [sector]\n                    }, chart));\n                });\n            });\n            // turn off normal tooltips\n            chart.options.tooltips.enabled = false;\n        }\n    },\n        afterDraw: function (chart, easing) {\n            if (chart.config.options.showAllTooltips) {\n                // we don't want the permanent tooltips to animate, so don't do anything till the animation runs atleast once\n                if 
(!chart.allTooltipsOnce) {\n                    if (easing !== 1)\n                        return;\n                    chart.allTooltipsOnce = true;\n                }\n\n                // turn on tooltips\n                chart.options.tooltips.enabled = true;\n                Chart.helpers.each(chart.pluginTooltips, function (tooltip) {\n                    tooltip.initialize();\n                    tooltip.update();\n                    // we don't actually need this since we are not animating tooltips\n                    tooltip.pivot();\n                    tooltip.transition(easing).draw();\n                });\n                chart.options.tooltips.enabled = false;\n            }\n        }\n});\n\nfunction formatBytes(bytes,decimals) {\n    if(bytes == 0) return '0 Byte';\n    var k = 1024;\n    var dm = decimals + 1 || 3;\n    var sizes = ['Bytes', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB'];\n    var i = Math.floor(Math.log(bytes) / Math.log(k));\n    return parseFloat((bytes / Math.pow(k, i)).toFixed(dm)) + ' ' + sizes[i];\n}\n\nfunction formatCount(bytes,decimals) {\n    if(bytes == 0) return 'None';\n    var k = 1000;\n    var dm = decimals + 1 || 3;\n    var sizes = ['', 'k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y'];\n    var i = Math.floor(Math.log(bytes) / Math.log(k));\n    return parseFloat((bytes / Math.pow(k, i)).toFixed(dm)) + ' ' + sizes[i];\n}\n\nfunction getRGB(str){\n    var hash = $.md5(str);\n    var rgb = '#' + hash.substring(0,2) + hash.substring(2,4) + hash.substring(4,6);\n    return rgb;\n}\n\nvar stringToColour = function(str) {\n    var hash = 0;\n    str=str+str+str+str+str+str+str+str;\n    for (var i = 0; i < str.length; i++) {\n        hash = str.charCodeAt(i) + ((hash << 5) - hash);\n    }\n    var colour = '#';\n    for (var i = 0; i < 3; i++) {\n        var value = (hash >> (i * 8)) & 0xFF;\n        colour += ('00' + value.toString(16)).substr(-2);\n    }\n    return colour;\n}\n\n\nfunction msg_warning(str){\n    
$('#messagebox').append('<div class=\"alert alert-warning\">'+\n            '<a href=\"#\" class=\"close\" data-dismiss=\"alert\" aria-label=\"close\">&times;</a>' +\n            '<strong>Warning!</strong> ' +\n            str +\n            '</div>');\n}\n\nfunction msg_danger(str){\n    $('#messagebox').append('<div class=\"alert alert-danger\">'+\n            '<a href=\"#\" class=\"close\" data-dismiss=\"alert\" aria-label=\"close\">&times;</a>' +\n            '<strong>Danger!</strong> ' +\n            str +\n            '</div>');\n}\n\n\nfunction msg_clean() {\n    $('#messagebox').html('');\n}\n\n/*********************************\n * Helper to check the API State\n * and document state.\n ********************************/\n$(function() {\n\n    //Check if api website is available\n    $.ajax({\n        url: \"api/index.php?request=robinhood\"\n    }).then(function(data) {\n        $('#quote').append(data);\n    });\n\n    //Get the filesystem path\n    $.ajax({\n        url: \"api/index.php?request=native/vars\"\n    }).then(function(data) {\n        $('#main_title').append(\": \"+data.FS_Path);\n        document.title = document.title + \": \"+data.FS_Path;\n    });\n\n    //Get the current user\n    $.ajax({\n        url: \"api/index.php?request=current_auth\"\n    }).then(function(data) {\n        if (data!='$NOAUTH') {\n            $('#loggedas').html(\"Logged as \"+data);\n        } else {\n            $('#loggedas').html(\"Logged as Nobody\");\n        }\n    });\n\n    //Add keypress event for filter form\n    $('#filterform input').on('keypress', function(event){\n        if(event.key==\"Enter\" && !event.shiftKey){\n            GetGraph(lastGet);\n       }\n    });\n\n\n});\n\n/****************************************\n * Logout from apache auth\n **************************************/\nfunction logout() {\n    var out = window.location.href.replace(/:\\/\\//, '://log:out@');\n    jQuery.get(out);\n}\n\n/****************************************\n * 
Recursive background load of dataTable\n **************************************/\nfunction loadDataTable(item, queryString, taskID) {\n        if (tableData.limited!=false && tableDataCount<maxdisplayedrows && tableDataTaskId==taskID) {\n                newoffset = tableData.offset + tableData.limited;\n                console.log(\"Table Query offset:\",newoffset);\n                $.ajax({\n                        url: \"api/index.php?request=data/\" + item + \"/\" +queryString + \"/offset/\" + newoffset + \"/\"\n                }).then(function(data) {\n                        tableData=data;\n                        TableState.rows.add(data.datasets).draw();\n                        tableDataCount=tableDataCount+data.limited;\n\n                        //let's test the stack size ! (20 000 for IE10, 281 810 for FF42 )\n                        loadDataTable(item, queryString, taskID);\n                });\n        } else if (tableDataCount>=maxdisplayedrows) {\n                msg_warning(\"Partial result for graph and table,  limited to \"+maxdisplayedrows+\" entries by maxdisplayedrows (config.php). 
Please use filter !\");\n        }\n}\n\n\n/****************************************\n * Recursive background load of a graph\n **************************************/\nfunction loadDataGraph(item, queryString, taskID) {\n        if (graphData.limited!=false && graphDataCount<maxdisplayedrows && graphDataTaskId==taskID) {\n                newoffset = graphData.offset + graphData.limited;\n                console.log(\"Graph Query offset:\",newoffset);\n        $.ajax({\n                url: \"api/index.php?request=graph/\" + item + \"/\" +queryString + \"/offset/\" + newoffset + \"/\"\n        }).then(function(data) {\n                graphData=data\n                myChart.data.datasets[0].data = myChart.data.datasets[0].data.concat(data.datasets[0].data);\n                myChart.data.datasets[0].backgroundColor = myChart.data.datasets[0].backgroundColor.concat(data.datasets[0].backgroundColor);\n                console.log(data);\n                myChart.update();\n                graphDataCount=graphDataCount+data.limited;\n\n        //let's test the stack size ! (20 000 for IE10, 281 810 for FF42 )\n        loadDataGraph(item, queryString, taskID);\n        });\n        }\n}\n\n/****************************************\n * Async. 
function which request graph\n * and data, then update graph and table\n **************************************/\n//GetGraph and Table Data async\nfunction GetGraph(item){\n\n    //Set the main_content frame\n    document.getElementById(\"main_content\").innerHTML = `\n    <canvas style=\"max-height:640px; min-height:320px\" id=\"ctx\"></canvas> <!-- Canvas for Graph -->\n    <table id=\"datalist\" class=\"table table-striped table-bordered\" width=\"100%\"></table> <!-- Datalist-->\n    `\n\n    $('#filter').button('loading');\n    lastGet=item;\n    //Get filter\n    var queryString=\"\";\n    var myForm = document.getElementById(\"filterform\");\n    for (var i = 0; i < myForm.elements.length; i++) {\n        if (myForm.elements[i].name.length>0)\n        {\n            queryString = queryString + \"/\" + myForm.elements[i].name + \"/\" + myForm.elements[i].value.replace(\"/\", \"-\");\n        }\n    }\n\n\n    //Clean Graph and Data table\n\n    if (TableState!=\"empty\"){\n        TableState.destroy();\n        $('#datalist').empty();\n    }\n\n     //Delete the old graph\n     if (GraphState!=\"empty\") {\n         myChart.destroy();\n     }\n\n\n    graphDataTaskId++;\n\n    //Get the Graph data\n    $.ajax({\n        url: \"api/index.php?request=graph/\" + item + \"/\" + queryString\n    }).then(function(data) {\n        var options = {\n            responsive : responsiveChart,\n            maintainAspectRatio: false,\n            animation : animationChart,\n            showAllTooltips: showAllTooltipsChart,\n            tooltips: {\n                enabled: true,\n                mode: 'single',\n                callbacks: {\n                    label: function(tooltipItems, data) {\n                        val = data.datasets[tooltipItems.datasetIndex].data[tooltipItems.index]\n                        type = data.datasets[tooltipItems.datasetIndex].unit\n                        if (type==\"size\") {\n                            return 
formatBytes(val,0);\n                        } else if (type==\"count\") {\n                            return formatCount(val,0);\n                        } else if (type==\"date\") {\n                            return (new Date(val*1000));\n                        }\n                        return val\n                    }\n                }\n            }\n        }\n\n\n        GraphCTX = document.getElementById(\"ctx\").getContext(\"2d\");\n        graphData=data;\n        //Create the new graph\n        myChart = new Chart(GraphCTX,{\n            type: data.default_graph,\n                data: data,\n                options: options\n        });\n        //Load the whole graph in backgroup\n        graphDataCount = data.limited;\n        loadDataGraph(item, queryString, tableDataTaskId);\n        GraphState=data.default_graph;\n        $('#filter').button('reset')\n    });\n\n\n\n    tableDataTaskId++;\n\n\n    $.ajax({\n            url: \"api/index.php?request=data/\" + item + \"/\" +queryString\n    }).then(function(data) {\n            tableData=data;\n            TableState = $('#datalist').DataTable( {\n                    destroy: true,\n                       clear: true,\n                       bAutoWidth: false,\n                       bSortClasses: false,\n                       bDeferRender: true,\n                       data: data.datasets,\n                       columns: data.columns,\n                       columnDefs: data.columnsDefs\n            } );\n            //Load the whole table in background\n            tableDataCount = data.limited;\n            loadDataTable(item, queryString, tableDataTaskId);\n            });\n\n\n}\n\n/*********************************\n * Clean the filter form\n ********************************/\n\nfunction CleanForm() {\n    var myForm = document.getElementById(\"filterform\");\n    for (var i = 0; i < myForm.elements.length; i++) {\n        myForm.elements[i].value = \"\"\n    }\n}\n"
  },
  {
    "path": "web_gui/gui_v3/customjs/param.php",
    "content": "<?php\n/*\n * Copyright (C) 2017 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\ninclude(\"../config.php\");\ninclude(\"../common.php\");\ninclude(\"../plugin.php\");\n\nforeach($CHARTJS as $conf => $val)\n{\n        echo \"$conf=$val;\\n\";\n}\n\necho plugins_call(\"jscript\", \"<!--Javascript from plugins-->\\n\");\n"
  },
  {
    "path": "web_gui/gui_v3/index.php",
    "content": "<!DOCTYPE html>\n<?php\n/*\n * Copyright (C) 2016 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n\nrequire_once \"config.php\";\nrequire_once \"common.php\";\nrequire_once \"plugin.php\";\n\n?>\n\n<?php\n/*******************************************************\n *                   HEADER                             *\n *******************************************************/\n?>\n<html lang=\"en\">\n<head>\n    <title>Robinhood Report</title>\n    <meta charset=\"utf-8\">\n    <meta name=\"viewport\" content=\"width=device-width, initial-scale=1\">\n    <link rel=\"stylesheet\" href=\"css/bootstrap.css\">\n    <link rel=\"stylesheet\" href=\"css/dataTables.bootstrap.css\">\n    <link rel=\"stylesheet\" href=\"css/dashboard.css\">\n    <link rel=\"stylesheet\" href=\"css/bootstrap-datetimepicker.css\">\n    <link rel=\"stylesheet\" href=\"css/bootstrap-slider.css\">\n    <script src=\"js/jquery-2.2.4.min.js\"></script>\n    <script src=\"js/bootstrap.min.js\"></script>\n    <script src=\"js/Chart.bundle.js\"></script>\n    <script src=\"js/jquery.dataTables.js\"></script>\n    <script src=\"js/filesize.dataTables.js\"></script>\n    <script src=\"js/moment.js\"></script>\n    <script src=\"js/bootstrap-datetimepicker.min.js\"></script>\n    <script src=\"js/bootstrap-slider.js\"></script>\n    <script src=\"customjs/param.php\"></script>\n    <script src=\"customjs/newgui.js\"></script>\n    <?php echo plugins_call(\"ui_header\", \"<!--header from plugins-->\"); ?>\n</head>\n\n<?php\n/*******************************************************\n *                   BODY                               *\n *******************************************************/\n?>\n<body>\n\n    <nav 
class=\"navbar navbar-inverse navbar-fixed-top\">\n      <div class=\"container-fluid\">\n        <div class=\"navbar-header\">\n          <button type=\"button\" class=\"navbar-toggle collapsed\" data-toggle=\"collapse\" data-target=\"#navbar\" aria-expanded=\"false\" aria-controls=\"navbar\">\n            <span class=\"sr-only\">Toggle navigation</span>\n            <span class=\"icon-bar\"></span>\n            <span class=\"icon-bar\"></span>\n            <span class=\"icon-bar\"></span>\n          </button>\n          <a class=\"navbar-brand\" href=\"#\" id=\"main_title\">Robinhood Policy Engine</a>\n\n        </div>\n        <div id=\"navbar\" class=\"navbar-collapse collapse\">\n          <ul class=\"nav navbar-nav navbar-right\">\n        <li><a id=\"loggedas\" onclick=\"logout()\"  href=\"\"></a></li>\n            <li><a href=\"https://github.com/cea-hpc/robinhood/wiki/Documentation\">Help</a></li>\n          </ul>\n    <i><p id=\"quote\" class=\"navbar-text\"></p></i>\n\n        </div>\n      </div>\n    </nav>\n\n    <div class=\"container-fluid\">\n      <div  class=\"row\">\n        <div class=\"col-sm-3 col-md-2 sidebar\">\n     <div class=\"nav nav-sidebar\">\n      <a class=\"navbar-brand\" href=\"http://robinhood.sf.net\"><img src=\"images/logo_rh_sf.gif\" ></a>\n      <a class=\"navbar-brand\" href=\"<?php echo $CUSTOM['vendor_url']; ?>\"><img src=\"<?php echo $CUSTOM['vendor_logo']; ?>\" height=\"64\"></a>\n    </div>\n<br><br>\n          <ul class=\"nav nav-sidebar\">\n<?php\n\n/****************************************\n *       LEFT MENU BUTTONS               *\n ****************************************/\n\n$fields = get_acct_columns();\nforeach ($fields as $field) {\n        echo '<li><a href=\"#\" onclick=\"GetGraph(\\''.$field.'\\')\">'.l($field).'</a></li>';\n}\n\nif (!$DISABLE_FILES_PAGE)\n{\n\techo '<li><a href=\"#\" onclick=\"GetGraph(\\'Files\\')\">Files</a></li>';\n}\n\necho plugins_call(\"ui_menu_top\", \"<!--Data from 
plugins-->\\n\");\n?>\n\n          </ul>\n<?php\n/****************************************\n *                 FILTER               *\n ****************************************/\n?>\n\n    <form id=\"filterform\" name=\"filterform\">\n            <fieldset class=\"form-group\">\n                <label for=\"formUID\">Filter</label>\n                <input type=\"text\" class=\"form-control\" id=\"formUID\" name=\"uid\" placeholder=\"UID\">\n            </fieldset>\n            <fieldset class=\"form-group\">\n                <input type=\"text\" class=\"form-control\" id=\"formGID\" name=\"gid\" placeholder=\"GID\">\n            </fieldset>\n            <fieldset class=\"form-group\">\n                <input type=\"text\" class=\"form-control\" id=\"formFilename\" name=\"filename\" placeholder=\"Filename\">\n            </fieldset>\n\n            <fieldset class=\"form-group\">\n            <label>Size range</label>\n            <input id=\"ex1\" data-slider-id='ex1Slider' type=\"text\" name=minsize />\n            <input id=\"ex2\" data-slider-id='ex2Slider' type=\"text\" name=maxsize />\n            </fieldset>\n\n<script>\n$('#ex1').slider({\n        min: 0,\n        max: 1125899906842624,\n        step: 8,\n        scale: 'logarithmic',\n        formatter: function(value) {\n                return formatBytes(value,1);\n        }\n});\n\n\n$('#ex2').slider({\n        min: 0,\n         max: 1125899906842624,\n        value: 1125899906842624,\n        step: 8,\n        scale: 'logarithmic',\n        formatter: function(value) {\n                return formatBytes(value,1);\n        }\n});\n\n</script>\n\n<?php\n    echo plugins_call(\"ui_form_filter\", \"<!--Data from plugins-->\");\n?>\n\n            <button type=\"button\" id=\"filter\" class=\"btn btn-primary\" data-loading-text=\"Loading...\" autocomplete=\"off\" onclick=\"GetGraph(lastGet)\">Filter</button>\n            <button type=\"button\" class=\"btn btn-primary\" 
onclick=\"CleanForm();GetGraph(lastGet)\">Clean</button>\n    </form>\n\n\n<?php\n    echo plugins_call(\"ui_menu_bottom\", \"<!--data from plugins-->\\n\");\n?>\n\n</div>\n\n<div class=\"col-sm-9 col-sm-offset-3 col-md-10 col-md-offset-2 main\"> <!-- Graph/Data Div-->\n    <div id=\"messagebox\"></div> <!-- MessageBox Div-->\n    <div id=\"main_content\">\n    </div>\n</div> <!-- Graph Div end-->\n</div>\n</div>\n\n\n<?php\n$permission = getFilePermission(\"config.php\");\nif ($permission != \"640\") {\n        echo \"<script>$(msg_danger(\\\"Bad permission on config.php ($permission) shoud be 640 .\\\"))</script>\";\n}\n\nif ($DB_LASTERROR != \"\") {\n        echo \"<script>$(msg_danger(\\\"PDO Database error: $DB_LASTERROR\\\"))</script>\";\n}\n\n/* Load values in the filder is parameters are set */\necho setFormValues();\n/* Load a specific graph is the parameter is set */\necho callGraph();\n?>\n\n</body>\n</html>\n\n"
  },
  {
    "path": "web_gui/gui_v3/js/Chart.bundle.js",
    "content": "/*!\n * Chart.js\n * http://chartjs.org/\n * Version: 2.7.2\n *\n * Copyright 2018 Chart.js Contributors\n * Released under the MIT license\n * https://github.com/chartjs/Chart.js/blob/master/LICENSE.md\n */\n(function(f){if(typeof exports===\"object\"&&typeof module!==\"undefined\"){module.exports=f()}else if(typeof define===\"function\"&&define.amd){define([],f)}else{var g;if(typeof window!==\"undefined\"){g=window}else if(typeof global!==\"undefined\"){g=global}else if(typeof self!==\"undefined\"){g=self}else{g=this}g.Chart = f()}})(function(){var define,module,exports;return (function(){function e(t,n,r){function s(o,u){if(!n[o]){if(!t[o]){var a=typeof require==\"function\"&&require;if(!u&&a)return a(o,!0);if(i)return i(o,!0);var f=new Error(\"Cannot find module '\"+o+\"'\");throw f.code=\"MODULE_NOT_FOUND\",f}var l=n[o]={exports:{}};t[o][0].call(l.exports,function(e){var n=t[o][1][e];return s(n?n:e)},l,l.exports,e,t,n,r)}return n[o].exports}var i=typeof require==\"function\"&&require;for(var o=0;o<r.length;o++)s(r[o]);return s}return e})()({1:[function(require,module,exports){\n/* MIT license */\nvar colorNames = require(5);\n\nmodule.exports = {\n   getRgba: getRgba,\n   getHsla: getHsla,\n   getRgb: getRgb,\n   getHsl: getHsl,\n   getHwb: getHwb,\n   getAlpha: getAlpha,\n\n   hexString: hexString,\n   rgbString: rgbString,\n   rgbaString: rgbaString,\n   percentString: percentString,\n   percentaString: percentaString,\n   hslString: hslString,\n   hslaString: hslaString,\n   hwbString: hwbString,\n   keyword: keyword\n}\n\nfunction getRgba(string) {\n   if (!string) {\n      return;\n   }\n   var abbr =  /^#([a-fA-F0-9]{3})$/i,\n       hex =  /^#([a-fA-F0-9]{6})$/i,\n       rgba = /^rgba?\\(\\s*([+-]?\\d+)\\s*,\\s*([+-]?\\d+)\\s*,\\s*([+-]?\\d+)\\s*(?:,\\s*([+-]?[\\d\\.]+)\\s*)?\\)$/i,\n       per = /^rgba?\\(\\s*([+-]?[\\d\\.]+)\\%\\s*,\\s*([+-]?[\\d\\.]+)\\%\\s*,\\s*([+-]?[\\d\\.]+)\\%\\s*(?:,\\s*([+-]?[\\d\\.]+)\\s*)?\\)$/i,\n       
keyword = /(\\w+)/;\n\n   var rgb = [0, 0, 0],\n       a = 1,\n       match = string.match(abbr);\n   if (match) {\n      match = match[1];\n      for (var i = 0; i < rgb.length; i++) {\n         rgb[i] = parseInt(match[i] + match[i], 16);\n      }\n   }\n   else if (match = string.match(hex)) {\n      match = match[1];\n      for (var i = 0; i < rgb.length; i++) {\n         rgb[i] = parseInt(match.slice(i * 2, i * 2 + 2), 16);\n      }\n   }\n   else if (match = string.match(rgba)) {\n      for (var i = 0; i < rgb.length; i++) {\n         rgb[i] = parseInt(match[i + 1]);\n      }\n      a = parseFloat(match[4]);\n   }\n   else if (match = string.match(per)) {\n      for (var i = 0; i < rgb.length; i++) {\n         rgb[i] = Math.round(parseFloat(match[i + 1]) * 2.55);\n      }\n      a = parseFloat(match[4]);\n   }\n   else if (match = string.match(keyword)) {\n      if (match[1] == \"transparent\") {\n         return [0, 0, 0, 0];\n      }\n      rgb = colorNames[match[1]];\n      if (!rgb) {\n         return;\n      }\n   }\n\n   for (var i = 0; i < rgb.length; i++) {\n      rgb[i] = scale(rgb[i], 0, 255);\n   }\n   if (!a && a != 0) {\n      a = 1;\n   }\n   else {\n      a = scale(a, 0, 1);\n   }\n   rgb[3] = a;\n   return rgb;\n}\n\nfunction getHsla(string) {\n   if (!string) {\n      return;\n   }\n   var hsl = /^hsla?\\(\\s*([+-]?\\d+)(?:deg)?\\s*,\\s*([+-]?[\\d\\.]+)%\\s*,\\s*([+-]?[\\d\\.]+)%\\s*(?:,\\s*([+-]?[\\d\\.]+)\\s*)?\\)/;\n   var match = string.match(hsl);\n   if (match) {\n      var alpha = parseFloat(match[4]);\n      var h = scale(parseInt(match[1]), 0, 360),\n          s = scale(parseFloat(match[2]), 0, 100),\n          l = scale(parseFloat(match[3]), 0, 100),\n          a = scale(isNaN(alpha) ? 
1 : alpha, 0, 1);\n      return [h, s, l, a];\n   }\n}\n\nfunction getHwb(string) {\n   if (!string) {\n      return;\n   }\n   var hwb = /^hwb\\(\\s*([+-]?\\d+)(?:deg)?\\s*,\\s*([+-]?[\\d\\.]+)%\\s*,\\s*([+-]?[\\d\\.]+)%\\s*(?:,\\s*([+-]?[\\d\\.]+)\\s*)?\\)/;\n   var match = string.match(hwb);\n   if (match) {\n    var alpha = parseFloat(match[4]);\n      var h = scale(parseInt(match[1]), 0, 360),\n          w = scale(parseFloat(match[2]), 0, 100),\n          b = scale(parseFloat(match[3]), 0, 100),\n          a = scale(isNaN(alpha) ? 1 : alpha, 0, 1);\n      return [h, w, b, a];\n   }\n}\n\nfunction getRgb(string) {\n   var rgba = getRgba(string);\n   return rgba && rgba.slice(0, 3);\n}\n\nfunction getHsl(string) {\n  var hsla = getHsla(string);\n  return hsla && hsla.slice(0, 3);\n}\n\nfunction getAlpha(string) {\n   var vals = getRgba(string);\n   if (vals) {\n      return vals[3];\n   }\n   else if (vals = getHsla(string)) {\n      return vals[3];\n   }\n   else if (vals = getHwb(string)) {\n      return vals[3];\n   }\n}\n\n// generators\nfunction hexString(rgb) {\n   return \"#\" + hexDouble(rgb[0]) + hexDouble(rgb[1])\n              + hexDouble(rgb[2]);\n}\n\nfunction rgbString(rgba, alpha) {\n   if (alpha < 1 || (rgba[3] && rgba[3] < 1)) {\n      return rgbaString(rgba, alpha);\n   }\n   return \"rgb(\" + rgba[0] + \", \" + rgba[1] + \", \" + rgba[2] + \")\";\n}\n\nfunction rgbaString(rgba, alpha) {\n   if (alpha === undefined) {\n      alpha = (rgba[3] !== undefined ? 
rgba[3] : 1);\n   }\n   return \"rgba(\" + rgba[0] + \", \" + rgba[1] + \", \" + rgba[2]\n           + \", \" + alpha + \")\";\n}\n\nfunction percentString(rgba, alpha) {\n   if (alpha < 1 || (rgba[3] && rgba[3] < 1)) {\n      return percentaString(rgba, alpha);\n   }\n   var r = Math.round(rgba[0]/255 * 100),\n       g = Math.round(rgba[1]/255 * 100),\n       b = Math.round(rgba[2]/255 * 100);\n\n   return \"rgb(\" + r + \"%, \" + g + \"%, \" + b + \"%)\";\n}\n\nfunction percentaString(rgba, alpha) {\n   var r = Math.round(rgba[0]/255 * 100),\n       g = Math.round(rgba[1]/255 * 100),\n       b = Math.round(rgba[2]/255 * 100);\n   return \"rgba(\" + r + \"%, \" + g + \"%, \" + b + \"%, \" + (alpha || rgba[3] || 1) + \")\";\n}\n\nfunction hslString(hsla, alpha) {\n   if (alpha < 1 || (hsla[3] && hsla[3] < 1)) {\n      return hslaString(hsla, alpha);\n   }\n   return \"hsl(\" + hsla[0] + \", \" + hsla[1] + \"%, \" + hsla[2] + \"%)\";\n}\n\nfunction hslaString(hsla, alpha) {\n   if (alpha === undefined) {\n      alpha = (hsla[3] !== undefined ? hsla[3] : 1);\n   }\n   return \"hsla(\" + hsla[0] + \", \" + hsla[1] + \"%, \" + hsla[2] + \"%, \"\n           + alpha + \")\";\n}\n\n// hwb is a bit different than rgb(a) & hsl(a) since there is no alpha specific syntax\n// (hwb have alpha optional & 1 is default value)\nfunction hwbString(hwb, alpha) {\n   if (alpha === undefined) {\n      alpha = (hwb[3] !== undefined ? hwb[3] : 1);\n   }\n   return \"hwb(\" + hwb[0] + \", \" + hwb[1] + \"%, \" + hwb[2] + \"%\"\n           + (alpha !== undefined && alpha !== 1 ? \", \" + alpha : \"\") + \")\";\n}\n\nfunction keyword(rgb) {\n  return reverseNames[rgb.slice(0, 3)];\n}\n\n// helpers\nfunction scale(num, min, max) {\n   return Math.min(Math.max(min, num), max);\n}\n\nfunction hexDouble(num) {\n  var str = num.toString(16).toUpperCase();\n  return (str.length < 2) ? 
\"0\" + str : str;\n}\n\n\n//create a list of reverse color names\nvar reverseNames = {};\nfor (var name in colorNames) {\n   reverseNames[colorNames[name]] = name;\n}\n\n},{\"5\":5}],2:[function(require,module,exports){\n/* MIT license */\nvar convert = require(4);\nvar string = require(1);\n\nvar Color = function (obj) {\n\tif (obj instanceof Color) {\n\t\treturn obj;\n\t}\n\tif (!(this instanceof Color)) {\n\t\treturn new Color(obj);\n\t}\n\n\tthis.valid = false;\n\tthis.values = {\n\t\trgb: [0, 0, 0],\n\t\thsl: [0, 0, 0],\n\t\thsv: [0, 0, 0],\n\t\thwb: [0, 0, 0],\n\t\tcmyk: [0, 0, 0, 0],\n\t\talpha: 1\n\t};\n\n\t// parse Color() argument\n\tvar vals;\n\tif (typeof obj === 'string') {\n\t\tvals = string.getRgba(obj);\n\t\tif (vals) {\n\t\t\tthis.setValues('rgb', vals);\n\t\t} else if (vals = string.getHsla(obj)) {\n\t\t\tthis.setValues('hsl', vals);\n\t\t} else if (vals = string.getHwb(obj)) {\n\t\t\tthis.setValues('hwb', vals);\n\t\t}\n\t} else if (typeof obj === 'object') {\n\t\tvals = obj;\n\t\tif (vals.r !== undefined || vals.red !== undefined) {\n\t\t\tthis.setValues('rgb', vals);\n\t\t} else if (vals.l !== undefined || vals.lightness !== undefined) {\n\t\t\tthis.setValues('hsl', vals);\n\t\t} else if (vals.v !== undefined || vals.value !== undefined) {\n\t\t\tthis.setValues('hsv', vals);\n\t\t} else if (vals.w !== undefined || vals.whiteness !== undefined) {\n\t\t\tthis.setValues('hwb', vals);\n\t\t} else if (vals.c !== undefined || vals.cyan !== undefined) {\n\t\t\tthis.setValues('cmyk', vals);\n\t\t}\n\t}\n};\n\nColor.prototype = {\n\tisValid: function () {\n\t\treturn this.valid;\n\t},\n\trgb: function () {\n\t\treturn this.setSpace('rgb', arguments);\n\t},\n\thsl: function () {\n\t\treturn this.setSpace('hsl', arguments);\n\t},\n\thsv: function () {\n\t\treturn this.setSpace('hsv', arguments);\n\t},\n\thwb: function () {\n\t\treturn this.setSpace('hwb', arguments);\n\t},\n\tcmyk: function () {\n\t\treturn this.setSpace('cmyk', 
arguments);\n\t},\n\n\trgbArray: function () {\n\t\treturn this.values.rgb;\n\t},\n\thslArray: function () {\n\t\treturn this.values.hsl;\n\t},\n\thsvArray: function () {\n\t\treturn this.values.hsv;\n\t},\n\thwbArray: function () {\n\t\tvar values = this.values;\n\t\tif (values.alpha !== 1) {\n\t\t\treturn values.hwb.concat([values.alpha]);\n\t\t}\n\t\treturn values.hwb;\n\t},\n\tcmykArray: function () {\n\t\treturn this.values.cmyk;\n\t},\n\trgbaArray: function () {\n\t\tvar values = this.values;\n\t\treturn values.rgb.concat([values.alpha]);\n\t},\n\thslaArray: function () {\n\t\tvar values = this.values;\n\t\treturn values.hsl.concat([values.alpha]);\n\t},\n\talpha: function (val) {\n\t\tif (val === undefined) {\n\t\t\treturn this.values.alpha;\n\t\t}\n\t\tthis.setValues('alpha', val);\n\t\treturn this;\n\t},\n\n\tred: function (val) {\n\t\treturn this.setChannel('rgb', 0, val);\n\t},\n\tgreen: function (val) {\n\t\treturn this.setChannel('rgb', 1, val);\n\t},\n\tblue: function (val) {\n\t\treturn this.setChannel('rgb', 2, val);\n\t},\n\thue: function (val) {\n\t\tif (val) {\n\t\t\tval %= 360;\n\t\t\tval = val < 0 ? 
360 + val : val;\n\t\t}\n\t\treturn this.setChannel('hsl', 0, val);\n\t},\n\tsaturation: function (val) {\n\t\treturn this.setChannel('hsl', 1, val);\n\t},\n\tlightness: function (val) {\n\t\treturn this.setChannel('hsl', 2, val);\n\t},\n\tsaturationv: function (val) {\n\t\treturn this.setChannel('hsv', 1, val);\n\t},\n\twhiteness: function (val) {\n\t\treturn this.setChannel('hwb', 1, val);\n\t},\n\tblackness: function (val) {\n\t\treturn this.setChannel('hwb', 2, val);\n\t},\n\tvalue: function (val) {\n\t\treturn this.setChannel('hsv', 2, val);\n\t},\n\tcyan: function (val) {\n\t\treturn this.setChannel('cmyk', 0, val);\n\t},\n\tmagenta: function (val) {\n\t\treturn this.setChannel('cmyk', 1, val);\n\t},\n\tyellow: function (val) {\n\t\treturn this.setChannel('cmyk', 2, val);\n\t},\n\tblack: function (val) {\n\t\treturn this.setChannel('cmyk', 3, val);\n\t},\n\n\thexString: function () {\n\t\treturn string.hexString(this.values.rgb);\n\t},\n\trgbString: function () {\n\t\treturn string.rgbString(this.values.rgb, this.values.alpha);\n\t},\n\trgbaString: function () {\n\t\treturn string.rgbaString(this.values.rgb, this.values.alpha);\n\t},\n\tpercentString: function () {\n\t\treturn string.percentString(this.values.rgb, this.values.alpha);\n\t},\n\thslString: function () {\n\t\treturn string.hslString(this.values.hsl, this.values.alpha);\n\t},\n\thslaString: function () {\n\t\treturn string.hslaString(this.values.hsl, this.values.alpha);\n\t},\n\thwbString: function () {\n\t\treturn string.hwbString(this.values.hwb, this.values.alpha);\n\t},\n\tkeyword: function () {\n\t\treturn string.keyword(this.values.rgb, this.values.alpha);\n\t},\n\n\trgbNumber: function () {\n\t\tvar rgb = this.values.rgb;\n\t\treturn (rgb[0] << 16) | (rgb[1] << 8) | rgb[2];\n\t},\n\n\tluminosity: function () {\n\t\t// http://www.w3.org/TR/WCAG20/#relativeluminancedef\n\t\tvar rgb = this.values.rgb;\n\t\tvar lum = [];\n\t\tfor (var i = 0; i < rgb.length; i++) {\n\t\t\tvar chan = rgb[i] / 
255;\n\t\t\tlum[i] = (chan <= 0.03928) ? chan / 12.92 : Math.pow(((chan + 0.055) / 1.055), 2.4);\n\t\t}\n\t\treturn 0.2126 * lum[0] + 0.7152 * lum[1] + 0.0722 * lum[2];\n\t},\n\n\tcontrast: function (color2) {\n\t\t// http://www.w3.org/TR/WCAG20/#contrast-ratiodef\n\t\tvar lum1 = this.luminosity();\n\t\tvar lum2 = color2.luminosity();\n\t\tif (lum1 > lum2) {\n\t\t\treturn (lum1 + 0.05) / (lum2 + 0.05);\n\t\t}\n\t\treturn (lum2 + 0.05) / (lum1 + 0.05);\n\t},\n\n\tlevel: function (color2) {\n\t\tvar contrastRatio = this.contrast(color2);\n\t\tif (contrastRatio >= 7.1) {\n\t\t\treturn 'AAA';\n\t\t}\n\n\t\treturn (contrastRatio >= 4.5) ? 'AA' : '';\n\t},\n\n\tdark: function () {\n\t\t// YIQ equation from http://24ways.org/2010/calculating-color-contrast\n\t\tvar rgb = this.values.rgb;\n\t\tvar yiq = (rgb[0] * 299 + rgb[1] * 587 + rgb[2] * 114) / 1000;\n\t\treturn yiq < 128;\n\t},\n\n\tlight: function () {\n\t\treturn !this.dark();\n\t},\n\n\tnegate: function () {\n\t\tvar rgb = [];\n\t\tfor (var i = 0; i < 3; i++) {\n\t\t\trgb[i] = 255 - this.values.rgb[i];\n\t\t}\n\t\tthis.setValues('rgb', rgb);\n\t\treturn this;\n\t},\n\n\tlighten: function (ratio) {\n\t\tvar hsl = this.values.hsl;\n\t\thsl[2] += hsl[2] * ratio;\n\t\tthis.setValues('hsl', hsl);\n\t\treturn this;\n\t},\n\n\tdarken: function (ratio) {\n\t\tvar hsl = this.values.hsl;\n\t\thsl[2] -= hsl[2] * ratio;\n\t\tthis.setValues('hsl', hsl);\n\t\treturn this;\n\t},\n\n\tsaturate: function (ratio) {\n\t\tvar hsl = this.values.hsl;\n\t\thsl[1] += hsl[1] * ratio;\n\t\tthis.setValues('hsl', hsl);\n\t\treturn this;\n\t},\n\n\tdesaturate: function (ratio) {\n\t\tvar hsl = this.values.hsl;\n\t\thsl[1] -= hsl[1] * ratio;\n\t\tthis.setValues('hsl', hsl);\n\t\treturn this;\n\t},\n\n\twhiten: function (ratio) {\n\t\tvar hwb = this.values.hwb;\n\t\thwb[1] += hwb[1] * ratio;\n\t\tthis.setValues('hwb', hwb);\n\t\treturn this;\n\t},\n\n\tblacken: function (ratio) {\n\t\tvar hwb = this.values.hwb;\n\t\thwb[2] += hwb[2] * 
ratio;\n\t\tthis.setValues('hwb', hwb);\n\t\treturn this;\n\t},\n\n\tgreyscale: function () {\n\t\tvar rgb = this.values.rgb;\n\t\t// http://en.wikipedia.org/wiki/Grayscale#Converting_color_to_grayscale\n\t\tvar val = rgb[0] * 0.3 + rgb[1] * 0.59 + rgb[2] * 0.11;\n\t\tthis.setValues('rgb', [val, val, val]);\n\t\treturn this;\n\t},\n\n\tclearer: function (ratio) {\n\t\tvar alpha = this.values.alpha;\n\t\tthis.setValues('alpha', alpha - (alpha * ratio));\n\t\treturn this;\n\t},\n\n\topaquer: function (ratio) {\n\t\tvar alpha = this.values.alpha;\n\t\tthis.setValues('alpha', alpha + (alpha * ratio));\n\t\treturn this;\n\t},\n\n\trotate: function (degrees) {\n\t\tvar hsl = this.values.hsl;\n\t\tvar hue = (hsl[0] + degrees) % 360;\n\t\thsl[0] = hue < 0 ? 360 + hue : hue;\n\t\tthis.setValues('hsl', hsl);\n\t\treturn this;\n\t},\n\n\t/**\n\t * Ported from sass implementation in C\n\t * https://github.com/sass/libsass/blob/0e6b4a2850092356aa3ece07c6b249f0221caced/functions.cpp#L209\n\t */\n\tmix: function (mixinColor, weight) {\n\t\tvar color1 = this;\n\t\tvar color2 = mixinColor;\n\t\tvar p = weight === undefined ? 0.5 : weight;\n\n\t\tvar w = 2 * p - 1;\n\t\tvar a = color1.alpha() - color2.alpha();\n\n\t\tvar w1 = (((w * a === -1) ? w : (w + a) / (1 + w * a)) + 1) / 2.0;\n\t\tvar w2 = 1 - w1;\n\n\t\treturn this\n\t\t\t.rgb(\n\t\t\t\tw1 * color1.red() + w2 * color2.red(),\n\t\t\t\tw1 * color1.green() + w2 * color2.green(),\n\t\t\t\tw1 * color1.blue() + w2 * color2.blue()\n\t\t\t)\n\t\t\t.alpha(color1.alpha() * p + color2.alpha() * (1 - p));\n\t},\n\n\ttoJSON: function () {\n\t\treturn this.rgb();\n\t},\n\n\tclone: function () {\n\t\t// NOTE(SB): using node-clone creates a dependency to Buffer when using browserify,\n\t\t// making the final build way to big to embed in Chart.js. 
So let's do it manually,\n\t\t// assuming that values to clone are 1 dimension arrays containing only numbers,\n\t\t// except 'alpha' which is a number.\n\t\tvar result = new Color();\n\t\tvar source = this.values;\n\t\tvar target = result.values;\n\t\tvar value, type;\n\n\t\tfor (var prop in source) {\n\t\t\tif (source.hasOwnProperty(prop)) {\n\t\t\t\tvalue = source[prop];\n\t\t\t\ttype = ({}).toString.call(value);\n\t\t\t\tif (type === '[object Array]') {\n\t\t\t\t\ttarget[prop] = value.slice(0);\n\t\t\t\t} else if (type === '[object Number]') {\n\t\t\t\t\ttarget[prop] = value;\n\t\t\t\t} else {\n\t\t\t\t\tconsole.error('unexpected color value:', value);\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\treturn result;\n\t}\n};\n\nColor.prototype.spaces = {\n\trgb: ['red', 'green', 'blue'],\n\thsl: ['hue', 'saturation', 'lightness'],\n\thsv: ['hue', 'saturation', 'value'],\n\thwb: ['hue', 'whiteness', 'blackness'],\n\tcmyk: ['cyan', 'magenta', 'yellow', 'black']\n};\n\nColor.prototype.maxes = {\n\trgb: [255, 255, 255],\n\thsl: [360, 100, 100],\n\thsv: [360, 100, 100],\n\thwb: [360, 100, 100],\n\tcmyk: [100, 100, 100, 100]\n};\n\nColor.prototype.getValues = function (space) {\n\tvar values = this.values;\n\tvar vals = {};\n\n\tfor (var i = 0; i < space.length; i++) {\n\t\tvals[space.charAt(i)] = values[space][i];\n\t}\n\n\tif (values.alpha !== 1) {\n\t\tvals.a = values.alpha;\n\t}\n\n\t// {r: 255, g: 255, b: 255, a: 0.4}\n\treturn vals;\n};\n\nColor.prototype.setValues = function (space, vals) {\n\tvar values = this.values;\n\tvar spaces = this.spaces;\n\tvar maxes = this.maxes;\n\tvar alpha = 1;\n\tvar i;\n\n\tthis.valid = true;\n\n\tif (space === 'alpha') {\n\t\talpha = vals;\n\t} else if (vals.length) {\n\t\t// [10, 10, 10]\n\t\tvalues[space] = vals.slice(0, space.length);\n\t\talpha = vals[space.length];\n\t} else if (vals[space.charAt(0)] !== undefined) {\n\t\t// {r: 10, g: 10, b: 10}\n\t\tfor (i = 0; i < space.length; i++) {\n\t\t\tvalues[space][i] = 
vals[space.charAt(i)];\n\t\t}\n\n\t\talpha = vals.a;\n\t} else if (vals[spaces[space][0]] !== undefined) {\n\t\t// {red: 10, green: 10, blue: 10}\n\t\tvar chans = spaces[space];\n\n\t\tfor (i = 0; i < space.length; i++) {\n\t\t\tvalues[space][i] = vals[chans[i]];\n\t\t}\n\n\t\talpha = vals.alpha;\n\t}\n\n\tvalues.alpha = Math.max(0, Math.min(1, (alpha === undefined ? values.alpha : alpha)));\n\n\tif (space === 'alpha') {\n\t\treturn false;\n\t}\n\n\tvar capped;\n\n\t// cap values of the space prior converting all values\n\tfor (i = 0; i < space.length; i++) {\n\t\tcapped = Math.max(0, Math.min(maxes[space][i], values[space][i]));\n\t\tvalues[space][i] = Math.round(capped);\n\t}\n\n\t// convert to all the other color spaces\n\tfor (var sname in spaces) {\n\t\tif (sname !== space) {\n\t\t\tvalues[sname] = convert[space][sname](values[space]);\n\t\t}\n\t}\n\n\treturn true;\n};\n\nColor.prototype.setSpace = function (space, args) {\n\tvar vals = args[0];\n\n\tif (vals === undefined) {\n\t\t// color.rgb()\n\t\treturn this.getValues(space);\n\t}\n\n\t// color.rgb(10, 10, 10)\n\tif (typeof vals === 'number') {\n\t\tvals = Array.prototype.slice.call(args);\n\t}\n\n\tthis.setValues(space, vals);\n\treturn this;\n};\n\nColor.prototype.setChannel = function (space, index, val) {\n\tvar svalues = this.values[space];\n\tif (val === undefined) {\n\t\t// color.red()\n\t\treturn svalues[index];\n\t} else if (val === svalues[index]) {\n\t\t// color.red(color.red())\n\t\treturn this;\n\t}\n\n\t// color.red(100)\n\tsvalues[index] = val;\n\tthis.setValues(space, svalues);\n\n\treturn this;\n};\n\nif (typeof window !== 'undefined') {\n\twindow.Color = Color;\n}\n\nmodule.exports = Color;\n\n},{\"1\":1,\"4\":4}],3:[function(require,module,exports){\n/* MIT license */\n\nmodule.exports = {\n  rgb2hsl: rgb2hsl,\n  rgb2hsv: rgb2hsv,\n  rgb2hwb: rgb2hwb,\n  rgb2cmyk: rgb2cmyk,\n  rgb2keyword: rgb2keyword,\n  rgb2xyz: rgb2xyz,\n  rgb2lab: rgb2lab,\n  rgb2lch: rgb2lch,\n\n  hsl2rgb: 
hsl2rgb,\n  hsl2hsv: hsl2hsv,\n  hsl2hwb: hsl2hwb,\n  hsl2cmyk: hsl2cmyk,\n  hsl2keyword: hsl2keyword,\n\n  hsv2rgb: hsv2rgb,\n  hsv2hsl: hsv2hsl,\n  hsv2hwb: hsv2hwb,\n  hsv2cmyk: hsv2cmyk,\n  hsv2keyword: hsv2keyword,\n\n  hwb2rgb: hwb2rgb,\n  hwb2hsl: hwb2hsl,\n  hwb2hsv: hwb2hsv,\n  hwb2cmyk: hwb2cmyk,\n  hwb2keyword: hwb2keyword,\n\n  cmyk2rgb: cmyk2rgb,\n  cmyk2hsl: cmyk2hsl,\n  cmyk2hsv: cmyk2hsv,\n  cmyk2hwb: cmyk2hwb,\n  cmyk2keyword: cmyk2keyword,\n\n  keyword2rgb: keyword2rgb,\n  keyword2hsl: keyword2hsl,\n  keyword2hsv: keyword2hsv,\n  keyword2hwb: keyword2hwb,\n  keyword2cmyk: keyword2cmyk,\n  keyword2lab: keyword2lab,\n  keyword2xyz: keyword2xyz,\n\n  xyz2rgb: xyz2rgb,\n  xyz2lab: xyz2lab,\n  xyz2lch: xyz2lch,\n\n  lab2xyz: lab2xyz,\n  lab2rgb: lab2rgb,\n  lab2lch: lab2lch,\n\n  lch2lab: lch2lab,\n  lch2xyz: lch2xyz,\n  lch2rgb: lch2rgb\n}\n\n\nfunction rgb2hsl(rgb) {\n  var r = rgb[0]/255,\n      g = rgb[1]/255,\n      b = rgb[2]/255,\n      min = Math.min(r, g, b),\n      max = Math.max(r, g, b),\n      delta = max - min,\n      h, s, l;\n\n  if (max == min)\n    h = 0;\n  else if (r == max)\n    h = (g - b) / delta;\n  else if (g == max)\n    h = 2 + (b - r) / delta;\n  else if (b == max)\n    h = 4 + (r - g)/ delta;\n\n  h = Math.min(h * 60, 360);\n\n  if (h < 0)\n    h += 360;\n\n  l = (min + max) / 2;\n\n  if (max == min)\n    s = 0;\n  else if (l <= 0.5)\n    s = delta / (max + min);\n  else\n    s = delta / (2 - max - min);\n\n  return [h, s * 100, l * 100];\n}\n\nfunction rgb2hsv(rgb) {\n  var r = rgb[0],\n      g = rgb[1],\n      b = rgb[2],\n      min = Math.min(r, g, b),\n      max = Math.max(r, g, b),\n      delta = max - min,\n      h, s, v;\n\n  if (max == 0)\n    s = 0;\n  else\n    s = (delta/max * 1000)/10;\n\n  if (max == min)\n    h = 0;\n  else if (r == max)\n    h = (g - b) / delta;\n  else if (g == max)\n    h = 2 + (b - r) / delta;\n  else if (b == max)\n    h = 4 + (r - g) / delta;\n\n  h = Math.min(h * 60, 360);\n\n  if (h < 
0)\n    h += 360;\n\n  v = ((max / 255) * 1000) / 10;\n\n  return [h, s, v];\n}\n\nfunction rgb2hwb(rgb) {\n  var r = rgb[0],\n      g = rgb[1],\n      b = rgb[2],\n      h = rgb2hsl(rgb)[0],\n      w = 1/255 * Math.min(r, Math.min(g, b)),\n      b = 1 - 1/255 * Math.max(r, Math.max(g, b));\n\n  return [h, w * 100, b * 100];\n}\n\nfunction rgb2cmyk(rgb) {\n  var r = rgb[0] / 255,\n      g = rgb[1] / 255,\n      b = rgb[2] / 255,\n      c, m, y, k;\n\n  k = Math.min(1 - r, 1 - g, 1 - b);\n  c = (1 - r - k) / (1 - k) || 0;\n  m = (1 - g - k) / (1 - k) || 0;\n  y = (1 - b - k) / (1 - k) || 0;\n  return [c * 100, m * 100, y * 100, k * 100];\n}\n\nfunction rgb2keyword(rgb) {\n  return reverseKeywords[JSON.stringify(rgb)];\n}\n\nfunction rgb2xyz(rgb) {\n  var r = rgb[0] / 255,\n      g = rgb[1] / 255,\n      b = rgb[2] / 255;\n\n  // assume sRGB\n  r = r > 0.04045 ? Math.pow(((r + 0.055) / 1.055), 2.4) : (r / 12.92);\n  g = g > 0.04045 ? Math.pow(((g + 0.055) / 1.055), 2.4) : (g / 12.92);\n  b = b > 0.04045 ? Math.pow(((b + 0.055) / 1.055), 2.4) : (b / 12.92);\n\n  var x = (r * 0.4124) + (g * 0.3576) + (b * 0.1805);\n  var y = (r * 0.2126) + (g * 0.7152) + (b * 0.0722);\n  var z = (r * 0.0193) + (g * 0.1192) + (b * 0.9505);\n\n  return [x * 100, y *100, z * 100];\n}\n\nfunction rgb2lab(rgb) {\n  var xyz = rgb2xyz(rgb),\n        x = xyz[0],\n        y = xyz[1],\n        z = xyz[2],\n        l, a, b;\n\n  x /= 95.047;\n  y /= 100;\n  z /= 108.883;\n\n  x = x > 0.008856 ? Math.pow(x, 1/3) : (7.787 * x) + (16 / 116);\n  y = y > 0.008856 ? Math.pow(y, 1/3) : (7.787 * y) + (16 / 116);\n  z = z > 0.008856 ? 
Math.pow(z, 1/3) : (7.787 * z) + (16 / 116);\n\n  l = (116 * y) - 16;\n  a = 500 * (x - y);\n  b = 200 * (y - z);\n\n  return [l, a, b];\n}\n\nfunction rgb2lch(args) {\n  return lab2lch(rgb2lab(args));\n}\n\nfunction hsl2rgb(hsl) {\n  var h = hsl[0] / 360,\n      s = hsl[1] / 100,\n      l = hsl[2] / 100,\n      t1, t2, t3, rgb, val;\n\n  if (s == 0) {\n    val = l * 255;\n    return [val, val, val];\n  }\n\n  if (l < 0.5)\n    t2 = l * (1 + s);\n  else\n    t2 = l + s - l * s;\n  t1 = 2 * l - t2;\n\n  rgb = [0, 0, 0];\n  for (var i = 0; i < 3; i++) {\n    t3 = h + 1 / 3 * - (i - 1);\n    t3 < 0 && t3++;\n    t3 > 1 && t3--;\n\n    if (6 * t3 < 1)\n      val = t1 + (t2 - t1) * 6 * t3;\n    else if (2 * t3 < 1)\n      val = t2;\n    else if (3 * t3 < 2)\n      val = t1 + (t2 - t1) * (2 / 3 - t3) * 6;\n    else\n      val = t1;\n\n    rgb[i] = val * 255;\n  }\n\n  return rgb;\n}\n\nfunction hsl2hsv(hsl) {\n  var h = hsl[0],\n      s = hsl[1] / 100,\n      l = hsl[2] / 100,\n      sv, v;\n\n  if(l === 0) {\n      // no need to do calc on black\n      // also avoids divide by 0 error\n      return [0, 0, 0];\n  }\n\n  l *= 2;\n  s *= (l <= 1) ? 
l : 2 - l;\n  v = (l + s) / 2;\n  sv = (2 * s) / (l + s);\n  return [h, sv * 100, v * 100];\n}\n\nfunction hsl2hwb(args) {\n  return rgb2hwb(hsl2rgb(args));\n}\n\nfunction hsl2cmyk(args) {\n  return rgb2cmyk(hsl2rgb(args));\n}\n\nfunction hsl2keyword(args) {\n  return rgb2keyword(hsl2rgb(args));\n}\n\n\nfunction hsv2rgb(hsv) {\n  var h = hsv[0] / 60,\n      s = hsv[1] / 100,\n      v = hsv[2] / 100,\n      hi = Math.floor(h) % 6;\n\n  var f = h - Math.floor(h),\n      p = 255 * v * (1 - s),\n      q = 255 * v * (1 - (s * f)),\n      t = 255 * v * (1 - (s * (1 - f))),\n      v = 255 * v;\n\n  switch(hi) {\n    case 0:\n      return [v, t, p];\n    case 1:\n      return [q, v, p];\n    case 2:\n      return [p, v, t];\n    case 3:\n      return [p, q, v];\n    case 4:\n      return [t, p, v];\n    case 5:\n      return [v, p, q];\n  }\n}\n\nfunction hsv2hsl(hsv) {\n  var h = hsv[0],\n      s = hsv[1] / 100,\n      v = hsv[2] / 100,\n      sl, l;\n\n  l = (2 - s) * v;\n  sl = s * v;\n  sl /= (l <= 1) ? 
l : 2 - l;\n  sl = sl || 0;\n  l /= 2;\n  return [h, sl * 100, l * 100];\n}\n\nfunction hsv2hwb(args) {\n  return rgb2hwb(hsv2rgb(args))\n}\n\nfunction hsv2cmyk(args) {\n  return rgb2cmyk(hsv2rgb(args));\n}\n\nfunction hsv2keyword(args) {\n  return rgb2keyword(hsv2rgb(args));\n}\n\n// http://dev.w3.org/csswg/css-color/#hwb-to-rgb\nfunction hwb2rgb(hwb) {\n  var h = hwb[0] / 360,\n      wh = hwb[1] / 100,\n      bl = hwb[2] / 100,\n      ratio = wh + bl,\n      i, v, f, n;\n\n  // wh + bl cant be > 1\n  if (ratio > 1) {\n    wh /= ratio;\n    bl /= ratio;\n  }\n\n  i = Math.floor(6 * h);\n  v = 1 - bl;\n  f = 6 * h - i;\n  if ((i & 0x01) != 0) {\n    f = 1 - f;\n  }\n  n = wh + f * (v - wh);  // linear interpolation\n\n  switch (i) {\n    default:\n    case 6:\n    case 0: r = v; g = n; b = wh; break;\n    case 1: r = n; g = v; b = wh; break;\n    case 2: r = wh; g = v; b = n; break;\n    case 3: r = wh; g = n; b = v; break;\n    case 4: r = n; g = wh; b = v; break;\n    case 5: r = v; g = wh; b = n; break;\n  }\n\n  return [r * 255, g * 255, b * 255];\n}\n\nfunction hwb2hsl(args) {\n  return rgb2hsl(hwb2rgb(args));\n}\n\nfunction hwb2hsv(args) {\n  return rgb2hsv(hwb2rgb(args));\n}\n\nfunction hwb2cmyk(args) {\n  return rgb2cmyk(hwb2rgb(args));\n}\n\nfunction hwb2keyword(args) {\n  return rgb2keyword(hwb2rgb(args));\n}\n\nfunction cmyk2rgb(cmyk) {\n  var c = cmyk[0] / 100,\n      m = cmyk[1] / 100,\n      y = cmyk[2] / 100,\n      k = cmyk[3] / 100,\n      r, g, b;\n\n  r = 1 - Math.min(1, c * (1 - k) + k);\n  g = 1 - Math.min(1, m * (1 - k) + k);\n  b = 1 - Math.min(1, y * (1 - k) + k);\n  return [r * 255, g * 255, b * 255];\n}\n\nfunction cmyk2hsl(args) {\n  return rgb2hsl(cmyk2rgb(args));\n}\n\nfunction cmyk2hsv(args) {\n  return rgb2hsv(cmyk2rgb(args));\n}\n\nfunction cmyk2hwb(args) {\n  return rgb2hwb(cmyk2rgb(args));\n}\n\nfunction cmyk2keyword(args) {\n  return rgb2keyword(cmyk2rgb(args));\n}\n\n\nfunction xyz2rgb(xyz) {\n  var x = xyz[0] / 100,\n      y = 
xyz[1] / 100,\n      z = xyz[2] / 100,\n      r, g, b;\n\n  r = (x * 3.2406) + (y * -1.5372) + (z * -0.4986);\n  g = (x * -0.9689) + (y * 1.8758) + (z * 0.0415);\n  b = (x * 0.0557) + (y * -0.2040) + (z * 1.0570);\n\n  // assume sRGB\n  r = r > 0.0031308 ? ((1.055 * Math.pow(r, 1.0 / 2.4)) - 0.055)\n    : r = (r * 12.92);\n\n  g = g > 0.0031308 ? ((1.055 * Math.pow(g, 1.0 / 2.4)) - 0.055)\n    : g = (g * 12.92);\n\n  b = b > 0.0031308 ? ((1.055 * Math.pow(b, 1.0 / 2.4)) - 0.055)\n    : b = (b * 12.92);\n\n  r = Math.min(Math.max(0, r), 1);\n  g = Math.min(Math.max(0, g), 1);\n  b = Math.min(Math.max(0, b), 1);\n\n  return [r * 255, g * 255, b * 255];\n}\n\nfunction xyz2lab(xyz) {\n  var x = xyz[0],\n      y = xyz[1],\n      z = xyz[2],\n      l, a, b;\n\n  x /= 95.047;\n  y /= 100;\n  z /= 108.883;\n\n  x = x > 0.008856 ? Math.pow(x, 1/3) : (7.787 * x) + (16 / 116);\n  y = y > 0.008856 ? Math.pow(y, 1/3) : (7.787 * y) + (16 / 116);\n  z = z > 0.008856 ? Math.pow(z, 1/3) : (7.787 * z) + (16 / 116);\n\n  l = (116 * y) - 16;\n  a = 500 * (x - y);\n  b = 200 * (y - z);\n\n  return [l, a, b];\n}\n\nfunction xyz2lch(args) {\n  return lab2lch(xyz2lab(args));\n}\n\nfunction lab2xyz(lab) {\n  var l = lab[0],\n      a = lab[1],\n      b = lab[2],\n      x, y, z, y2;\n\n  if (l <= 8) {\n    y = (l * 100) / 903.3;\n    y2 = (7.787 * (y / 100)) + (16 / 116);\n  } else {\n    y = 100 * Math.pow((l + 16) / 116, 3);\n    y2 = Math.pow(y / 100, 1/3);\n  }\n\n  x = x / 95.047 <= 0.008856 ? x = (95.047 * ((a / 500) + y2 - (16 / 116))) / 7.787 : 95.047 * Math.pow((a / 500) + y2, 3);\n\n  z = z / 108.883 <= 0.008859 ? 
z = (108.883 * (y2 - (b / 200) - (16 / 116))) / 7.787 : 108.883 * Math.pow(y2 - (b / 200), 3);\n\n  return [x, y, z];\n}\n\nfunction lab2lch(lab) {\n  var l = lab[0],\n      a = lab[1],\n      b = lab[2],\n      hr, h, c;\n\n  hr = Math.atan2(b, a);\n  h = hr * 360 / 2 / Math.PI;\n  if (h < 0) {\n    h += 360;\n  }\n  c = Math.sqrt(a * a + b * b);\n  return [l, c, h];\n}\n\nfunction lab2rgb(args) {\n  return xyz2rgb(lab2xyz(args));\n}\n\nfunction lch2lab(lch) {\n  var l = lch[0],\n      c = lch[1],\n      h = lch[2],\n      a, b, hr;\n\n  hr = h / 360 * 2 * Math.PI;\n  a = c * Math.cos(hr);\n  b = c * Math.sin(hr);\n  return [l, a, b];\n}\n\nfunction lch2xyz(args) {\n  return lab2xyz(lch2lab(args));\n}\n\nfunction lch2rgb(args) {\n  return lab2rgb(lch2lab(args));\n}\n\nfunction keyword2rgb(keyword) {\n  return cssKeywords[keyword];\n}\n\nfunction keyword2hsl(args) {\n  return rgb2hsl(keyword2rgb(args));\n}\n\nfunction keyword2hsv(args) {\n  return rgb2hsv(keyword2rgb(args));\n}\n\nfunction keyword2hwb(args) {\n  return rgb2hwb(keyword2rgb(args));\n}\n\nfunction keyword2cmyk(args) {\n  return rgb2cmyk(keyword2rgb(args));\n}\n\nfunction keyword2lab(args) {\n  return rgb2lab(keyword2rgb(args));\n}\n\nfunction keyword2xyz(args) {\n  return rgb2xyz(keyword2rgb(args));\n}\n\nvar cssKeywords = {\n  aliceblue:  [240,248,255],\n  antiquewhite: [250,235,215],\n  aqua: [0,255,255],\n  aquamarine: [127,255,212],\n  azure:  [240,255,255],\n  beige:  [245,245,220],\n  bisque: [255,228,196],\n  black:  [0,0,0],\n  blanchedalmond: [255,235,205],\n  blue: [0,0,255],\n  blueviolet: [138,43,226],\n  brown:  [165,42,42],\n  burlywood:  [222,184,135],\n  cadetblue:  [95,158,160],\n  chartreuse: [127,255,0],\n  chocolate:  [210,105,30],\n  coral:  [255,127,80],\n  cornflowerblue: [100,149,237],\n  cornsilk: [255,248,220],\n  crimson:  [220,20,60],\n  cyan: [0,255,255],\n  darkblue: [0,0,139],\n  darkcyan: [0,139,139],\n  darkgoldenrod:  [184,134,11],\n  darkgray: [169,169,169],\n  
darkgreen:  [0,100,0],\n  darkgrey: [169,169,169],\n  darkkhaki:  [189,183,107],\n  darkmagenta:  [139,0,139],\n  darkolivegreen: [85,107,47],\n  darkorange: [255,140,0],\n  darkorchid: [153,50,204],\n  darkred:  [139,0,0],\n  darksalmon: [233,150,122],\n  darkseagreen: [143,188,143],\n  darkslateblue:  [72,61,139],\n  darkslategray:  [47,79,79],\n  darkslategrey:  [47,79,79],\n  darkturquoise:  [0,206,209],\n  darkviolet: [148,0,211],\n  deeppink: [255,20,147],\n  deepskyblue:  [0,191,255],\n  dimgray:  [105,105,105],\n  dimgrey:  [105,105,105],\n  dodgerblue: [30,144,255],\n  firebrick:  [178,34,34],\n  floralwhite:  [255,250,240],\n  forestgreen:  [34,139,34],\n  fuchsia:  [255,0,255],\n  gainsboro:  [220,220,220],\n  ghostwhite: [248,248,255],\n  gold: [255,215,0],\n  goldenrod:  [218,165,32],\n  gray: [128,128,128],\n  green:  [0,128,0],\n  greenyellow:  [173,255,47],\n  grey: [128,128,128],\n  honeydew: [240,255,240],\n  hotpink:  [255,105,180],\n  indianred:  [205,92,92],\n  indigo: [75,0,130],\n  ivory:  [255,255,240],\n  khaki:  [240,230,140],\n  lavender: [230,230,250],\n  lavenderblush:  [255,240,245],\n  lawngreen:  [124,252,0],\n  lemonchiffon: [255,250,205],\n  lightblue:  [173,216,230],\n  lightcoral: [240,128,128],\n  lightcyan:  [224,255,255],\n  lightgoldenrodyellow: [250,250,210],\n  lightgray:  [211,211,211],\n  lightgreen: [144,238,144],\n  lightgrey:  [211,211,211],\n  lightpink:  [255,182,193],\n  lightsalmon:  [255,160,122],\n  lightseagreen:  [32,178,170],\n  lightskyblue: [135,206,250],\n  lightslategray: [119,136,153],\n  lightslategrey: [119,136,153],\n  lightsteelblue: [176,196,222],\n  lightyellow:  [255,255,224],\n  lime: [0,255,0],\n  limegreen:  [50,205,50],\n  linen:  [250,240,230],\n  magenta:  [255,0,255],\n  maroon: [128,0,0],\n  mediumaquamarine: [102,205,170],\n  mediumblue: [0,0,205],\n  mediumorchid: [186,85,211],\n  mediumpurple: [147,112,219],\n  mediumseagreen: [60,179,113],\n  mediumslateblue:  [123,104,238],\n  
mediumspringgreen:  [0,250,154],\n  mediumturquoise:  [72,209,204],\n  mediumvioletred:  [199,21,133],\n  midnightblue: [25,25,112],\n  mintcream:  [245,255,250],\n  mistyrose:  [255,228,225],\n  moccasin: [255,228,181],\n  navajowhite:  [255,222,173],\n  navy: [0,0,128],\n  oldlace:  [253,245,230],\n  olive:  [128,128,0],\n  olivedrab:  [107,142,35],\n  orange: [255,165,0],\n  orangered:  [255,69,0],\n  orchid: [218,112,214],\n  palegoldenrod:  [238,232,170],\n  palegreen:  [152,251,152],\n  paleturquoise:  [175,238,238],\n  palevioletred:  [219,112,147],\n  papayawhip: [255,239,213],\n  peachpuff:  [255,218,185],\n  peru: [205,133,63],\n  pink: [255,192,203],\n  plum: [221,160,221],\n  powderblue: [176,224,230],\n  purple: [128,0,128],\n  rebeccapurple: [102, 51, 153],\n  red:  [255,0,0],\n  rosybrown:  [188,143,143],\n  royalblue:  [65,105,225],\n  saddlebrown:  [139,69,19],\n  salmon: [250,128,114],\n  sandybrown: [244,164,96],\n  seagreen: [46,139,87],\n  seashell: [255,245,238],\n  sienna: [160,82,45],\n  silver: [192,192,192],\n  skyblue:  [135,206,235],\n  slateblue:  [106,90,205],\n  slategray:  [112,128,144],\n  slategrey:  [112,128,144],\n  snow: [255,250,250],\n  springgreen:  [0,255,127],\n  steelblue:  [70,130,180],\n  tan:  [210,180,140],\n  teal: [0,128,128],\n  thistle:  [216,191,216],\n  tomato: [255,99,71],\n  turquoise:  [64,224,208],\n  violet: [238,130,238],\n  wheat:  [245,222,179],\n  white:  [255,255,255],\n  whitesmoke: [245,245,245],\n  yellow: [255,255,0],\n  yellowgreen:  [154,205,50]\n};\n\nvar reverseKeywords = {};\nfor (var key in cssKeywords) {\n  reverseKeywords[JSON.stringify(cssKeywords[key])] = key;\n}\n\n},{}],4:[function(require,module,exports){\nvar conversions = require(3);\n\nvar convert = function() {\n   return new Converter();\n}\n\nfor (var func in conversions) {\n  // export Raw versions\n  convert[func + \"Raw\"] =  (function(func) {\n    // accept array or plain args\n    return function(arg) {\n      if (typeof arg 
== \"number\")\n        arg = Array.prototype.slice.call(arguments);\n      return conversions[func](arg);\n    }\n  })(func);\n\n  var pair = /(\\w+)2(\\w+)/.exec(func),\n      from = pair[1],\n      to = pair[2];\n\n  // export rgb2hsl and [\"rgb\"][\"hsl\"]\n  convert[from] = convert[from] || {};\n\n  convert[from][to] = convert[func] = (function(func) { \n    return function(arg) {\n      if (typeof arg == \"number\")\n        arg = Array.prototype.slice.call(arguments);\n      \n      var val = conversions[func](arg);\n      if (typeof val == \"string\" || val === undefined)\n        return val; // keyword\n\n      for (var i = 0; i < val.length; i++)\n        val[i] = Math.round(val[i]);\n      return val;\n    }\n  })(func);\n}\n\n\n/* Converter does lazy conversion and caching */\nvar Converter = function() {\n   this.convs = {};\n};\n\n/* Either get the values for a space or\n  set the values for a space, depending on args */\nConverter.prototype.routeSpace = function(space, args) {\n   var values = args[0];\n   if (values === undefined) {\n      // color.rgb()\n      return this.getValues(space);\n   }\n   // color.rgb(10, 10, 10)\n   if (typeof values == \"number\") {\n      values = Array.prototype.slice.call(args);        \n   }\n\n   return this.setValues(space, values);\n};\n  \n/* Set the values for a space, invalidating cache */\nConverter.prototype.setValues = function(space, values) {\n   this.space = space;\n   this.convs = {};\n   this.convs[space] = values;\n   return this;\n};\n\n/* Get the values for a space. 
If there's already\n  a conversion for the space, fetch it, otherwise\n  compute it */\nConverter.prototype.getValues = function(space) {\n   var vals = this.convs[space];\n   if (!vals) {\n      var fspace = this.space,\n          from = this.convs[fspace];\n      vals = convert[fspace][space](from);\n\n      this.convs[space] = vals;\n   }\n  return vals;\n};\n\n[\"rgb\", \"hsl\", \"hsv\", \"cmyk\", \"keyword\"].forEach(function(space) {\n   Converter.prototype[space] = function(vals) {\n      return this.routeSpace(space, arguments);\n   }\n});\n\nmodule.exports = convert;\n},{\"3\":3}],5:[function(require,module,exports){\n'use strict'\r\n\r\nmodule.exports = {\r\n\t\"aliceblue\": [240, 248, 255],\r\n\t\"antiquewhite\": [250, 235, 215],\r\n\t\"aqua\": [0, 255, 255],\r\n\t\"aquamarine\": [127, 255, 212],\r\n\t\"azure\": [240, 255, 255],\r\n\t\"beige\": [245, 245, 220],\r\n\t\"bisque\": [255, 228, 196],\r\n\t\"black\": [0, 0, 0],\r\n\t\"blanchedalmond\": [255, 235, 205],\r\n\t\"blue\": [0, 0, 255],\r\n\t\"blueviolet\": [138, 43, 226],\r\n\t\"brown\": [165, 42, 42],\r\n\t\"burlywood\": [222, 184, 135],\r\n\t\"cadetblue\": [95, 158, 160],\r\n\t\"chartreuse\": [127, 255, 0],\r\n\t\"chocolate\": [210, 105, 30],\r\n\t\"coral\": [255, 127, 80],\r\n\t\"cornflowerblue\": [100, 149, 237],\r\n\t\"cornsilk\": [255, 248, 220],\r\n\t\"crimson\": [220, 20, 60],\r\n\t\"cyan\": [0, 255, 255],\r\n\t\"darkblue\": [0, 0, 139],\r\n\t\"darkcyan\": [0, 139, 139],\r\n\t\"darkgoldenrod\": [184, 134, 11],\r\n\t\"darkgray\": [169, 169, 169],\r\n\t\"darkgreen\": [0, 100, 0],\r\n\t\"darkgrey\": [169, 169, 169],\r\n\t\"darkkhaki\": [189, 183, 107],\r\n\t\"darkmagenta\": [139, 0, 139],\r\n\t\"darkolivegreen\": [85, 107, 47],\r\n\t\"darkorange\": [255, 140, 0],\r\n\t\"darkorchid\": [153, 50, 204],\r\n\t\"darkred\": [139, 0, 0],\r\n\t\"darksalmon\": [233, 150, 122],\r\n\t\"darkseagreen\": [143, 188, 143],\r\n\t\"darkslateblue\": [72, 61, 139],\r\n\t\"darkslategray\": [47, 79, 
79],\r\n\t\"darkslategrey\": [47, 79, 79],\r\n\t\"darkturquoise\": [0, 206, 209],\r\n\t\"darkviolet\": [148, 0, 211],\r\n\t\"deeppink\": [255, 20, 147],\r\n\t\"deepskyblue\": [0, 191, 255],\r\n\t\"dimgray\": [105, 105, 105],\r\n\t\"dimgrey\": [105, 105, 105],\r\n\t\"dodgerblue\": [30, 144, 255],\r\n\t\"firebrick\": [178, 34, 34],\r\n\t\"floralwhite\": [255, 250, 240],\r\n\t\"forestgreen\": [34, 139, 34],\r\n\t\"fuchsia\": [255, 0, 255],\r\n\t\"gainsboro\": [220, 220, 220],\r\n\t\"ghostwhite\": [248, 248, 255],\r\n\t\"gold\": [255, 215, 0],\r\n\t\"goldenrod\": [218, 165, 32],\r\n\t\"gray\": [128, 128, 128],\r\n\t\"green\": [0, 128, 0],\r\n\t\"greenyellow\": [173, 255, 47],\r\n\t\"grey\": [128, 128, 128],\r\n\t\"honeydew\": [240, 255, 240],\r\n\t\"hotpink\": [255, 105, 180],\r\n\t\"indianred\": [205, 92, 92],\r\n\t\"indigo\": [75, 0, 130],\r\n\t\"ivory\": [255, 255, 240],\r\n\t\"khaki\": [240, 230, 140],\r\n\t\"lavender\": [230, 230, 250],\r\n\t\"lavenderblush\": [255, 240, 245],\r\n\t\"lawngreen\": [124, 252, 0],\r\n\t\"lemonchiffon\": [255, 250, 205],\r\n\t\"lightblue\": [173, 216, 230],\r\n\t\"lightcoral\": [240, 128, 128],\r\n\t\"lightcyan\": [224, 255, 255],\r\n\t\"lightgoldenrodyellow\": [250, 250, 210],\r\n\t\"lightgray\": [211, 211, 211],\r\n\t\"lightgreen\": [144, 238, 144],\r\n\t\"lightgrey\": [211, 211, 211],\r\n\t\"lightpink\": [255, 182, 193],\r\n\t\"lightsalmon\": [255, 160, 122],\r\n\t\"lightseagreen\": [32, 178, 170],\r\n\t\"lightskyblue\": [135, 206, 250],\r\n\t\"lightslategray\": [119, 136, 153],\r\n\t\"lightslategrey\": [119, 136, 153],\r\n\t\"lightsteelblue\": [176, 196, 222],\r\n\t\"lightyellow\": [255, 255, 224],\r\n\t\"lime\": [0, 255, 0],\r\n\t\"limegreen\": [50, 205, 50],\r\n\t\"linen\": [250, 240, 230],\r\n\t\"magenta\": [255, 0, 255],\r\n\t\"maroon\": [128, 0, 0],\r\n\t\"mediumaquamarine\": [102, 205, 170],\r\n\t\"mediumblue\": [0, 0, 205],\r\n\t\"mediumorchid\": [186, 85, 211],\r\n\t\"mediumpurple\": [147, 112, 
219],\r\n\t\"mediumseagreen\": [60, 179, 113],\r\n\t\"mediumslateblue\": [123, 104, 238],\r\n\t\"mediumspringgreen\": [0, 250, 154],\r\n\t\"mediumturquoise\": [72, 209, 204],\r\n\t\"mediumvioletred\": [199, 21, 133],\r\n\t\"midnightblue\": [25, 25, 112],\r\n\t\"mintcream\": [245, 255, 250],\r\n\t\"mistyrose\": [255, 228, 225],\r\n\t\"moccasin\": [255, 228, 181],\r\n\t\"navajowhite\": [255, 222, 173],\r\n\t\"navy\": [0, 0, 128],\r\n\t\"oldlace\": [253, 245, 230],\r\n\t\"olive\": [128, 128, 0],\r\n\t\"olivedrab\": [107, 142, 35],\r\n\t\"orange\": [255, 165, 0],\r\n\t\"orangered\": [255, 69, 0],\r\n\t\"orchid\": [218, 112, 214],\r\n\t\"palegoldenrod\": [238, 232, 170],\r\n\t\"palegreen\": [152, 251, 152],\r\n\t\"paleturquoise\": [175, 238, 238],\r\n\t\"palevioletred\": [219, 112, 147],\r\n\t\"papayawhip\": [255, 239, 213],\r\n\t\"peachpuff\": [255, 218, 185],\r\n\t\"peru\": [205, 133, 63],\r\n\t\"pink\": [255, 192, 203],\r\n\t\"plum\": [221, 160, 221],\r\n\t\"powderblue\": [176, 224, 230],\r\n\t\"purple\": [128, 0, 128],\r\n\t\"rebeccapurple\": [102, 51, 153],\r\n\t\"red\": [255, 0, 0],\r\n\t\"rosybrown\": [188, 143, 143],\r\n\t\"royalblue\": [65, 105, 225],\r\n\t\"saddlebrown\": [139, 69, 19],\r\n\t\"salmon\": [250, 128, 114],\r\n\t\"sandybrown\": [244, 164, 96],\r\n\t\"seagreen\": [46, 139, 87],\r\n\t\"seashell\": [255, 245, 238],\r\n\t\"sienna\": [160, 82, 45],\r\n\t\"silver\": [192, 192, 192],\r\n\t\"skyblue\": [135, 206, 235],\r\n\t\"slateblue\": [106, 90, 205],\r\n\t\"slategray\": [112, 128, 144],\r\n\t\"slategrey\": [112, 128, 144],\r\n\t\"snow\": [255, 250, 250],\r\n\t\"springgreen\": [0, 255, 127],\r\n\t\"steelblue\": [70, 130, 180],\r\n\t\"tan\": [210, 180, 140],\r\n\t\"teal\": [0, 128, 128],\r\n\t\"thistle\": [216, 191, 216],\r\n\t\"tomato\": [255, 99, 71],\r\n\t\"turquoise\": [64, 224, 208],\r\n\t\"violet\": [238, 130, 238],\r\n\t\"wheat\": [245, 222, 179],\r\n\t\"white\": [255, 255, 255],\r\n\t\"whitesmoke\": [245, 245, 245],\r\n\t\"yellow\": [255, 255, 
0],\r\n\t\"yellowgreen\": [154, 205, 50]\r\n};\r\n\n},{}],6:[function(require,module,exports){\n//! moment.js\n//! version : 2.20.1\n//! authors : Tim Wood, Iskren Chernev, Moment.js contributors\n//! license : MIT\n//! momentjs.com\n\n;(function (global, factory) {\n    typeof exports === 'object' && typeof module !== 'undefined' ? module.exports = factory() :\n    typeof define === 'function' && define.amd ? define(factory) :\n    global.moment = factory()\n}(this, (function () { 'use strict';\n\nvar hookCallback;\n\nfunction hooks () {\n    return hookCallback.apply(null, arguments);\n}\n\n// This is done to register the method called with moment()\n// without creating circular dependencies.\nfunction setHookCallback (callback) {\n    hookCallback = callback;\n}\n\nfunction isArray(input) {\n    return input instanceof Array || Object.prototype.toString.call(input) === '[object Array]';\n}\n\nfunction isObject(input) {\n    // IE8 will treat undefined and null as object if it wasn't for\n    // input != null\n    return input != null && Object.prototype.toString.call(input) === '[object Object]';\n}\n\nfunction isObjectEmpty(obj) {\n    if (Object.getOwnPropertyNames) {\n        return (Object.getOwnPropertyNames(obj).length === 0);\n    } else {\n        var k;\n        for (k in obj) {\n            if (obj.hasOwnProperty(k)) {\n                return false;\n            }\n        }\n        return true;\n    }\n}\n\nfunction isUndefined(input) {\n    return input === void 0;\n}\n\nfunction isNumber(input) {\n    return typeof input === 'number' || Object.prototype.toString.call(input) === '[object Number]';\n}\n\nfunction isDate(input) {\n    return input instanceof Date || Object.prototype.toString.call(input) === '[object Date]';\n}\n\nfunction map(arr, fn) {\n    var res = [], i;\n    for (i = 0; i < arr.length; ++i) {\n        res.push(fn(arr[i], i));\n    }\n    return res;\n}\n\nfunction hasOwnProp(a, b) {\n    return 
Object.prototype.hasOwnProperty.call(a, b);\n}\n\nfunction extend(a, b) {\n    for (var i in b) {\n        if (hasOwnProp(b, i)) {\n            a[i] = b[i];\n        }\n    }\n\n    if (hasOwnProp(b, 'toString')) {\n        a.toString = b.toString;\n    }\n\n    if (hasOwnProp(b, 'valueOf')) {\n        a.valueOf = b.valueOf;\n    }\n\n    return a;\n}\n\nfunction createUTC (input, format, locale, strict) {\n    return createLocalOrUTC(input, format, locale, strict, true).utc();\n}\n\nfunction defaultParsingFlags() {\n    // We need to deep clone this object.\n    return {\n        empty           : false,\n        unusedTokens    : [],\n        unusedInput     : [],\n        overflow        : -2,\n        charsLeftOver   : 0,\n        nullInput       : false,\n        invalidMonth    : null,\n        invalidFormat   : false,\n        userInvalidated : false,\n        iso             : false,\n        parsedDateParts : [],\n        meridiem        : null,\n        rfc2822         : false,\n        weekdayMismatch : false\n    };\n}\n\nfunction getParsingFlags(m) {\n    if (m._pf == null) {\n        m._pf = defaultParsingFlags();\n    }\n    return m._pf;\n}\n\nvar some;\nif (Array.prototype.some) {\n    some = Array.prototype.some;\n} else {\n    some = function (fun) {\n        var t = Object(this);\n        var len = t.length >>> 0;\n\n        for (var i = 0; i < len; i++) {\n            if (i in t && fun.call(this, t[i], i, t)) {\n                return true;\n            }\n        }\n\n        return false;\n    };\n}\n\nfunction isValid(m) {\n    if (m._isValid == null) {\n        var flags = getParsingFlags(m);\n        var parsedParts = some.call(flags.parsedDateParts, function (i) {\n            return i != null;\n        });\n        var isNowValid = !isNaN(m._d.getTime()) &&\n            flags.overflow < 0 &&\n            !flags.empty &&\n            !flags.invalidMonth &&\n            !flags.invalidWeekday &&\n            !flags.weekdayMismatch &&\n      
      !flags.nullInput &&\n            !flags.invalidFormat &&\n            !flags.userInvalidated &&\n            (!flags.meridiem || (flags.meridiem && parsedParts));\n\n        if (m._strict) {\n            isNowValid = isNowValid &&\n                flags.charsLeftOver === 0 &&\n                flags.unusedTokens.length === 0 &&\n                flags.bigHour === undefined;\n        }\n\n        if (Object.isFrozen == null || !Object.isFrozen(m)) {\n            m._isValid = isNowValid;\n        }\n        else {\n            return isNowValid;\n        }\n    }\n    return m._isValid;\n}\n\nfunction createInvalid (flags) {\n    var m = createUTC(NaN);\n    if (flags != null) {\n        extend(getParsingFlags(m), flags);\n    }\n    else {\n        getParsingFlags(m).userInvalidated = true;\n    }\n\n    return m;\n}\n\n// Plugins that add properties should also add the key here (null value),\n// so we can properly clone ourselves.\nvar momentProperties = hooks.momentProperties = [];\n\nfunction copyConfig(to, from) {\n    var i, prop, val;\n\n    if (!isUndefined(from._isAMomentObject)) {\n        to._isAMomentObject = from._isAMomentObject;\n    }\n    if (!isUndefined(from._i)) {\n        to._i = from._i;\n    }\n    if (!isUndefined(from._f)) {\n        to._f = from._f;\n    }\n    if (!isUndefined(from._l)) {\n        to._l = from._l;\n    }\n    if (!isUndefined(from._strict)) {\n        to._strict = from._strict;\n    }\n    if (!isUndefined(from._tzm)) {\n        to._tzm = from._tzm;\n    }\n    if (!isUndefined(from._isUTC)) {\n        to._isUTC = from._isUTC;\n    }\n    if (!isUndefined(from._offset)) {\n        to._offset = from._offset;\n    }\n    if (!isUndefined(from._pf)) {\n        to._pf = getParsingFlags(from);\n    }\n    if (!isUndefined(from._locale)) {\n        to._locale = from._locale;\n    }\n\n    if (momentProperties.length > 0) {\n        for (i = 0; i < momentProperties.length; i++) {\n            prop = momentProperties[i];\n      
      val = from[prop];\n            if (!isUndefined(val)) {\n                to[prop] = val;\n            }\n        }\n    }\n\n    return to;\n}\n\nvar updateInProgress = false;\n\n// Moment prototype object\nfunction Moment(config) {\n    copyConfig(this, config);\n    this._d = new Date(config._d != null ? config._d.getTime() : NaN);\n    if (!this.isValid()) {\n        this._d = new Date(NaN);\n    }\n    // Prevent infinite loop in case updateOffset creates new moment\n    // objects.\n    if (updateInProgress === false) {\n        updateInProgress = true;\n        hooks.updateOffset(this);\n        updateInProgress = false;\n    }\n}\n\nfunction isMoment (obj) {\n    return obj instanceof Moment || (obj != null && obj._isAMomentObject != null);\n}\n\nfunction absFloor (number) {\n    if (number < 0) {\n        // -0 -> 0\n        return Math.ceil(number) || 0;\n    } else {\n        return Math.floor(number);\n    }\n}\n\nfunction toInt(argumentForCoercion) {\n    var coercedNumber = +argumentForCoercion,\n        value = 0;\n\n    if (coercedNumber !== 0 && isFinite(coercedNumber)) {\n        value = absFloor(coercedNumber);\n    }\n\n    return value;\n}\n\n// compare two arrays, return the number of differences\nfunction compareArrays(array1, array2, dontConvert) {\n    var len = Math.min(array1.length, array2.length),\n        lengthDiff = Math.abs(array1.length - array2.length),\n        diffs = 0,\n        i;\n    for (i = 0; i < len; i++) {\n        if ((dontConvert && array1[i] !== array2[i]) ||\n            (!dontConvert && toInt(array1[i]) !== toInt(array2[i]))) {\n            diffs++;\n        }\n    }\n    return diffs + lengthDiff;\n}\n\nfunction warn(msg) {\n    if (hooks.suppressDeprecationWarnings === false &&\n            (typeof console !==  'undefined') && console.warn) {\n        console.warn('Deprecation warning: ' + msg);\n    }\n}\n\nfunction deprecate(msg, fn) {\n    var firstTime = true;\n\n    return extend(function () {\n        
if (hooks.deprecationHandler != null) {\n            hooks.deprecationHandler(null, msg);\n        }\n        if (firstTime) {\n            var args = [];\n            var arg;\n            for (var i = 0; i < arguments.length; i++) {\n                arg = '';\n                if (typeof arguments[i] === 'object') {\n                    arg += '\\n[' + i + '] ';\n                    for (var key in arguments[0]) {\n                        arg += key + ': ' + arguments[0][key] + ', ';\n                    }\n                    arg = arg.slice(0, -2); // Remove trailing comma and space\n                } else {\n                    arg = arguments[i];\n                }\n                args.push(arg);\n            }\n            warn(msg + '\\nArguments: ' + Array.prototype.slice.call(args).join('') + '\\n' + (new Error()).stack);\n            firstTime = false;\n        }\n        return fn.apply(this, arguments);\n    }, fn);\n}\n\nvar deprecations = {};\n\nfunction deprecateSimple(name, msg) {\n    if (hooks.deprecationHandler != null) {\n        hooks.deprecationHandler(name, msg);\n    }\n    if (!deprecations[name]) {\n        warn(msg);\n        deprecations[name] = true;\n    }\n}\n\nhooks.suppressDeprecationWarnings = false;\nhooks.deprecationHandler = null;\n\nfunction isFunction(input) {\n    return input instanceof Function || Object.prototype.toString.call(input) === '[object Function]';\n}\n\nfunction set (config) {\n    var prop, i;\n    for (i in config) {\n        prop = config[i];\n        if (isFunction(prop)) {\n            this[i] = prop;\n        } else {\n            this['_' + i] = prop;\n        }\n    }\n    this._config = config;\n    // Lenient ordinal parsing accepts just a number in addition to\n    // number + (possibly) stuff coming from _dayOfMonthOrdinalParse.\n    // TODO: Remove \"ordinalParse\" fallback in next major release.\n    this._dayOfMonthOrdinalParseLenient = new RegExp(\n        (this._dayOfMonthOrdinalParse.source || 
this._ordinalParse.source) +\n            '|' + (/\\d{1,2}/).source);\n}\n\nfunction mergeConfigs(parentConfig, childConfig) {\n    var res = extend({}, parentConfig), prop;\n    for (prop in childConfig) {\n        if (hasOwnProp(childConfig, prop)) {\n            if (isObject(parentConfig[prop]) && isObject(childConfig[prop])) {\n                res[prop] = {};\n                extend(res[prop], parentConfig[prop]);\n                extend(res[prop], childConfig[prop]);\n            } else if (childConfig[prop] != null) {\n                res[prop] = childConfig[prop];\n            } else {\n                delete res[prop];\n            }\n        }\n    }\n    for (prop in parentConfig) {\n        if (hasOwnProp(parentConfig, prop) &&\n                !hasOwnProp(childConfig, prop) &&\n                isObject(parentConfig[prop])) {\n            // make sure changes to properties don't modify parent config\n            res[prop] = extend({}, res[prop]);\n        }\n    }\n    return res;\n}\n\nfunction Locale(config) {\n    if (config != null) {\n        this.set(config);\n    }\n}\n\nvar keys;\n\nif (Object.keys) {\n    keys = Object.keys;\n} else {\n    keys = function (obj) {\n        var i, res = [];\n        for (i in obj) {\n            if (hasOwnProp(obj, i)) {\n                res.push(i);\n            }\n        }\n        return res;\n    };\n}\n\nvar defaultCalendar = {\n    sameDay : '[Today at] LT',\n    nextDay : '[Tomorrow at] LT',\n    nextWeek : 'dddd [at] LT',\n    lastDay : '[Yesterday at] LT',\n    lastWeek : '[Last] dddd [at] LT',\n    sameElse : 'L'\n};\n\nfunction calendar (key, mom, now) {\n    var output = this._calendar[key] || this._calendar['sameElse'];\n    return isFunction(output) ? 
output.call(mom, now) : output;\n}\n\nvar defaultLongDateFormat = {\n    LTS  : 'h:mm:ss A',\n    LT   : 'h:mm A',\n    L    : 'MM/DD/YYYY',\n    LL   : 'MMMM D, YYYY',\n    LLL  : 'MMMM D, YYYY h:mm A',\n    LLLL : 'dddd, MMMM D, YYYY h:mm A'\n};\n\nfunction longDateFormat (key) {\n    var format = this._longDateFormat[key],\n        formatUpper = this._longDateFormat[key.toUpperCase()];\n\n    if (format || !formatUpper) {\n        return format;\n    }\n\n    this._longDateFormat[key] = formatUpper.replace(/MMMM|MM|DD|dddd/g, function (val) {\n        return val.slice(1);\n    });\n\n    return this._longDateFormat[key];\n}\n\nvar defaultInvalidDate = 'Invalid date';\n\nfunction invalidDate () {\n    return this._invalidDate;\n}\n\nvar defaultOrdinal = '%d';\nvar defaultDayOfMonthOrdinalParse = /\\d{1,2}/;\n\nfunction ordinal (number) {\n    return this._ordinal.replace('%d', number);\n}\n\nvar defaultRelativeTime = {\n    future : 'in %s',\n    past   : '%s ago',\n    s  : 'a few seconds',\n    ss : '%d seconds',\n    m  : 'a minute',\n    mm : '%d minutes',\n    h  : 'an hour',\n    hh : '%d hours',\n    d  : 'a day',\n    dd : '%d days',\n    M  : 'a month',\n    MM : '%d months',\n    y  : 'a year',\n    yy : '%d years'\n};\n\nfunction relativeTime (number, withoutSuffix, string, isFuture) {\n    var output = this._relativeTime[string];\n    return (isFunction(output)) ?\n        output(number, withoutSuffix, string, isFuture) :\n        output.replace(/%d/i, number);\n}\n\nfunction pastFuture (diff, output) {\n    var format = this._relativeTime[diff > 0 ? 'future' : 'past'];\n    return isFunction(format) ? format(output) : format.replace(/%s/i, output);\n}\n\nvar aliases = {};\n\nfunction addUnitAlias (unit, shorthand) {\n    var lowerCase = unit.toLowerCase();\n    aliases[lowerCase] = aliases[lowerCase + 's'] = aliases[shorthand] = unit;\n}\n\nfunction normalizeUnits(units) {\n    return typeof units === 'string' ? 
aliases[units] || aliases[units.toLowerCase()] : undefined;\n}\n\nfunction normalizeObjectUnits(inputObject) {\n    var normalizedInput = {},\n        normalizedProp,\n        prop;\n\n    for (prop in inputObject) {\n        if (hasOwnProp(inputObject, prop)) {\n            normalizedProp = normalizeUnits(prop);\n            if (normalizedProp) {\n                normalizedInput[normalizedProp] = inputObject[prop];\n            }\n        }\n    }\n\n    return normalizedInput;\n}\n\nvar priorities = {};\n\nfunction addUnitPriority(unit, priority) {\n    priorities[unit] = priority;\n}\n\nfunction getPrioritizedUnits(unitsObj) {\n    var units = [];\n    for (var u in unitsObj) {\n        units.push({unit: u, priority: priorities[u]});\n    }\n    units.sort(function (a, b) {\n        return a.priority - b.priority;\n    });\n    return units;\n}\n\nfunction zeroFill(number, targetLength, forceSign) {\n    var absNumber = '' + Math.abs(number),\n        zerosToFill = targetLength - absNumber.length,\n        sign = number >= 0;\n    return (sign ? (forceSign ? 
'+' : '') : '-') +\n        Math.pow(10, Math.max(0, zerosToFill)).toString().substr(1) + absNumber;\n}\n\nvar formattingTokens = /(\\[[^\\[]*\\])|(\\\\)?([Hh]mm(ss)?|Mo|MM?M?M?|Do|DDDo|DD?D?D?|ddd?d?|do?|w[o|w]?|W[o|W]?|Qo?|YYYYYY|YYYYY|YYYY|YY|gg(ggg?)?|GG(GGG?)?|e|E|a|A|hh?|HH?|kk?|mm?|ss?|S{1,9}|x|X|zz?|ZZ?|.)/g;\n\nvar localFormattingTokens = /(\\[[^\\[]*\\])|(\\\\)?(LTS|LT|LL?L?L?|l{1,4})/g;\n\nvar formatFunctions = {};\n\nvar formatTokenFunctions = {};\n\n// token:    'M'\n// padded:   ['MM', 2]\n// ordinal:  'Mo'\n// callback: function () { this.month() + 1 }\nfunction addFormatToken (token, padded, ordinal, callback) {\n    var func = callback;\n    if (typeof callback === 'string') {\n        func = function () {\n            return this[callback]();\n        };\n    }\n    if (token) {\n        formatTokenFunctions[token] = func;\n    }\n    if (padded) {\n        formatTokenFunctions[padded[0]] = function () {\n            return zeroFill(func.apply(this, arguments), padded[1], padded[2]);\n        };\n    }\n    if (ordinal) {\n        formatTokenFunctions[ordinal] = function () {\n            return this.localeData().ordinal(func.apply(this, arguments), token);\n        };\n    }\n}\n\nfunction removeFormattingTokens(input) {\n    if (input.match(/\\[[\\s\\S]/)) {\n        return input.replace(/^\\[|\\]$/g, '');\n    }\n    return input.replace(/\\\\/g, '');\n}\n\nfunction makeFormatFunction(format) {\n    var array = format.match(formattingTokens), i, length;\n\n    for (i = 0, length = array.length; i < length; i++) {\n        if (formatTokenFunctions[array[i]]) {\n            array[i] = formatTokenFunctions[array[i]];\n        } else {\n            array[i] = removeFormattingTokens(array[i]);\n        }\n    }\n\n    return function (mom) {\n        var output = '', i;\n        for (i = 0; i < length; i++) {\n            output += isFunction(array[i]) ? 
array[i].call(mom, format) : array[i];\n        }\n        return output;\n    };\n}\n\n// format date using native date object\nfunction formatMoment(m, format) {\n    if (!m.isValid()) {\n        return m.localeData().invalidDate();\n    }\n\n    format = expandFormat(format, m.localeData());\n    formatFunctions[format] = formatFunctions[format] || makeFormatFunction(format);\n\n    return formatFunctions[format](m);\n}\n\nfunction expandFormat(format, locale) {\n    var i = 5;\n\n    function replaceLongDateFormatTokens(input) {\n        return locale.longDateFormat(input) || input;\n    }\n\n    localFormattingTokens.lastIndex = 0;\n    while (i >= 0 && localFormattingTokens.test(format)) {\n        format = format.replace(localFormattingTokens, replaceLongDateFormatTokens);\n        localFormattingTokens.lastIndex = 0;\n        i -= 1;\n    }\n\n    return format;\n}\n\nvar match1         = /\\d/;            //       0 - 9\nvar match2         = /\\d\\d/;          //      00 - 99\nvar match3         = /\\d{3}/;         //     000 - 999\nvar match4         = /\\d{4}/;         //    0000 - 9999\nvar match6         = /[+-]?\\d{6}/;    // -999999 - 999999\nvar match1to2      = /\\d\\d?/;         //       0 - 99\nvar match3to4      = /\\d\\d\\d\\d?/;     //     999 - 9999\nvar match5to6      = /\\d\\d\\d\\d\\d\\d?/; //   99999 - 999999\nvar match1to3      = /\\d{1,3}/;       //       0 - 999\nvar match1to4      = /\\d{1,4}/;       //       0 - 9999\nvar match1to6      = /[+-]?\\d{1,6}/;  // -999999 - 999999\n\nvar matchUnsigned  = /\\d+/;           //       0 - inf\nvar matchSigned    = /[+-]?\\d+/;      //    -inf - inf\n\nvar matchOffset    = /Z|[+-]\\d\\d:?\\d\\d/gi; // +00:00 -00:00 +0000 -0000 or Z\nvar matchShortOffset = /Z|[+-]\\d\\d(?::?\\d\\d)?/gi; // +00 -00 +00:00 -00:00 +0000 -0000 or Z\n\nvar matchTimestamp = /[+-]?\\d+(\\.\\d{1,3})?/; // 123456789 123456789.123\n\n// any word (or two) characters or numbers including two/three word month in arabic.\n// 
includes scottish gaelic two word and hyphenated months\nvar matchWord = /[0-9]{0,256}['a-z\\u00A0-\\u05FF\\u0700-\\uD7FF\\uF900-\\uFDCF\\uFDF0-\\uFF07\\uFF10-\\uFFEF]{1,256}|[\\u0600-\\u06FF\\/]{1,256}(\\s*?[\\u0600-\\u06FF]{1,256}){1,2}/i;\n\n\nvar regexes = {};\n\nfunction addRegexToken (token, regex, strictRegex) {\n    regexes[token] = isFunction(regex) ? regex : function (isStrict, localeData) {\n        return (isStrict && strictRegex) ? strictRegex : regex;\n    };\n}\n\nfunction getParseRegexForToken (token, config) {\n    if (!hasOwnProp(regexes, token)) {\n        return new RegExp(unescapeFormat(token));\n    }\n\n    return regexes[token](config._strict, config._locale);\n}\n\n// Code from http://stackoverflow.com/questions/3561493/is-there-a-regexp-escape-function-in-javascript\nfunction unescapeFormat(s) {\n    return regexEscape(s.replace('\\\\', '').replace(/\\\\(\\[)|\\\\(\\])|\\[([^\\]\\[]*)\\]|\\\\(.)/g, function (matched, p1, p2, p3, p4) {\n        return p1 || p2 || p3 || p4;\n    }));\n}\n\nfunction regexEscape(s) {\n    return s.replace(/[-\\/\\\\^$*+?.()|[\\]{}]/g, '\\\\$&');\n}\n\nvar tokens = {};\n\nfunction addParseToken (token, callback) {\n    var i, func = callback;\n    if (typeof token === 'string') {\n        token = [token];\n    }\n    if (isNumber(callback)) {\n        func = function (input, array) {\n            array[callback] = toInt(input);\n        };\n    }\n    for (i = 0; i < token.length; i++) {\n        tokens[token[i]] = func;\n    }\n}\n\nfunction addWeekParseToken (token, callback) {\n    addParseToken(token, function (input, array, config, token) {\n        config._w = config._w || {};\n        callback(input, config._w, config, token);\n    });\n}\n\nfunction addTimeToArrayFromToken(token, input, config) {\n    if (input != null && hasOwnProp(tokens, token)) {\n        tokens[token](input, config._a, config, token);\n    }\n}\n\nvar YEAR = 0;\nvar MONTH = 1;\nvar DATE = 2;\nvar HOUR = 3;\nvar MINUTE = 4;\nvar 
SECOND = 5;\nvar MILLISECOND = 6;\nvar WEEK = 7;\nvar WEEKDAY = 8;\n\n// FORMATTING\n\naddFormatToken('Y', 0, 0, function () {\n    var y = this.year();\n    return y <= 9999 ? '' + y : '+' + y;\n});\n\naddFormatToken(0, ['YY', 2], 0, function () {\n    return this.year() % 100;\n});\n\naddFormatToken(0, ['YYYY',   4],       0, 'year');\naddFormatToken(0, ['YYYYY',  5],       0, 'year');\naddFormatToken(0, ['YYYYYY', 6, true], 0, 'year');\n\n// ALIASES\n\naddUnitAlias('year', 'y');\n\n// PRIORITIES\n\naddUnitPriority('year', 1);\n\n// PARSING\n\naddRegexToken('Y',      matchSigned);\naddRegexToken('YY',     match1to2, match2);\naddRegexToken('YYYY',   match1to4, match4);\naddRegexToken('YYYYY',  match1to6, match6);\naddRegexToken('YYYYYY', match1to6, match6);\n\naddParseToken(['YYYYY', 'YYYYYY'], YEAR);\naddParseToken('YYYY', function (input, array) {\n    array[YEAR] = input.length === 2 ? hooks.parseTwoDigitYear(input) : toInt(input);\n});\naddParseToken('YY', function (input, array) {\n    array[YEAR] = hooks.parseTwoDigitYear(input);\n});\naddParseToken('Y', function (input, array) {\n    array[YEAR] = parseInt(input, 10);\n});\n\n// HELPERS\n\nfunction daysInYear(year) {\n    return isLeapYear(year) ? 366 : 365;\n}\n\nfunction isLeapYear(year) {\n    return (year % 4 === 0 && year % 100 !== 0) || year % 400 === 0;\n}\n\n// HOOKS\n\nhooks.parseTwoDigitYear = function (input) {\n    return toInt(input) + (toInt(input) > 68 ? 1900 : 2000);\n};\n\n// MOMENTS\n\nvar getSetYear = makeGetSet('FullYear', true);\n\nfunction getIsLeapYear () {\n    return isLeapYear(this.year());\n}\n\nfunction makeGetSet (unit, keepTime) {\n    return function (value) {\n        if (value != null) {\n            set$1(this, unit, value);\n            hooks.updateOffset(this, keepTime);\n            return this;\n        } else {\n            return get(this, unit);\n        }\n    };\n}\n\nfunction get (mom, unit) {\n    return mom.isValid() ?\n        mom._d['get' + (mom._isUTC ? 
'UTC' : '') + unit]() : NaN;\n}\n\nfunction set$1 (mom, unit, value) {\n    if (mom.isValid() && !isNaN(value)) {\n        if (unit === 'FullYear' && isLeapYear(mom.year()) && mom.month() === 1 && mom.date() === 29) {\n            mom._d['set' + (mom._isUTC ? 'UTC' : '') + unit](value, mom.month(), daysInMonth(value, mom.month()));\n        }\n        else {\n            mom._d['set' + (mom._isUTC ? 'UTC' : '') + unit](value);\n        }\n    }\n}\n\n// MOMENTS\n\nfunction stringGet (units) {\n    units = normalizeUnits(units);\n    if (isFunction(this[units])) {\n        return this[units]();\n    }\n    return this;\n}\n\n\nfunction stringSet (units, value) {\n    if (typeof units === 'object') {\n        units = normalizeObjectUnits(units);\n        var prioritized = getPrioritizedUnits(units);\n        for (var i = 0; i < prioritized.length; i++) {\n            this[prioritized[i].unit](units[prioritized[i].unit]);\n        }\n    } else {\n        units = normalizeUnits(units);\n        if (isFunction(this[units])) {\n            return this[units](value);\n        }\n    }\n    return this;\n}\n\nfunction mod(n, x) {\n    return ((n % x) + x) % x;\n}\n\nvar indexOf;\n\nif (Array.prototype.indexOf) {\n    indexOf = Array.prototype.indexOf;\n} else {\n    indexOf = function (o) {\n        // I know\n        var i;\n        for (i = 0; i < this.length; ++i) {\n            if (this[i] === o) {\n                return i;\n            }\n        }\n        return -1;\n    };\n}\n\nfunction daysInMonth(year, month) {\n    if (isNaN(year) || isNaN(month)) {\n        return NaN;\n    }\n    var modMonth = mod(month, 12);\n    year += (month - modMonth) / 12;\n    return modMonth === 1 ? (isLeapYear(year) ? 
29 : 28) : (31 - modMonth % 7 % 2);\n}\n\n// FORMATTING\n\naddFormatToken('M', ['MM', 2], 'Mo', function () {\n    return this.month() + 1;\n});\n\naddFormatToken('MMM', 0, 0, function (format) {\n    return this.localeData().monthsShort(this, format);\n});\n\naddFormatToken('MMMM', 0, 0, function (format) {\n    return this.localeData().months(this, format);\n});\n\n// ALIASES\n\naddUnitAlias('month', 'M');\n\n// PRIORITY\n\naddUnitPriority('month', 8);\n\n// PARSING\n\naddRegexToken('M',    match1to2);\naddRegexToken('MM',   match1to2, match2);\naddRegexToken('MMM',  function (isStrict, locale) {\n    return locale.monthsShortRegex(isStrict);\n});\naddRegexToken('MMMM', function (isStrict, locale) {\n    return locale.monthsRegex(isStrict);\n});\n\naddParseToken(['M', 'MM'], function (input, array) {\n    array[MONTH] = toInt(input) - 1;\n});\n\naddParseToken(['MMM', 'MMMM'], function (input, array, config, token) {\n    var month = config._locale.monthsParse(input, token, config._strict);\n    // if we didn't find a month name, mark the date as invalid.\n    if (month != null) {\n        array[MONTH] = month;\n    } else {\n        getParsingFlags(config).invalidMonth = input;\n    }\n});\n\n// LOCALES\n\nvar MONTHS_IN_FORMAT = /D[oD]?(\\[[^\\[\\]]*\\]|\\s)+MMMM?/;\nvar defaultLocaleMonths = 'January_February_March_April_May_June_July_August_September_October_November_December'.split('_');\nfunction localeMonths (m, format) {\n    if (!m) {\n        return isArray(this._months) ? this._months :\n            this._months['standalone'];\n    }\n    return isArray(this._months) ? this._months[m.month()] :\n        this._months[(this._months.isFormat || MONTHS_IN_FORMAT).test(format) ? 'format' : 'standalone'][m.month()];\n}\n\nvar defaultLocaleMonthsShort = 'Jan_Feb_Mar_Apr_May_Jun_Jul_Aug_Sep_Oct_Nov_Dec'.split('_');\nfunction localeMonthsShort (m, format) {\n    if (!m) {\n        return isArray(this._monthsShort) ? 
this._monthsShort :\n            this._monthsShort['standalone'];\n    }\n    return isArray(this._monthsShort) ? this._monthsShort[m.month()] :\n        this._monthsShort[MONTHS_IN_FORMAT.test(format) ? 'format' : 'standalone'][m.month()];\n}\n\nfunction handleStrictParse(monthName, format, strict) {\n    var i, ii, mom, llc = monthName.toLocaleLowerCase();\n    if (!this._monthsParse) {\n        // this is not used\n        this._monthsParse = [];\n        this._longMonthsParse = [];\n        this._shortMonthsParse = [];\n        for (i = 0; i < 12; ++i) {\n            mom = createUTC([2000, i]);\n            this._shortMonthsParse[i] = this.monthsShort(mom, '').toLocaleLowerCase();\n            this._longMonthsParse[i] = this.months(mom, '').toLocaleLowerCase();\n        }\n    }\n\n    if (strict) {\n        if (format === 'MMM') {\n            ii = indexOf.call(this._shortMonthsParse, llc);\n            return ii !== -1 ? ii : null;\n        } else {\n            ii = indexOf.call(this._longMonthsParse, llc);\n            return ii !== -1 ? ii : null;\n        }\n    } else {\n        if (format === 'MMM') {\n            ii = indexOf.call(this._shortMonthsParse, llc);\n            if (ii !== -1) {\n                return ii;\n            }\n            ii = indexOf.call(this._longMonthsParse, llc);\n            return ii !== -1 ? ii : null;\n        } else {\n            ii = indexOf.call(this._longMonthsParse, llc);\n            if (ii !== -1) {\n                return ii;\n            }\n            ii = indexOf.call(this._shortMonthsParse, llc);\n            return ii !== -1 ? 
ii : null;\n        }\n    }\n}\n\nfunction localeMonthsParse (monthName, format, strict) {\n    var i, mom, regex;\n\n    if (this._monthsParseExact) {\n        return handleStrictParse.call(this, monthName, format, strict);\n    }\n\n    if (!this._monthsParse) {\n        this._monthsParse = [];\n        this._longMonthsParse = [];\n        this._shortMonthsParse = [];\n    }\n\n    // TODO: add sorting\n    // Sorting makes sure if one month (or abbr) is a prefix of another\n    // see sorting in computeMonthsParse\n    for (i = 0; i < 12; i++) {\n        // make the regex if we don't have it already\n        mom = createUTC([2000, i]);\n        if (strict && !this._longMonthsParse[i]) {\n            this._longMonthsParse[i] = new RegExp('^' + this.months(mom, '').replace('.', '') + '$', 'i');\n            this._shortMonthsParse[i] = new RegExp('^' + this.monthsShort(mom, '').replace('.', '') + '$', 'i');\n        }\n        if (!strict && !this._monthsParse[i]) {\n            regex = '^' + this.months(mom, '') + '|^' + this.monthsShort(mom, '');\n            this._monthsParse[i] = new RegExp(regex.replace('.', ''), 'i');\n        }\n        // test the regex\n        if (strict && format === 'MMMM' && this._longMonthsParse[i].test(monthName)) {\n            return i;\n        } else if (strict && format === 'MMM' && this._shortMonthsParse[i].test(monthName)) {\n            return i;\n        } else if (!strict && this._monthsParse[i].test(monthName)) {\n            return i;\n        }\n    }\n}\n\n// MOMENTS\n\nfunction setMonth (mom, value) {\n    var dayOfMonth;\n\n    if (!mom.isValid()) {\n        // No op\n        return mom;\n    }\n\n    if (typeof value === 'string') {\n        if (/^\\d+$/.test(value)) {\n            value = toInt(value);\n        } else {\n            value = mom.localeData().monthsParse(value);\n            // TODO: Another silent failure?\n            if (!isNumber(value)) {\n                return mom;\n            }\n        }\n  
  }\n\n    dayOfMonth = Math.min(mom.date(), daysInMonth(mom.year(), value));\n    mom._d['set' + (mom._isUTC ? 'UTC' : '') + 'Month'](value, dayOfMonth);\n    return mom;\n}\n\nfunction getSetMonth (value) {\n    if (value != null) {\n        setMonth(this, value);\n        hooks.updateOffset(this, true);\n        return this;\n    } else {\n        return get(this, 'Month');\n    }\n}\n\nfunction getDaysInMonth () {\n    return daysInMonth(this.year(), this.month());\n}\n\nvar defaultMonthsShortRegex = matchWord;\nfunction monthsShortRegex (isStrict) {\n    if (this._monthsParseExact) {\n        if (!hasOwnProp(this, '_monthsRegex')) {\n            computeMonthsParse.call(this);\n        }\n        if (isStrict) {\n            return this._monthsShortStrictRegex;\n        } else {\n            return this._monthsShortRegex;\n        }\n    } else {\n        if (!hasOwnProp(this, '_monthsShortRegex')) {\n            this._monthsShortRegex = defaultMonthsShortRegex;\n        }\n        return this._monthsShortStrictRegex && isStrict ?\n            this._monthsShortStrictRegex : this._monthsShortRegex;\n    }\n}\n\nvar defaultMonthsRegex = matchWord;\nfunction monthsRegex (isStrict) {\n    if (this._monthsParseExact) {\n        if (!hasOwnProp(this, '_monthsRegex')) {\n            computeMonthsParse.call(this);\n        }\n        if (isStrict) {\n            return this._monthsStrictRegex;\n        } else {\n            return this._monthsRegex;\n        }\n    } else {\n        if (!hasOwnProp(this, '_monthsRegex')) {\n            this._monthsRegex = defaultMonthsRegex;\n        }\n        return this._monthsStrictRegex && isStrict ?\n            this._monthsStrictRegex : this._monthsRegex;\n    }\n}\n\nfunction computeMonthsParse () {\n    function cmpLenRev(a, b) {\n        return b.length - a.length;\n    }\n\n    var shortPieces = [], longPieces = [], mixedPieces = [],\n        i, mom;\n    for (i = 0; i < 12; i++) {\n        // make the regex if we don't have 
it already\n        mom = createUTC([2000, i]);\n        shortPieces.push(this.monthsShort(mom, ''));\n        longPieces.push(this.months(mom, ''));\n        mixedPieces.push(this.months(mom, ''));\n        mixedPieces.push(this.monthsShort(mom, ''));\n    }\n    // Sorting makes sure if one month (or abbr) is a prefix of another it\n    // will match the longer piece.\n    shortPieces.sort(cmpLenRev);\n    longPieces.sort(cmpLenRev);\n    mixedPieces.sort(cmpLenRev);\n    for (i = 0; i < 12; i++) {\n        shortPieces[i] = regexEscape(shortPieces[i]);\n        longPieces[i] = regexEscape(longPieces[i]);\n    }\n    for (i = 0; i < 24; i++) {\n        mixedPieces[i] = regexEscape(mixedPieces[i]);\n    }\n\n    this._monthsRegex = new RegExp('^(' + mixedPieces.join('|') + ')', 'i');\n    this._monthsShortRegex = this._monthsRegex;\n    this._monthsStrictRegex = new RegExp('^(' + longPieces.join('|') + ')', 'i');\n    this._monthsShortStrictRegex = new RegExp('^(' + shortPieces.join('|') + ')', 'i');\n}\n\nfunction createDate (y, m, d, h, M, s, ms) {\n    // can't just apply() to create a date:\n    // https://stackoverflow.com/q/181348\n    var date = new Date(y, m, d, h, M, s, ms);\n\n    // the date constructor remaps years 0-99 to 1900-1999\n    if (y < 100 && y >= 0 && isFinite(date.getFullYear())) {\n        date.setFullYear(y);\n    }\n    return date;\n}\n\nfunction createUTCDate (y) {\n    var date = new Date(Date.UTC.apply(null, arguments));\n\n    // the Date.UTC function remaps years 0-99 to 1900-1999\n    if (y < 100 && y >= 0 && isFinite(date.getUTCFullYear())) {\n        date.setUTCFullYear(y);\n    }\n    return date;\n}\n\n// start-of-first-week - start-of-year\nfunction firstWeekOffset(year, dow, doy) {\n    var // first-week day -- which january is always in the first week (4 for iso, 1 for other)\n        fwd = 7 + dow - doy,\n        // first-week day local weekday -- which local weekday is fwd\n        fwdlw = (7 + createUTCDate(year, 0, 
fwd).getUTCDay() - dow) % 7;\n\n    return -fwdlw + fwd - 1;\n}\n\n// https://en.wikipedia.org/wiki/ISO_week_date#Calculating_a_date_given_the_year.2C_week_number_and_weekday\nfunction dayOfYearFromWeeks(year, week, weekday, dow, doy) {\n    var localWeekday = (7 + weekday - dow) % 7,\n        weekOffset = firstWeekOffset(year, dow, doy),\n        dayOfYear = 1 + 7 * (week - 1) + localWeekday + weekOffset,\n        resYear, resDayOfYear;\n\n    if (dayOfYear <= 0) {\n        resYear = year - 1;\n        resDayOfYear = daysInYear(resYear) + dayOfYear;\n    } else if (dayOfYear > daysInYear(year)) {\n        resYear = year + 1;\n        resDayOfYear = dayOfYear - daysInYear(year);\n    } else {\n        resYear = year;\n        resDayOfYear = dayOfYear;\n    }\n\n    return {\n        year: resYear,\n        dayOfYear: resDayOfYear\n    };\n}\n\nfunction weekOfYear(mom, dow, doy) {\n    var weekOffset = firstWeekOffset(mom.year(), dow, doy),\n        week = Math.floor((mom.dayOfYear() - weekOffset - 1) / 7) + 1,\n        resWeek, resYear;\n\n    if (week < 1) {\n        resYear = mom.year() - 1;\n        resWeek = week + weeksInYear(resYear, dow, doy);\n    } else if (week > weeksInYear(mom.year(), dow, doy)) {\n        resWeek = week - weeksInYear(mom.year(), dow, doy);\n        resYear = mom.year() + 1;\n    } else {\n        resYear = mom.year();\n        resWeek = week;\n    }\n\n    return {\n        week: resWeek,\n        year: resYear\n    };\n}\n\nfunction weeksInYear(year, dow, doy) {\n    var weekOffset = firstWeekOffset(year, dow, doy),\n        weekOffsetNext = firstWeekOffset(year + 1, dow, doy);\n    return (daysInYear(year) - weekOffset + weekOffsetNext) / 7;\n}\n\n// FORMATTING\n\naddFormatToken('w', ['ww', 2], 'wo', 'week');\naddFormatToken('W', ['WW', 2], 'Wo', 'isoWeek');\n\n// ALIASES\n\naddUnitAlias('week', 'w');\naddUnitAlias('isoWeek', 'W');\n\n// PRIORITIES\n\naddUnitPriority('week', 5);\naddUnitPriority('isoWeek', 5);\n\n// 
PARSING\n\naddRegexToken('w',  match1to2);\naddRegexToken('ww', match1to2, match2);\naddRegexToken('W',  match1to2);\naddRegexToken('WW', match1to2, match2);\n\naddWeekParseToken(['w', 'ww', 'W', 'WW'], function (input, week, config, token) {\n    week[token.substr(0, 1)] = toInt(input);\n});\n\n// HELPERS\n\n// LOCALES\n\nfunction localeWeek (mom) {\n    return weekOfYear(mom, this._week.dow, this._week.doy).week;\n}\n\nvar defaultLocaleWeek = {\n    dow : 0, // Sunday is the first day of the week.\n    doy : 6  // The week that contains Jan 1st is the first week of the year.\n};\n\nfunction localeFirstDayOfWeek () {\n    return this._week.dow;\n}\n\nfunction localeFirstDayOfYear () {\n    return this._week.doy;\n}\n\n// MOMENTS\n\nfunction getSetWeek (input) {\n    var week = this.localeData().week(this);\n    return input == null ? week : this.add((input - week) * 7, 'd');\n}\n\nfunction getSetISOWeek (input) {\n    var week = weekOfYear(this, 1, 4).week;\n    return input == null ? week : this.add((input - week) * 7, 'd');\n}\n\n// FORMATTING\n\naddFormatToken('d', 0, 'do', 'day');\n\naddFormatToken('dd', 0, 0, function (format) {\n    return this.localeData().weekdaysMin(this, format);\n});\n\naddFormatToken('ddd', 0, 0, function (format) {\n    return this.localeData().weekdaysShort(this, format);\n});\n\naddFormatToken('dddd', 0, 0, function (format) {\n    return this.localeData().weekdays(this, format);\n});\n\naddFormatToken('e', 0, 0, 'weekday');\naddFormatToken('E', 0, 0, 'isoWeekday');\n\n// ALIASES\n\naddUnitAlias('day', 'd');\naddUnitAlias('weekday', 'e');\naddUnitAlias('isoWeekday', 'E');\n\n// PRIORITY\naddUnitPriority('day', 11);\naddUnitPriority('weekday', 11);\naddUnitPriority('isoWeekday', 11);\n\n// PARSING\n\naddRegexToken('d',    match1to2);\naddRegexToken('e',    match1to2);\naddRegexToken('E',    match1to2);\naddRegexToken('dd',   function (isStrict, locale) {\n    return locale.weekdaysMinRegex(isStrict);\n});\naddRegexToken('ddd',   
function (isStrict, locale) {\n    return locale.weekdaysShortRegex(isStrict);\n});\naddRegexToken('dddd',   function (isStrict, locale) {\n    return locale.weekdaysRegex(isStrict);\n});\n\naddWeekParseToken(['dd', 'ddd', 'dddd'], function (input, week, config, token) {\n    var weekday = config._locale.weekdaysParse(input, token, config._strict);\n    // if we didn't get a weekday name, mark the date as invalid\n    if (weekday != null) {\n        week.d = weekday;\n    } else {\n        getParsingFlags(config).invalidWeekday = input;\n    }\n});\n\naddWeekParseToken(['d', 'e', 'E'], function (input, week, config, token) {\n    week[token] = toInt(input);\n});\n\n// HELPERS\n\nfunction parseWeekday(input, locale) {\n    if (typeof input !== 'string') {\n        return input;\n    }\n\n    if (!isNaN(input)) {\n        return parseInt(input, 10);\n    }\n\n    input = locale.weekdaysParse(input);\n    if (typeof input === 'number') {\n        return input;\n    }\n\n    return null;\n}\n\nfunction parseIsoWeekday(input, locale) {\n    if (typeof input === 'string') {\n        return locale.weekdaysParse(input) % 7 || 7;\n    }\n    return isNaN(input) ? null : input;\n}\n\n// LOCALES\n\nvar defaultLocaleWeekdays = 'Sunday_Monday_Tuesday_Wednesday_Thursday_Friday_Saturday'.split('_');\nfunction localeWeekdays (m, format) {\n    if (!m) {\n        return isArray(this._weekdays) ? this._weekdays :\n            this._weekdays['standalone'];\n    }\n    return isArray(this._weekdays) ? this._weekdays[m.day()] :\n        this._weekdays[this._weekdays.isFormat.test(format) ? 'format' : 'standalone'][m.day()];\n}\n\nvar defaultLocaleWeekdaysShort = 'Sun_Mon_Tue_Wed_Thu_Fri_Sat'.split('_');\nfunction localeWeekdaysShort (m) {\n    return (m) ? this._weekdaysShort[m.day()] : this._weekdaysShort;\n}\n\nvar defaultLocaleWeekdaysMin = 'Su_Mo_Tu_We_Th_Fr_Sa'.split('_');\nfunction localeWeekdaysMin (m) {\n    return (m) ? 
this._weekdaysMin[m.day()] : this._weekdaysMin;\n}\n\nfunction handleStrictParse$1(weekdayName, format, strict) {\n    var i, ii, mom, llc = weekdayName.toLocaleLowerCase();\n    if (!this._weekdaysParse) {\n        this._weekdaysParse = [];\n        this._shortWeekdaysParse = [];\n        this._minWeekdaysParse = [];\n\n        for (i = 0; i < 7; ++i) {\n            mom = createUTC([2000, 1]).day(i);\n            this._minWeekdaysParse[i] = this.weekdaysMin(mom, '').toLocaleLowerCase();\n            this._shortWeekdaysParse[i] = this.weekdaysShort(mom, '').toLocaleLowerCase();\n            this._weekdaysParse[i] = this.weekdays(mom, '').toLocaleLowerCase();\n        }\n    }\n\n    if (strict) {\n        if (format === 'dddd') {\n            ii = indexOf.call(this._weekdaysParse, llc);\n            return ii !== -1 ? ii : null;\n        } else if (format === 'ddd') {\n            ii = indexOf.call(this._shortWeekdaysParse, llc);\n            return ii !== -1 ? ii : null;\n        } else {\n            ii = indexOf.call(this._minWeekdaysParse, llc);\n            return ii !== -1 ? ii : null;\n        }\n    } else {\n        if (format === 'dddd') {\n            ii = indexOf.call(this._weekdaysParse, llc);\n            if (ii !== -1) {\n                return ii;\n            }\n            ii = indexOf.call(this._shortWeekdaysParse, llc);\n            if (ii !== -1) {\n                return ii;\n            }\n            ii = indexOf.call(this._minWeekdaysParse, llc);\n            return ii !== -1 ? ii : null;\n        } else if (format === 'ddd') {\n            ii = indexOf.call(this._shortWeekdaysParse, llc);\n            if (ii !== -1) {\n                return ii;\n            }\n            ii = indexOf.call(this._weekdaysParse, llc);\n            if (ii !== -1) {\n                return ii;\n            }\n            ii = indexOf.call(this._minWeekdaysParse, llc);\n            return ii !== -1 ? 
ii : null;\n        } else {\n            ii = indexOf.call(this._minWeekdaysParse, llc);\n            if (ii !== -1) {\n                return ii;\n            }\n            ii = indexOf.call(this._weekdaysParse, llc);\n            if (ii !== -1) {\n                return ii;\n            }\n            ii = indexOf.call(this._shortWeekdaysParse, llc);\n            return ii !== -1 ? ii : null;\n        }\n    }\n}\n\nfunction localeWeekdaysParse (weekdayName, format, strict) {\n    var i, mom, regex;\n\n    if (this._weekdaysParseExact) {\n        return handleStrictParse$1.call(this, weekdayName, format, strict);\n    }\n\n    if (!this._weekdaysParse) {\n        this._weekdaysParse = [];\n        this._minWeekdaysParse = [];\n        this._shortWeekdaysParse = [];\n        this._fullWeekdaysParse = [];\n    }\n\n    for (i = 0; i < 7; i++) {\n        // make the regex if we don't have it already\n\n        mom = createUTC([2000, 1]).day(i);\n        if (strict && !this._fullWeekdaysParse[i]) {\n            this._fullWeekdaysParse[i] = new RegExp('^' + this.weekdays(mom, '').replace('.', '\\.?') + '$', 'i');\n            this._shortWeekdaysParse[i] = new RegExp('^' + this.weekdaysShort(mom, '').replace('.', '\\.?') + '$', 'i');\n            this._minWeekdaysParse[i] = new RegExp('^' + this.weekdaysMin(mom, '').replace('.', '\\.?') + '$', 'i');\n        }\n        if (!this._weekdaysParse[i]) {\n            regex = '^' + this.weekdays(mom, '') + '|^' + this.weekdaysShort(mom, '') + '|^' + this.weekdaysMin(mom, '');\n            this._weekdaysParse[i] = new RegExp(regex.replace('.', ''), 'i');\n        }\n        // test the regex\n        if (strict && format === 'dddd' && this._fullWeekdaysParse[i].test(weekdayName)) {\n            return i;\n        } else if (strict && format === 'ddd' && this._shortWeekdaysParse[i].test(weekdayName)) {\n            return i;\n        } else if (strict && format === 'dd' && this._minWeekdaysParse[i].test(weekdayName)) {\n     
       return i;\n        } else if (!strict && this._weekdaysParse[i].test(weekdayName)) {\n            return i;\n        }\n    }\n}\n\n// MOMENTS\n\nfunction getSetDayOfWeek (input) {\n    if (!this.isValid()) {\n        return input != null ? this : NaN;\n    }\n    var day = this._isUTC ? this._d.getUTCDay() : this._d.getDay();\n    if (input != null) {\n        input = parseWeekday(input, this.localeData());\n        return this.add(input - day, 'd');\n    } else {\n        return day;\n    }\n}\n\nfunction getSetLocaleDayOfWeek (input) {\n    if (!this.isValid()) {\n        return input != null ? this : NaN;\n    }\n    var weekday = (this.day() + 7 - this.localeData()._week.dow) % 7;\n    return input == null ? weekday : this.add(input - weekday, 'd');\n}\n\nfunction getSetISODayOfWeek (input) {\n    if (!this.isValid()) {\n        return input != null ? this : NaN;\n    }\n\n    // behaves the same as moment#day except\n    // as a getter, returns 7 instead of 0 (1-7 range instead of 0-6)\n    // as a setter, sunday should belong to the previous week.\n\n    if (input != null) {\n        var weekday = parseIsoWeekday(input, this.localeData());\n        return this.day(this.day() % 7 ? 
weekday : weekday - 7);\n    } else {\n        return this.day() || 7;\n    }\n}\n\nvar defaultWeekdaysRegex = matchWord;\nfunction weekdaysRegex (isStrict) {\n    if (this._weekdaysParseExact) {\n        if (!hasOwnProp(this, '_weekdaysRegex')) {\n            computeWeekdaysParse.call(this);\n        }\n        if (isStrict) {\n            return this._weekdaysStrictRegex;\n        } else {\n            return this._weekdaysRegex;\n        }\n    } else {\n        if (!hasOwnProp(this, '_weekdaysRegex')) {\n            this._weekdaysRegex = defaultWeekdaysRegex;\n        }\n        return this._weekdaysStrictRegex && isStrict ?\n            this._weekdaysStrictRegex : this._weekdaysRegex;\n    }\n}\n\nvar defaultWeekdaysShortRegex = matchWord;\nfunction weekdaysShortRegex (isStrict) {\n    if (this._weekdaysParseExact) {\n        if (!hasOwnProp(this, '_weekdaysRegex')) {\n            computeWeekdaysParse.call(this);\n        }\n        if (isStrict) {\n            return this._weekdaysShortStrictRegex;\n        } else {\n            return this._weekdaysShortRegex;\n        }\n    } else {\n        if (!hasOwnProp(this, '_weekdaysShortRegex')) {\n            this._weekdaysShortRegex = defaultWeekdaysShortRegex;\n        }\n        return this._weekdaysShortStrictRegex && isStrict ?\n            this._weekdaysShortStrictRegex : this._weekdaysShortRegex;\n    }\n}\n\nvar defaultWeekdaysMinRegex = matchWord;\nfunction weekdaysMinRegex (isStrict) {\n    if (this._weekdaysParseExact) {\n        if (!hasOwnProp(this, '_weekdaysRegex')) {\n            computeWeekdaysParse.call(this);\n        }\n        if (isStrict) {\n            return this._weekdaysMinStrictRegex;\n        } else {\n            return this._weekdaysMinRegex;\n        }\n    } else {\n        if (!hasOwnProp(this, '_weekdaysMinRegex')) {\n            this._weekdaysMinRegex = defaultWeekdaysMinRegex;\n        }\n        return this._weekdaysMinStrictRegex && isStrict ?\n            
this._weekdaysMinStrictRegex : this._weekdaysMinRegex;\n    }\n}\n\n\nfunction computeWeekdaysParse () {\n    function cmpLenRev(a, b) {\n        return b.length - a.length;\n    }\n\n    var minPieces = [], shortPieces = [], longPieces = [], mixedPieces = [],\n        i, mom, minp, shortp, longp;\n    for (i = 0; i < 7; i++) {\n        // make the regex if we don't have it already\n        mom = createUTC([2000, 1]).day(i);\n        minp = this.weekdaysMin(mom, '');\n        shortp = this.weekdaysShort(mom, '');\n        longp = this.weekdays(mom, '');\n        minPieces.push(minp);\n        shortPieces.push(shortp);\n        longPieces.push(longp);\n        mixedPieces.push(minp);\n        mixedPieces.push(shortp);\n        mixedPieces.push(longp);\n    }\n    // Sorting makes sure if one weekday (or abbr) is a prefix of another it\n    // will match the longer piece.\n    minPieces.sort(cmpLenRev);\n    shortPieces.sort(cmpLenRev);\n    longPieces.sort(cmpLenRev);\n    mixedPieces.sort(cmpLenRev);\n    for (i = 0; i < 7; i++) {\n        shortPieces[i] = regexEscape(shortPieces[i]);\n        longPieces[i] = regexEscape(longPieces[i]);\n        mixedPieces[i] = regexEscape(mixedPieces[i]);\n    }\n\n    this._weekdaysRegex = new RegExp('^(' + mixedPieces.join('|') + ')', 'i');\n    this._weekdaysShortRegex = this._weekdaysRegex;\n    this._weekdaysMinRegex = this._weekdaysRegex;\n\n    this._weekdaysStrictRegex = new RegExp('^(' + longPieces.join('|') + ')', 'i');\n    this._weekdaysShortStrictRegex = new RegExp('^(' + shortPieces.join('|') + ')', 'i');\n    this._weekdaysMinStrictRegex = new RegExp('^(' + minPieces.join('|') + ')', 'i');\n}\n\n// FORMATTING\n\nfunction hFormat() {\n    return this.hours() % 12 || 12;\n}\n\nfunction kFormat() {\n    return this.hours() || 24;\n}\n\naddFormatToken('H', ['HH', 2], 0, 'hour');\naddFormatToken('h', ['hh', 2], 0, hFormat);\naddFormatToken('k', ['kk', 2], 0, kFormat);\n\naddFormatToken('hmm', 0, 0, function () {\n    
return '' + hFormat.apply(this) + zeroFill(this.minutes(), 2);\n});\n\naddFormatToken('hmmss', 0, 0, function () {\n    return '' + hFormat.apply(this) + zeroFill(this.minutes(), 2) +\n        zeroFill(this.seconds(), 2);\n});\n\naddFormatToken('Hmm', 0, 0, function () {\n    return '' + this.hours() + zeroFill(this.minutes(), 2);\n});\n\naddFormatToken('Hmmss', 0, 0, function () {\n    return '' + this.hours() + zeroFill(this.minutes(), 2) +\n        zeroFill(this.seconds(), 2);\n});\n\nfunction meridiem (token, lowercase) {\n    addFormatToken(token, 0, 0, function () {\n        return this.localeData().meridiem(this.hours(), this.minutes(), lowercase);\n    });\n}\n\nmeridiem('a', true);\nmeridiem('A', false);\n\n// ALIASES\n\naddUnitAlias('hour', 'h');\n\n// PRIORITY\naddUnitPriority('hour', 13);\n\n// PARSING\n\nfunction matchMeridiem (isStrict, locale) {\n    return locale._meridiemParse;\n}\n\naddRegexToken('a',  matchMeridiem);\naddRegexToken('A',  matchMeridiem);\naddRegexToken('H',  match1to2);\naddRegexToken('h',  match1to2);\naddRegexToken('k',  match1to2);\naddRegexToken('HH', match1to2, match2);\naddRegexToken('hh', match1to2, match2);\naddRegexToken('kk', match1to2, match2);\n\naddRegexToken('hmm', match3to4);\naddRegexToken('hmmss', match5to6);\naddRegexToken('Hmm', match3to4);\naddRegexToken('Hmmss', match5to6);\n\naddParseToken(['H', 'HH'], HOUR);\naddParseToken(['k', 'kk'], function (input, array, config) {\n    var kInput = toInt(input);\n    array[HOUR] = kInput === 24 ? 
0 : kInput;\n});\naddParseToken(['a', 'A'], function (input, array, config) {\n    config._isPm = config._locale.isPM(input);\n    config._meridiem = input;\n});\naddParseToken(['h', 'hh'], function (input, array, config) {\n    array[HOUR] = toInt(input);\n    getParsingFlags(config).bigHour = true;\n});\naddParseToken('hmm', function (input, array, config) {\n    var pos = input.length - 2;\n    array[HOUR] = toInt(input.substr(0, pos));\n    array[MINUTE] = toInt(input.substr(pos));\n    getParsingFlags(config).bigHour = true;\n});\naddParseToken('hmmss', function (input, array, config) {\n    var pos1 = input.length - 4;\n    var pos2 = input.length - 2;\n    array[HOUR] = toInt(input.substr(0, pos1));\n    array[MINUTE] = toInt(input.substr(pos1, 2));\n    array[SECOND] = toInt(input.substr(pos2));\n    getParsingFlags(config).bigHour = true;\n});\naddParseToken('Hmm', function (input, array, config) {\n    var pos = input.length - 2;\n    array[HOUR] = toInt(input.substr(0, pos));\n    array[MINUTE] = toInt(input.substr(pos));\n});\naddParseToken('Hmmss', function (input, array, config) {\n    var pos1 = input.length - 4;\n    var pos2 = input.length - 2;\n    array[HOUR] = toInt(input.substr(0, pos1));\n    array[MINUTE] = toInt(input.substr(pos1, 2));\n    array[SECOND] = toInt(input.substr(pos2));\n});\n\n// LOCALES\n\nfunction localeIsPM (input) {\n    // IE8 Quirks Mode & IE7 Standards Mode do not allow accessing strings like arrays\n    // Using charAt should be more compatible.\n    return ((input + '').toLowerCase().charAt(0) === 'p');\n}\n\nvar defaultLocaleMeridiemParse = /[ap]\\.?m?\\.?/i;\nfunction localeMeridiem (hours, minutes, isLower) {\n    if (hours > 11) {\n        return isLower ? 'pm' : 'PM';\n    } else {\n        return isLower ? 'am' : 'AM';\n    }\n}\n\n\n// MOMENTS\n\n// Setting the hour should keep the time, because the user explicitly\n// specified which hour he wants. 
So trying to maintain the same hour (in\n// a new timezone) makes sense. Adding/subtracting hours does not follow\n// this rule.\nvar getSetHour = makeGetSet('Hours', true);\n\n// months\n// week\n// weekdays\n// meridiem\nvar baseConfig = {\n    calendar: defaultCalendar,\n    longDateFormat: defaultLongDateFormat,\n    invalidDate: defaultInvalidDate,\n    ordinal: defaultOrdinal,\n    dayOfMonthOrdinalParse: defaultDayOfMonthOrdinalParse,\n    relativeTime: defaultRelativeTime,\n\n    months: defaultLocaleMonths,\n    monthsShort: defaultLocaleMonthsShort,\n\n    week: defaultLocaleWeek,\n\n    weekdays: defaultLocaleWeekdays,\n    weekdaysMin: defaultLocaleWeekdaysMin,\n    weekdaysShort: defaultLocaleWeekdaysShort,\n\n    meridiemParse: defaultLocaleMeridiemParse\n};\n\n// internal storage for locale config files\nvar locales = {};\nvar localeFamilies = {};\nvar globalLocale;\n\nfunction normalizeLocale(key) {\n    return key ? key.toLowerCase().replace('_', '-') : key;\n}\n\n// pick the locale from the array\n// try ['en-au', 'en-gb'] as 'en-au', 'en-gb', 'en', as in move through the list trying each\n// substring from most specific to least, but move to the next array item if it's a more specific variant than the current root\nfunction chooseLocale(names) {\n    var i = 0, j, next, locale, split;\n\n    while (i < names.length) {\n        split = normalizeLocale(names[i]).split('-');\n        j = split.length;\n        next = normalizeLocale(names[i + 1]);\n        next = next ? 
next.split('-') : null;\n        while (j > 0) {\n            locale = loadLocale(split.slice(0, j).join('-'));\n            if (locale) {\n                return locale;\n            }\n            if (next && next.length >= j && compareArrays(split, next, true) >= j - 1) {\n                //the next array item is better than a shallower substring of this one\n                break;\n            }\n            j--;\n        }\n        i++;\n    }\n    return null;\n}\n\nfunction loadLocale(name) {\n    var oldLocale = null;\n    // TODO: Find a better way to register and load all the locales in Node\n    if (!locales[name] && (typeof module !== 'undefined') &&\n            module && module.exports) {\n        try {\n            oldLocale = globalLocale._abbr;\n            var aliasedRequire = require;\n            aliasedRequire('./locale/' + name);\n            getSetGlobalLocale(oldLocale);\n        } catch (e) {}\n    }\n    return locales[name];\n}\n\n// This function will load locale and then set the global locale.  If\n// no arguments are passed in, it will simply return the current global\n// locale key.\nfunction getSetGlobalLocale (key, values) {\n    var data;\n    if (key) {\n        if (isUndefined(values)) {\n            data = getLocale(key);\n        }\n        else {\n            data = defineLocale(key, values);\n        }\n\n        if (data) {\n            // moment.duration._locale = moment._locale = data;\n            globalLocale = data;\n        }\n    }\n\n    return globalLocale._abbr;\n}\n\nfunction defineLocale (name, config) {\n    if (config !== null) {\n        var parentConfig = baseConfig;\n        config.abbr = name;\n        if (locales[name] != null) {\n            deprecateSimple('defineLocaleOverride',\n                    'use moment.updateLocale(localeName, config) to change ' +\n                    'an existing locale. 
moment.defineLocale(localeName, ' +\n                    'config) should only be used for creating a new locale ' +\n                    'See http://momentjs.com/guides/#/warnings/define-locale/ for more info.');\n            parentConfig = locales[name]._config;\n        } else if (config.parentLocale != null) {\n            if (locales[config.parentLocale] != null) {\n                parentConfig = locales[config.parentLocale]._config;\n            } else {\n                if (!localeFamilies[config.parentLocale]) {\n                    localeFamilies[config.parentLocale] = [];\n                }\n                localeFamilies[config.parentLocale].push({\n                    name: name,\n                    config: config\n                });\n                return null;\n            }\n        }\n        locales[name] = new Locale(mergeConfigs(parentConfig, config));\n\n        if (localeFamilies[name]) {\n            localeFamilies[name].forEach(function (x) {\n                defineLocale(x.name, x.config);\n            });\n        }\n\n        // backwards compat for now: also set the locale\n        // make sure we set the locale AFTER all child locales have been\n        // created, so we won't end up with the child locale set.\n        getSetGlobalLocale(name);\n\n\n        return locales[name];\n    } else {\n        // useful for testing\n        delete locales[name];\n        return null;\n    }\n}\n\nfunction updateLocale(name, config) {\n    if (config != null) {\n        var locale, tmpLocale, parentConfig = baseConfig;\n        // MERGE\n        tmpLocale = loadLocale(name);\n        if (tmpLocale != null) {\n            parentConfig = tmpLocale._config;\n        }\n        config = mergeConfigs(parentConfig, config);\n        locale = new Locale(config);\n        locale.parentLocale = locales[name];\n        locales[name] = locale;\n\n        // backwards compat for now: also set the locale\n        getSetGlobalLocale(name);\n    } else {\n     
   // pass null for config to unupdate, useful for tests\n        if (locales[name] != null) {\n            if (locales[name].parentLocale != null) {\n                locales[name] = locales[name].parentLocale;\n            } else if (locales[name] != null) {\n                delete locales[name];\n            }\n        }\n    }\n    return locales[name];\n}\n\n// returns locale data\nfunction getLocale (key) {\n    var locale;\n\n    if (key && key._locale && key._locale._abbr) {\n        key = key._locale._abbr;\n    }\n\n    if (!key) {\n        return globalLocale;\n    }\n\n    if (!isArray(key)) {\n        //short-circuit everything else\n        locale = loadLocale(key);\n        if (locale) {\n            return locale;\n        }\n        key = [key];\n    }\n\n    return chooseLocale(key);\n}\n\nfunction listLocales() {\n    return keys(locales);\n}\n\nfunction checkOverflow (m) {\n    var overflow;\n    var a = m._a;\n\n    if (a && getParsingFlags(m).overflow === -2) {\n        overflow =\n            a[MONTH]       < 0 || a[MONTH]       > 11  ? MONTH :\n            a[DATE]        < 1 || a[DATE]        > daysInMonth(a[YEAR], a[MONTH]) ? DATE :\n            a[HOUR]        < 0 || a[HOUR]        > 24 || (a[HOUR] === 24 && (a[MINUTE] !== 0 || a[SECOND] !== 0 || a[MILLISECOND] !== 0)) ? HOUR :\n            a[MINUTE]      < 0 || a[MINUTE]      > 59  ? MINUTE :\n            a[SECOND]      < 0 || a[SECOND]      > 59  ? SECOND :\n            a[MILLISECOND] < 0 || a[MILLISECOND] > 999 ? 
MILLISECOND :\n            -1;\n\n        if (getParsingFlags(m)._overflowDayOfYear && (overflow < YEAR || overflow > DATE)) {\n            overflow = DATE;\n        }\n        if (getParsingFlags(m)._overflowWeeks && overflow === -1) {\n            overflow = WEEK;\n        }\n        if (getParsingFlags(m)._overflowWeekday && overflow === -1) {\n            overflow = WEEKDAY;\n        }\n\n        getParsingFlags(m).overflow = overflow;\n    }\n\n    return m;\n}\n\n// Pick the first defined of two or three arguments.\nfunction defaults(a, b, c) {\n    if (a != null) {\n        return a;\n    }\n    if (b != null) {\n        return b;\n    }\n    return c;\n}\n\nfunction currentDateArray(config) {\n    // hooks is actually the exported moment object\n    var nowValue = new Date(hooks.now());\n    if (config._useUTC) {\n        return [nowValue.getUTCFullYear(), nowValue.getUTCMonth(), nowValue.getUTCDate()];\n    }\n    return [nowValue.getFullYear(), nowValue.getMonth(), nowValue.getDate()];\n}\n\n// convert an array to a date.\n// the array should mirror the parameters below\n// note: all values past the year are optional and will default to the lowest possible value.\n// [year, month, day , hour, minute, second, millisecond]\nfunction configFromArray (config) {\n    var i, date, input = [], currentDate, expectedWeekday, yearToUse;\n\n    if (config._d) {\n        return;\n    }\n\n    currentDate = currentDateArray(config);\n\n    //compute day of the year from weeks and weekdays\n    if (config._w && config._a[DATE] == null && config._a[MONTH] == null) {\n        dayOfYearFromWeekInfo(config);\n    }\n\n    //if the day of the year is set, figure out what it is\n    if (config._dayOfYear != null) {\n        yearToUse = defaults(config._a[YEAR], currentDate[YEAR]);\n\n        if (config._dayOfYear > daysInYear(yearToUse) || config._dayOfYear === 0) {\n            getParsingFlags(config)._overflowDayOfYear = true;\n        }\n\n        date = 
createUTCDate(yearToUse, 0, config._dayOfYear);\n        config._a[MONTH] = date.getUTCMonth();\n        config._a[DATE] = date.getUTCDate();\n    }\n\n    // Default to current date.\n    // * if no year, month, day of month are given, default to today\n    // * if day of month is given, default month and year\n    // * if month is given, default only year\n    // * if year is given, don't default anything\n    for (i = 0; i < 3 && config._a[i] == null; ++i) {\n        config._a[i] = input[i] = currentDate[i];\n    }\n\n    // Zero out whatever was not defaulted, including time\n    for (; i < 7; i++) {\n        config._a[i] = input[i] = (config._a[i] == null) ? (i === 2 ? 1 : 0) : config._a[i];\n    }\n\n    // Check for 24:00:00.000\n    if (config._a[HOUR] === 24 &&\n            config._a[MINUTE] === 0 &&\n            config._a[SECOND] === 0 &&\n            config._a[MILLISECOND] === 0) {\n        config._nextDay = true;\n        config._a[HOUR] = 0;\n    }\n\n    config._d = (config._useUTC ? createUTCDate : createDate).apply(null, input);\n    expectedWeekday = config._useUTC ? config._d.getUTCDay() : config._d.getDay();\n\n    // Apply timezone offset from input. 
The actual utcOffset can be changed\n    // with parseZone.\n    if (config._tzm != null) {\n        config._d.setUTCMinutes(config._d.getUTCMinutes() - config._tzm);\n    }\n\n    if (config._nextDay) {\n        config._a[HOUR] = 24;\n    }\n\n    // check for mismatching day of week\n    if (config._w && typeof config._w.d !== 'undefined' && config._w.d !== expectedWeekday) {\n        getParsingFlags(config).weekdayMismatch = true;\n    }\n}\n\nfunction dayOfYearFromWeekInfo(config) {\n    var w, weekYear, week, weekday, dow, doy, temp, weekdayOverflow;\n\n    w = config._w;\n    if (w.GG != null || w.W != null || w.E != null) {\n        dow = 1;\n        doy = 4;\n\n        // TODO: We need to take the current isoWeekYear, but that depends on\n        // how we interpret now (local, utc, fixed offset). So create\n        // a now version of current config (take local/utc/offset flags, and\n        // create now).\n        weekYear = defaults(w.GG, config._a[YEAR], weekOfYear(createLocal(), 1, 4).year);\n        week = defaults(w.W, 1);\n        weekday = defaults(w.E, 1);\n        if (weekday < 1 || weekday > 7) {\n            weekdayOverflow = true;\n        }\n    } else {\n        dow = config._locale._week.dow;\n        doy = config._locale._week.doy;\n\n        var curWeek = weekOfYear(createLocal(), dow, doy);\n\n        weekYear = defaults(w.gg, config._a[YEAR], curWeek.year);\n\n        // Default to current week.\n        week = defaults(w.w, curWeek.week);\n\n        if (w.d != null) {\n            // weekday -- low day numbers are considered next week\n            weekday = w.d;\n            if (weekday < 0 || weekday > 6) {\n                weekdayOverflow = true;\n            }\n        } else if (w.e != null) {\n            // local weekday -- counting starts from begining of week\n            weekday = w.e + dow;\n            if (w.e < 0 || w.e > 6) {\n                weekdayOverflow = true;\n            }\n        } else {\n            // default 
to begining of week\n            weekday = dow;\n        }\n    }\n    if (week < 1 || week > weeksInYear(weekYear, dow, doy)) {\n        getParsingFlags(config)._overflowWeeks = true;\n    } else if (weekdayOverflow != null) {\n        getParsingFlags(config)._overflowWeekday = true;\n    } else {\n        temp = dayOfYearFromWeeks(weekYear, week, weekday, dow, doy);\n        config._a[YEAR] = temp.year;\n        config._dayOfYear = temp.dayOfYear;\n    }\n}\n\n// iso 8601 regex\n// 0000-00-00 0000-W00 or 0000-W00-0 + T + 00 or 00:00 or 00:00:00 or 00:00:00.000 + +00:00 or +0000 or +00)\nvar extendedIsoRegex = /^\\s*((?:[+-]\\d{6}|\\d{4})-(?:\\d\\d-\\d\\d|W\\d\\d-\\d|W\\d\\d|\\d\\d\\d|\\d\\d))(?:(T| )(\\d\\d(?::\\d\\d(?::\\d\\d(?:[.,]\\d+)?)?)?)([\\+\\-]\\d\\d(?::?\\d\\d)?|\\s*Z)?)?$/;\nvar basicIsoRegex = /^\\s*((?:[+-]\\d{6}|\\d{4})(?:\\d\\d\\d\\d|W\\d\\d\\d|W\\d\\d|\\d\\d\\d|\\d\\d))(?:(T| )(\\d\\d(?:\\d\\d(?:\\d\\d(?:[.,]\\d+)?)?)?)([\\+\\-]\\d\\d(?::?\\d\\d)?|\\s*Z)?)?$/;\n\nvar tzRegex = /Z|[+-]\\d\\d(?::?\\d\\d)?/;\n\nvar isoDates = [\n    ['YYYYYY-MM-DD', /[+-]\\d{6}-\\d\\d-\\d\\d/],\n    ['YYYY-MM-DD', /\\d{4}-\\d\\d-\\d\\d/],\n    ['GGGG-[W]WW-E', /\\d{4}-W\\d\\d-\\d/],\n    ['GGGG-[W]WW', /\\d{4}-W\\d\\d/, false],\n    ['YYYY-DDD', /\\d{4}-\\d{3}/],\n    ['YYYY-MM', /\\d{4}-\\d\\d/, false],\n    ['YYYYYYMMDD', /[+-]\\d{10}/],\n    ['YYYYMMDD', /\\d{8}/],\n    // YYYYMM is NOT allowed by the standard\n    ['GGGG[W]WWE', /\\d{4}W\\d{3}/],\n    ['GGGG[W]WW', /\\d{4}W\\d{2}/, false],\n    ['YYYYDDD', /\\d{7}/]\n];\n\n// iso time formats and regexes\nvar isoTimes = [\n    ['HH:mm:ss.SSSS', /\\d\\d:\\d\\d:\\d\\d\\.\\d+/],\n    ['HH:mm:ss,SSSS', /\\d\\d:\\d\\d:\\d\\d,\\d+/],\n    ['HH:mm:ss', /\\d\\d:\\d\\d:\\d\\d/],\n    ['HH:mm', /\\d\\d:\\d\\d/],\n    ['HHmmss.SSSS', /\\d\\d\\d\\d\\d\\d\\.\\d+/],\n    ['HHmmss,SSSS', /\\d\\d\\d\\d\\d\\d,\\d+/],\n    ['HHmmss', /\\d\\d\\d\\d\\d\\d/],\n    ['HHmm', /\\d\\d\\d\\d/],\n    ['HH', /\\d\\d/]\n];\n\nvar 
aspNetJsonRegex = /^\\/?Date\\((\\-?\\d+)/i;\n\n// date from iso format\nfunction configFromISO(config) {\n    var i, l,\n        string = config._i,\n        match = extendedIsoRegex.exec(string) || basicIsoRegex.exec(string),\n        allowTime, dateFormat, timeFormat, tzFormat;\n\n    if (match) {\n        getParsingFlags(config).iso = true;\n\n        for (i = 0, l = isoDates.length; i < l; i++) {\n            if (isoDates[i][1].exec(match[1])) {\n                dateFormat = isoDates[i][0];\n                allowTime = isoDates[i][2] !== false;\n                break;\n            }\n        }\n        if (dateFormat == null) {\n            config._isValid = false;\n            return;\n        }\n        if (match[3]) {\n            for (i = 0, l = isoTimes.length; i < l; i++) {\n                if (isoTimes[i][1].exec(match[3])) {\n                    // match[2] should be 'T' or space\n                    timeFormat = (match[2] || ' ') + isoTimes[i][0];\n                    break;\n                }\n            }\n            if (timeFormat == null) {\n                config._isValid = false;\n                return;\n            }\n        }\n        if (!allowTime && timeFormat != null) {\n            config._isValid = false;\n            return;\n        }\n        if (match[4]) {\n            if (tzRegex.exec(match[4])) {\n                tzFormat = 'Z';\n            } else {\n                config._isValid = false;\n                return;\n            }\n        }\n        config._f = dateFormat + (timeFormat || '') + (tzFormat || '');\n        configFromStringAndFormat(config);\n    } else {\n        config._isValid = false;\n    }\n}\n\n// RFC 2822 regex: For details see https://tools.ietf.org/html/rfc2822#section-3.3\nvar rfc2822 = /^(?:(Mon|Tue|Wed|Thu|Fri|Sat|Sun),?\\s)?(\\d{1,2})\\s(Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)\\s(\\d{2,4})\\s(\\d\\d):(\\d\\d)(?::(\\d\\d))?\\s(?:(UT|GMT|[ECMP][SD]T)|([Zz])|([+-]\\d{4}))$/;\n\nfunction 
extractFromRFC2822Strings(yearStr, monthStr, dayStr, hourStr, minuteStr, secondStr) {\n    var result = [\n        untruncateYear(yearStr),\n        defaultLocaleMonthsShort.indexOf(monthStr),\n        parseInt(dayStr, 10),\n        parseInt(hourStr, 10),\n        parseInt(minuteStr, 10)\n    ];\n\n    if (secondStr) {\n        result.push(parseInt(secondStr, 10));\n    }\n\n    return result;\n}\n\nfunction untruncateYear(yearStr) {\n    var year = parseInt(yearStr, 10);\n    if (year <= 49) {\n        return 2000 + year;\n    } else if (year <= 999) {\n        return 1900 + year;\n    }\n    return year;\n}\n\nfunction preprocessRFC2822(s) {\n    // Remove comments and folding whitespace and replace multiple-spaces with a single space\n    return s.replace(/\\([^)]*\\)|[\\n\\t]/g, ' ').replace(/(\\s\\s+)/g, ' ').trim();\n}\n\nfunction checkWeekday(weekdayStr, parsedInput, config) {\n    if (weekdayStr) {\n        // TODO: Replace the vanilla JS Date object with an indepentent day-of-week check.\n        var weekdayProvided = defaultLocaleWeekdaysShort.indexOf(weekdayStr),\n            weekdayActual = new Date(parsedInput[0], parsedInput[1], parsedInput[2]).getDay();\n        if (weekdayProvided !== weekdayActual) {\n            getParsingFlags(config).weekdayMismatch = true;\n            config._isValid = false;\n            return false;\n        }\n    }\n    return true;\n}\n\nvar obsOffsets = {\n    UT: 0,\n    GMT: 0,\n    EDT: -4 * 60,\n    EST: -5 * 60,\n    CDT: -5 * 60,\n    CST: -6 * 60,\n    MDT: -6 * 60,\n    MST: -7 * 60,\n    PDT: -7 * 60,\n    PST: -8 * 60\n};\n\nfunction calculateOffset(obsOffset, militaryOffset, numOffset) {\n    if (obsOffset) {\n        return obsOffsets[obsOffset];\n    } else if (militaryOffset) {\n        // the only allowed military tz is Z\n        return 0;\n    } else {\n        var hm = parseInt(numOffset, 10);\n        var m = hm % 100, h = (hm - m) / 100;\n        return h * 60 + m;\n    }\n}\n\n// date and time from 
ref 2822 format\nfunction configFromRFC2822(config) {\n    var match = rfc2822.exec(preprocessRFC2822(config._i));\n    if (match) {\n        var parsedArray = extractFromRFC2822Strings(match[4], match[3], match[2], match[5], match[6], match[7]);\n        if (!checkWeekday(match[1], parsedArray, config)) {\n            return;\n        }\n\n        config._a = parsedArray;\n        config._tzm = calculateOffset(match[8], match[9], match[10]);\n\n        config._d = createUTCDate.apply(null, config._a);\n        config._d.setUTCMinutes(config._d.getUTCMinutes() - config._tzm);\n\n        getParsingFlags(config).rfc2822 = true;\n    } else {\n        config._isValid = false;\n    }\n}\n\n// date from iso format or fallback\nfunction configFromString(config) {\n    var matched = aspNetJsonRegex.exec(config._i);\n\n    if (matched !== null) {\n        config._d = new Date(+matched[1]);\n        return;\n    }\n\n    configFromISO(config);\n    if (config._isValid === false) {\n        delete config._isValid;\n    } else {\n        return;\n    }\n\n    configFromRFC2822(config);\n    if (config._isValid === false) {\n        delete config._isValid;\n    } else {\n        return;\n    }\n\n    // Final attempt, use Input Fallback\n    hooks.createFromInputFallback(config);\n}\n\nhooks.createFromInputFallback = deprecate(\n    'value provided is not in a recognized RFC2822 or ISO format. moment construction falls back to js Date(), ' +\n    'which is not reliable across all browsers and versions. Non RFC2822/ISO date formats are ' +\n    'discouraged and will be removed in an upcoming major release. Please refer to ' +\n    'http://momentjs.com/guides/#/warnings/js-date/ for more info.',\n    function (config) {\n        config._d = new Date(config._i + (config._useUTC ? 
' UTC' : ''));\n    }\n);\n\n// constant that refers to the ISO standard\nhooks.ISO_8601 = function () {};\n\n// constant that refers to the RFC 2822 form\nhooks.RFC_2822 = function () {};\n\n// date from string and format string\nfunction configFromStringAndFormat(config) {\n    // TODO: Move this to another part of the creation flow to prevent circular deps\n    if (config._f === hooks.ISO_8601) {\n        configFromISO(config);\n        return;\n    }\n    if (config._f === hooks.RFC_2822) {\n        configFromRFC2822(config);\n        return;\n    }\n    config._a = [];\n    getParsingFlags(config).empty = true;\n\n    // This array is used to make a Date, either with `new Date` or `Date.UTC`\n    var string = '' + config._i,\n        i, parsedInput, tokens, token, skipped,\n        stringLength = string.length,\n        totalParsedInputLength = 0;\n\n    tokens = expandFormat(config._f, config._locale).match(formattingTokens) || [];\n\n    for (i = 0; i < tokens.length; i++) {\n        token = tokens[i];\n        parsedInput = (string.match(getParseRegexForToken(token, config)) || [])[0];\n        // console.log('token', token, 'parsedInput', parsedInput,\n        //         'regex', getParseRegexForToken(token, config));\n        if (parsedInput) {\n            skipped = string.substr(0, string.indexOf(parsedInput));\n            if (skipped.length > 0) {\n                getParsingFlags(config).unusedInput.push(skipped);\n            }\n            string = string.slice(string.indexOf(parsedInput) + parsedInput.length);\n            totalParsedInputLength += parsedInput.length;\n        }\n        // don't parse if it's not a known token\n        if (formatTokenFunctions[token]) {\n            if (parsedInput) {\n                getParsingFlags(config).empty = false;\n            }\n            else {\n                getParsingFlags(config).unusedTokens.push(token);\n            }\n            addTimeToArrayFromToken(token, parsedInput, config);\n        
}\n        else if (config._strict && !parsedInput) {\n            getParsingFlags(config).unusedTokens.push(token);\n        }\n    }\n\n    // add remaining unparsed input length to the string\n    getParsingFlags(config).charsLeftOver = stringLength - totalParsedInputLength;\n    if (string.length > 0) {\n        getParsingFlags(config).unusedInput.push(string);\n    }\n\n    // clear _12h flag if hour is <= 12\n    if (config._a[HOUR] <= 12 &&\n        getParsingFlags(config).bigHour === true &&\n        config._a[HOUR] > 0) {\n        getParsingFlags(config).bigHour = undefined;\n    }\n\n    getParsingFlags(config).parsedDateParts = config._a.slice(0);\n    getParsingFlags(config).meridiem = config._meridiem;\n    // handle meridiem\n    config._a[HOUR] = meridiemFixWrap(config._locale, config._a[HOUR], config._meridiem);\n\n    configFromArray(config);\n    checkOverflow(config);\n}\n\n\nfunction meridiemFixWrap (locale, hour, meridiem) {\n    var isPm;\n\n    if (meridiem == null) {\n        // nothing to do\n        return hour;\n    }\n    if (locale.meridiemHour != null) {\n        return locale.meridiemHour(hour, meridiem);\n    } else if (locale.isPM != null) {\n        // Fallback\n        isPm = locale.isPM(meridiem);\n        if (isPm && hour < 12) {\n            hour += 12;\n        }\n        if (!isPm && hour === 12) {\n            hour = 0;\n        }\n        return hour;\n    } else {\n        // this is not supposed to happen\n        return hour;\n    }\n}\n\n// date from string and array of format strings\nfunction configFromStringAndArray(config) {\n    var tempConfig,\n        bestMoment,\n\n        scoreToBeat,\n        i,\n        currentScore;\n\n    if (config._f.length === 0) {\n        getParsingFlags(config).invalidFormat = true;\n        config._d = new Date(NaN);\n        return;\n    }\n\n    for (i = 0; i < config._f.length; i++) {\n        currentScore = 0;\n        tempConfig = copyConfig({}, config);\n        if 
(config._useUTC != null) {\n            tempConfig._useUTC = config._useUTC;\n        }\n        tempConfig._f = config._f[i];\n        configFromStringAndFormat(tempConfig);\n\n        if (!isValid(tempConfig)) {\n            continue;\n        }\n\n        // if there is any input that was not parsed add a penalty for that format\n        currentScore += getParsingFlags(tempConfig).charsLeftOver;\n\n        //or tokens\n        currentScore += getParsingFlags(tempConfig).unusedTokens.length * 10;\n\n        getParsingFlags(tempConfig).score = currentScore;\n\n        if (scoreToBeat == null || currentScore < scoreToBeat) {\n            scoreToBeat = currentScore;\n            bestMoment = tempConfig;\n        }\n    }\n\n    extend(config, bestMoment || tempConfig);\n}\n\nfunction configFromObject(config) {\n    if (config._d) {\n        return;\n    }\n\n    var i = normalizeObjectUnits(config._i);\n    config._a = map([i.year, i.month, i.day || i.date, i.hour, i.minute, i.second, i.millisecond], function (obj) {\n        return obj && parseInt(obj, 10);\n    });\n\n    configFromArray(config);\n}\n\nfunction createFromConfig (config) {\n    var res = new Moment(checkOverflow(prepareConfig(config)));\n    if (res._nextDay) {\n        // Adding is smart enough around DST\n        res.add(1, 'd');\n        res._nextDay = undefined;\n    }\n\n    return res;\n}\n\nfunction prepareConfig (config) {\n    var input = config._i,\n        format = config._f;\n\n    config._locale = config._locale || getLocale(config._l);\n\n    if (input === null || (format === undefined && input === '')) {\n        return createInvalid({nullInput: true});\n    }\n\n    if (typeof input === 'string') {\n        config._i = input = config._locale.preparse(input);\n    }\n\n    if (isMoment(input)) {\n        return new Moment(checkOverflow(input));\n    } else if (isDate(input)) {\n        config._d = input;\n    } else if (isArray(format)) {\n        configFromStringAndArray(config);\n  
  } else if (format) {\n        configFromStringAndFormat(config);\n    }  else {\n        configFromInput(config);\n    }\n\n    if (!isValid(config)) {\n        config._d = null;\n    }\n\n    return config;\n}\n\nfunction configFromInput(config) {\n    var input = config._i;\n    if (isUndefined(input)) {\n        config._d = new Date(hooks.now());\n    } else if (isDate(input)) {\n        config._d = new Date(input.valueOf());\n    } else if (typeof input === 'string') {\n        configFromString(config);\n    } else if (isArray(input)) {\n        config._a = map(input.slice(0), function (obj) {\n            return parseInt(obj, 10);\n        });\n        configFromArray(config);\n    } else if (isObject(input)) {\n        configFromObject(config);\n    } else if (isNumber(input)) {\n        // from milliseconds\n        config._d = new Date(input);\n    } else {\n        hooks.createFromInputFallback(config);\n    }\n}\n\nfunction createLocalOrUTC (input, format, locale, strict, isUTC) {\n    var c = {};\n\n    if (locale === true || locale === false) {\n        strict = locale;\n        locale = undefined;\n    }\n\n    if ((isObject(input) && isObjectEmpty(input)) ||\n            (isArray(input) && input.length === 0)) {\n        input = undefined;\n    }\n    // object construction must be done this way.\n    // https://github.com/moment/moment/issues/1423\n    c._isAMomentObject = true;\n    c._useUTC = c._isUTC = isUTC;\n    c._l = locale;\n    c._i = input;\n    c._f = format;\n    c._strict = strict;\n\n    return createFromConfig(c);\n}\n\nfunction createLocal (input, format, locale, strict) {\n    return createLocalOrUTC(input, format, locale, strict, false);\n}\n\nvar prototypeMin = deprecate(\n    'moment().min is deprecated, use moment.max instead. 
http://momentjs.com/guides/#/warnings/min-max/',\n    function () {\n        var other = createLocal.apply(null, arguments);\n        if (this.isValid() && other.isValid()) {\n            return other < this ? this : other;\n        } else {\n            return createInvalid();\n        }\n    }\n);\n\nvar prototypeMax = deprecate(\n    'moment().max is deprecated, use moment.min instead. http://momentjs.com/guides/#/warnings/min-max/',\n    function () {\n        var other = createLocal.apply(null, arguments);\n        if (this.isValid() && other.isValid()) {\n            return other > this ? this : other;\n        } else {\n            return createInvalid();\n        }\n    }\n);\n\n// Pick a moment m from moments so that m[fn](other) is true for all\n// other. This relies on the function fn to be transitive.\n//\n// moments should either be an array of moment objects or an array, whose\n// first element is an array of moment objects.\nfunction pickBy(fn, moments) {\n    var res, i;\n    if (moments.length === 1 && isArray(moments[0])) {\n        moments = moments[0];\n    }\n    if (!moments.length) {\n        return createLocal();\n    }\n    res = moments[0];\n    for (i = 1; i < moments.length; ++i) {\n        if (!moments[i].isValid() || moments[i][fn](res)) {\n            res = moments[i];\n        }\n    }\n    return res;\n}\n\n// TODO: Use [].sort instead?\nfunction min () {\n    var args = [].slice.call(arguments, 0);\n\n    return pickBy('isBefore', args);\n}\n\nfunction max () {\n    var args = [].slice.call(arguments, 0);\n\n    return pickBy('isAfter', args);\n}\n\nvar now = function () {\n    return Date.now ? 
Date.now() : +(new Date());\n};\n\nvar ordering = ['year', 'quarter', 'month', 'week', 'day', 'hour', 'minute', 'second', 'millisecond'];\n\nfunction isDurationValid(m) {\n    for (var key in m) {\n        if (!(indexOf.call(ordering, key) !== -1 && (m[key] == null || !isNaN(m[key])))) {\n            return false;\n        }\n    }\n\n    var unitHasDecimal = false;\n    for (var i = 0; i < ordering.length; ++i) {\n        if (m[ordering[i]]) {\n            if (unitHasDecimal) {\n                return false; // only allow non-integers for smallest unit\n            }\n            if (parseFloat(m[ordering[i]]) !== toInt(m[ordering[i]])) {\n                unitHasDecimal = true;\n            }\n        }\n    }\n\n    return true;\n}\n\nfunction isValid$1() {\n    return this._isValid;\n}\n\nfunction createInvalid$1() {\n    return createDuration(NaN);\n}\n\nfunction Duration (duration) {\n    var normalizedInput = normalizeObjectUnits(duration),\n        years = normalizedInput.year || 0,\n        quarters = normalizedInput.quarter || 0,\n        months = normalizedInput.month || 0,\n        weeks = normalizedInput.week || 0,\n        days = normalizedInput.day || 0,\n        hours = normalizedInput.hour || 0,\n        minutes = normalizedInput.minute || 0,\n        seconds = normalizedInput.second || 0,\n        milliseconds = normalizedInput.millisecond || 0;\n\n    this._isValid = isDurationValid(normalizedInput);\n\n    // representation for dateAddRemove\n    this._milliseconds = +milliseconds +\n        seconds * 1e3 + // 1000\n        minutes * 6e4 + // 1000 * 60\n        hours * 1000 * 60 * 60; //using 1000 * 60 * 60 instead of 36e5 to avoid floating point rounding errors https://github.com/moment/moment/issues/2978\n    // Because of dateAddRemove treats 24 hours as different from a\n    // day when working around DST, we need to store them separately\n    this._days = +days +\n        weeks * 7;\n    // It is impossible to translate months into days 
without knowing\n    // which months you are are talking about, so we have to store\n    // it separately.\n    this._months = +months +\n        quarters * 3 +\n        years * 12;\n\n    this._data = {};\n\n    this._locale = getLocale();\n\n    this._bubble();\n}\n\nfunction isDuration (obj) {\n    return obj instanceof Duration;\n}\n\nfunction absRound (number) {\n    if (number < 0) {\n        return Math.round(-1 * number) * -1;\n    } else {\n        return Math.round(number);\n    }\n}\n\n// FORMATTING\n\nfunction offset (token, separator) {\n    addFormatToken(token, 0, 0, function () {\n        var offset = this.utcOffset();\n        var sign = '+';\n        if (offset < 0) {\n            offset = -offset;\n            sign = '-';\n        }\n        return sign + zeroFill(~~(offset / 60), 2) + separator + zeroFill(~~(offset) % 60, 2);\n    });\n}\n\noffset('Z', ':');\noffset('ZZ', '');\n\n// PARSING\n\naddRegexToken('Z',  matchShortOffset);\naddRegexToken('ZZ', matchShortOffset);\naddParseToken(['Z', 'ZZ'], function (input, array, config) {\n    config._useUTC = true;\n    config._tzm = offsetFromString(matchShortOffset, input);\n});\n\n// HELPERS\n\n// timezone chunker\n// '+10:00' > ['10',  '00']\n// '-1530'  > ['-15', '30']\nvar chunkOffset = /([\\+\\-]|\\d\\d)/gi;\n\nfunction offsetFromString(matcher, string) {\n    var matches = (string || '').match(matcher);\n\n    if (matches === null) {\n        return null;\n    }\n\n    var chunk   = matches[matches.length - 1] || [];\n    var parts   = (chunk + '').match(chunkOffset) || ['-', 0, 0];\n    var minutes = +(parts[1] * 60) + toInt(parts[2]);\n\n    return minutes === 0 ?\n      0 :\n      parts[0] === '+' ? minutes : -minutes;\n}\n\n// Return a moment from input, that is local/utc/zone equivalent to model.\nfunction cloneWithOffset(input, model) {\n    var res, diff;\n    if (model._isUTC) {\n        res = model.clone();\n        diff = (isMoment(input) || isDate(input) ? 
input.valueOf() : createLocal(input).valueOf()) - res.valueOf();\n        // Use low-level api, because this fn is low-level api.\n        res._d.setTime(res._d.valueOf() + diff);\n        hooks.updateOffset(res, false);\n        return res;\n    } else {\n        return createLocal(input).local();\n    }\n}\n\nfunction getDateOffset (m) {\n    // On Firefox.24 Date#getTimezoneOffset returns a floating point.\n    // https://github.com/moment/moment/pull/1871\n    return -Math.round(m._d.getTimezoneOffset() / 15) * 15;\n}\n\n// HOOKS\n\n// This function will be called whenever a moment is mutated.\n// It is intended to keep the offset in sync with the timezone.\nhooks.updateOffset = function () {};\n\n// MOMENTS\n\n// keepLocalTime = true means only change the timezone, without\n// affecting the local hour. So 5:31:26 +0300 --[utcOffset(2, true)]-->\n// 5:31:26 +0200 It is possible that 5:31:26 doesn't exist with offset\n// +0200, so we adjust the time as needed, to be valid.\n//\n// Keeping the time actually adds/subtracts (one hour)\n// from the actual represented time. That is why we call updateOffset\n// a second time. In case it wants us to change the offset again\n// _changeInProgress == true case, then we have to adjust, because\n// there is no such time in the given timezone.\nfunction getSetOffset (input, keepLocalTime, keepMinutes) {\n    var offset = this._offset || 0,\n        localAdjust;\n    if (!this.isValid()) {\n        return input != null ? 
this : NaN;\n    }\n    if (input != null) {\n        if (typeof input === 'string') {\n            input = offsetFromString(matchShortOffset, input);\n            if (input === null) {\n                return this;\n            }\n        } else if (Math.abs(input) < 16 && !keepMinutes) {\n            input = input * 60;\n        }\n        if (!this._isUTC && keepLocalTime) {\n            localAdjust = getDateOffset(this);\n        }\n        this._offset = input;\n        this._isUTC = true;\n        if (localAdjust != null) {\n            this.add(localAdjust, 'm');\n        }\n        if (offset !== input) {\n            if (!keepLocalTime || this._changeInProgress) {\n                addSubtract(this, createDuration(input - offset, 'm'), 1, false);\n            } else if (!this._changeInProgress) {\n                this._changeInProgress = true;\n                hooks.updateOffset(this, true);\n                this._changeInProgress = null;\n            }\n        }\n        return this;\n    } else {\n        return this._isUTC ? 
offset : getDateOffset(this);\n    }\n}\n\nfunction getSetZone (input, keepLocalTime) {\n    if (input != null) {\n        if (typeof input !== 'string') {\n            input = -input;\n        }\n\n        this.utcOffset(input, keepLocalTime);\n\n        return this;\n    } else {\n        return -this.utcOffset();\n    }\n}\n\nfunction setOffsetToUTC (keepLocalTime) {\n    return this.utcOffset(0, keepLocalTime);\n}\n\nfunction setOffsetToLocal (keepLocalTime) {\n    if (this._isUTC) {\n        this.utcOffset(0, keepLocalTime);\n        this._isUTC = false;\n\n        if (keepLocalTime) {\n            this.subtract(getDateOffset(this), 'm');\n        }\n    }\n    return this;\n}\n\nfunction setOffsetToParsedOffset () {\n    if (this._tzm != null) {\n        this.utcOffset(this._tzm, false, true);\n    } else if (typeof this._i === 'string') {\n        var tZone = offsetFromString(matchOffset, this._i);\n        if (tZone != null) {\n            this.utcOffset(tZone);\n        }\n        else {\n            this.utcOffset(0, true);\n        }\n    }\n    return this;\n}\n\nfunction hasAlignedHourOffset (input) {\n    if (!this.isValid()) {\n        return false;\n    }\n    input = input ? createLocal(input).utcOffset() : 0;\n\n    return (this.utcOffset() - input) % 60 === 0;\n}\n\nfunction isDaylightSavingTime () {\n    return (\n        this.utcOffset() > this.clone().month(0).utcOffset() ||\n        this.utcOffset() > this.clone().month(5).utcOffset()\n    );\n}\n\nfunction isDaylightSavingTimeShifted () {\n    if (!isUndefined(this._isDSTShifted)) {\n        return this._isDSTShifted;\n    }\n\n    var c = {};\n\n    copyConfig(c, this);\n    c = prepareConfig(c);\n\n    if (c._a) {\n        var other = c._isUTC ? 
createUTC(c._a) : createLocal(c._a);\n        this._isDSTShifted = this.isValid() &&\n            compareArrays(c._a, other.toArray()) > 0;\n    } else {\n        this._isDSTShifted = false;\n    }\n\n    return this._isDSTShifted;\n}\n\nfunction isLocal () {\n    return this.isValid() ? !this._isUTC : false;\n}\n\nfunction isUtcOffset () {\n    return this.isValid() ? this._isUTC : false;\n}\n\nfunction isUtc () {\n    return this.isValid() ? this._isUTC && this._offset === 0 : false;\n}\n\n// ASP.NET json date format regex\nvar aspNetRegex = /^(\\-|\\+)?(?:(\\d*)[. ])?(\\d+)\\:(\\d+)(?:\\:(\\d+)(\\.\\d*)?)?$/;\n\n// from http://docs.closure-library.googlecode.com/git/closure_goog_date_date.js.source.html\n// somewhat more in line with 4.4.3.2 2004 spec, but allows decimal anywhere\n// and further modified to allow for strings containing both week and day\nvar isoRegex = /^(-|\\+)?P(?:([-+]?[0-9,.]*)Y)?(?:([-+]?[0-9,.]*)M)?(?:([-+]?[0-9,.]*)W)?(?:([-+]?[0-9,.]*)D)?(?:T(?:([-+]?[0-9,.]*)H)?(?:([-+]?[0-9,.]*)M)?(?:([-+]?[0-9,.]*)S)?)?$/;\n\nfunction createDuration (input, key) {\n    var duration = input,\n        // matching against regexp is expensive, do it on demand\n        match = null,\n        sign,\n        ret,\n        diffRes;\n\n    if (isDuration(input)) {\n        duration = {\n            ms : input._milliseconds,\n            d  : input._days,\n            M  : input._months\n        };\n    } else if (isNumber(input)) {\n        duration = {};\n        if (key) {\n            duration[key] = input;\n        } else {\n            duration.milliseconds = input;\n        }\n    } else if (!!(match = aspNetRegex.exec(input))) {\n        sign = (match[1] === '-') ? 
-1 : 1;\n        duration = {\n            y  : 0,\n            d  : toInt(match[DATE])                         * sign,\n            h  : toInt(match[HOUR])                         * sign,\n            m  : toInt(match[MINUTE])                       * sign,\n            s  : toInt(match[SECOND])                       * sign,\n            ms : toInt(absRound(match[MILLISECOND] * 1000)) * sign // the millisecond decimal point is included in the match\n        };\n    } else if (!!(match = isoRegex.exec(input))) {\n        sign = (match[1] === '-') ? -1 : (match[1] === '+') ? 1 : 1;\n        duration = {\n            y : parseIso(match[2], sign),\n            M : parseIso(match[3], sign),\n            w : parseIso(match[4], sign),\n            d : parseIso(match[5], sign),\n            h : parseIso(match[6], sign),\n            m : parseIso(match[7], sign),\n            s : parseIso(match[8], sign)\n        };\n    } else if (duration == null) {// checks for null or undefined\n        duration = {};\n    } else if (typeof duration === 'object' && ('from' in duration || 'to' in duration)) {\n        diffRes = momentsDifference(createLocal(duration.from), createLocal(duration.to));\n\n        duration = {};\n        duration.ms = diffRes.milliseconds;\n        duration.M = diffRes.months;\n    }\n\n    ret = new Duration(duration);\n\n    if (isDuration(input) && hasOwnProp(input, '_locale')) {\n        ret._locale = input._locale;\n    }\n\n    return ret;\n}\n\ncreateDuration.fn = Duration.prototype;\ncreateDuration.invalid = createInvalid$1;\n\nfunction parseIso (inp, sign) {\n    // We'd normally use ~~inp for this, but unfortunately it also\n    // converts floats to ints.\n    // inp may be undefined, so careful calling replace on it.\n    var res = inp && parseFloat(inp.replace(',', '.'));\n    // apply sign while we're at it\n    return (isNaN(res) ? 
0 : res) * sign;\n}\n\nfunction positiveMomentsDifference(base, other) {\n    var res = {milliseconds: 0, months: 0};\n\n    res.months = other.month() - base.month() +\n        (other.year() - base.year()) * 12;\n    if (base.clone().add(res.months, 'M').isAfter(other)) {\n        --res.months;\n    }\n\n    res.milliseconds = +other - +(base.clone().add(res.months, 'M'));\n\n    return res;\n}\n\nfunction momentsDifference(base, other) {\n    var res;\n    if (!(base.isValid() && other.isValid())) {\n        return {milliseconds: 0, months: 0};\n    }\n\n    other = cloneWithOffset(other, base);\n    if (base.isBefore(other)) {\n        res = positiveMomentsDifference(base, other);\n    } else {\n        res = positiveMomentsDifference(other, base);\n        res.milliseconds = -res.milliseconds;\n        res.months = -res.months;\n    }\n\n    return res;\n}\n\n// TODO: remove 'name' arg after deprecation is removed\nfunction createAdder(direction, name) {\n    return function (val, period) {\n        var dur, tmp;\n        //invert the arguments, but complain about it\n        if (period !== null && !isNaN(+period)) {\n            deprecateSimple(name, 'moment().' + name  + '(period, number) is deprecated. Please use moment().' + name + '(number, period). ' +\n            'See http://momentjs.com/guides/#/warnings/add-inverted-param/ for more info.');\n            tmp = val; val = period; period = tmp;\n        }\n\n        val = typeof val === 'string' ? +val : val;\n        dur = createDuration(val, period);\n        addSubtract(this, dur, direction);\n        return this;\n    };\n}\n\nfunction addSubtract (mom, duration, isAdding, updateOffset) {\n    var milliseconds = duration._milliseconds,\n        days = absRound(duration._days),\n        months = absRound(duration._months);\n\n    if (!mom.isValid()) {\n        // No op\n        return;\n    }\n\n    updateOffset = updateOffset == null ? 
true : updateOffset;\n\n    if (months) {\n        setMonth(mom, get(mom, 'Month') + months * isAdding);\n    }\n    if (days) {\n        set$1(mom, 'Date', get(mom, 'Date') + days * isAdding);\n    }\n    if (milliseconds) {\n        mom._d.setTime(mom._d.valueOf() + milliseconds * isAdding);\n    }\n    if (updateOffset) {\n        hooks.updateOffset(mom, days || months);\n    }\n}\n\nvar add      = createAdder(1, 'add');\nvar subtract = createAdder(-1, 'subtract');\n\nfunction getCalendarFormat(myMoment, now) {\n    var diff = myMoment.diff(now, 'days', true);\n    return diff < -6 ? 'sameElse' :\n            diff < -1 ? 'lastWeek' :\n            diff < 0 ? 'lastDay' :\n            diff < 1 ? 'sameDay' :\n            diff < 2 ? 'nextDay' :\n            diff < 7 ? 'nextWeek' : 'sameElse';\n}\n\nfunction calendar$1 (time, formats) {\n    // We want to compare the start of today, vs this.\n    // Getting start-of-today depends on whether we're local/utc/offset or not.\n    var now = time || createLocal(),\n        sod = cloneWithOffset(now, this).startOf('day'),\n        format = hooks.calendarFormat(this, sod) || 'sameElse';\n\n    var output = formats && (isFunction(formats[format]) ? formats[format].call(this, now) : formats[format]);\n\n    return this.format(output || this.localeData().calendar(format, this, createLocal(now)));\n}\n\nfunction clone () {\n    return new Moment(this);\n}\n\nfunction isAfter (input, units) {\n    var localInput = isMoment(input) ? input : createLocal(input);\n    if (!(this.isValid() && localInput.isValid())) {\n        return false;\n    }\n    units = normalizeUnits(!isUndefined(units) ? units : 'millisecond');\n    if (units === 'millisecond') {\n        return this.valueOf() > localInput.valueOf();\n    } else {\n        return localInput.valueOf() < this.clone().startOf(units).valueOf();\n    }\n}\n\nfunction isBefore (input, units) {\n    var localInput = isMoment(input) ? 
input : createLocal(input);\n    if (!(this.isValid() && localInput.isValid())) {\n        return false;\n    }\n    units = normalizeUnits(!isUndefined(units) ? units : 'millisecond');\n    if (units === 'millisecond') {\n        return this.valueOf() < localInput.valueOf();\n    } else {\n        return this.clone().endOf(units).valueOf() < localInput.valueOf();\n    }\n}\n\nfunction isBetween (from, to, units, inclusivity) {\n    inclusivity = inclusivity || '()';\n    return (inclusivity[0] === '(' ? this.isAfter(from, units) : !this.isBefore(from, units)) &&\n        (inclusivity[1] === ')' ? this.isBefore(to, units) : !this.isAfter(to, units));\n}\n\nfunction isSame (input, units) {\n    var localInput = isMoment(input) ? input : createLocal(input),\n        inputMs;\n    if (!(this.isValid() && localInput.isValid())) {\n        return false;\n    }\n    units = normalizeUnits(units || 'millisecond');\n    if (units === 'millisecond') {\n        return this.valueOf() === localInput.valueOf();\n    } else {\n        inputMs = localInput.valueOf();\n        return this.clone().startOf(units).valueOf() <= inputMs && inputMs <= this.clone().endOf(units).valueOf();\n    }\n}\n\nfunction isSameOrAfter (input, units) {\n    return this.isSame(input, units) || this.isAfter(input,units);\n}\n\nfunction isSameOrBefore (input, units) {\n    return this.isSame(input, units) || this.isBefore(input,units);\n}\n\nfunction diff (input, units, asFloat) {\n    var that,\n        zoneDelta,\n        delta, output;\n\n    if (!this.isValid()) {\n        return NaN;\n    }\n\n    that = cloneWithOffset(input, this);\n\n    if (!that.isValid()) {\n        return NaN;\n    }\n\n    zoneDelta = (that.utcOffset() - this.utcOffset()) * 6e4;\n\n    units = normalizeUnits(units);\n\n    switch (units) {\n        case 'year': output = monthDiff(this, that) / 12; break;\n        case 'month': output = monthDiff(this, that); break;\n        case 'quarter': output = monthDiff(this, that) / 
3; break;\n        case 'second': output = (this - that) / 1e3; break; // 1000\n        case 'minute': output = (this - that) / 6e4; break; // 1000 * 60\n        case 'hour': output = (this - that) / 36e5; break; // 1000 * 60 * 60\n        case 'day': output = (this - that - zoneDelta) / 864e5; break; // 1000 * 60 * 60 * 24, negate dst\n        case 'week': output = (this - that - zoneDelta) / 6048e5; break; // 1000 * 60 * 60 * 24 * 7, negate dst\n        default: output = this - that;\n    }\n\n    return asFloat ? output : absFloor(output);\n}\n\nfunction monthDiff (a, b) {\n    // difference in months\n    var wholeMonthDiff = ((b.year() - a.year()) * 12) + (b.month() - a.month()),\n        // b is in (anchor - 1 month, anchor + 1 month)\n        anchor = a.clone().add(wholeMonthDiff, 'months'),\n        anchor2, adjust;\n\n    if (b - anchor < 0) {\n        anchor2 = a.clone().add(wholeMonthDiff - 1, 'months');\n        // linear across the month\n        adjust = (b - anchor) / (anchor - anchor2);\n    } else {\n        anchor2 = a.clone().add(wholeMonthDiff + 1, 'months');\n        // linear across the month\n        adjust = (b - anchor) / (anchor2 - anchor);\n    }\n\n    //check for negative zero, return zero if negative zero\n    return -(wholeMonthDiff + adjust) || 0;\n}\n\nhooks.defaultFormat = 'YYYY-MM-DDTHH:mm:ssZ';\nhooks.defaultFormatUtc = 'YYYY-MM-DDTHH:mm:ss[Z]';\n\nfunction toString () {\n    return this.clone().locale('en').format('ddd MMM DD YYYY HH:mm:ss [GMT]ZZ');\n}\n\nfunction toISOString(keepOffset) {\n    if (!this.isValid()) {\n        return null;\n    }\n    var utc = keepOffset !== true;\n    var m = utc ? this.clone().utc() : this;\n    if (m.year() < 0 || m.year() > 9999) {\n        return formatMoment(m, utc ? 
'YYYYYY-MM-DD[T]HH:mm:ss.SSS[Z]' : 'YYYYYY-MM-DD[T]HH:mm:ss.SSSZ');\n    }\n    if (isFunction(Date.prototype.toISOString)) {\n        // native implementation is ~50x faster, use it when we can\n        if (utc) {\n            return this.toDate().toISOString();\n        } else {\n            return new Date(this._d.valueOf()).toISOString().replace('Z', formatMoment(m, 'Z'));\n        }\n    }\n    return formatMoment(m, utc ? 'YYYY-MM-DD[T]HH:mm:ss.SSS[Z]' : 'YYYY-MM-DD[T]HH:mm:ss.SSSZ');\n}\n\n/**\n * Return a human readable representation of a moment that can\n * also be evaluated to get a new moment which is the same\n *\n * @link https://nodejs.org/dist/latest/docs/api/util.html#util_custom_inspect_function_on_objects\n */\nfunction inspect () {\n    if (!this.isValid()) {\n        return 'moment.invalid(/* ' + this._i + ' */)';\n    }\n    var func = 'moment';\n    var zone = '';\n    if (!this.isLocal()) {\n        func = this.utcOffset() === 0 ? 'moment.utc' : 'moment.parseZone';\n        zone = 'Z';\n    }\n    var prefix = '[' + func + '(\"]';\n    var year = (0 <= this.year() && this.year() <= 9999) ? 'YYYY' : 'YYYYYY';\n    var datetime = '-MM-DD[T]HH:mm:ss.SSS';\n    var suffix = zone + '[\")]';\n\n    return this.format(prefix + year + datetime + suffix);\n}\n\nfunction format (inputString) {\n    if (!inputString) {\n        inputString = this.isUtc() ? 
hooks.defaultFormatUtc : hooks.defaultFormat;\n    }\n    var output = formatMoment(this, inputString);\n    return this.localeData().postformat(output);\n}\n\nfunction from (time, withoutSuffix) {\n    if (this.isValid() &&\n            ((isMoment(time) && time.isValid()) ||\n             createLocal(time).isValid())) {\n        return createDuration({to: this, from: time}).locale(this.locale()).humanize(!withoutSuffix);\n    } else {\n        return this.localeData().invalidDate();\n    }\n}\n\nfunction fromNow (withoutSuffix) {\n    return this.from(createLocal(), withoutSuffix);\n}\n\nfunction to (time, withoutSuffix) {\n    if (this.isValid() &&\n            ((isMoment(time) && time.isValid()) ||\n             createLocal(time).isValid())) {\n        return createDuration({from: this, to: time}).locale(this.locale()).humanize(!withoutSuffix);\n    } else {\n        return this.localeData().invalidDate();\n    }\n}\n\nfunction toNow (withoutSuffix) {\n    return this.to(createLocal(), withoutSuffix);\n}\n\n// If passed a locale key, it will set the locale for this\n// instance.  Otherwise, it will return the locale configuration\n// variables for this instance.\nfunction locale (key) {\n    var newLocaleData;\n\n    if (key === undefined) {\n        return this._locale._abbr;\n    } else {\n        newLocaleData = getLocale(key);\n        if (newLocaleData != null) {\n            this._locale = newLocaleData;\n        }\n        return this;\n    }\n}\n\nvar lang = deprecate(\n    'moment().lang() is deprecated. Instead, use moment().localeData() to get the language configuration. 
Use moment().locale() to change languages.',\n    function (key) {\n        if (key === undefined) {\n            return this.localeData();\n        } else {\n            return this.locale(key);\n        }\n    }\n);\n\nfunction localeData () {\n    return this._locale;\n}\n\nfunction startOf (units) {\n    units = normalizeUnits(units);\n    // the following switch intentionally omits break keywords\n    // to utilize falling through the cases.\n    switch (units) {\n        case 'year':\n            this.month(0);\n            /* falls through */\n        case 'quarter':\n        case 'month':\n            this.date(1);\n            /* falls through */\n        case 'week':\n        case 'isoWeek':\n        case 'day':\n        case 'date':\n            this.hours(0);\n            /* falls through */\n        case 'hour':\n            this.minutes(0);\n            /* falls through */\n        case 'minute':\n            this.seconds(0);\n            /* falls through */\n        case 'second':\n            this.milliseconds(0);\n    }\n\n    // weeks are a special case\n    if (units === 'week') {\n        this.weekday(0);\n    }\n    if (units === 'isoWeek') {\n        this.isoWeekday(1);\n    }\n\n    // quarters are also special\n    if (units === 'quarter') {\n        this.month(Math.floor(this.month() / 3) * 3);\n    }\n\n    return this;\n}\n\nfunction endOf (units) {\n    units = normalizeUnits(units);\n    if (units === undefined || units === 'millisecond') {\n        return this;\n    }\n\n    // 'date' is an alias for 'day', so it should be considered as such.\n    if (units === 'date') {\n        units = 'day';\n    }\n\n    return this.startOf(units).add(1, (units === 'isoWeek' ? 
'week' : units)).subtract(1, 'ms');\n}\n\nfunction valueOf () {\n    return this._d.valueOf() - ((this._offset || 0) * 60000);\n}\n\nfunction unix () {\n    return Math.floor(this.valueOf() / 1000);\n}\n\nfunction toDate () {\n    return new Date(this.valueOf());\n}\n\nfunction toArray () {\n    var m = this;\n    return [m.year(), m.month(), m.date(), m.hour(), m.minute(), m.second(), m.millisecond()];\n}\n\nfunction toObject () {\n    var m = this;\n    return {\n        years: m.year(),\n        months: m.month(),\n        date: m.date(),\n        hours: m.hours(),\n        minutes: m.minutes(),\n        seconds: m.seconds(),\n        milliseconds: m.milliseconds()\n    };\n}\n\nfunction toJSON () {\n    // new Date(NaN).toJSON() === null\n    return this.isValid() ? this.toISOString() : null;\n}\n\nfunction isValid$2 () {\n    return isValid(this);\n}\n\nfunction parsingFlags () {\n    return extend({}, getParsingFlags(this));\n}\n\nfunction invalidAt () {\n    return getParsingFlags(this).overflow;\n}\n\nfunction creationData() {\n    return {\n        input: this._i,\n        format: this._f,\n        locale: this._locale,\n        isUTC: this._isUTC,\n        strict: this._strict\n    };\n}\n\n// FORMATTING\n\naddFormatToken(0, ['gg', 2], 0, function () {\n    return this.weekYear() % 100;\n});\n\naddFormatToken(0, ['GG', 2], 0, function () {\n    return this.isoWeekYear() % 100;\n});\n\nfunction addWeekYearFormatToken (token, getter) {\n    addFormatToken(0, [token, token.length], 0, getter);\n}\n\naddWeekYearFormatToken('gggg',     'weekYear');\naddWeekYearFormatToken('ggggg',    'weekYear');\naddWeekYearFormatToken('GGGG',  'isoWeekYear');\naddWeekYearFormatToken('GGGGG', 'isoWeekYear');\n\n// ALIASES\n\naddUnitAlias('weekYear', 'gg');\naddUnitAlias('isoWeekYear', 'GG');\n\n// PRIORITY\n\naddUnitPriority('weekYear', 1);\naddUnitPriority('isoWeekYear', 1);\n\n\n// PARSING\n\naddRegexToken('G',      matchSigned);\naddRegexToken('g',      
matchSigned);\naddRegexToken('GG',     match1to2, match2);\naddRegexToken('gg',     match1to2, match2);\naddRegexToken('GGGG',   match1to4, match4);\naddRegexToken('gggg',   match1to4, match4);\naddRegexToken('GGGGG',  match1to6, match6);\naddRegexToken('ggggg',  match1to6, match6);\n\naddWeekParseToken(['gggg', 'ggggg', 'GGGG', 'GGGGG'], function (input, week, config, token) {\n    week[token.substr(0, 2)] = toInt(input);\n});\n\naddWeekParseToken(['gg', 'GG'], function (input, week, config, token) {\n    week[token] = hooks.parseTwoDigitYear(input);\n});\n\n// MOMENTS\n\nfunction getSetWeekYear (input) {\n    return getSetWeekYearHelper.call(this,\n            input,\n            this.week(),\n            this.weekday(),\n            this.localeData()._week.dow,\n            this.localeData()._week.doy);\n}\n\nfunction getSetISOWeekYear (input) {\n    return getSetWeekYearHelper.call(this,\n            input, this.isoWeek(), this.isoWeekday(), 1, 4);\n}\n\nfunction getISOWeeksInYear () {\n    return weeksInYear(this.year(), 1, 4);\n}\n\nfunction getWeeksInYear () {\n    var weekInfo = this.localeData()._week;\n    return weeksInYear(this.year(), weekInfo.dow, weekInfo.doy);\n}\n\nfunction getSetWeekYearHelper(input, week, weekday, dow, doy) {\n    var weeksTarget;\n    if (input == null) {\n        return weekOfYear(this, dow, doy).year;\n    } else {\n        weeksTarget = weeksInYear(input, dow, doy);\n        if (week > weeksTarget) {\n            week = weeksTarget;\n        }\n        return setWeekAll.call(this, input, week, weekday, dow, doy);\n    }\n}\n\nfunction setWeekAll(weekYear, week, weekday, dow, doy) {\n    var dayOfYearData = dayOfYearFromWeeks(weekYear, week, weekday, dow, doy),\n        date = createUTCDate(dayOfYearData.year, 0, dayOfYearData.dayOfYear);\n\n    this.year(date.getUTCFullYear());\n    this.month(date.getUTCMonth());\n    this.date(date.getUTCDate());\n    return this;\n}\n\n// FORMATTING\n\naddFormatToken('Q', 0, 'Qo', 
'quarter');\n\n// ALIASES\n\naddUnitAlias('quarter', 'Q');\n\n// PRIORITY\n\naddUnitPriority('quarter', 7);\n\n// PARSING\n\naddRegexToken('Q', match1);\naddParseToken('Q', function (input, array) {\n    array[MONTH] = (toInt(input) - 1) * 3;\n});\n\n// MOMENTS\n\nfunction getSetQuarter (input) {\n    return input == null ? Math.ceil((this.month() + 1) / 3) : this.month((input - 1) * 3 + this.month() % 3);\n}\n\n// FORMATTING\n\naddFormatToken('D', ['DD', 2], 'Do', 'date');\n\n// ALIASES\n\naddUnitAlias('date', 'D');\n\n// PRIOROITY\naddUnitPriority('date', 9);\n\n// PARSING\n\naddRegexToken('D',  match1to2);\naddRegexToken('DD', match1to2, match2);\naddRegexToken('Do', function (isStrict, locale) {\n    // TODO: Remove \"ordinalParse\" fallback in next major release.\n    return isStrict ?\n      (locale._dayOfMonthOrdinalParse || locale._ordinalParse) :\n      locale._dayOfMonthOrdinalParseLenient;\n});\n\naddParseToken(['D', 'DD'], DATE);\naddParseToken('Do', function (input, array) {\n    array[DATE] = toInt(input.match(match1to2)[0]);\n});\n\n// MOMENTS\n\nvar getSetDayOfMonth = makeGetSet('Date', true);\n\n// FORMATTING\n\naddFormatToken('DDD', ['DDDD', 3], 'DDDo', 'dayOfYear');\n\n// ALIASES\n\naddUnitAlias('dayOfYear', 'DDD');\n\n// PRIORITY\naddUnitPriority('dayOfYear', 4);\n\n// PARSING\n\naddRegexToken('DDD',  match1to3);\naddRegexToken('DDDD', match3);\naddParseToken(['DDD', 'DDDD'], function (input, array, config) {\n    config._dayOfYear = toInt(input);\n});\n\n// HELPERS\n\n// MOMENTS\n\nfunction getSetDayOfYear (input) {\n    var dayOfYear = Math.round((this.clone().startOf('day') - this.clone().startOf('year')) / 864e5) + 1;\n    return input == null ? 
dayOfYear : this.add((input - dayOfYear), 'd');\n}\n\n// FORMATTING\n\naddFormatToken('m', ['mm', 2], 0, 'minute');\n\n// ALIASES\n\naddUnitAlias('minute', 'm');\n\n// PRIORITY\n\naddUnitPriority('minute', 14);\n\n// PARSING\n\naddRegexToken('m',  match1to2);\naddRegexToken('mm', match1to2, match2);\naddParseToken(['m', 'mm'], MINUTE);\n\n// MOMENTS\n\nvar getSetMinute = makeGetSet('Minutes', false);\n\n// FORMATTING\n\naddFormatToken('s', ['ss', 2], 0, 'second');\n\n// ALIASES\n\naddUnitAlias('second', 's');\n\n// PRIORITY\n\naddUnitPriority('second', 15);\n\n// PARSING\n\naddRegexToken('s',  match1to2);\naddRegexToken('ss', match1to2, match2);\naddParseToken(['s', 'ss'], SECOND);\n\n// MOMENTS\n\nvar getSetSecond = makeGetSet('Seconds', false);\n\n// FORMATTING\n\naddFormatToken('S', 0, 0, function () {\n    return ~~(this.millisecond() / 100);\n});\n\naddFormatToken(0, ['SS', 2], 0, function () {\n    return ~~(this.millisecond() / 10);\n});\n\naddFormatToken(0, ['SSS', 3], 0, 'millisecond');\naddFormatToken(0, ['SSSS', 4], 0, function () {\n    return this.millisecond() * 10;\n});\naddFormatToken(0, ['SSSSS', 5], 0, function () {\n    return this.millisecond() * 100;\n});\naddFormatToken(0, ['SSSSSS', 6], 0, function () {\n    return this.millisecond() * 1000;\n});\naddFormatToken(0, ['SSSSSSS', 7], 0, function () {\n    return this.millisecond() * 10000;\n});\naddFormatToken(0, ['SSSSSSSS', 8], 0, function () {\n    return this.millisecond() * 100000;\n});\naddFormatToken(0, ['SSSSSSSSS', 9], 0, function () {\n    return this.millisecond() * 1000000;\n});\n\n\n// ALIASES\n\naddUnitAlias('millisecond', 'ms');\n\n// PRIORITY\n\naddUnitPriority('millisecond', 16);\n\n// PARSING\n\naddRegexToken('S',    match1to3, match1);\naddRegexToken('SS',   match1to3, match2);\naddRegexToken('SSS',  match1to3, match3);\n\nvar token;\nfor (token = 'SSSS'; token.length <= 9; token += 'S') {\n    addRegexToken(token, matchUnsigned);\n}\n\nfunction parseMs(input, array) {\n    
array[MILLISECOND] = toInt(('0.' + input) * 1000);\n}\n\nfor (token = 'S'; token.length <= 9; token += 'S') {\n    addParseToken(token, parseMs);\n}\n// MOMENTS\n\nvar getSetMillisecond = makeGetSet('Milliseconds', false);\n\n// FORMATTING\n\naddFormatToken('z',  0, 0, 'zoneAbbr');\naddFormatToken('zz', 0, 0, 'zoneName');\n\n// MOMENTS\n\nfunction getZoneAbbr () {\n    return this._isUTC ? 'UTC' : '';\n}\n\nfunction getZoneName () {\n    return this._isUTC ? 'Coordinated Universal Time' : '';\n}\n\nvar proto = Moment.prototype;\n\nproto.add               = add;\nproto.calendar          = calendar$1;\nproto.clone             = clone;\nproto.diff              = diff;\nproto.endOf             = endOf;\nproto.format            = format;\nproto.from              = from;\nproto.fromNow           = fromNow;\nproto.to                = to;\nproto.toNow             = toNow;\nproto.get               = stringGet;\nproto.invalidAt         = invalidAt;\nproto.isAfter           = isAfter;\nproto.isBefore          = isBefore;\nproto.isBetween         = isBetween;\nproto.isSame            = isSame;\nproto.isSameOrAfter     = isSameOrAfter;\nproto.isSameOrBefore    = isSameOrBefore;\nproto.isValid           = isValid$2;\nproto.lang              = lang;\nproto.locale            = locale;\nproto.localeData        = localeData;\nproto.max               = prototypeMax;\nproto.min               = prototypeMin;\nproto.parsingFlags      = parsingFlags;\nproto.set               = stringSet;\nproto.startOf           = startOf;\nproto.subtract          = subtract;\nproto.toArray           = toArray;\nproto.toObject          = toObject;\nproto.toDate            = toDate;\nproto.toISOString       = toISOString;\nproto.inspect           = inspect;\nproto.toJSON            = toJSON;\nproto.toString          = toString;\nproto.unix              = unix;\nproto.valueOf           = valueOf;\nproto.creationData      = creationData;\n\n// Year\nproto.year       = getSetYear;\nproto.isLeapYear = 
getIsLeapYear;\n\n// Week Year\nproto.weekYear    = getSetWeekYear;\nproto.isoWeekYear = getSetISOWeekYear;\n\n// Quarter\nproto.quarter = proto.quarters = getSetQuarter;\n\n// Month\nproto.month       = getSetMonth;\nproto.daysInMonth = getDaysInMonth;\n\n// Week\nproto.week           = proto.weeks        = getSetWeek;\nproto.isoWeek        = proto.isoWeeks     = getSetISOWeek;\nproto.weeksInYear    = getWeeksInYear;\nproto.isoWeeksInYear = getISOWeeksInYear;\n\n// Day\nproto.date       = getSetDayOfMonth;\nproto.day        = proto.days             = getSetDayOfWeek;\nproto.weekday    = getSetLocaleDayOfWeek;\nproto.isoWeekday = getSetISODayOfWeek;\nproto.dayOfYear  = getSetDayOfYear;\n\n// Hour\nproto.hour = proto.hours = getSetHour;\n\n// Minute\nproto.minute = proto.minutes = getSetMinute;\n\n// Second\nproto.second = proto.seconds = getSetSecond;\n\n// Millisecond\nproto.millisecond = proto.milliseconds = getSetMillisecond;\n\n// Offset\nproto.utcOffset            = getSetOffset;\nproto.utc                  = setOffsetToUTC;\nproto.local                = setOffsetToLocal;\nproto.parseZone            = setOffsetToParsedOffset;\nproto.hasAlignedHourOffset = hasAlignedHourOffset;\nproto.isDST                = isDaylightSavingTime;\nproto.isLocal              = isLocal;\nproto.isUtcOffset          = isUtcOffset;\nproto.isUtc                = isUtc;\nproto.isUTC                = isUtc;\n\n// Timezone\nproto.zoneAbbr = getZoneAbbr;\nproto.zoneName = getZoneName;\n\n// Deprecations\nproto.dates  = deprecate('dates accessor is deprecated. Use date instead.', getSetDayOfMonth);\nproto.months = deprecate('months accessor is deprecated. Use month instead', getSetMonth);\nproto.years  = deprecate('years accessor is deprecated. Use year instead', getSetYear);\nproto.zone   = deprecate('moment().zone is deprecated, use moment().utcOffset instead. http://momentjs.com/guides/#/warnings/zone/', getSetZone);\nproto.isDSTShifted = deprecate('isDSTShifted is deprecated. 
See http://momentjs.com/guides/#/warnings/dst-shifted/ for more information', isDaylightSavingTimeShifted);\n\nfunction createUnix (input) {\n    return createLocal(input * 1000);\n}\n\nfunction createInZone () {\n    return createLocal.apply(null, arguments).parseZone();\n}\n\nfunction preParsePostFormat (string) {\n    return string;\n}\n\nvar proto$1 = Locale.prototype;\n\nproto$1.calendar        = calendar;\nproto$1.longDateFormat  = longDateFormat;\nproto$1.invalidDate     = invalidDate;\nproto$1.ordinal         = ordinal;\nproto$1.preparse        = preParsePostFormat;\nproto$1.postformat      = preParsePostFormat;\nproto$1.relativeTime    = relativeTime;\nproto$1.pastFuture      = pastFuture;\nproto$1.set             = set;\n\n// Month\nproto$1.months            =        localeMonths;\nproto$1.monthsShort       =        localeMonthsShort;\nproto$1.monthsParse       =        localeMonthsParse;\nproto$1.monthsRegex       = monthsRegex;\nproto$1.monthsShortRegex  = monthsShortRegex;\n\n// Week\nproto$1.week = localeWeek;\nproto$1.firstDayOfYear = localeFirstDayOfYear;\nproto$1.firstDayOfWeek = localeFirstDayOfWeek;\n\n// Day of Week\nproto$1.weekdays       =        localeWeekdays;\nproto$1.weekdaysMin    =        localeWeekdaysMin;\nproto$1.weekdaysShort  =        localeWeekdaysShort;\nproto$1.weekdaysParse  =        localeWeekdaysParse;\n\nproto$1.weekdaysRegex       =        weekdaysRegex;\nproto$1.weekdaysShortRegex  =        weekdaysShortRegex;\nproto$1.weekdaysMinRegex    =        weekdaysMinRegex;\n\n// Hours\nproto$1.isPM = localeIsPM;\nproto$1.meridiem = localeMeridiem;\n\nfunction get$1 (format, index, field, setter) {\n    var locale = getLocale();\n    var utc = createUTC().set(setter, index);\n    return locale[field](utc, format);\n}\n\nfunction listMonthsImpl (format, index, field) {\n    if (isNumber(format)) {\n        index = format;\n        format = undefined;\n    }\n\n    format = format || '';\n\n    if (index != null) {\n        return 
get$1(format, index, field, 'month');\n    }\n\n    var i;\n    var out = [];\n    for (i = 0; i < 12; i++) {\n        out[i] = get$1(format, i, field, 'month');\n    }\n    return out;\n}\n\n// ()\n// (5)\n// (fmt, 5)\n// (fmt)\n// (true)\n// (true, 5)\n// (true, fmt, 5)\n// (true, fmt)\nfunction listWeekdaysImpl (localeSorted, format, index, field) {\n    if (typeof localeSorted === 'boolean') {\n        if (isNumber(format)) {\n            index = format;\n            format = undefined;\n        }\n\n        format = format || '';\n    } else {\n        format = localeSorted;\n        index = format;\n        localeSorted = false;\n\n        if (isNumber(format)) {\n            index = format;\n            format = undefined;\n        }\n\n        format = format || '';\n    }\n\n    var locale = getLocale(),\n        shift = localeSorted ? locale._week.dow : 0;\n\n    if (index != null) {\n        return get$1(format, (index + shift) % 7, field, 'day');\n    }\n\n    var i;\n    var out = [];\n    for (i = 0; i < 7; i++) {\n        out[i] = get$1(format, (i + shift) % 7, field, 'day');\n    }\n    return out;\n}\n\nfunction listMonths (format, index) {\n    return listMonthsImpl(format, index, 'months');\n}\n\nfunction listMonthsShort (format, index) {\n    return listMonthsImpl(format, index, 'monthsShort');\n}\n\nfunction listWeekdays (localeSorted, format, index) {\n    return listWeekdaysImpl(localeSorted, format, index, 'weekdays');\n}\n\nfunction listWeekdaysShort (localeSorted, format, index) {\n    return listWeekdaysImpl(localeSorted, format, index, 'weekdaysShort');\n}\n\nfunction listWeekdaysMin (localeSorted, format, index) {\n    return listWeekdaysImpl(localeSorted, format, index, 'weekdaysMin');\n}\n\ngetSetGlobalLocale('en', {\n    dayOfMonthOrdinalParse: /\\d{1,2}(th|st|nd|rd)/,\n    ordinal : function (number) {\n        var b = number % 10,\n            output = (toInt(number % 100 / 10) === 1) ? 'th' :\n            (b === 1) ? 
'st' :\n            (b === 2) ? 'nd' :\n            (b === 3) ? 'rd' : 'th';\n        return number + output;\n    }\n});\n\n// Side effect imports\nhooks.lang = deprecate('moment.lang is deprecated. Use moment.locale instead.', getSetGlobalLocale);\nhooks.langData = deprecate('moment.langData is deprecated. Use moment.localeData instead.', getLocale);\n\nvar mathAbs = Math.abs;\n\nfunction abs () {\n    var data           = this._data;\n\n    this._milliseconds = mathAbs(this._milliseconds);\n    this._days         = mathAbs(this._days);\n    this._months       = mathAbs(this._months);\n\n    data.milliseconds  = mathAbs(data.milliseconds);\n    data.seconds       = mathAbs(data.seconds);\n    data.minutes       = mathAbs(data.minutes);\n    data.hours         = mathAbs(data.hours);\n    data.months        = mathAbs(data.months);\n    data.years         = mathAbs(data.years);\n\n    return this;\n}\n\nfunction addSubtract$1 (duration, input, value, direction) {\n    var other = createDuration(input, value);\n\n    duration._milliseconds += direction * other._milliseconds;\n    duration._days         += direction * other._days;\n    duration._months       += direction * other._months;\n\n    return duration._bubble();\n}\n\n// supports only 2.0-style add(1, 's') or add(duration)\nfunction add$1 (input, value) {\n    return addSubtract$1(this, input, value, 1);\n}\n\n// supports only 2.0-style subtract(1, 's') or subtract(duration)\nfunction subtract$1 (input, value) {\n    return addSubtract$1(this, input, value, -1);\n}\n\nfunction absCeil (number) {\n    if (number < 0) {\n        return Math.floor(number);\n    } else {\n        return Math.ceil(number);\n    }\n}\n\nfunction bubble () {\n    var milliseconds = this._milliseconds;\n    var days         = this._days;\n    var months       = this._months;\n    var data         = this._data;\n    var seconds, minutes, hours, years, monthsFromDays;\n\n    // if we have a mix of positive and negative values, bubble 
down first\n    // check: https://github.com/moment/moment/issues/2166\n    if (!((milliseconds >= 0 && days >= 0 && months >= 0) ||\n            (milliseconds <= 0 && days <= 0 && months <= 0))) {\n        milliseconds += absCeil(monthsToDays(months) + days) * 864e5;\n        days = 0;\n        months = 0;\n    }\n\n    // The following code bubbles up values, see the tests for\n    // examples of what that means.\n    data.milliseconds = milliseconds % 1000;\n\n    seconds           = absFloor(milliseconds / 1000);\n    data.seconds      = seconds % 60;\n\n    minutes           = absFloor(seconds / 60);\n    data.minutes      = minutes % 60;\n\n    hours             = absFloor(minutes / 60);\n    data.hours        = hours % 24;\n\n    days += absFloor(hours / 24);\n\n    // convert days to months\n    monthsFromDays = absFloor(daysToMonths(days));\n    months += monthsFromDays;\n    days -= absCeil(monthsToDays(monthsFromDays));\n\n    // 12 months -> 1 year\n    years = absFloor(months / 12);\n    months %= 12;\n\n    data.days   = days;\n    data.months = months;\n    data.years  = years;\n\n    return this;\n}\n\nfunction daysToMonths (days) {\n    // 400 years have 146097 days (taking into account leap year rules)\n    // 400 years have 12 months === 4800\n    return days * 4800 / 146097;\n}\n\nfunction monthsToDays (months) {\n    // the reverse of daysToMonths\n    return months * 146097 / 4800;\n}\n\nfunction as (units) {\n    if (!this.isValid()) {\n        return NaN;\n    }\n    var days;\n    var months;\n    var milliseconds = this._milliseconds;\n\n    units = normalizeUnits(units);\n\n    if (units === 'month' || units === 'year') {\n        days   = this._days   + milliseconds / 864e5;\n        months = this._months + daysToMonths(days);\n        return units === 'month' ? 
months : months / 12;\n    } else {\n        // handle milliseconds separately because of floating point math errors (issue #1867)\n        days = this._days + Math.round(monthsToDays(this._months));\n        switch (units) {\n            case 'week'   : return days / 7     + milliseconds / 6048e5;\n            case 'day'    : return days         + milliseconds / 864e5;\n            case 'hour'   : return days * 24    + milliseconds / 36e5;\n            case 'minute' : return days * 1440  + milliseconds / 6e4;\n            case 'second' : return days * 86400 + milliseconds / 1000;\n            // Math.floor prevents floating point math errors here\n            case 'millisecond': return Math.floor(days * 864e5) + milliseconds;\n            default: throw new Error('Unknown unit ' + units);\n        }\n    }\n}\n\n// TODO: Use this.as('ms')?\nfunction valueOf$1 () {\n    if (!this.isValid()) {\n        return NaN;\n    }\n    return (\n        this._milliseconds +\n        this._days * 864e5 +\n        (this._months % 12) * 2592e6 +\n        toInt(this._months / 12) * 31536e6\n    );\n}\n\nfunction makeAs (alias) {\n    return function () {\n        return this.as(alias);\n    };\n}\n\nvar asMilliseconds = makeAs('ms');\nvar asSeconds      = makeAs('s');\nvar asMinutes      = makeAs('m');\nvar asHours        = makeAs('h');\nvar asDays         = makeAs('d');\nvar asWeeks        = makeAs('w');\nvar asMonths       = makeAs('M');\nvar asYears        = makeAs('y');\n\nfunction clone$1 () {\n    return createDuration(this);\n}\n\nfunction get$2 (units) {\n    units = normalizeUnits(units);\n    return this.isValid() ? this[units + 's']() : NaN;\n}\n\nfunction makeGetter(name) {\n    return function () {\n        return this.isValid() ? 
this._data[name] : NaN;\n    };\n}\n\nvar milliseconds = makeGetter('milliseconds');\nvar seconds      = makeGetter('seconds');\nvar minutes      = makeGetter('minutes');\nvar hours        = makeGetter('hours');\nvar days         = makeGetter('days');\nvar months       = makeGetter('months');\nvar years        = makeGetter('years');\n\nfunction weeks () {\n    return absFloor(this.days() / 7);\n}\n\nvar round = Math.round;\nvar thresholds = {\n    ss: 44,         // a few seconds to seconds\n    s : 45,         // seconds to minute\n    m : 45,         // minutes to hour\n    h : 22,         // hours to day\n    d : 26,         // days to month\n    M : 11          // months to year\n};\n\n// helper function for moment.fn.from, moment.fn.fromNow, and moment.duration.fn.humanize\nfunction substituteTimeAgo(string, number, withoutSuffix, isFuture, locale) {\n    return locale.relativeTime(number || 1, !!withoutSuffix, string, isFuture);\n}\n\nfunction relativeTime$1 (posNegDuration, withoutSuffix, locale) {\n    var duration = createDuration(posNegDuration).abs();\n    var seconds  = round(duration.as('s'));\n    var minutes  = round(duration.as('m'));\n    var hours    = round(duration.as('h'));\n    var days     = round(duration.as('d'));\n    var months   = round(duration.as('M'));\n    var years    = round(duration.as('y'));\n\n    var a = seconds <= thresholds.ss && ['s', seconds]  ||\n            seconds < thresholds.s   && ['ss', seconds] ||\n            minutes <= 1             && ['m']           ||\n            minutes < thresholds.m   && ['mm', minutes] ||\n            hours   <= 1             && ['h']           ||\n            hours   < thresholds.h   && ['hh', hours]   ||\n            days    <= 1             && ['d']           ||\n            days    < thresholds.d   && ['dd', days]    ||\n            months  <= 1             && ['M']           ||\n            months  < thresholds.M   && ['MM', months]  ||\n            years   <= 1             && ['y']   
        || ['yy', years];\n\n    a[2] = withoutSuffix;\n    a[3] = +posNegDuration > 0;\n    a[4] = locale;\n    return substituteTimeAgo.apply(null, a);\n}\n\n// This function allows you to set the rounding function for relative time strings\nfunction getSetRelativeTimeRounding (roundingFunction) {\n    if (roundingFunction === undefined) {\n        return round;\n    }\n    if (typeof(roundingFunction) === 'function') {\n        round = roundingFunction;\n        return true;\n    }\n    return false;\n}\n\n// This function allows you to set a threshold for relative time strings\nfunction getSetRelativeTimeThreshold (threshold, limit) {\n    if (thresholds[threshold] === undefined) {\n        return false;\n    }\n    if (limit === undefined) {\n        return thresholds[threshold];\n    }\n    thresholds[threshold] = limit;\n    if (threshold === 's') {\n        thresholds.ss = limit - 1;\n    }\n    return true;\n}\n\nfunction humanize (withSuffix) {\n    if (!this.isValid()) {\n        return this.localeData().invalidDate();\n    }\n\n    var locale = this.localeData();\n    var output = relativeTime$1(this, !withSuffix, locale);\n\n    if (withSuffix) {\n        output = locale.pastFuture(+this, output);\n    }\n\n    return locale.postformat(output);\n}\n\nvar abs$1 = Math.abs;\n\nfunction sign(x) {\n    return ((x > 0) - (x < 0)) || +x;\n}\n\nfunction toISOString$1() {\n    // for ISO strings we do not use the normal bubbling rules:\n    //  * milliseconds bubble up until they become hours\n    //  * days do not bubble at all\n    //  * months bubble up until they become years\n    // This is because there is no context-free conversion between hours and days\n    // (think of clock changes)\n    // and also not between days and months (28-31 days per month)\n    if (!this.isValid()) {\n        return this.localeData().invalidDate();\n    }\n\n    var seconds = abs$1(this._milliseconds) / 1000;\n    var days         = abs$1(this._days);\n    var months       
= abs$1(this._months);\n    var minutes, hours, years;\n\n    // 3600 seconds -> 60 minutes -> 1 hour\n    minutes           = absFloor(seconds / 60);\n    hours             = absFloor(minutes / 60);\n    seconds %= 60;\n    minutes %= 60;\n\n    // 12 months -> 1 year\n    years  = absFloor(months / 12);\n    months %= 12;\n\n\n    // inspired by https://github.com/dordille/moment-isoduration/blob/master/moment.isoduration.js\n    var Y = years;\n    var M = months;\n    var D = days;\n    var h = hours;\n    var m = minutes;\n    var s = seconds ? seconds.toFixed(3).replace(/\\.?0+$/, '') : '';\n    var total = this.asSeconds();\n\n    if (!total) {\n        // this is the same as C#'s (Noda) and python (isodate)...\n        // but not other JS (goog.date)\n        return 'P0D';\n    }\n\n    var totalSign = total < 0 ? '-' : '';\n    var ymSign = sign(this._months) !== sign(total) ? '-' : '';\n    var daysSign = sign(this._days) !== sign(total) ? '-' : '';\n    var hmsSign = sign(this._milliseconds) !== sign(total) ? '-' : '';\n\n    return totalSign + 'P' +\n        (Y ? ymSign + Y + 'Y' : '') +\n        (M ? ymSign + M + 'M' : '') +\n        (D ? daysSign + D + 'D' : '') +\n        ((h || m || s) ? 'T' : '') +\n        (h ? hmsSign + h + 'H' : '') +\n        (m ? hmsSign + m + 'M' : '') +\n        (s ? 
hmsSign + s + 'S' : '');\n}\n\nvar proto$2 = Duration.prototype;\n\nproto$2.isValid        = isValid$1;\nproto$2.abs            = abs;\nproto$2.add            = add$1;\nproto$2.subtract       = subtract$1;\nproto$2.as             = as;\nproto$2.asMilliseconds = asMilliseconds;\nproto$2.asSeconds      = asSeconds;\nproto$2.asMinutes      = asMinutes;\nproto$2.asHours        = asHours;\nproto$2.asDays         = asDays;\nproto$2.asWeeks        = asWeeks;\nproto$2.asMonths       = asMonths;\nproto$2.asYears        = asYears;\nproto$2.valueOf        = valueOf$1;\nproto$2._bubble        = bubble;\nproto$2.clone          = clone$1;\nproto$2.get            = get$2;\nproto$2.milliseconds   = milliseconds;\nproto$2.seconds        = seconds;\nproto$2.minutes        = minutes;\nproto$2.hours          = hours;\nproto$2.days           = days;\nproto$2.weeks          = weeks;\nproto$2.months         = months;\nproto$2.years          = years;\nproto$2.humanize       = humanize;\nproto$2.toISOString    = toISOString$1;\nproto$2.toString       = toISOString$1;\nproto$2.toJSON         = toISOString$1;\nproto$2.locale         = locale;\nproto$2.localeData     = localeData;\n\n// Deprecations\nproto$2.toIsoString = deprecate('toIsoString() is deprecated. 
Please use toISOString() instead (notice the capitals)', toISOString$1);\nproto$2.lang = lang;\n\n// Side effect imports\n\n// FORMATTING\n\naddFormatToken('X', 0, 0, 'unix');\naddFormatToken('x', 0, 0, 'valueOf');\n\n// PARSING\n\naddRegexToken('x', matchSigned);\naddRegexToken('X', matchTimestamp);\naddParseToken('X', function (input, array, config) {\n    config._d = new Date(parseFloat(input, 10) * 1000);\n});\naddParseToken('x', function (input, array, config) {\n    config._d = new Date(toInt(input));\n});\n\n// Side effect imports\n\n\nhooks.version = '2.20.1';\n\nsetHookCallback(createLocal);\n\nhooks.fn                    = proto;\nhooks.min                   = min;\nhooks.max                   = max;\nhooks.now                   = now;\nhooks.utc                   = createUTC;\nhooks.unix                  = createUnix;\nhooks.months                = listMonths;\nhooks.isDate                = isDate;\nhooks.locale                = getSetGlobalLocale;\nhooks.invalid               = createInvalid;\nhooks.duration              = createDuration;\nhooks.isMoment              = isMoment;\nhooks.weekdays              = listWeekdays;\nhooks.parseZone             = createInZone;\nhooks.localeData            = getLocale;\nhooks.isDuration            = isDuration;\nhooks.monthsShort           = listMonthsShort;\nhooks.weekdaysMin           = listWeekdaysMin;\nhooks.defineLocale          = defineLocale;\nhooks.updateLocale          = updateLocale;\nhooks.locales               = listLocales;\nhooks.weekdaysShort         = listWeekdaysShort;\nhooks.normalizeUnits        = normalizeUnits;\nhooks.relativeTimeRounding  = getSetRelativeTimeRounding;\nhooks.relativeTimeThreshold = getSetRelativeTimeThreshold;\nhooks.calendarFormat        = getCalendarFormat;\nhooks.prototype             = proto;\n\n// currently HTML5 input type only supports 24-hour formats\nhooks.HTML5_FMT = {\n    DATETIME_LOCAL: 'YYYY-MM-DDTHH:mm',             // <input type=\"datetime-local\" />\n    
DATETIME_LOCAL_SECONDS: 'YYYY-MM-DDTHH:mm:ss',  // <input type=\"datetime-local\" step=\"1\" />\n    DATETIME_LOCAL_MS: 'YYYY-MM-DDTHH:mm:ss.SSS',   // <input type=\"datetime-local\" step=\"0.001\" />\n    DATE: 'YYYY-MM-DD',                             // <input type=\"date\" />\n    TIME: 'HH:mm',                                  // <input type=\"time\" />\n    TIME_SECONDS: 'HH:mm:ss',                       // <input type=\"time\" step=\"1\" />\n    TIME_MS: 'HH:mm:ss.SSS',                        // <input type=\"time\" step=\"0.001\" />\n    WEEK: 'YYYY-[W]WW',                             // <input type=\"week\" />\n    MONTH: 'YYYY-MM'                                // <input type=\"month\" />\n};\n\nreturn hooks;\n\n})));\n\n},{}],7:[function(require,module,exports){\n/**\n * @namespace Chart\n */\nvar Chart = require(29)();\n\nChart.helpers = require(45);\n\n// @todo dispatch these helpers into appropriated helpers/helpers.* file and write unit tests!\nrequire(27)(Chart);\n\nChart.defaults = require(25);\nChart.Element = require(26);\nChart.elements = require(40);\nChart.Interaction = require(28);\nChart.layouts = require(30);\nChart.platform = require(48);\nChart.plugins = require(31);\nChart.Ticks = require(34);\n\nrequire(22)(Chart);\nrequire(23)(Chart);\nrequire(24)(Chart);\nrequire(33)(Chart);\nrequire(32)(Chart);\nrequire(35)(Chart);\n\nrequire(55)(Chart);\nrequire(53)(Chart);\nrequire(54)(Chart);\nrequire(56)(Chart);\nrequire(57)(Chart);\nrequire(58)(Chart);\n\n// Controllers must be loaded after elements\n// See Chart.core.datasetController.dataElementType\nrequire(15)(Chart);\nrequire(16)(Chart);\nrequire(17)(Chart);\nrequire(18)(Chart);\nrequire(19)(Chart);\nrequire(20)(Chart);\nrequire(21)(Chart);\n\nrequire(8)(Chart);\nrequire(9)(Chart);\nrequire(10)(Chart);\nrequire(11)(Chart);\nrequire(12)(Chart);\nrequire(13)(Chart);\nrequire(14)(Chart);\n\n// Loading built-it plugins\nvar plugins = require(49);\nfor (var k in plugins) {\n\tif 
(plugins.hasOwnProperty(k)) {\n\t\tChart.plugins.register(plugins[k]);\n\t}\n}\n\nChart.platform.initialize();\n\nmodule.exports = Chart;\nif (typeof window !== 'undefined') {\n\twindow.Chart = Chart;\n}\n\n// DEPRECATIONS\n\n/**\n * Provided for backward compatibility, not available anymore\n * @namespace Chart.Legend\n * @deprecated since version 2.1.5\n * @todo remove at version 3\n * @private\n */\nChart.Legend = plugins.legend._element;\n\n/**\n * Provided for backward compatibility, not available anymore\n * @namespace Chart.Title\n * @deprecated since version 2.1.5\n * @todo remove at version 3\n * @private\n */\nChart.Title = plugins.title._element;\n\n/**\n * Provided for backward compatibility, use Chart.plugins instead\n * @namespace Chart.pluginService\n * @deprecated since version 2.1.5\n * @todo remove at version 3\n * @private\n */\nChart.pluginService = Chart.plugins;\n\n/**\n * Provided for backward compatibility, inheriting from Chart.PlugingBase has no\n * effect, instead simply create/register plugins via plain JavaScript objects.\n * @interface Chart.PluginBase\n * @deprecated since version 2.5.0\n * @todo remove at version 3\n * @private\n */\nChart.PluginBase = Chart.Element.extend({});\n\n/**\n * Provided for backward compatibility, use Chart.helpers.canvas instead.\n * @namespace Chart.canvasHelpers\n * @deprecated since version 2.6.0\n * @todo remove at version 3\n * @private\n */\nChart.canvasHelpers = Chart.helpers.canvas;\n\n/**\n * Provided for backward compatibility, use Chart.layouts instead.\n * @namespace Chart.layoutService\n * @deprecated since version 2.8.0\n * @todo remove at version 3\n * @private\n */\nChart.layoutService = 
Chart.layouts;\n\n},{\"10\":10,\"11\":11,\"12\":12,\"13\":13,\"14\":14,\"15\":15,\"16\":16,\"17\":17,\"18\":18,\"19\":19,\"20\":20,\"21\":21,\"22\":22,\"23\":23,\"24\":24,\"25\":25,\"26\":26,\"27\":27,\"28\":28,\"29\":29,\"30\":30,\"31\":31,\"32\":32,\"33\":33,\"34\":34,\"35\":35,\"40\":40,\"45\":45,\"48\":48,\"49\":49,\"53\":53,\"54\":54,\"55\":55,\"56\":56,\"57\":57,\"58\":58,\"8\":8,\"9\":9}],8:[function(require,module,exports){\n'use strict';\n\nmodule.exports = function(Chart) {\n\n\tChart.Bar = function(context, config) {\n\t\tconfig.type = 'bar';\n\n\t\treturn new Chart(context, config);\n\t};\n\n};\n\n},{}],9:[function(require,module,exports){\n'use strict';\n\nmodule.exports = function(Chart) {\n\n\tChart.Bubble = function(context, config) {\n\t\tconfig.type = 'bubble';\n\t\treturn new Chart(context, config);\n\t};\n\n};\n\n},{}],10:[function(require,module,exports){\n'use strict';\n\nmodule.exports = function(Chart) {\n\n\tChart.Doughnut = function(context, config) {\n\t\tconfig.type = 'doughnut';\n\n\t\treturn new Chart(context, config);\n\t};\n\n};\n\n},{}],11:[function(require,module,exports){\n'use strict';\n\nmodule.exports = function(Chart) {\n\n\tChart.Line = function(context, config) {\n\t\tconfig.type = 'line';\n\n\t\treturn new Chart(context, config);\n\t};\n\n};\n\n},{}],12:[function(require,module,exports){\n'use strict';\n\nmodule.exports = function(Chart) {\n\n\tChart.PolarArea = function(context, config) {\n\t\tconfig.type = 'polarArea';\n\n\t\treturn new Chart(context, config);\n\t};\n\n};\n\n},{}],13:[function(require,module,exports){\n'use strict';\n\nmodule.exports = function(Chart) {\n\n\tChart.Radar = function(context, config) {\n\t\tconfig.type = 'radar';\n\n\t\treturn new Chart(context, config);\n\t};\n\n};\n\n},{}],14:[function(require,module,exports){\n'use strict';\n\nmodule.exports = function(Chart) {\n\tChart.Scatter = function(context, config) {\n\t\tconfig.type = 'scatter';\n\t\treturn new Chart(context, 
config);\n\t};\n};\n\n},{}],15:[function(require,module,exports){\n'use strict';\n\nvar defaults = require(25);\nvar elements = require(40);\nvar helpers = require(45);\n\ndefaults._set('bar', {\n\thover: {\n\t\tmode: 'label'\n\t},\n\n\tscales: {\n\t\txAxes: [{\n\t\t\ttype: 'category',\n\n\t\t\t// Specific to Bar Controller\n\t\t\tcategoryPercentage: 0.8,\n\t\t\tbarPercentage: 0.9,\n\n\t\t\t// offset settings\n\t\t\toffset: true,\n\n\t\t\t// grid line settings\n\t\t\tgridLines: {\n\t\t\t\toffsetGridLines: true\n\t\t\t}\n\t\t}],\n\n\t\tyAxes: [{\n\t\t\ttype: 'linear'\n\t\t}]\n\t}\n});\n\ndefaults._set('horizontalBar', {\n\thover: {\n\t\tmode: 'index',\n\t\taxis: 'y'\n\t},\n\n\tscales: {\n\t\txAxes: [{\n\t\t\ttype: 'linear',\n\t\t\tposition: 'bottom'\n\t\t}],\n\n\t\tyAxes: [{\n\t\t\tposition: 'left',\n\t\t\ttype: 'category',\n\n\t\t\t// Specific to Horizontal Bar Controller\n\t\t\tcategoryPercentage: 0.8,\n\t\t\tbarPercentage: 0.9,\n\n\t\t\t// offset settings\n\t\t\toffset: true,\n\n\t\t\t// grid line settings\n\t\t\tgridLines: {\n\t\t\t\toffsetGridLines: true\n\t\t\t}\n\t\t}]\n\t},\n\n\telements: {\n\t\trectangle: {\n\t\t\tborderSkipped: 'left'\n\t\t}\n\t},\n\n\ttooltips: {\n\t\tcallbacks: {\n\t\t\ttitle: function(item, data) {\n\t\t\t\t// Pick first xLabel for now\n\t\t\t\tvar title = '';\n\n\t\t\t\tif (item.length > 0) {\n\t\t\t\t\tif (item[0].yLabel) {\n\t\t\t\t\t\ttitle = item[0].yLabel;\n\t\t\t\t\t} else if (data.labels.length > 0 && item[0].index < data.labels.length) {\n\t\t\t\t\t\ttitle = data.labels[item[0].index];\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\treturn title;\n\t\t\t},\n\n\t\t\tlabel: function(item, data) {\n\t\t\t\tvar datasetLabel = data.datasets[item.datasetIndex].label || '';\n\t\t\t\treturn datasetLabel + ': ' + item.xLabel;\n\t\t\t}\n\t\t},\n\t\tmode: 'index',\n\t\taxis: 'y'\n\t}\n});\n\n/**\n * Computes the \"optimal\" sample size to maintain bars equally sized while preventing overlap.\n * @private\n */\nfunction computeMinSampleSize(scale, 
pixels) {\n\tvar min = scale.isHorizontal() ? scale.width : scale.height;\n\tvar ticks = scale.getTicks();\n\tvar prev, curr, i, ilen;\n\n\tfor (i = 1, ilen = pixels.length; i < ilen; ++i) {\n\t\tmin = Math.min(min, pixels[i] - pixels[i - 1]);\n\t}\n\n\tfor (i = 0, ilen = ticks.length; i < ilen; ++i) {\n\t\tcurr = scale.getPixelForTick(i);\n\t\tmin = i > 0 ? Math.min(min, curr - prev) : min;\n\t\tprev = curr;\n\t}\n\n\treturn min;\n}\n\n/**\n * Computes an \"ideal\" category based on the absolute bar thickness or, if undefined or null,\n * uses the smallest interval (see computeMinSampleSize) that prevents bar overlapping. This\n * mode currently always generates bars equally sized (until we introduce scriptable options?).\n * @private\n */\nfunction computeFitCategoryTraits(index, ruler, options) {\n\tvar thickness = options.barThickness;\n\tvar count = ruler.stackCount;\n\tvar curr = ruler.pixels[index];\n\tvar size, ratio;\n\n\tif (helpers.isNullOrUndef(thickness)) {\n\t\tsize = ruler.min * options.categoryPercentage;\n\t\tratio = options.barPercentage;\n\t} else {\n\t\t// When bar thickness is enforced, category and bar percentages are ignored.\n\t\t// Note(SB): we could add support for relative bar thickness (e.g. barThickness: '50%')\n\t\t// and deprecate barPercentage since this value is ignored when thickness is absolute.\n\t\tsize = thickness * count;\n\t\tratio = 1;\n\t}\n\n\treturn {\n\t\tchunk: size / count,\n\t\tratio: ratio,\n\t\tstart: curr - (size / 2)\n\t};\n}\n\n/**\n * Computes an \"optimal\" category that globally arranges bars side by side (no gap when\n * percentage options are 1), based on the previous and following categories. This mode\n * generates bars with different widths when data are not evenly spaced.\n * @private\n */\nfunction computeFlexCategoryTraits(index, ruler, options) {\n\tvar pixels = ruler.pixels;\n\tvar curr = pixels[index];\n\tvar prev = index > 0 ? pixels[index - 1] : null;\n\tvar next = index < pixels.length - 1 ? 
pixels[index + 1] : null;\n\tvar percent = options.categoryPercentage;\n\tvar start, size;\n\n\tif (prev === null) {\n\t\t// first data: its size is double based on the next point or,\n\t\t// if it's also the last data, we use the scale end extremity.\n\t\tprev = curr - (next === null ? ruler.end - curr : next - curr);\n\t}\n\n\tif (next === null) {\n\t\t// last data: its size is also double based on the previous point.\n\t\tnext = curr + curr - prev;\n\t}\n\n\tstart = curr - ((curr - prev) / 2) * percent;\n\tsize = ((next - prev) / 2) * percent;\n\n\treturn {\n\t\tchunk: size / ruler.stackCount,\n\t\tratio: options.barPercentage,\n\t\tstart: start\n\t};\n}\n\nmodule.exports = function(Chart) {\n\n\tChart.controllers.bar = Chart.DatasetController.extend({\n\n\t\tdataElementType: elements.Rectangle,\n\n\t\tinitialize: function() {\n\t\t\tvar me = this;\n\t\t\tvar meta;\n\n\t\t\tChart.DatasetController.prototype.initialize.apply(me, arguments);\n\n\t\t\tmeta = me.getMeta();\n\t\t\tmeta.stack = me.getDataset().stack;\n\t\t\tmeta.bar = true;\n\t\t},\n\n\t\tupdate: function(reset) {\n\t\t\tvar me = this;\n\t\t\tvar rects = me.getMeta().data;\n\t\t\tvar i, ilen;\n\n\t\t\tme._ruler = me.getRuler();\n\n\t\t\tfor (i = 0, ilen = rects.length; i < ilen; ++i) {\n\t\t\t\tme.updateElement(rects[i], i, reset);\n\t\t\t}\n\t\t},\n\n\t\tupdateElement: function(rectangle, index, reset) {\n\t\t\tvar me = this;\n\t\t\tvar chart = me.chart;\n\t\t\tvar meta = me.getMeta();\n\t\t\tvar dataset = me.getDataset();\n\t\t\tvar custom = rectangle.custom || {};\n\t\t\tvar rectangleOptions = chart.options.elements.rectangle;\n\n\t\t\trectangle._xScale = me.getScaleForId(meta.xAxisID);\n\t\t\trectangle._yScale = me.getScaleForId(meta.yAxisID);\n\t\t\trectangle._datasetIndex = me.index;\n\t\t\trectangle._index = index;\n\n\t\t\trectangle._model = {\n\t\t\t\tdatasetLabel: dataset.label,\n\t\t\t\tlabel: chart.data.labels[index],\n\t\t\t\tborderSkipped: custom.borderSkipped ? 
custom.borderSkipped : rectangleOptions.borderSkipped,\n\t\t\t\tbackgroundColor: custom.backgroundColor ? custom.backgroundColor : helpers.valueAtIndexOrDefault(dataset.backgroundColor, index, rectangleOptions.backgroundColor),\n\t\t\t\tborderColor: custom.borderColor ? custom.borderColor : helpers.valueAtIndexOrDefault(dataset.borderColor, index, rectangleOptions.borderColor),\n\t\t\t\tborderWidth: custom.borderWidth ? custom.borderWidth : helpers.valueAtIndexOrDefault(dataset.borderWidth, index, rectangleOptions.borderWidth)\n\t\t\t};\n\n\t\t\tme.updateElementGeometry(rectangle, index, reset);\n\n\t\t\trectangle.pivot();\n\t\t},\n\n\t\t/**\n\t\t * @private\n\t\t */\n\t\tupdateElementGeometry: function(rectangle, index, reset) {\n\t\t\tvar me = this;\n\t\t\tvar model = rectangle._model;\n\t\t\tvar vscale = me.getValueScale();\n\t\t\tvar base = vscale.getBasePixel();\n\t\t\tvar horizontal = vscale.isHorizontal();\n\t\t\tvar ruler = me._ruler || me.getRuler();\n\t\t\tvar vpixels = me.calculateBarValuePixels(me.index, index);\n\t\t\tvar ipixels = me.calculateBarIndexPixels(me.index, index, ruler);\n\n\t\t\tmodel.horizontal = horizontal;\n\t\t\tmodel.base = reset ? base : vpixels.base;\n\t\t\tmodel.x = horizontal ? reset ? base : vpixels.head : ipixels.center;\n\t\t\tmodel.y = horizontal ? ipixels.center : reset ? base : vpixels.head;\n\t\t\tmodel.height = horizontal ? ipixels.size : undefined;\n\t\t\tmodel.width = horizontal ? 
undefined : ipixels.size;\n\t\t},\n\n\t\t/**\n\t\t * @private\n\t\t */\n\t\tgetValueScaleId: function() {\n\t\t\treturn this.getMeta().yAxisID;\n\t\t},\n\n\t\t/**\n\t\t * @private\n\t\t */\n\t\tgetIndexScaleId: function() {\n\t\t\treturn this.getMeta().xAxisID;\n\t\t},\n\n\t\t/**\n\t\t * @private\n\t\t */\n\t\tgetValueScale: function() {\n\t\t\treturn this.getScaleForId(this.getValueScaleId());\n\t\t},\n\n\t\t/**\n\t\t * @private\n\t\t */\n\t\tgetIndexScale: function() {\n\t\t\treturn this.getScaleForId(this.getIndexScaleId());\n\t\t},\n\n\t\t/**\n\t\t * Returns the stacks based on groups and bar visibility.\n\t\t * @param {Number} [last] - The dataset index\n\t\t * @returns {Array} The stack list\n\t\t * @private\n\t\t */\n\t\t_getStacks: function(last) {\n\t\t\tvar me = this;\n\t\t\tvar chart = me.chart;\n\t\t\tvar scale = me.getIndexScale();\n\t\t\tvar stacked = scale.options.stacked;\n\t\t\tvar ilen = last === undefined ? chart.data.datasets.length : last + 1;\n\t\t\tvar stacks = [];\n\t\t\tvar i, meta;\n\n\t\t\tfor (i = 0; i < ilen; ++i) {\n\t\t\t\tmeta = chart.getDatasetMeta(i);\n\t\t\t\tif (meta.bar && chart.isDatasetVisible(i) &&\n\t\t\t\t\t(stacked === false ||\n\t\t\t\t\t(stacked === true && stacks.indexOf(meta.stack) === -1) ||\n\t\t\t\t\t(stacked === undefined && (meta.stack === undefined || stacks.indexOf(meta.stack) === -1)))) {\n\t\t\t\t\tstacks.push(meta.stack);\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn stacks;\n\t\t},\n\n\t\t/**\n\t\t * Returns the effective number of stacks based on groups and bar visibility.\n\t\t * @private\n\t\t */\n\t\tgetStackCount: function() {\n\t\t\treturn this._getStacks().length;\n\t\t},\n\n\t\t/**\n\t\t * Returns the stack index for the given dataset based on groups and bar visibility.\n\t\t * @param {Number} [datasetIndex] - The dataset index\n\t\t * @param {String} [name] - The stack name to find\n\t\t * @returns {Number} The stack index\n\t\t * @private\n\t\t */\n\t\tgetStackIndex: function(datasetIndex, name) {\n\t\t\tvar 
stacks = this._getStacks(datasetIndex);\n\t\t\tvar index = (name !== undefined)\n\t\t\t\t? stacks.indexOf(name)\n\t\t\t\t: -1; // indexOf returns -1 if element is not present\n\n\t\t\treturn (index === -1)\n\t\t\t\t? stacks.length - 1\n\t\t\t\t: index;\n\t\t},\n\n\t\t/**\n\t\t * @private\n\t\t */\n\t\tgetRuler: function() {\n\t\t\tvar me = this;\n\t\t\tvar scale = me.getIndexScale();\n\t\t\tvar stackCount = me.getStackCount();\n\t\t\tvar datasetIndex = me.index;\n\t\t\tvar isHorizontal = scale.isHorizontal();\n\t\t\tvar start = isHorizontal ? scale.left : scale.top;\n\t\t\tvar end = start + (isHorizontal ? scale.width : scale.height);\n\t\t\tvar pixels = [];\n\t\t\tvar i, ilen, min;\n\n\t\t\tfor (i = 0, ilen = me.getMeta().data.length; i < ilen; ++i) {\n\t\t\t\tpixels.push(scale.getPixelForValue(null, i, datasetIndex));\n\t\t\t}\n\n\t\t\tmin = helpers.isNullOrUndef(scale.options.barThickness)\n\t\t\t\t? computeMinSampleSize(scale, pixels)\n\t\t\t\t: -1;\n\n\t\t\treturn {\n\t\t\t\tmin: min,\n\t\t\t\tpixels: pixels,\n\t\t\t\tstart: start,\n\t\t\t\tend: end,\n\t\t\t\tstackCount: stackCount,\n\t\t\t\tscale: scale\n\t\t\t};\n\t\t},\n\n\t\t/**\n\t\t * Note: pixel values are not clamped to the scale area.\n\t\t * @private\n\t\t */\n\t\tcalculateBarValuePixels: function(datasetIndex, index) {\n\t\t\tvar me = this;\n\t\t\tvar chart = me.chart;\n\t\t\tvar meta = me.getMeta();\n\t\t\tvar scale = me.getValueScale();\n\t\t\tvar datasets = chart.data.datasets;\n\t\t\tvar value = scale.getRightValue(datasets[datasetIndex].data[index]);\n\t\t\tvar stacked = scale.options.stacked;\n\t\t\tvar stack = meta.stack;\n\t\t\tvar start = 0;\n\t\t\tvar i, imeta, ivalue, base, head, size;\n\n\t\t\tif (stacked || (stacked === undefined && stack !== undefined)) {\n\t\t\t\tfor (i = 0; i < datasetIndex; ++i) {\n\t\t\t\t\timeta = chart.getDatasetMeta(i);\n\n\t\t\t\t\tif (imeta.bar &&\n\t\t\t\t\t\timeta.stack === stack &&\n\t\t\t\t\t\timeta.controller.getValueScaleId() === scale.id 
&&\n\t\t\t\t\t\tchart.isDatasetVisible(i)) {\n\n\t\t\t\t\t\tivalue = scale.getRightValue(datasets[i].data[index]);\n\t\t\t\t\t\tif ((value < 0 && ivalue < 0) || (value >= 0 && ivalue > 0)) {\n\t\t\t\t\t\t\tstart += ivalue;\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tbase = scale.getPixelForValue(start);\n\t\t\thead = scale.getPixelForValue(start + value);\n\t\t\tsize = (head - base) / 2;\n\n\t\t\treturn {\n\t\t\t\tsize: size,\n\t\t\t\tbase: base,\n\t\t\t\thead: head,\n\t\t\t\tcenter: head + size / 2\n\t\t\t};\n\t\t},\n\n\t\t/**\n\t\t * @private\n\t\t */\n\t\tcalculateBarIndexPixels: function(datasetIndex, index, ruler) {\n\t\t\tvar me = this;\n\t\t\tvar options = ruler.scale.options;\n\t\t\tvar range = options.barThickness === 'flex'\n\t\t\t\t? computeFlexCategoryTraits(index, ruler, options)\n\t\t\t\t: computeFitCategoryTraits(index, ruler, options);\n\n\t\t\tvar stackIndex = me.getStackIndex(datasetIndex, me.getMeta().stack);\n\t\t\tvar center = range.start + (range.chunk * stackIndex) + (range.chunk / 2);\n\t\t\tvar size = Math.min(\n\t\t\t\thelpers.valueOrDefault(options.maxBarThickness, Infinity),\n\t\t\t\trange.chunk * range.ratio);\n\n\t\t\treturn {\n\t\t\t\tbase: center - size / 2,\n\t\t\t\thead: center + size / 2,\n\t\t\t\tcenter: center,\n\t\t\t\tsize: size\n\t\t\t};\n\t\t},\n\n\t\tdraw: function() {\n\t\t\tvar me = this;\n\t\t\tvar chart = me.chart;\n\t\t\tvar scale = me.getValueScale();\n\t\t\tvar rects = me.getMeta().data;\n\t\t\tvar dataset = me.getDataset();\n\t\t\tvar ilen = rects.length;\n\t\t\tvar i = 0;\n\n\t\t\thelpers.canvas.clipArea(chart.ctx, chart.chartArea);\n\n\t\t\tfor (; i < ilen; ++i) {\n\t\t\t\tif (!isNaN(scale.getRightValue(dataset.data[i]))) {\n\t\t\t\t\trects[i].draw();\n\t\t\t\t}\n\t\t\t}\n\n\t\t\thelpers.canvas.unclipArea(chart.ctx);\n\t\t},\n\n\t\tsetHoverStyle: function(rectangle) {\n\t\t\tvar dataset = this.chart.data.datasets[rectangle._datasetIndex];\n\t\t\tvar index = rectangle._index;\n\t\t\tvar custom = 
rectangle.custom || {};\n\t\t\tvar model = rectangle._model;\n\n\t\t\tmodel.backgroundColor = custom.hoverBackgroundColor ? custom.hoverBackgroundColor : helpers.valueAtIndexOrDefault(dataset.hoverBackgroundColor, index, helpers.getHoverColor(model.backgroundColor));\n\t\t\tmodel.borderColor = custom.hoverBorderColor ? custom.hoverBorderColor : helpers.valueAtIndexOrDefault(dataset.hoverBorderColor, index, helpers.getHoverColor(model.borderColor));\n\t\t\tmodel.borderWidth = custom.hoverBorderWidth ? custom.hoverBorderWidth : helpers.valueAtIndexOrDefault(dataset.hoverBorderWidth, index, model.borderWidth);\n\t\t},\n\n\t\tremoveHoverStyle: function(rectangle) {\n\t\t\tvar dataset = this.chart.data.datasets[rectangle._datasetIndex];\n\t\t\tvar index = rectangle._index;\n\t\t\tvar custom = rectangle.custom || {};\n\t\t\tvar model = rectangle._model;\n\t\t\tvar rectangleElementOptions = this.chart.options.elements.rectangle;\n\n\t\t\tmodel.backgroundColor = custom.backgroundColor ? custom.backgroundColor : helpers.valueAtIndexOrDefault(dataset.backgroundColor, index, rectangleElementOptions.backgroundColor);\n\t\t\tmodel.borderColor = custom.borderColor ? custom.borderColor : helpers.valueAtIndexOrDefault(dataset.borderColor, index, rectangleElementOptions.borderColor);\n\t\t\tmodel.borderWidth = custom.borderWidth ? 
custom.borderWidth : helpers.valueAtIndexOrDefault(dataset.borderWidth, index, rectangleElementOptions.borderWidth);\n\t\t}\n\t});\n\n\tChart.controllers.horizontalBar = Chart.controllers.bar.extend({\n\t\t/**\n\t\t * @private\n\t\t */\n\t\tgetValueScaleId: function() {\n\t\t\treturn this.getMeta().xAxisID;\n\t\t},\n\n\t\t/**\n\t\t * @private\n\t\t */\n\t\tgetIndexScaleId: function() {\n\t\t\treturn this.getMeta().yAxisID;\n\t\t}\n\t});\n};\n\n},{\"25\":25,\"40\":40,\"45\":45}],16:[function(require,module,exports){\n'use strict';\n\nvar defaults = require(25);\nvar elements = require(40);\nvar helpers = require(45);\n\ndefaults._set('bubble', {\n\thover: {\n\t\tmode: 'single'\n\t},\n\n\tscales: {\n\t\txAxes: [{\n\t\t\ttype: 'linear', // bubble should probably use a linear scale by default\n\t\t\tposition: 'bottom',\n\t\t\tid: 'x-axis-0' // need an ID so datasets can reference the scale\n\t\t}],\n\t\tyAxes: [{\n\t\t\ttype: 'linear',\n\t\t\tposition: 'left',\n\t\t\tid: 'y-axis-0'\n\t\t}]\n\t},\n\n\ttooltips: {\n\t\tcallbacks: {\n\t\t\ttitle: function() {\n\t\t\t\t// Title doesn't make sense for scatter since we format the data as a point\n\t\t\t\treturn '';\n\t\t\t},\n\t\t\tlabel: function(item, data) {\n\t\t\t\tvar datasetLabel = data.datasets[item.datasetIndex].label || '';\n\t\t\t\tvar dataPoint = data.datasets[item.datasetIndex].data[item.index];\n\t\t\t\treturn datasetLabel + ': (' + item.xLabel + ', ' + item.yLabel + ', ' + dataPoint.r + ')';\n\t\t\t}\n\t\t}\n\t}\n});\n\n\nmodule.exports = function(Chart) {\n\n\tChart.controllers.bubble = Chart.DatasetController.extend({\n\t\t/**\n\t\t * @protected\n\t\t */\n\t\tdataElementType: elements.Point,\n\n\t\t/**\n\t\t * @protected\n\t\t */\n\t\tupdate: function(reset) {\n\t\t\tvar me = this;\n\t\t\tvar meta = me.getMeta();\n\t\t\tvar points = meta.data;\n\n\t\t\t// Update Points\n\t\t\thelpers.each(points, function(point, index) {\n\t\t\t\tme.updateElement(point, index, reset);\n\t\t\t});\n\t\t},\n\n\t\t/**\n\t\t * 
@protected\n\t\t */\n\t\tupdateElement: function(point, index, reset) {\n\t\t\tvar me = this;\n\t\t\tvar meta = me.getMeta();\n\t\t\tvar custom = point.custom || {};\n\t\t\tvar xScale = me.getScaleForId(meta.xAxisID);\n\t\t\tvar yScale = me.getScaleForId(meta.yAxisID);\n\t\t\tvar options = me._resolveElementOptions(point, index);\n\t\t\tvar data = me.getDataset().data[index];\n\t\t\tvar dsIndex = me.index;\n\n\t\t\tvar x = reset ? xScale.getPixelForDecimal(0.5) : xScale.getPixelForValue(typeof data === 'object' ? data : NaN, index, dsIndex);\n\t\t\tvar y = reset ? yScale.getBasePixel() : yScale.getPixelForValue(data, index, dsIndex);\n\n\t\t\tpoint._xScale = xScale;\n\t\t\tpoint._yScale = yScale;\n\t\t\tpoint._options = options;\n\t\t\tpoint._datasetIndex = dsIndex;\n\t\t\tpoint._index = index;\n\t\t\tpoint._model = {\n\t\t\t\tbackgroundColor: options.backgroundColor,\n\t\t\t\tborderColor: options.borderColor,\n\t\t\t\tborderWidth: options.borderWidth,\n\t\t\t\thitRadius: options.hitRadius,\n\t\t\t\tpointStyle: options.pointStyle,\n\t\t\t\tradius: reset ? 
0 : options.radius,\n\t\t\t\tskip: custom.skip || isNaN(x) || isNaN(y),\n\t\t\t\tx: x,\n\t\t\t\ty: y,\n\t\t\t};\n\n\t\t\tpoint.pivot();\n\t\t},\n\n\t\t/**\n\t\t * @protected\n\t\t */\n\t\tsetHoverStyle: function(point) {\n\t\t\tvar model = point._model;\n\t\t\tvar options = point._options;\n\n\t\t\tmodel.backgroundColor = helpers.valueOrDefault(options.hoverBackgroundColor, helpers.getHoverColor(options.backgroundColor));\n\t\t\tmodel.borderColor = helpers.valueOrDefault(options.hoverBorderColor, helpers.getHoverColor(options.borderColor));\n\t\t\tmodel.borderWidth = helpers.valueOrDefault(options.hoverBorderWidth, options.borderWidth);\n\t\t\tmodel.radius = options.radius + options.hoverRadius;\n\t\t},\n\n\t\t/**\n\t\t * @protected\n\t\t */\n\t\tremoveHoverStyle: function(point) {\n\t\t\tvar model = point._model;\n\t\t\tvar options = point._options;\n\n\t\t\tmodel.backgroundColor = options.backgroundColor;\n\t\t\tmodel.borderColor = options.borderColor;\n\t\t\tmodel.borderWidth = options.borderWidth;\n\t\t\tmodel.radius = options.radius;\n\t\t},\n\n\t\t/**\n\t\t * @private\n\t\t */\n\t\t_resolveElementOptions: function(point, index) {\n\t\t\tvar me = this;\n\t\t\tvar chart = me.chart;\n\t\t\tvar datasets = chart.data.datasets;\n\t\t\tvar dataset = datasets[me.index];\n\t\t\tvar custom = point.custom || {};\n\t\t\tvar options = chart.options.elements.point;\n\t\t\tvar resolve = helpers.options.resolve;\n\t\t\tvar data = dataset.data[index];\n\t\t\tvar values = {};\n\t\t\tvar i, ilen, key;\n\n\t\t\t// Scriptable options\n\t\t\tvar context = {\n\t\t\t\tchart: chart,\n\t\t\t\tdataIndex: index,\n\t\t\t\tdataset: dataset,\n\t\t\t\tdatasetIndex: me.index\n\t\t\t};\n\n\t\t\tvar keys = [\n\t\t\t\t'backgroundColor',\n\t\t\t\t'borderColor',\n\t\t\t\t'borderWidth',\n\t\t\t\t'hoverBackgroundColor',\n\t\t\t\t'hoverBorderColor',\n\t\t\t\t'hoverBorderWidth',\n\t\t\t\t'hoverRadius',\n\t\t\t\t'hitRadius',\n\t\t\t\t'pointStyle'\n\t\t\t];\n\n\t\t\tfor (i = 0, ilen = keys.length; i < 
ilen; ++i) {\n\t\t\t\tkey = keys[i];\n\t\t\t\tvalues[key] = resolve([\n\t\t\t\t\tcustom[key],\n\t\t\t\t\tdataset[key],\n\t\t\t\t\toptions[key]\n\t\t\t\t], context, index);\n\t\t\t}\n\n\t\t\t// Custom radius resolution\n\t\t\tvalues.radius = resolve([\n\t\t\t\tcustom.radius,\n\t\t\t\tdata ? data.r : undefined,\n\t\t\t\tdataset.radius,\n\t\t\t\toptions.radius\n\t\t\t], context, index);\n\n\t\t\treturn values;\n\t\t}\n\t});\n};\n\n},{\"25\":25,\"40\":40,\"45\":45}],17:[function(require,module,exports){\n'use strict';\n\nvar defaults = require(25);\nvar elements = require(40);\nvar helpers = require(45);\n\ndefaults._set('doughnut', {\n\tanimation: {\n\t\t// Boolean - Whether we animate the rotation of the Doughnut\n\t\tanimateRotate: true,\n\t\t// Boolean - Whether we animate scaling the Doughnut from the centre\n\t\tanimateScale: false\n\t},\n\thover: {\n\t\tmode: 'single'\n\t},\n\tlegendCallback: function(chart) {\n\t\tvar text = [];\n\t\ttext.push('<ul class=\"' + chart.id + '-legend\">');\n\n\t\tvar data = chart.data;\n\t\tvar datasets = data.datasets;\n\t\tvar labels = data.labels;\n\n\t\tif (datasets.length) {\n\t\t\tfor (var i = 0; i < datasets[0].data.length; ++i) {\n\t\t\t\ttext.push('<li><span style=\"background-color:' + datasets[0].backgroundColor[i] + '\"></span>');\n\t\t\t\tif (labels[i]) {\n\t\t\t\t\ttext.push(labels[i]);\n\t\t\t\t}\n\t\t\t\ttext.push('</li>');\n\t\t\t}\n\t\t}\n\n\t\ttext.push('</ul>');\n\t\treturn text.join('');\n\t},\n\tlegend: {\n\t\tlabels: {\n\t\t\tgenerateLabels: function(chart) {\n\t\t\t\tvar data = chart.data;\n\t\t\t\tif (data.labels.length && data.datasets.length) {\n\t\t\t\t\treturn data.labels.map(function(label, i) {\n\t\t\t\t\t\tvar meta = chart.getDatasetMeta(0);\n\t\t\t\t\t\tvar ds = data.datasets[0];\n\t\t\t\t\t\tvar arc = meta.data[i];\n\t\t\t\t\t\tvar custom = arc && arc.custom || {};\n\t\t\t\t\t\tvar valueAtIndexOrDefault = helpers.valueAtIndexOrDefault;\n\t\t\t\t\t\tvar arcOpts = 
chart.options.elements.arc;\n\t\t\t\t\t\tvar fill = custom.backgroundColor ? custom.backgroundColor : valueAtIndexOrDefault(ds.backgroundColor, i, arcOpts.backgroundColor);\n\t\t\t\t\t\tvar stroke = custom.borderColor ? custom.borderColor : valueAtIndexOrDefault(ds.borderColor, i, arcOpts.borderColor);\n\t\t\t\t\t\tvar bw = custom.borderWidth ? custom.borderWidth : valueAtIndexOrDefault(ds.borderWidth, i, arcOpts.borderWidth);\n\n\t\t\t\t\t\treturn {\n\t\t\t\t\t\t\ttext: label,\n\t\t\t\t\t\t\tfillStyle: fill,\n\t\t\t\t\t\t\tstrokeStyle: stroke,\n\t\t\t\t\t\t\tlineWidth: bw,\n\t\t\t\t\t\t\thidden: isNaN(ds.data[i]) || meta.data[i].hidden,\n\n\t\t\t\t\t\t\t// Extra data used for toggling the correct item\n\t\t\t\t\t\t\tindex: i\n\t\t\t\t\t\t};\n\t\t\t\t\t});\n\t\t\t\t}\n\t\t\t\treturn [];\n\t\t\t}\n\t\t},\n\n\t\tonClick: function(e, legendItem) {\n\t\t\tvar index = legendItem.index;\n\t\t\tvar chart = this.chart;\n\t\t\tvar i, ilen, meta;\n\n\t\t\tfor (i = 0, ilen = (chart.data.datasets || []).length; i < ilen; ++i) {\n\t\t\t\tmeta = chart.getDatasetMeta(i);\n\t\t\t\t// toggle visibility of index if exists\n\t\t\t\tif (meta.data[index]) {\n\t\t\t\t\tmeta.data[index].hidden = !meta.data[index].hidden;\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tchart.update();\n\t\t}\n\t},\n\n\t// The percentage of the chart that we cut out of the middle.\n\tcutoutPercentage: 50,\n\n\t// The rotation of the chart, where the first data arc begins.\n\trotation: Math.PI * -0.5,\n\n\t// The total circumference of the chart.\n\tcircumference: Math.PI * 2.0,\n\n\t// Need to override these to give a nice default\n\ttooltips: {\n\t\tcallbacks: {\n\t\t\ttitle: function() {\n\t\t\t\treturn '';\n\t\t\t},\n\t\t\tlabel: function(tooltipItem, data) {\n\t\t\t\tvar dataLabel = data.labels[tooltipItem.index];\n\t\t\t\tvar value = ': ' + data.datasets[tooltipItem.datasetIndex].data[tooltipItem.index];\n\n\t\t\t\tif (helpers.isArray(dataLabel)) {\n\t\t\t\t\t// show value on first line of multiline label\n\t\t\t\t\t// 
need to clone because we are changing the value\n\t\t\t\t\tdataLabel = dataLabel.slice();\n\t\t\t\t\tdataLabel[0] += value;\n\t\t\t\t} else {\n\t\t\t\t\tdataLabel += value;\n\t\t\t\t}\n\n\t\t\t\treturn dataLabel;\n\t\t\t}\n\t\t}\n\t}\n});\n\ndefaults._set('pie', helpers.clone(defaults.doughnut));\ndefaults._set('pie', {\n\tcutoutPercentage: 0\n});\n\nmodule.exports = function(Chart) {\n\n\tChart.controllers.doughnut = Chart.controllers.pie = Chart.DatasetController.extend({\n\n\t\tdataElementType: elements.Arc,\n\n\t\tlinkScales: helpers.noop,\n\n\t\t// Get index of the dataset in relation to the visible datasets. This allows determining the inner and outer radius correctly\n\t\tgetRingIndex: function(datasetIndex) {\n\t\t\tvar ringIndex = 0;\n\n\t\t\tfor (var j = 0; j < datasetIndex; ++j) {\n\t\t\t\tif (this.chart.isDatasetVisible(j)) {\n\t\t\t\t\t++ringIndex;\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn ringIndex;\n\t\t},\n\n\t\tupdate: function(reset) {\n\t\t\tvar me = this;\n\t\t\tvar chart = me.chart;\n\t\t\tvar chartArea = chart.chartArea;\n\t\t\tvar opts = chart.options;\n\t\t\tvar arcOpts = opts.elements.arc;\n\t\t\tvar availableWidth = chartArea.right - chartArea.left - arcOpts.borderWidth;\n\t\t\tvar availableHeight = chartArea.bottom - chartArea.top - arcOpts.borderWidth;\n\t\t\tvar minSize = Math.min(availableWidth, availableHeight);\n\t\t\tvar offset = {x: 0, y: 0};\n\t\t\tvar meta = me.getMeta();\n\t\t\tvar cutoutPercentage = opts.cutoutPercentage;\n\t\t\tvar circumference = opts.circumference;\n\n\t\t\t// If the chart's circumference isn't a full circle, calculate minSize as a ratio of the width/height of the arc\n\t\t\tif (circumference < Math.PI * 2.0) {\n\t\t\t\tvar startAngle = opts.rotation % (Math.PI * 2.0);\n\t\t\t\tstartAngle += Math.PI * 2.0 * (startAngle >= Math.PI ? -1 : startAngle < -Math.PI ? 
1 : 0);\n\t\t\t\tvar endAngle = startAngle + circumference;\n\t\t\t\tvar start = {x: Math.cos(startAngle), y: Math.sin(startAngle)};\n\t\t\t\tvar end = {x: Math.cos(endAngle), y: Math.sin(endAngle)};\n\t\t\t\tvar contains0 = (startAngle <= 0 && endAngle >= 0) || (startAngle <= Math.PI * 2.0 && Math.PI * 2.0 <= endAngle);\n\t\t\t\tvar contains90 = (startAngle <= Math.PI * 0.5 && Math.PI * 0.5 <= endAngle) || (startAngle <= Math.PI * 2.5 && Math.PI * 2.5 <= endAngle);\n\t\t\t\tvar contains180 = (startAngle <= -Math.PI && -Math.PI <= endAngle) || (startAngle <= Math.PI && Math.PI <= endAngle);\n\t\t\t\tvar contains270 = (startAngle <= -Math.PI * 0.5 && -Math.PI * 0.5 <= endAngle) || (startAngle <= Math.PI * 1.5 && Math.PI * 1.5 <= endAngle);\n\t\t\t\tvar cutout = cutoutPercentage / 100.0;\n\t\t\t\tvar min = {x: contains180 ? -1 : Math.min(start.x * (start.x < 0 ? 1 : cutout), end.x * (end.x < 0 ? 1 : cutout)), y: contains270 ? -1 : Math.min(start.y * (start.y < 0 ? 1 : cutout), end.y * (end.y < 0 ? 1 : cutout))};\n\t\t\t\tvar max = {x: contains0 ? 1 : Math.max(start.x * (start.x > 0 ? 1 : cutout), end.x * (end.x > 0 ? 1 : cutout)), y: contains90 ? 1 : Math.max(start.y * (start.y > 0 ? 1 : cutout), end.y * (end.y > 0 ? 1 : cutout))};\n\t\t\t\tvar size = {width: (max.x - min.x) * 0.5, height: (max.y - min.y) * 0.5};\n\t\t\t\tminSize = Math.min(availableWidth / size.width, availableHeight / size.height);\n\t\t\t\toffset = {x: (max.x + min.x) * -0.5, y: (max.y + min.y) * -0.5};\n\t\t\t}\n\n\t\t\tchart.borderWidth = me.getMaxBorderWidth(meta.data);\n\t\t\tchart.outerRadius = Math.max((minSize - chart.borderWidth) / 2, 0);\n\t\t\tchart.innerRadius = Math.max(cutoutPercentage ? 
(chart.outerRadius / 100) * (cutoutPercentage) : 0, 0);\n\t\t\tchart.radiusLength = (chart.outerRadius - chart.innerRadius) / chart.getVisibleDatasetCount();\n\t\t\tchart.offsetX = offset.x * chart.outerRadius;\n\t\t\tchart.offsetY = offset.y * chart.outerRadius;\n\n\t\t\tmeta.total = me.calculateTotal();\n\n\t\t\tme.outerRadius = chart.outerRadius - (chart.radiusLength * me.getRingIndex(me.index));\n\t\t\tme.innerRadius = Math.max(me.outerRadius - chart.radiusLength, 0);\n\n\t\t\thelpers.each(meta.data, function(arc, index) {\n\t\t\t\tme.updateElement(arc, index, reset);\n\t\t\t});\n\t\t},\n\n\t\tupdateElement: function(arc, index, reset) {\n\t\t\tvar me = this;\n\t\t\tvar chart = me.chart;\n\t\t\tvar chartArea = chart.chartArea;\n\t\t\tvar opts = chart.options;\n\t\t\tvar animationOpts = opts.animation;\n\t\t\tvar centerX = (chartArea.left + chartArea.right) / 2;\n\t\t\tvar centerY = (chartArea.top + chartArea.bottom) / 2;\n\t\t\tvar startAngle = opts.rotation; // non reset case handled later\n\t\t\tvar endAngle = opts.rotation; // non reset case handled later\n\t\t\tvar dataset = me.getDataset();\n\t\t\tvar circumference = reset && animationOpts.animateRotate ? 0 : arc.hidden ? 0 : me.calculateCircumference(dataset.data[index]) * (opts.circumference / (2.0 * Math.PI));\n\t\t\tvar innerRadius = reset && animationOpts.animateScale ? 0 : me.innerRadius;\n\t\t\tvar outerRadius = reset && animationOpts.animateScale ? 
0 : me.outerRadius;\n\t\t\tvar valueAtIndexOrDefault = helpers.valueAtIndexOrDefault;\n\n\t\t\thelpers.extend(arc, {\n\t\t\t\t// Utility\n\t\t\t\t_datasetIndex: me.index,\n\t\t\t\t_index: index,\n\n\t\t\t\t// Desired view properties\n\t\t\t\t_model: {\n\t\t\t\t\tx: centerX + chart.offsetX,\n\t\t\t\t\ty: centerY + chart.offsetY,\n\t\t\t\t\tstartAngle: startAngle,\n\t\t\t\t\tendAngle: endAngle,\n\t\t\t\t\tcircumference: circumference,\n\t\t\t\t\touterRadius: outerRadius,\n\t\t\t\t\tinnerRadius: innerRadius,\n\t\t\t\t\tlabel: valueAtIndexOrDefault(dataset.label, index, chart.data.labels[index])\n\t\t\t\t}\n\t\t\t});\n\n\t\t\tvar model = arc._model;\n\t\t\t// Resets the visual styles\n\t\t\tthis.removeHoverStyle(arc);\n\n\t\t\t// Set correct angles if not resetting\n\t\t\tif (!reset || !animationOpts.animateRotate) {\n\t\t\t\tif (index === 0) {\n\t\t\t\t\tmodel.startAngle = opts.rotation;\n\t\t\t\t} else {\n\t\t\t\t\tmodel.startAngle = me.getMeta().data[index - 1]._model.endAngle;\n\t\t\t\t}\n\n\t\t\t\tmodel.endAngle = model.startAngle + model.circumference;\n\t\t\t}\n\n\t\t\tarc.pivot();\n\t\t},\n\n\t\tremoveHoverStyle: function(arc) {\n\t\t\tChart.DatasetController.prototype.removeHoverStyle.call(this, arc, this.chart.options.elements.arc);\n\t\t},\n\n\t\tcalculateTotal: function() {\n\t\t\tvar dataset = this.getDataset();\n\t\t\tvar meta = this.getMeta();\n\t\t\tvar total = 0;\n\t\t\tvar value;\n\n\t\t\thelpers.each(meta.data, function(element, index) {\n\t\t\t\tvalue = dataset.data[index];\n\t\t\t\tif (!isNaN(value) && !element.hidden) {\n\t\t\t\t\ttotal += Math.abs(value);\n\t\t\t\t}\n\t\t\t});\n\n\t\t\t/* if (total === 0) {\n\t\t\t\ttotal = NaN;\n\t\t\t}*/\n\n\t\t\treturn total;\n\t\t},\n\n\t\tcalculateCircumference: function(value) {\n\t\t\tvar total = this.getMeta().total;\n\t\t\tif (total > 0 && !isNaN(value)) {\n\t\t\t\treturn (Math.PI * 2.0) * (Math.abs(value) / total);\n\t\t\t}\n\t\t\treturn 0;\n\t\t},\n\n\t\t// gets the max border or hover width to 
properly scale pie charts\n\t\tgetMaxBorderWidth: function(arcs) {\n\t\t\tvar max = 0;\n\t\t\tvar index = this.index;\n\t\t\tvar length = arcs.length;\n\t\t\tvar borderWidth;\n\t\t\tvar hoverWidth;\n\n\t\t\tfor (var i = 0; i < length; i++) {\n\t\t\t\tborderWidth = arcs[i]._model ? arcs[i]._model.borderWidth : 0;\n\t\t\t\thoverWidth = arcs[i]._chart ? arcs[i]._chart.config.data.datasets[index].hoverBorderWidth : 0;\n\n\t\t\t\tmax = borderWidth > max ? borderWidth : max;\n\t\t\t\tmax = hoverWidth > max ? hoverWidth : max;\n\t\t\t}\n\t\t\treturn max;\n\t\t}\n\t});\n};\n\n},{\"25\":25,\"40\":40,\"45\":45}],18:[function(require,module,exports){\n'use strict';\n\nvar defaults = require(25);\nvar elements = require(40);\nvar helpers = require(45);\n\ndefaults._set('line', {\n\tshowLines: true,\n\tspanGaps: false,\n\n\thover: {\n\t\tmode: 'label'\n\t},\n\n\tscales: {\n\t\txAxes: [{\n\t\t\ttype: 'category',\n\t\t\tid: 'x-axis-0'\n\t\t}],\n\t\tyAxes: [{\n\t\t\ttype: 'linear',\n\t\t\tid: 'y-axis-0'\n\t\t}]\n\t}\n});\n\nmodule.exports = function(Chart) {\n\n\tfunction lineEnabled(dataset, options) {\n\t\treturn helpers.valueOrDefault(dataset.showLine, options.showLines);\n\t}\n\n\tChart.controllers.line = Chart.DatasetController.extend({\n\n\t\tdatasetElementType: elements.Line,\n\n\t\tdataElementType: elements.Point,\n\n\t\tupdate: function(reset) {\n\t\t\tvar me = this;\n\t\t\tvar meta = me.getMeta();\n\t\t\tvar line = meta.dataset;\n\t\t\tvar points = meta.data || [];\n\t\t\tvar options = me.chart.options;\n\t\t\tvar lineElementOptions = options.elements.line;\n\t\t\tvar scale = me.getScaleForId(meta.yAxisID);\n\t\t\tvar i, ilen, custom;\n\t\t\tvar dataset = me.getDataset();\n\t\t\tvar showLine = lineEnabled(dataset, options);\n\n\t\t\t// Update Line\n\t\t\tif (showLine) {\n\t\t\t\tcustom = line.custom || {};\n\n\t\t\t\t// Compatibility: If the properties are defined with only the old name, use those values\n\t\t\t\tif ((dataset.tension !== undefined) && 
(dataset.lineTension === undefined)) {\n\t\t\t\t\tdataset.lineTension = dataset.tension;\n\t\t\t\t}\n\n\t\t\t\t// Utility\n\t\t\t\tline._scale = scale;\n\t\t\t\tline._datasetIndex = me.index;\n\t\t\t\t// Data\n\t\t\t\tline._children = points;\n\t\t\t\t// Model\n\t\t\t\tline._model = {\n\t\t\t\t\t// Appearance\n\t\t\t\t\t// The default behavior of lines is to break at null values, according\n\t\t\t\t\t// to https://github.com/chartjs/Chart.js/issues/2435#issuecomment-216718158\n\t\t\t\t\t// This option gives lines the ability to span gaps\n\t\t\t\t\tspanGaps: dataset.spanGaps ? dataset.spanGaps : options.spanGaps,\n\t\t\t\t\ttension: custom.tension ? custom.tension : helpers.valueOrDefault(dataset.lineTension, lineElementOptions.tension),\n\t\t\t\t\tbackgroundColor: custom.backgroundColor ? custom.backgroundColor : (dataset.backgroundColor || lineElementOptions.backgroundColor),\n\t\t\t\t\tborderWidth: custom.borderWidth ? custom.borderWidth : (dataset.borderWidth || lineElementOptions.borderWidth),\n\t\t\t\t\tborderColor: custom.borderColor ? custom.borderColor : (dataset.borderColor || lineElementOptions.borderColor),\n\t\t\t\t\tborderCapStyle: custom.borderCapStyle ? custom.borderCapStyle : (dataset.borderCapStyle || lineElementOptions.borderCapStyle),\n\t\t\t\t\tborderDash: custom.borderDash ? custom.borderDash : (dataset.borderDash || lineElementOptions.borderDash),\n\t\t\t\t\tborderDashOffset: custom.borderDashOffset ? custom.borderDashOffset : (dataset.borderDashOffset || lineElementOptions.borderDashOffset),\n\t\t\t\t\tborderJoinStyle: custom.borderJoinStyle ? custom.borderJoinStyle : (dataset.borderJoinStyle || lineElementOptions.borderJoinStyle),\n\t\t\t\t\tfill: custom.fill ? custom.fill : (dataset.fill !== undefined ? dataset.fill : lineElementOptions.fill),\n\t\t\t\t\tsteppedLine: custom.steppedLine ? 
custom.steppedLine : helpers.valueOrDefault(dataset.steppedLine, lineElementOptions.stepped),\n\t\t\t\t\tcubicInterpolationMode: custom.cubicInterpolationMode ? custom.cubicInterpolationMode : helpers.valueOrDefault(dataset.cubicInterpolationMode, lineElementOptions.cubicInterpolationMode),\n\t\t\t\t};\n\n\t\t\t\tline.pivot();\n\t\t\t}\n\n\t\t\t// Update Points\n\t\t\tfor (i = 0, ilen = points.length; i < ilen; ++i) {\n\t\t\t\tme.updateElement(points[i], i, reset);\n\t\t\t}\n\n\t\t\tif (showLine && line._model.tension !== 0) {\n\t\t\t\tme.updateBezierControlPoints();\n\t\t\t}\n\n\t\t\t// Now pivot the point for animation\n\t\t\tfor (i = 0, ilen = points.length; i < ilen; ++i) {\n\t\t\t\tpoints[i].pivot();\n\t\t\t}\n\t\t},\n\n\t\tgetPointBackgroundColor: function(point, index) {\n\t\t\tvar backgroundColor = this.chart.options.elements.point.backgroundColor;\n\t\t\tvar dataset = this.getDataset();\n\t\t\tvar custom = point.custom || {};\n\n\t\t\tif (custom.backgroundColor) {\n\t\t\t\tbackgroundColor = custom.backgroundColor;\n\t\t\t} else if (dataset.pointBackgroundColor) {\n\t\t\t\tbackgroundColor = helpers.valueAtIndexOrDefault(dataset.pointBackgroundColor, index, backgroundColor);\n\t\t\t} else if (dataset.backgroundColor) {\n\t\t\t\tbackgroundColor = dataset.backgroundColor;\n\t\t\t}\n\n\t\t\treturn backgroundColor;\n\t\t},\n\n\t\tgetPointBorderColor: function(point, index) {\n\t\t\tvar borderColor = this.chart.options.elements.point.borderColor;\n\t\t\tvar dataset = this.getDataset();\n\t\t\tvar custom = point.custom || {};\n\n\t\t\tif (custom.borderColor) {\n\t\t\t\tborderColor = custom.borderColor;\n\t\t\t} else if (dataset.pointBorderColor) {\n\t\t\t\tborderColor = helpers.valueAtIndexOrDefault(dataset.pointBorderColor, index, borderColor);\n\t\t\t} else if (dataset.borderColor) {\n\t\t\t\tborderColor = dataset.borderColor;\n\t\t\t}\n\n\t\t\treturn borderColor;\n\t\t},\n\n\t\tgetPointBorderWidth: function(point, index) {\n\t\t\tvar borderWidth = 
this.chart.options.elements.point.borderWidth;\n\t\t\tvar dataset = this.getDataset();\n\t\t\tvar custom = point.custom || {};\n\n\t\t\tif (!isNaN(custom.borderWidth)) {\n\t\t\t\tborderWidth = custom.borderWidth;\n\t\t\t} else if (!isNaN(dataset.pointBorderWidth) || helpers.isArray(dataset.pointBorderWidth)) {\n\t\t\t\tborderWidth = helpers.valueAtIndexOrDefault(dataset.pointBorderWidth, index, borderWidth);\n\t\t\t} else if (!isNaN(dataset.borderWidth)) {\n\t\t\t\tborderWidth = dataset.borderWidth;\n\t\t\t}\n\n\t\t\treturn borderWidth;\n\t\t},\n\n\t\tupdateElement: function(point, index, reset) {\n\t\t\tvar me = this;\n\t\t\tvar meta = me.getMeta();\n\t\t\tvar custom = point.custom || {};\n\t\t\tvar dataset = me.getDataset();\n\t\t\tvar datasetIndex = me.index;\n\t\t\tvar value = dataset.data[index];\n\t\t\tvar yScale = me.getScaleForId(meta.yAxisID);\n\t\t\tvar xScale = me.getScaleForId(meta.xAxisID);\n\t\t\tvar pointOptions = me.chart.options.elements.point;\n\t\t\tvar x, y;\n\n\t\t\t// Compatibility: If the properties are defined with only the old name, use those values\n\t\t\tif ((dataset.radius !== undefined) && (dataset.pointRadius === undefined)) {\n\t\t\t\tdataset.pointRadius = dataset.radius;\n\t\t\t}\n\t\t\tif ((dataset.hitRadius !== undefined) && (dataset.pointHitRadius === undefined)) {\n\t\t\t\tdataset.pointHitRadius = dataset.hitRadius;\n\t\t\t}\n\n\t\t\tx = xScale.getPixelForValue(typeof value === 'object' ? value : NaN, index, datasetIndex);\n\t\t\ty = reset ? 
yScale.getBasePixel() : me.calculatePointY(value, index, datasetIndex);\n\n\t\t\t// Utility\n\t\t\tpoint._xScale = xScale;\n\t\t\tpoint._yScale = yScale;\n\t\t\tpoint._datasetIndex = datasetIndex;\n\t\t\tpoint._index = index;\n\n\t\t\t// Desired view properties\n\t\t\tpoint._model = {\n\t\t\t\tx: x,\n\t\t\t\ty: y,\n\t\t\t\tskip: custom.skip || isNaN(x) || isNaN(y),\n\t\t\t\t// Appearance\n\t\t\t\tradius: custom.radius || helpers.valueAtIndexOrDefault(dataset.pointRadius, index, pointOptions.radius),\n\t\t\t\tpointStyle: custom.pointStyle || helpers.valueAtIndexOrDefault(dataset.pointStyle, index, pointOptions.pointStyle),\n\t\t\t\tbackgroundColor: me.getPointBackgroundColor(point, index),\n\t\t\t\tborderColor: me.getPointBorderColor(point, index),\n\t\t\t\tborderWidth: me.getPointBorderWidth(point, index),\n\t\t\t\ttension: meta.dataset._model ? meta.dataset._model.tension : 0,\n\t\t\t\tsteppedLine: meta.dataset._model ? meta.dataset._model.steppedLine : false,\n\t\t\t\t// Tooltip\n\t\t\t\thitRadius: custom.hitRadius || helpers.valueAtIndexOrDefault(dataset.pointHitRadius, index, pointOptions.hitRadius)\n\t\t\t};\n\t\t},\n\n\t\tcalculatePointY: function(value, index, datasetIndex) {\n\t\t\tvar me = this;\n\t\t\tvar chart = me.chart;\n\t\t\tvar meta = me.getMeta();\n\t\t\tvar yScale = me.getScaleForId(meta.yAxisID);\n\t\t\tvar sumPos = 0;\n\t\t\tvar sumNeg = 0;\n\t\t\tvar i, ds, dsMeta;\n\n\t\t\tif (yScale.options.stacked) {\n\t\t\t\tfor (i = 0; i < datasetIndex; i++) {\n\t\t\t\t\tds = chart.data.datasets[i];\n\t\t\t\t\tdsMeta = chart.getDatasetMeta(i);\n\t\t\t\t\tif (dsMeta.type === 'line' && dsMeta.yAxisID === yScale.id && chart.isDatasetVisible(i)) {\n\t\t\t\t\t\tvar stackedRightValue = Number(yScale.getRightValue(ds.data[index]));\n\t\t\t\t\t\tif (stackedRightValue < 0) {\n\t\t\t\t\t\t\tsumNeg += stackedRightValue || 0;\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tsumPos += stackedRightValue || 0;\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tvar rightValue = 
Number(yScale.getRightValue(value));\n\t\t\t\tif (rightValue < 0) {\n\t\t\t\t\treturn yScale.getPixelForValue(sumNeg + rightValue);\n\t\t\t\t}\n\t\t\t\treturn yScale.getPixelForValue(sumPos + rightValue);\n\t\t\t}\n\n\t\t\treturn yScale.getPixelForValue(value);\n\t\t},\n\n\t\tupdateBezierControlPoints: function() {\n\t\t\tvar me = this;\n\t\t\tvar meta = me.getMeta();\n\t\t\tvar area = me.chart.chartArea;\n\t\t\tvar points = (meta.data || []);\n\t\t\tvar i, ilen, point, model, controlPoints;\n\n\t\t\t// Only consider points that are drawn in case the spanGaps option is used\n\t\t\tif (meta.dataset._model.spanGaps) {\n\t\t\t\tpoints = points.filter(function(pt) {\n\t\t\t\t\treturn !pt._model.skip;\n\t\t\t\t});\n\t\t\t}\n\n\t\t\tfunction capControlPoint(pt, min, max) {\n\t\t\t\treturn Math.max(Math.min(pt, max), min);\n\t\t\t}\n\n\t\t\tif (meta.dataset._model.cubicInterpolationMode === 'monotone') {\n\t\t\t\thelpers.splineCurveMonotone(points);\n\t\t\t} else {\n\t\t\t\tfor (i = 0, ilen = points.length; i < ilen; ++i) {\n\t\t\t\t\tpoint = points[i];\n\t\t\t\t\tmodel = point._model;\n\t\t\t\t\tcontrolPoints = helpers.splineCurve(\n\t\t\t\t\t\thelpers.previousItem(points, i)._model,\n\t\t\t\t\t\tmodel,\n\t\t\t\t\t\thelpers.nextItem(points, i)._model,\n\t\t\t\t\t\tmeta.dataset._model.tension\n\t\t\t\t\t);\n\t\t\t\t\tmodel.controlPointPreviousX = controlPoints.previous.x;\n\t\t\t\t\tmodel.controlPointPreviousY = controlPoints.previous.y;\n\t\t\t\t\tmodel.controlPointNextX = controlPoints.next.x;\n\t\t\t\t\tmodel.controlPointNextY = controlPoints.next.y;\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif (me.chart.options.elements.line.capBezierPoints) {\n\t\t\t\tfor (i = 0, ilen = points.length; i < ilen; ++i) {\n\t\t\t\t\tmodel = points[i]._model;\n\t\t\t\t\tmodel.controlPointPreviousX = capControlPoint(model.controlPointPreviousX, area.left, area.right);\n\t\t\t\t\tmodel.controlPointPreviousY = capControlPoint(model.controlPointPreviousY, area.top, 
area.bottom);\n\t\t\t\t\tmodel.controlPointNextX = capControlPoint(model.controlPointNextX, area.left, area.right);\n\t\t\t\t\tmodel.controlPointNextY = capControlPoint(model.controlPointNextY, area.top, area.bottom);\n\t\t\t\t}\n\t\t\t}\n\t\t},\n\n\t\tdraw: function() {\n\t\t\tvar me = this;\n\t\t\tvar chart = me.chart;\n\t\t\tvar meta = me.getMeta();\n\t\t\tvar points = meta.data || [];\n\t\t\tvar area = chart.chartArea;\n\t\t\tvar ilen = points.length;\n\t\t\tvar i = 0;\n\n\t\t\thelpers.canvas.clipArea(chart.ctx, area);\n\n\t\t\tif (lineEnabled(me.getDataset(), chart.options)) {\n\t\t\t\tmeta.dataset.draw();\n\t\t\t}\n\n\t\t\thelpers.canvas.unclipArea(chart.ctx);\n\n\t\t\t// Draw the points\n\t\t\tfor (; i < ilen; ++i) {\n\t\t\t\tpoints[i].draw(area);\n\t\t\t}\n\t\t},\n\n\t\tsetHoverStyle: function(point) {\n\t\t\t// Point\n\t\t\tvar dataset = this.chart.data.datasets[point._datasetIndex];\n\t\t\tvar index = point._index;\n\t\t\tvar custom = point.custom || {};\n\t\t\tvar model = point._model;\n\n\t\t\tmodel.radius = custom.hoverRadius || helpers.valueAtIndexOrDefault(dataset.pointHoverRadius, index, this.chart.options.elements.point.hoverRadius);\n\t\t\tmodel.backgroundColor = custom.hoverBackgroundColor || helpers.valueAtIndexOrDefault(dataset.pointHoverBackgroundColor, index, helpers.getHoverColor(model.backgroundColor));\n\t\t\tmodel.borderColor = custom.hoverBorderColor || helpers.valueAtIndexOrDefault(dataset.pointHoverBorderColor, index, helpers.getHoverColor(model.borderColor));\n\t\t\tmodel.borderWidth = custom.hoverBorderWidth || helpers.valueAtIndexOrDefault(dataset.pointHoverBorderWidth, index, model.borderWidth);\n\t\t},\n\n\t\tremoveHoverStyle: function(point) {\n\t\t\tvar me = this;\n\t\t\tvar dataset = me.chart.data.datasets[point._datasetIndex];\n\t\t\tvar index = point._index;\n\t\t\tvar custom = point.custom || {};\n\t\t\tvar model = point._model;\n\n\t\t\t// Compatibility: If the properties are defined with only the old name, use those 
values\n\t\t\tif ((dataset.radius !== undefined) && (dataset.pointRadius === undefined)) {\n\t\t\t\tdataset.pointRadius = dataset.radius;\n\t\t\t}\n\n\t\t\tmodel.radius = custom.radius || helpers.valueAtIndexOrDefault(dataset.pointRadius, index, me.chart.options.elements.point.radius);\n\t\t\tmodel.backgroundColor = me.getPointBackgroundColor(point, index);\n\t\t\tmodel.borderColor = me.getPointBorderColor(point, index);\n\t\t\tmodel.borderWidth = me.getPointBorderWidth(point, index);\n\t\t}\n\t});\n};\n\n},{\"25\":25,\"40\":40,\"45\":45}],19:[function(require,module,exports){\n'use strict';\n\nvar defaults = require(25);\nvar elements = require(40);\nvar helpers = require(45);\n\ndefaults._set('polarArea', {\n\tscale: {\n\t\ttype: 'radialLinear',\n\t\tangleLines: {\n\t\t\tdisplay: false\n\t\t},\n\t\tgridLines: {\n\t\t\tcircular: true\n\t\t},\n\t\tpointLabels: {\n\t\t\tdisplay: false\n\t\t},\n\t\tticks: {\n\t\t\tbeginAtZero: true\n\t\t}\n\t},\n\n\t// Boolean - Whether to animate the rotation of the chart\n\tanimation: {\n\t\tanimateRotate: true,\n\t\tanimateScale: true\n\t},\n\n\tstartAngle: -0.5 * Math.PI,\n\tlegendCallback: function(chart) {\n\t\tvar text = [];\n\t\ttext.push('<ul class=\"' + chart.id + '-legend\">');\n\n\t\tvar data = chart.data;\n\t\tvar datasets = data.datasets;\n\t\tvar labels = data.labels;\n\n\t\tif (datasets.length) {\n\t\t\tfor (var i = 0; i < datasets[0].data.length; ++i) {\n\t\t\t\ttext.push('<li><span style=\"background-color:' + datasets[0].backgroundColor[i] + '\"></span>');\n\t\t\t\tif (labels[i]) {\n\t\t\t\t\ttext.push(labels[i]);\n\t\t\t\t}\n\t\t\t\ttext.push('</li>');\n\t\t\t}\n\t\t}\n\n\t\ttext.push('</ul>');\n\t\treturn text.join('');\n\t},\n\tlegend: {\n\t\tlabels: {\n\t\t\tgenerateLabels: function(chart) {\n\t\t\t\tvar data = chart.data;\n\t\t\t\tif (data.labels.length && data.datasets.length) {\n\t\t\t\t\treturn data.labels.map(function(label, i) {\n\t\t\t\t\t\tvar meta = chart.getDatasetMeta(0);\n\t\t\t\t\t\tvar ds = 
data.datasets[0];\n\t\t\t\t\t\tvar arc = meta.data[i];\n\t\t\t\t\t\tvar custom = arc.custom || {};\n\t\t\t\t\t\tvar valueAtIndexOrDefault = helpers.valueAtIndexOrDefault;\n\t\t\t\t\t\tvar arcOpts = chart.options.elements.arc;\n\t\t\t\t\t\tvar fill = custom.backgroundColor ? custom.backgroundColor : valueAtIndexOrDefault(ds.backgroundColor, i, arcOpts.backgroundColor);\n\t\t\t\t\t\tvar stroke = custom.borderColor ? custom.borderColor : valueAtIndexOrDefault(ds.borderColor, i, arcOpts.borderColor);\n\t\t\t\t\t\tvar bw = custom.borderWidth ? custom.borderWidth : valueAtIndexOrDefault(ds.borderWidth, i, arcOpts.borderWidth);\n\n\t\t\t\t\t\treturn {\n\t\t\t\t\t\t\ttext: label,\n\t\t\t\t\t\t\tfillStyle: fill,\n\t\t\t\t\t\t\tstrokeStyle: stroke,\n\t\t\t\t\t\t\tlineWidth: bw,\n\t\t\t\t\t\t\thidden: isNaN(ds.data[i]) || meta.data[i].hidden,\n\n\t\t\t\t\t\t\t// Extra data used for toggling the correct item\n\t\t\t\t\t\t\tindex: i\n\t\t\t\t\t\t};\n\t\t\t\t\t});\n\t\t\t\t}\n\t\t\t\treturn [];\n\t\t\t}\n\t\t},\n\n\t\tonClick: function(e, legendItem) {\n\t\t\tvar index = legendItem.index;\n\t\t\tvar chart = this.chart;\n\t\t\tvar i, ilen, meta;\n\n\t\t\tfor (i = 0, ilen = (chart.data.datasets || []).length; i < ilen; ++i) {\n\t\t\t\tmeta = chart.getDatasetMeta(i);\n\t\t\t\tmeta.data[index].hidden = !meta.data[index].hidden;\n\t\t\t}\n\n\t\t\tchart.update();\n\t\t}\n\t},\n\n\t// Need to override these to give a nice default\n\ttooltips: {\n\t\tcallbacks: {\n\t\t\ttitle: function() {\n\t\t\t\treturn '';\n\t\t\t},\n\t\t\tlabel: function(item, data) {\n\t\t\t\treturn data.labels[item.index] + ': ' + item.yLabel;\n\t\t\t}\n\t\t}\n\t}\n});\n\nmodule.exports = function(Chart) {\n\n\tChart.controllers.polarArea = Chart.DatasetController.extend({\n\n\t\tdataElementType: elements.Arc,\n\n\t\tlinkScales: helpers.noop,\n\n\t\tupdate: function(reset) {\n\t\t\tvar me = this;\n\t\t\tvar chart = me.chart;\n\t\t\tvar chartArea = chart.chartArea;\n\t\t\tvar meta = me.getMeta();\n\t\t\tvar opts = 
chart.options;\n\t\t\tvar arcOpts = opts.elements.arc;\n\t\t\tvar minSize = Math.min(chartArea.right - chartArea.left, chartArea.bottom - chartArea.top);\n\t\t\tchart.outerRadius = Math.max((minSize - arcOpts.borderWidth / 2) / 2, 0);\n\t\t\tchart.innerRadius = Math.max(opts.cutoutPercentage ? (chart.outerRadius / 100) * (opts.cutoutPercentage) : 1, 0);\n\t\t\tchart.radiusLength = (chart.outerRadius - chart.innerRadius) / chart.getVisibleDatasetCount();\n\n\t\t\tme.outerRadius = chart.outerRadius - (chart.radiusLength * me.index);\n\t\t\tme.innerRadius = me.outerRadius - chart.radiusLength;\n\n\t\t\tmeta.count = me.countVisibleElements();\n\n\t\t\thelpers.each(meta.data, function(arc, index) {\n\t\t\t\tme.updateElement(arc, index, reset);\n\t\t\t});\n\t\t},\n\n\t\tupdateElement: function(arc, index, reset) {\n\t\t\tvar me = this;\n\t\t\tvar chart = me.chart;\n\t\t\tvar dataset = me.getDataset();\n\t\t\tvar opts = chart.options;\n\t\t\tvar animationOpts = opts.animation;\n\t\t\tvar scale = chart.scale;\n\t\t\tvar labels = chart.data.labels;\n\n\t\t\tvar circumference = me.calculateCircumference(dataset.data[index]);\n\t\t\tvar centerX = scale.xCenter;\n\t\t\tvar centerY = scale.yCenter;\n\n\t\t\t// If there is NaN data before us, we need to calculate the starting angle correctly.\n\t\t\t// We could be way more efficient here, but its unlikely that the polar area chart will have a lot of data\n\t\t\tvar visibleCount = 0;\n\t\t\tvar meta = me.getMeta();\n\t\t\tfor (var i = 0; i < index; ++i) {\n\t\t\t\tif (!isNaN(dataset.data[i]) && !meta.data[i].hidden) {\n\t\t\t\t\t++visibleCount;\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// var negHalfPI = -0.5 * Math.PI;\n\t\t\tvar datasetStartAngle = opts.startAngle;\n\t\t\tvar distance = arc.hidden ? 0 : scale.getDistanceFromCenterForValue(dataset.data[index]);\n\t\t\tvar startAngle = datasetStartAngle + (circumference * visibleCount);\n\t\t\tvar endAngle = startAngle + (arc.hidden ? 
0 : circumference);\n\n\t\t\tvar resetRadius = animationOpts.animateScale ? 0 : scale.getDistanceFromCenterForValue(dataset.data[index]);\n\n\t\t\thelpers.extend(arc, {\n\t\t\t\t// Utility\n\t\t\t\t_datasetIndex: me.index,\n\t\t\t\t_index: index,\n\t\t\t\t_scale: scale,\n\n\t\t\t\t// Desired view properties\n\t\t\t\t_model: {\n\t\t\t\t\tx: centerX,\n\t\t\t\t\ty: centerY,\n\t\t\t\t\tinnerRadius: 0,\n\t\t\t\t\touterRadius: reset ? resetRadius : distance,\n\t\t\t\t\tstartAngle: reset && animationOpts.animateRotate ? datasetStartAngle : startAngle,\n\t\t\t\t\tendAngle: reset && animationOpts.animateRotate ? datasetStartAngle : endAngle,\n\t\t\t\t\tlabel: helpers.valueAtIndexOrDefault(labels, index, labels[index])\n\t\t\t\t}\n\t\t\t});\n\n\t\t\t// Apply border and fill style\n\t\t\tme.removeHoverStyle(arc);\n\n\t\t\tarc.pivot();\n\t\t},\n\n\t\tremoveHoverStyle: function(arc) {\n\t\t\tChart.DatasetController.prototype.removeHoverStyle.call(this, arc, this.chart.options.elements.arc);\n\t\t},\n\n\t\tcountVisibleElements: function() {\n\t\t\tvar dataset = this.getDataset();\n\t\t\tvar meta = this.getMeta();\n\t\t\tvar count = 0;\n\n\t\t\thelpers.each(meta.data, function(element, index) {\n\t\t\t\tif (!isNaN(dataset.data[index]) && !element.hidden) {\n\t\t\t\t\tcount++;\n\t\t\t\t}\n\t\t\t});\n\n\t\t\treturn count;\n\t\t},\n\n\t\tcalculateCircumference: function(value) {\n\t\t\tvar count = this.getMeta().count;\n\t\t\tif (count > 0 && !isNaN(value)) {\n\t\t\t\treturn (2 * Math.PI) / count;\n\t\t\t}\n\t\t\treturn 0;\n\t\t}\n\t});\n};\n\n},{\"25\":25,\"40\":40,\"45\":45}],20:[function(require,module,exports){\n'use strict';\n\nvar defaults = require(25);\nvar elements = require(40);\nvar helpers = require(45);\n\ndefaults._set('radar', {\n\tscale: {\n\t\ttype: 'radialLinear'\n\t},\n\telements: {\n\t\tline: {\n\t\t\ttension: 0 // no bezier in radar\n\t\t}\n\t}\n});\n\nmodule.exports = function(Chart) {\n\n\tChart.controllers.radar = 
Chart.DatasetController.extend({\n\n\t\tdatasetElementType: elements.Line,\n\n\t\tdataElementType: elements.Point,\n\n\t\tlinkScales: helpers.noop,\n\n\t\tupdate: function(reset) {\n\t\t\tvar me = this;\n\t\t\tvar meta = me.getMeta();\n\t\t\tvar line = meta.dataset;\n\t\t\tvar points = meta.data;\n\t\t\tvar custom = line.custom || {};\n\t\t\tvar dataset = me.getDataset();\n\t\t\tvar lineElementOptions = me.chart.options.elements.line;\n\t\t\tvar scale = me.chart.scale;\n\n\t\t\t// Compatibility: If the properties are defined with only the old name, use those values\n\t\t\tif ((dataset.tension !== undefined) && (dataset.lineTension === undefined)) {\n\t\t\t\tdataset.lineTension = dataset.tension;\n\t\t\t}\n\n\t\t\thelpers.extend(meta.dataset, {\n\t\t\t\t// Utility\n\t\t\t\t_datasetIndex: me.index,\n\t\t\t\t_scale: scale,\n\t\t\t\t// Data\n\t\t\t\t_children: points,\n\t\t\t\t_loop: true,\n\t\t\t\t// Model\n\t\t\t\t_model: {\n\t\t\t\t\t// Appearance\n\t\t\t\t\ttension: custom.tension ? custom.tension : helpers.valueOrDefault(dataset.lineTension, lineElementOptions.tension),\n\t\t\t\t\tbackgroundColor: custom.backgroundColor ? custom.backgroundColor : (dataset.backgroundColor || lineElementOptions.backgroundColor),\n\t\t\t\t\tborderWidth: custom.borderWidth ? custom.borderWidth : (dataset.borderWidth || lineElementOptions.borderWidth),\n\t\t\t\t\tborderColor: custom.borderColor ? custom.borderColor : (dataset.borderColor || lineElementOptions.borderColor),\n\t\t\t\t\tfill: custom.fill ? custom.fill : (dataset.fill !== undefined ? dataset.fill : lineElementOptions.fill),\n\t\t\t\t\tborderCapStyle: custom.borderCapStyle ? custom.borderCapStyle : (dataset.borderCapStyle || lineElementOptions.borderCapStyle),\n\t\t\t\t\tborderDash: custom.borderDash ? custom.borderDash : (dataset.borderDash || lineElementOptions.borderDash),\n\t\t\t\t\tborderDashOffset: custom.borderDashOffset ? 
custom.borderDashOffset : (dataset.borderDashOffset || lineElementOptions.borderDashOffset),\n\t\t\t\t\tborderJoinStyle: custom.borderJoinStyle ? custom.borderJoinStyle : (dataset.borderJoinStyle || lineElementOptions.borderJoinStyle),\n\t\t\t\t}\n\t\t\t});\n\n\t\t\tmeta.dataset.pivot();\n\n\t\t\t// Update Points\n\t\t\thelpers.each(points, function(point, index) {\n\t\t\t\tme.updateElement(point, index, reset);\n\t\t\t}, me);\n\n\t\t\t// Update bezier control points\n\t\t\tme.updateBezierControlPoints();\n\t\t},\n\t\tupdateElement: function(point, index, reset) {\n\t\t\tvar me = this;\n\t\t\tvar custom = point.custom || {};\n\t\t\tvar dataset = me.getDataset();\n\t\t\tvar scale = me.chart.scale;\n\t\t\tvar pointElementOptions = me.chart.options.elements.point;\n\t\t\tvar pointPosition = scale.getPointPositionForValue(index, dataset.data[index]);\n\n\t\t\t// Compatibility: If the properties are defined with only the old name, use those values\n\t\t\tif ((dataset.radius !== undefined) && (dataset.pointRadius === undefined)) {\n\t\t\t\tdataset.pointRadius = dataset.radius;\n\t\t\t}\n\t\t\tif ((dataset.hitRadius !== undefined) && (dataset.pointHitRadius === undefined)) {\n\t\t\t\tdataset.pointHitRadius = dataset.hitRadius;\n\t\t\t}\n\n\t\t\thelpers.extend(point, {\n\t\t\t\t// Utility\n\t\t\t\t_datasetIndex: me.index,\n\t\t\t\t_index: index,\n\t\t\t\t_scale: scale,\n\n\t\t\t\t// Desired view properties\n\t\t\t\t_model: {\n\t\t\t\t\tx: reset ? scale.xCenter : pointPosition.x, // value not used in dataset scale, but we want a consistent API between scales\n\t\t\t\t\ty: reset ? scale.yCenter : pointPosition.y,\n\n\t\t\t\t\t// Appearance\n\t\t\t\t\ttension: custom.tension ? custom.tension : helpers.valueOrDefault(dataset.lineTension, me.chart.options.elements.line.tension),\n\t\t\t\t\tradius: custom.radius ? custom.radius : helpers.valueAtIndexOrDefault(dataset.pointRadius, index, pointElementOptions.radius),\n\t\t\t\t\tbackgroundColor: custom.backgroundColor ? 
custom.backgroundColor : helpers.valueAtIndexOrDefault(dataset.pointBackgroundColor, index, pointElementOptions.backgroundColor),\n\t\t\t\t\tborderColor: custom.borderColor ? custom.borderColor : helpers.valueAtIndexOrDefault(dataset.pointBorderColor, index, pointElementOptions.borderColor),\n\t\t\t\t\tborderWidth: custom.borderWidth ? custom.borderWidth : helpers.valueAtIndexOrDefault(dataset.pointBorderWidth, index, pointElementOptions.borderWidth),\n\t\t\t\t\tpointStyle: custom.pointStyle ? custom.pointStyle : helpers.valueAtIndexOrDefault(dataset.pointStyle, index, pointElementOptions.pointStyle),\n\n\t\t\t\t\t// Tooltip\n\t\t\t\t\thitRadius: custom.hitRadius ? custom.hitRadius : helpers.valueAtIndexOrDefault(dataset.pointHitRadius, index, pointElementOptions.hitRadius)\n\t\t\t\t}\n\t\t\t});\n\n\t\t\tpoint._model.skip = custom.skip ? custom.skip : (isNaN(point._model.x) || isNaN(point._model.y));\n\t\t},\n\t\tupdateBezierControlPoints: function() {\n\t\t\tvar chartArea = this.chart.chartArea;\n\t\t\tvar meta = this.getMeta();\n\n\t\t\thelpers.each(meta.data, function(point, index) {\n\t\t\t\tvar model = point._model;\n\t\t\t\tvar controlPoints = helpers.splineCurve(\n\t\t\t\t\thelpers.previousItem(meta.data, index, true)._model,\n\t\t\t\t\tmodel,\n\t\t\t\t\thelpers.nextItem(meta.data, index, true)._model,\n\t\t\t\t\tmodel.tension\n\t\t\t\t);\n\n\t\t\t\t// Prevent the bezier going outside of the bounds of the graph\n\t\t\t\tmodel.controlPointPreviousX = Math.max(Math.min(controlPoints.previous.x, chartArea.right), chartArea.left);\n\t\t\t\tmodel.controlPointPreviousY = Math.max(Math.min(controlPoints.previous.y, chartArea.bottom), chartArea.top);\n\n\t\t\t\tmodel.controlPointNextX = Math.max(Math.min(controlPoints.next.x, chartArea.right), chartArea.left);\n\t\t\t\tmodel.controlPointNextY = Math.max(Math.min(controlPoints.next.y, chartArea.bottom), chartArea.top);\n\n\t\t\t\t// Now pivot the point for 
animation\n\t\t\t\tpoint.pivot();\n\t\t\t});\n\t\t},\n\n\t\tsetHoverStyle: function(point) {\n\t\t\t// Point\n\t\t\tvar dataset = this.chart.data.datasets[point._datasetIndex];\n\t\t\tvar custom = point.custom || {};\n\t\t\tvar index = point._index;\n\t\t\tvar model = point._model;\n\n\t\t\tmodel.radius = custom.hoverRadius ? custom.hoverRadius : helpers.valueAtIndexOrDefault(dataset.pointHoverRadius, index, this.chart.options.elements.point.hoverRadius);\n\t\t\tmodel.backgroundColor = custom.hoverBackgroundColor ? custom.hoverBackgroundColor : helpers.valueAtIndexOrDefault(dataset.pointHoverBackgroundColor, index, helpers.getHoverColor(model.backgroundColor));\n\t\t\tmodel.borderColor = custom.hoverBorderColor ? custom.hoverBorderColor : helpers.valueAtIndexOrDefault(dataset.pointHoverBorderColor, index, helpers.getHoverColor(model.borderColor));\n\t\t\tmodel.borderWidth = custom.hoverBorderWidth ? custom.hoverBorderWidth : helpers.valueAtIndexOrDefault(dataset.pointHoverBorderWidth, index, model.borderWidth);\n\t\t},\n\n\t\tremoveHoverStyle: function(point) {\n\t\t\tvar dataset = this.chart.data.datasets[point._datasetIndex];\n\t\t\tvar custom = point.custom || {};\n\t\t\tvar index = point._index;\n\t\t\tvar model = point._model;\n\t\t\tvar pointElementOptions = this.chart.options.elements.point;\n\n\t\t\tmodel.radius = custom.radius ? custom.radius : helpers.valueAtIndexOrDefault(dataset.pointRadius, index, pointElementOptions.radius);\n\t\t\tmodel.backgroundColor = custom.backgroundColor ? custom.backgroundColor : helpers.valueAtIndexOrDefault(dataset.pointBackgroundColor, index, pointElementOptions.backgroundColor);\n\t\t\tmodel.borderColor = custom.borderColor ? custom.borderColor : helpers.valueAtIndexOrDefault(dataset.pointBorderColor, index, pointElementOptions.borderColor);\n\t\t\tmodel.borderWidth = custom.borderWidth ? 
custom.borderWidth : helpers.valueAtIndexOrDefault(dataset.pointBorderWidth, index, pointElementOptions.borderWidth);\n\t\t}\n\t});\n};\n\n},{\"25\":25,\"40\":40,\"45\":45}],21:[function(require,module,exports){\n'use strict';\n\nvar defaults = require(25);\n\ndefaults._set('scatter', {\n\thover: {\n\t\tmode: 'single'\n\t},\n\n\tscales: {\n\t\txAxes: [{\n\t\t\tid: 'x-axis-1',    // need an ID so datasets can reference the scale\n\t\t\ttype: 'linear',    // scatter should not use a category axis\n\t\t\tposition: 'bottom'\n\t\t}],\n\t\tyAxes: [{\n\t\t\tid: 'y-axis-1',\n\t\t\ttype: 'linear',\n\t\t\tposition: 'left'\n\t\t}]\n\t},\n\n\tshowLines: false,\n\n\ttooltips: {\n\t\tcallbacks: {\n\t\t\ttitle: function() {\n\t\t\t\treturn '';     // doesn't make sense for scatter since data are formatted as a point\n\t\t\t},\n\t\t\tlabel: function(item) {\n\t\t\t\treturn '(' + item.xLabel + ', ' + item.yLabel + ')';\n\t\t\t}\n\t\t}\n\t}\n});\n\nmodule.exports = function(Chart) {\n\n\t// Scatter charts use line controllers\n\tChart.controllers.scatter = Chart.controllers.line;\n\n};\n\n},{\"25\":25}],22:[function(require,module,exports){\n/* global window: false */\n'use strict';\n\nvar defaults = require(25);\nvar Element = require(26);\nvar helpers = require(45);\n\ndefaults._set('global', {\n\tanimation: {\n\t\tduration: 1000,\n\t\teasing: 'easeOutQuart',\n\t\tonProgress: helpers.noop,\n\t\tonComplete: helpers.noop\n\t}\n});\n\nmodule.exports = function(Chart) {\n\n\tChart.Animation = Element.extend({\n\t\tchart: null, // the animation associated chart instance\n\t\tcurrentStep: 0, // the current animation step\n\t\tnumSteps: 60, // default number of steps\n\t\teasing: '', // the easing to use for this animation\n\t\trender: null, // render function used by the animation service\n\n\t\tonAnimationProgress: null, // user specified callback to fire on each step of the animation\n\t\tonAnimationComplete: null, // user specified callback to fire when the animation 
finishes\n\t});\n\n\tChart.animationService = {\n\t\tframeDuration: 17,\n\t\tanimations: [],\n\t\tdropFrames: 0,\n\t\trequest: null,\n\n\t\t/**\n\t\t * @param {Chart} chart - The chart to animate.\n\t\t * @param {Chart.Animation} animation - The animation that we will animate.\n\t\t * @param {Number} duration - The animation duration in ms.\n\t\t * @param {Boolean} lazy - if true, the chart is not marked as animating to enable more responsive interactions\n\t\t */\n\t\taddAnimation: function(chart, animation, duration, lazy) {\n\t\t\tvar animations = this.animations;\n\t\t\tvar i, ilen;\n\n\t\t\tanimation.chart = chart;\n\n\t\t\tif (!lazy) {\n\t\t\t\tchart.animating = true;\n\t\t\t}\n\n\t\t\tfor (i = 0, ilen = animations.length; i < ilen; ++i) {\n\t\t\t\tif (animations[i].chart === chart) {\n\t\t\t\t\tanimations[i] = animation;\n\t\t\t\t\treturn;\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tanimations.push(animation);\n\n\t\t\t// If there are no animations queued, manually kickstart a digest, for lack of a better word\n\t\t\tif (animations.length === 1) {\n\t\t\t\tthis.requestAnimationFrame();\n\t\t\t}\n\t\t},\n\n\t\tcancelAnimation: function(chart) {\n\t\t\tvar index = helpers.findIndex(this.animations, function(animation) {\n\t\t\t\treturn animation.chart === chart;\n\t\t\t});\n\n\t\t\tif (index !== -1) {\n\t\t\t\tthis.animations.splice(index, 1);\n\t\t\t\tchart.animating = false;\n\t\t\t}\n\t\t},\n\n\t\trequestAnimationFrame: function() {\n\t\t\tvar me = this;\n\t\t\tif (me.request === null) {\n\t\t\t\t// Skip animation frame requests until the active one is executed.\n\t\t\t\t// This can happen when processing mouse events, e.g. 
'mousemove'\n\t\t\t\t// and 'mouseout' events will trigger multiple renders.\n\t\t\t\tme.request = helpers.requestAnimFrame.call(window, function() {\n\t\t\t\t\tme.request = null;\n\t\t\t\t\tme.startDigest();\n\t\t\t\t});\n\t\t\t}\n\t\t},\n\n\t\t/**\n\t\t * @private\n\t\t */\n\t\tstartDigest: function() {\n\t\t\tvar me = this;\n\t\t\tvar startTime = Date.now();\n\t\t\tvar framesToDrop = 0;\n\n\t\t\tif (me.dropFrames > 1) {\n\t\t\t\tframesToDrop = Math.floor(me.dropFrames);\n\t\t\t\tme.dropFrames = me.dropFrames % 1;\n\t\t\t}\n\n\t\t\tme.advance(1 + framesToDrop);\n\n\t\t\tvar endTime = Date.now();\n\n\t\t\tme.dropFrames += (endTime - startTime) / me.frameDuration;\n\n\t\t\t// Do we have more stuff to animate?\n\t\t\tif (me.animations.length > 0) {\n\t\t\t\tme.requestAnimationFrame();\n\t\t\t}\n\t\t},\n\n\t\t/**\n\t\t * @private\n\t\t */\n\t\tadvance: function(count) {\n\t\t\tvar animations = this.animations;\n\t\t\tvar animation, chart;\n\t\t\tvar i = 0;\n\n\t\t\twhile (i < animations.length) {\n\t\t\t\tanimation = animations[i];\n\t\t\t\tchart = animation.chart;\n\n\t\t\t\tanimation.currentStep = (animation.currentStep || 0) + count;\n\t\t\t\tanimation.currentStep = Math.min(animation.currentStep, animation.numSteps);\n\n\t\t\t\thelpers.callback(animation.render, [chart, animation], chart);\n\t\t\t\thelpers.callback(animation.onAnimationProgress, [animation], chart);\n\n\t\t\t\tif (animation.currentStep >= animation.numSteps) {\n\t\t\t\t\thelpers.callback(animation.onAnimationComplete, [animation], chart);\n\t\t\t\t\tchart.animating = false;\n\t\t\t\t\tanimations.splice(i, 1);\n\t\t\t\t} else {\n\t\t\t\t\t++i;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t};\n\n\t/**\n\t * Provided for backward compatibility, use Chart.Animation instead\n\t * @prop Chart.Animation#animationObject\n\t * @deprecated since version 2.6.0\n\t * @todo remove at version 3\n\t */\n\tObject.defineProperty(Chart.Animation.prototype, 'animationObject', {\n\t\tget: function() {\n\t\t\treturn 
this;\n\t\t}\n\t});\n\n\t/**\n\t * Provided for backward compatibility, use Chart.Animation#chart instead\n\t * @prop Chart.Animation#chartInstance\n\t * @deprecated since version 2.6.0\n\t * @todo remove at version 3\n\t */\n\tObject.defineProperty(Chart.Animation.prototype, 'chartInstance', {\n\t\tget: function() {\n\t\t\treturn this.chart;\n\t\t},\n\t\tset: function(value) {\n\t\t\tthis.chart = value;\n\t\t}\n\t});\n\n};\n\n},{\"25\":25,\"26\":26,\"45\":45}],23:[function(require,module,exports){\n'use strict';\n\nvar defaults = require(25);\nvar helpers = require(45);\nvar Interaction = require(28);\nvar layouts = require(30);\nvar platform = require(48);\nvar plugins = require(31);\n\nmodule.exports = function(Chart) {\n\n\t// Create a dictionary of chart types, to allow for extension of existing types\n\tChart.types = {};\n\n\t// Store a reference to each instance - allowing us to globally resize chart instances on window resize.\n\t// Destroy method on the chart will remove the instance of the chart from this reference.\n\tChart.instances = {};\n\n\t// Controllers available for dataset visualization eg. 
bar, line, slice, etc.\n\tChart.controllers = {};\n\n\t/**\n\t * Initializes the given config with global and chart default values.\n\t */\n\tfunction initConfig(config) {\n\t\tconfig = config || {};\n\n\t\t// Do NOT use configMerge() for the data object because this method merges arrays\n\t\t// and so would change references to labels and datasets, preventing data updates.\n\t\tvar data = config.data = config.data || {};\n\t\tdata.datasets = data.datasets || [];\n\t\tdata.labels = data.labels || [];\n\n\t\tconfig.options = helpers.configMerge(\n\t\t\tdefaults.global,\n\t\t\tdefaults[config.type],\n\t\t\tconfig.options || {});\n\n\t\treturn config;\n\t}\n\n\t/**\n\t * Updates the config of the chart\n\t * @param chart {Chart} chart to update the options for\n\t */\n\tfunction updateConfig(chart) {\n\t\tvar newOptions = chart.options;\n\n\t\thelpers.each(chart.scales, function(scale) {\n\t\t\tlayouts.removeBox(chart, scale);\n\t\t});\n\n\t\tnewOptions = helpers.configMerge(\n\t\t\tChart.defaults.global,\n\t\t\tChart.defaults[chart.config.type],\n\t\t\tnewOptions);\n\n\t\tchart.options = chart.config.options = newOptions;\n\t\tchart.ensureScalesHaveIDs();\n\t\tchart.buildOrUpdateScales();\n\t\t// Tooltip\n\t\tchart.tooltip._options = newOptions.tooltips;\n\t\tchart.tooltip.initialize();\n\t}\n\n\tfunction positionIsHorizontal(position) {\n\t\treturn position === 'top' || position === 'bottom';\n\t}\n\n\thelpers.extend(Chart.prototype, /** @lends Chart */ {\n\t\t/**\n\t\t * @private\n\t\t */\n\t\tconstruct: function(item, config) {\n\t\t\tvar me = this;\n\n\t\t\tconfig = initConfig(config);\n\n\t\t\tvar context = platform.acquireContext(item, config);\n\t\t\tvar canvas = context && context.canvas;\n\t\t\tvar height = canvas && canvas.height;\n\t\t\tvar width = canvas && canvas.width;\n\n\t\t\tme.id = helpers.uid();\n\t\t\tme.ctx = context;\n\t\t\tme.canvas = canvas;\n\t\t\tme.config = config;\n\t\t\tme.width = width;\n\t\t\tme.height = height;\n\t\t\tme.aspectRatio = 
height ? width / height : null;\n\t\t\tme.options = config.options;\n\t\t\tme._bufferedRender = false;\n\n\t\t\t/**\n\t\t\t * Provided for backward compatibility, Chart and Chart.Controller have been merged,\n\t\t\t * the \"instance\" still need to be defined since it might be called from plugins.\n\t\t\t * @prop Chart#chart\n\t\t\t * @deprecated since version 2.6.0\n\t\t\t * @todo remove at version 3\n\t\t\t * @private\n\t\t\t */\n\t\t\tme.chart = me;\n\t\t\tme.controller = me; // chart.chart.controller #inception\n\n\t\t\t// Add the chart instance to the global namespace\n\t\t\tChart.instances[me.id] = me;\n\n\t\t\t// Define alias to the config data: `chart.data === chart.config.data`\n\t\t\tObject.defineProperty(me, 'data', {\n\t\t\t\tget: function() {\n\t\t\t\t\treturn me.config.data;\n\t\t\t\t},\n\t\t\t\tset: function(value) {\n\t\t\t\t\tme.config.data = value;\n\t\t\t\t}\n\t\t\t});\n\n\t\t\tif (!context || !canvas) {\n\t\t\t\t// The given item is not a compatible context2d element, let's return before finalizing\n\t\t\t\t// the chart initialization but after setting basic chart / controller properties that\n\t\t\t\t// can help to figure out that the chart is not valid (e.g chart.canvas !== null);\n\t\t\t\t// https://github.com/chartjs/Chart.js/issues/2807\n\t\t\t\tconsole.error(\"Failed to create chart: can't acquire context from the given item\");\n\t\t\t\treturn;\n\t\t\t}\n\n\t\t\tme.initialize();\n\t\t\tme.update();\n\t\t},\n\n\t\t/**\n\t\t * @private\n\t\t */\n\t\tinitialize: function() {\n\t\t\tvar me = this;\n\n\t\t\t// Before init plugin notification\n\t\t\tplugins.notify(me, 'beforeInit');\n\n\t\t\thelpers.retinaScale(me, me.options.devicePixelRatio);\n\n\t\t\tme.bindEvents();\n\n\t\t\tif (me.options.responsive) {\n\t\t\t\t// Initial resize before chart draws (must be silent to preserve initial animations).\n\t\t\t\tme.resize(true);\n\t\t\t}\n\n\t\t\t// Make sure scales have IDs and are built before we build any 
controllers.\n\t\t\tme.ensureScalesHaveIDs();\n\t\t\tme.buildOrUpdateScales();\n\t\t\tme.initToolTip();\n\n\t\t\t// After init plugin notification\n\t\t\tplugins.notify(me, 'afterInit');\n\n\t\t\treturn me;\n\t\t},\n\n\t\tclear: function() {\n\t\t\thelpers.canvas.clear(this);\n\t\t\treturn this;\n\t\t},\n\n\t\tstop: function() {\n\t\t\t// Stops any current animation loop occurring\n\t\t\tChart.animationService.cancelAnimation(this);\n\t\t\treturn this;\n\t\t},\n\n\t\tresize: function(silent) {\n\t\t\tvar me = this;\n\t\t\tvar options = me.options;\n\t\t\tvar canvas = me.canvas;\n\t\t\tvar aspectRatio = (options.maintainAspectRatio && me.aspectRatio) || null;\n\n\t\t\t// the canvas render width and height will be casted to integers so make sure that\n\t\t\t// the canvas display style uses the same integer values to avoid blurring effect.\n\n\t\t\t// Set to 0 instead of canvas.size because the size defaults to 300x150 if the element is collased\n\t\t\tvar newWidth = Math.max(0, Math.floor(helpers.getMaximumWidth(canvas)));\n\t\t\tvar newHeight = Math.max(0, Math.floor(aspectRatio ? 
newWidth / aspectRatio : helpers.getMaximumHeight(canvas)));\n\n\t\t\tif (me.width === newWidth && me.height === newHeight) {\n\t\t\t\treturn;\n\t\t\t}\n\n\t\t\tcanvas.width = me.width = newWidth;\n\t\t\tcanvas.height = me.height = newHeight;\n\t\t\tcanvas.style.width = newWidth + 'px';\n\t\t\tcanvas.style.height = newHeight + 'px';\n\n\t\t\thelpers.retinaScale(me, options.devicePixelRatio);\n\n\t\t\tif (!silent) {\n\t\t\t\t// Notify any plugins about the resize\n\t\t\t\tvar newSize = {width: newWidth, height: newHeight};\n\t\t\t\tplugins.notify(me, 'resize', [newSize]);\n\n\t\t\t\t// Notify of resize\n\t\t\t\tif (me.options.onResize) {\n\t\t\t\t\tme.options.onResize(me, newSize);\n\t\t\t\t}\n\n\t\t\t\tme.stop();\n\t\t\t\tme.update(me.options.responsiveAnimationDuration);\n\t\t\t}\n\t\t},\n\n\t\tensureScalesHaveIDs: function() {\n\t\t\tvar options = this.options;\n\t\t\tvar scalesOptions = options.scales || {};\n\t\t\tvar scaleOptions = options.scale;\n\n\t\t\thelpers.each(scalesOptions.xAxes, function(xAxisOptions, index) {\n\t\t\t\txAxisOptions.id = xAxisOptions.id || ('x-axis-' + index);\n\t\t\t});\n\n\t\t\thelpers.each(scalesOptions.yAxes, function(yAxisOptions, index) {\n\t\t\t\tyAxisOptions.id = yAxisOptions.id || ('y-axis-' + index);\n\t\t\t});\n\n\t\t\tif (scaleOptions) {\n\t\t\t\tscaleOptions.id = scaleOptions.id || 'scale';\n\t\t\t}\n\t\t},\n\n\t\t/**\n\t\t * Builds a map of scale ID to scale object for future lookup.\n\t\t */\n\t\tbuildOrUpdateScales: function() {\n\t\t\tvar me = this;\n\t\t\tvar options = me.options;\n\t\t\tvar scales = me.scales || {};\n\t\t\tvar items = [];\n\t\t\tvar updated = Object.keys(scales).reduce(function(obj, id) {\n\t\t\t\tobj[id] = false;\n\t\t\t\treturn obj;\n\t\t\t}, {});\n\n\t\t\tif (options.scales) {\n\t\t\t\titems = items.concat(\n\t\t\t\t\t(options.scales.xAxes || []).map(function(xAxisOptions) {\n\t\t\t\t\t\treturn {options: xAxisOptions, dtype: 'category', dposition: 
'bottom'};\n\t\t\t\t\t}),\n\t\t\t\t\t(options.scales.yAxes || []).map(function(yAxisOptions) {\n\t\t\t\t\t\treturn {options: yAxisOptions, dtype: 'linear', dposition: 'left'};\n\t\t\t\t\t})\n\t\t\t\t);\n\t\t\t}\n\n\t\t\tif (options.scale) {\n\t\t\t\titems.push({\n\t\t\t\t\toptions: options.scale,\n\t\t\t\t\tdtype: 'radialLinear',\n\t\t\t\t\tisDefault: true,\n\t\t\t\t\tdposition: 'chartArea'\n\t\t\t\t});\n\t\t\t}\n\n\t\t\thelpers.each(items, function(item) {\n\t\t\t\tvar scaleOptions = item.options;\n\t\t\t\tvar id = scaleOptions.id;\n\t\t\t\tvar scaleType = helpers.valueOrDefault(scaleOptions.type, item.dtype);\n\n\t\t\t\tif (positionIsHorizontal(scaleOptions.position) !== positionIsHorizontal(item.dposition)) {\n\t\t\t\t\tscaleOptions.position = item.dposition;\n\t\t\t\t}\n\n\t\t\t\tupdated[id] = true;\n\t\t\t\tvar scale = null;\n\t\t\t\tif (id in scales && scales[id].type === scaleType) {\n\t\t\t\t\tscale = scales[id];\n\t\t\t\t\tscale.options = scaleOptions;\n\t\t\t\t\tscale.ctx = me.ctx;\n\t\t\t\t\tscale.chart = me;\n\t\t\t\t} else {\n\t\t\t\t\tvar scaleClass = Chart.scaleService.getScaleConstructor(scaleType);\n\t\t\t\t\tif (!scaleClass) {\n\t\t\t\t\t\treturn;\n\t\t\t\t\t}\n\t\t\t\t\tscale = new scaleClass({\n\t\t\t\t\t\tid: id,\n\t\t\t\t\t\ttype: scaleType,\n\t\t\t\t\t\toptions: scaleOptions,\n\t\t\t\t\t\tctx: me.ctx,\n\t\t\t\t\t\tchart: me\n\t\t\t\t\t});\n\t\t\t\t\tscales[scale.id] = scale;\n\t\t\t\t}\n\n\t\t\t\tscale.mergeTicksOptions();\n\n\t\t\t\t// TODO(SB): I think we should be able to remove this custom case (options.scale)\n\t\t\t\t// and consider it as a regular scale part of the \"scales\"\" map only! This would\n\t\t\t\t// make the logic easier and remove some useless? 
custom code.\n\t\t\t\tif (item.isDefault) {\n\t\t\t\t\tme.scale = scale;\n\t\t\t\t}\n\t\t\t});\n\t\t\t// clear up discarded scales\n\t\t\thelpers.each(updated, function(hasUpdated, id) {\n\t\t\t\tif (!hasUpdated) {\n\t\t\t\t\tdelete scales[id];\n\t\t\t\t}\n\t\t\t});\n\n\t\t\tme.scales = scales;\n\n\t\t\tChart.scaleService.addScalesToLayout(this);\n\t\t},\n\n\t\tbuildOrUpdateControllers: function() {\n\t\t\tvar me = this;\n\t\t\tvar types = [];\n\t\t\tvar newControllers = [];\n\n\t\t\thelpers.each(me.data.datasets, function(dataset, datasetIndex) {\n\t\t\t\tvar meta = me.getDatasetMeta(datasetIndex);\n\t\t\t\tvar type = dataset.type || me.config.type;\n\n\t\t\t\tif (meta.type && meta.type !== type) {\n\t\t\t\t\tme.destroyDatasetMeta(datasetIndex);\n\t\t\t\t\tmeta = me.getDatasetMeta(datasetIndex);\n\t\t\t\t}\n\t\t\t\tmeta.type = type;\n\n\t\t\t\ttypes.push(meta.type);\n\n\t\t\t\tif (meta.controller) {\n\t\t\t\t\tmeta.controller.updateIndex(datasetIndex);\n\t\t\t\t\tmeta.controller.linkScales();\n\t\t\t\t} else {\n\t\t\t\t\tvar ControllerClass = Chart.controllers[meta.type];\n\t\t\t\t\tif (ControllerClass === undefined) {\n\t\t\t\t\t\tthrow new Error('\"' + meta.type + '\" is not a chart type.');\n\t\t\t\t\t}\n\n\t\t\t\t\tmeta.controller = new ControllerClass(me, datasetIndex);\n\t\t\t\t\tnewControllers.push(meta.controller);\n\t\t\t\t}\n\t\t\t}, me);\n\n\t\t\treturn newControllers;\n\t\t},\n\n\t\t/**\n\t\t * Reset the elements of all datasets\n\t\t * @private\n\t\t */\n\t\tresetElements: function() {\n\t\t\tvar me = this;\n\t\t\thelpers.each(me.data.datasets, function(dataset, datasetIndex) {\n\t\t\t\tme.getDatasetMeta(datasetIndex).controller.reset();\n\t\t\t}, me);\n\t\t},\n\n\t\t/**\n\t\t* Resets the chart back to it's state before the initial animation\n\t\t*/\n\t\treset: function() {\n\t\t\tthis.resetElements();\n\t\t\tthis.tooltip.initialize();\n\t\t},\n\n\t\tupdate: function(config) {\n\t\t\tvar me = this;\n\n\t\t\tif (!config || typeof config !== 'object') 
{\n\t\t\t\t// backwards compatibility\n\t\t\t\tconfig = {\n\t\t\t\t\tduration: config,\n\t\t\t\t\tlazy: arguments[1]\n\t\t\t\t};\n\t\t\t}\n\n\t\t\tupdateConfig(me);\n\n\t\t\t// plugins options references might have change, let's invalidate the cache\n\t\t\t// https://github.com/chartjs/Chart.js/issues/5111#issuecomment-355934167\n\t\t\tplugins._invalidate(me);\n\n\t\t\tif (plugins.notify(me, 'beforeUpdate') === false) {\n\t\t\t\treturn;\n\t\t\t}\n\n\t\t\t// In case the entire data object changed\n\t\t\tme.tooltip._data = me.data;\n\n\t\t\t// Make sure dataset controllers are updated and new controllers are reset\n\t\t\tvar newControllers = me.buildOrUpdateControllers();\n\n\t\t\t// Make sure all dataset controllers have correct meta data counts\n\t\t\thelpers.each(me.data.datasets, function(dataset, datasetIndex) {\n\t\t\t\tme.getDatasetMeta(datasetIndex).controller.buildOrUpdateElements();\n\t\t\t}, me);\n\n\t\t\tme.updateLayout();\n\n\t\t\t// Can only reset the new controllers after the scales have been updated\n\t\t\tif (me.options.animation && me.options.animation.duration) {\n\t\t\t\thelpers.each(newControllers, function(controller) {\n\t\t\t\t\tcontroller.reset();\n\t\t\t\t});\n\t\t\t}\n\n\t\t\tme.updateDatasets();\n\n\t\t\t// Need to reset tooltip in case it is displayed with elements that are removed\n\t\t\t// after update.\n\t\t\tme.tooltip.initialize();\n\n\t\t\t// Last active contains items that were previously in the tooltip.\n\t\t\t// When we reset the tooltip, we need to clear it\n\t\t\tme.lastActive = [];\n\n\t\t\t// Do this before render so that any plugins that need final scale updates can use it\n\t\t\tplugins.notify(me, 'afterUpdate');\n\n\t\t\tif (me._bufferedRender) {\n\t\t\t\tme._bufferedRequest = {\n\t\t\t\t\tduration: config.duration,\n\t\t\t\t\teasing: config.easing,\n\t\t\t\t\tlazy: config.lazy\n\t\t\t\t};\n\t\t\t} else {\n\t\t\t\tme.render(config);\n\t\t\t}\n\t\t},\n\n\t\t/**\n\t\t * Updates the chart layout unless a plugin returns 
`false` to the `beforeLayout`\n\t\t * hook, in which case, plugins will not be called on `afterLayout`.\n\t\t * @private\n\t\t */\n\t\tupdateLayout: function() {\n\t\t\tvar me = this;\n\n\t\t\tif (plugins.notify(me, 'beforeLayout') === false) {\n\t\t\t\treturn;\n\t\t\t}\n\n\t\t\tlayouts.update(this, this.width, this.height);\n\n\t\t\t/**\n\t\t\t * Provided for backward compatibility, use `afterLayout` instead.\n\t\t\t * @method IPlugin#afterScaleUpdate\n\t\t\t * @deprecated since version 2.5.0\n\t\t\t * @todo remove at version 3\n\t\t\t * @private\n\t\t\t */\n\t\t\tplugins.notify(me, 'afterScaleUpdate');\n\t\t\tplugins.notify(me, 'afterLayout');\n\t\t},\n\n\t\t/**\n\t\t * Updates all datasets unless a plugin returns `false` to the `beforeDatasetsUpdate`\n\t\t * hook, in which case, plugins will not be called on `afterDatasetsUpdate`.\n\t\t * @private\n\t\t */\n\t\tupdateDatasets: function() {\n\t\t\tvar me = this;\n\n\t\t\tif (plugins.notify(me, 'beforeDatasetsUpdate') === false) {\n\t\t\t\treturn;\n\t\t\t}\n\n\t\t\tfor (var i = 0, ilen = me.data.datasets.length; i < ilen; ++i) {\n\t\t\t\tme.updateDataset(i);\n\t\t\t}\n\n\t\t\tplugins.notify(me, 'afterDatasetsUpdate');\n\t\t},\n\n\t\t/**\n\t\t * Updates dataset at index unless a plugin returns `false` to the `beforeDatasetUpdate`\n\t\t * hook, in which case, plugins will not be called on `afterDatasetUpdate`.\n\t\t * @private\n\t\t */\n\t\tupdateDataset: function(index) {\n\t\t\tvar me = this;\n\t\t\tvar meta = me.getDatasetMeta(index);\n\t\t\tvar args = {\n\t\t\t\tmeta: meta,\n\t\t\t\tindex: index\n\t\t\t};\n\n\t\t\tif (plugins.notify(me, 'beforeDatasetUpdate', [args]) === false) {\n\t\t\t\treturn;\n\t\t\t}\n\n\t\t\tmeta.controller.update();\n\n\t\t\tplugins.notify(me, 'afterDatasetUpdate', [args]);\n\t\t},\n\n\t\trender: function(config) {\n\t\t\tvar me = this;\n\n\t\t\tif (!config || typeof config !== 'object') {\n\t\t\t\t// backwards compatibility\n\t\t\t\tconfig = {\n\t\t\t\t\tduration: 
config,\n\t\t\t\t\tlazy: arguments[1]\n\t\t\t\t};\n\t\t\t}\n\n\t\t\tvar duration = config.duration;\n\t\t\tvar lazy = config.lazy;\n\n\t\t\tif (plugins.notify(me, 'beforeRender') === false) {\n\t\t\t\treturn;\n\t\t\t}\n\n\t\t\tvar animationOptions = me.options.animation;\n\t\t\tvar onComplete = function(animation) {\n\t\t\t\tplugins.notify(me, 'afterRender');\n\t\t\t\thelpers.callback(animationOptions && animationOptions.onComplete, [animation], me);\n\t\t\t};\n\n\t\t\tif (animationOptions && ((typeof duration !== 'undefined' && duration !== 0) || (typeof duration === 'undefined' && animationOptions.duration !== 0))) {\n\t\t\t\tvar animation = new Chart.Animation({\n\t\t\t\t\tnumSteps: (duration || animationOptions.duration) / 16.66, // 60 fps\n\t\t\t\t\teasing: config.easing || animationOptions.easing,\n\n\t\t\t\t\trender: function(chart, animationObject) {\n\t\t\t\t\t\tvar easingFunction = helpers.easing.effects[animationObject.easing];\n\t\t\t\t\t\tvar currentStep = animationObject.currentStep;\n\t\t\t\t\t\tvar stepDecimal = currentStep / animationObject.numSteps;\n\n\t\t\t\t\t\tchart.draw(easingFunction(stepDecimal), stepDecimal, currentStep);\n\t\t\t\t\t},\n\n\t\t\t\t\tonAnimationProgress: animationOptions.onProgress,\n\t\t\t\t\tonAnimationComplete: onComplete\n\t\t\t\t});\n\n\t\t\t\tChart.animationService.addAnimation(me, animation, duration, lazy);\n\t\t\t} else {\n\t\t\t\tme.draw();\n\n\t\t\t\t// See https://github.com/chartjs/Chart.js/issues/3781\n\t\t\t\tonComplete(new Chart.Animation({numSteps: 0, chart: me}));\n\t\t\t}\n\n\t\t\treturn me;\n\t\t},\n\n\t\tdraw: function(easingValue) {\n\t\t\tvar me = this;\n\n\t\t\tme.clear();\n\n\t\t\tif (helpers.isNullOrUndef(easingValue)) {\n\t\t\t\teasingValue = 1;\n\t\t\t}\n\n\t\t\tme.transition(easingValue);\n\n\t\t\tif (plugins.notify(me, 'beforeDraw', [easingValue]) === false) {\n\t\t\t\treturn;\n\t\t\t}\n\n\t\t\t// Draw all the scales\n\t\t\thelpers.each(me.boxes, function(box) 
{\n\t\t\t\tbox.draw(me.chartArea);\n\t\t\t}, me);\n\n\t\t\tif (me.scale) {\n\t\t\t\tme.scale.draw();\n\t\t\t}\n\n\t\t\tme.drawDatasets(easingValue);\n\t\t\tme._drawTooltip(easingValue);\n\n\t\t\tplugins.notify(me, 'afterDraw', [easingValue]);\n\t\t},\n\n\t\t/**\n\t\t * @private\n\t\t */\n\t\ttransition: function(easingValue) {\n\t\t\tvar me = this;\n\n\t\t\tfor (var i = 0, ilen = (me.data.datasets || []).length; i < ilen; ++i) {\n\t\t\t\tif (me.isDatasetVisible(i)) {\n\t\t\t\t\tme.getDatasetMeta(i).controller.transition(easingValue);\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tme.tooltip.transition(easingValue);\n\t\t},\n\n\t\t/**\n\t\t * Draws all datasets unless a plugin returns `false` to the `beforeDatasetsDraw`\n\t\t * hook, in which case, plugins will not be called on `afterDatasetsDraw`.\n\t\t * @private\n\t\t */\n\t\tdrawDatasets: function(easingValue) {\n\t\t\tvar me = this;\n\n\t\t\tif (plugins.notify(me, 'beforeDatasetsDraw', [easingValue]) === false) {\n\t\t\t\treturn;\n\t\t\t}\n\n\t\t\t// Draw datasets reversed to support proper line stacking\n\t\t\tfor (var i = (me.data.datasets || []).length - 1; i >= 0; --i) {\n\t\t\t\tif (me.isDatasetVisible(i)) {\n\t\t\t\t\tme.drawDataset(i, easingValue);\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tplugins.notify(me, 'afterDatasetsDraw', [easingValue]);\n\t\t},\n\n\t\t/**\n\t\t * Draws dataset at index unless a plugin returns `false` to the `beforeDatasetDraw`\n\t\t * hook, in which case, plugins will not be called on `afterDatasetDraw`.\n\t\t * @private\n\t\t */\n\t\tdrawDataset: function(index, easingValue) {\n\t\t\tvar me = this;\n\t\t\tvar meta = me.getDatasetMeta(index);\n\t\t\tvar args = {\n\t\t\t\tmeta: meta,\n\t\t\t\tindex: index,\n\t\t\t\teasingValue: easingValue\n\t\t\t};\n\n\t\t\tif (plugins.notify(me, 'beforeDatasetDraw', [args]) === false) {\n\t\t\t\treturn;\n\t\t\t}\n\n\t\t\tmeta.controller.draw(easingValue);\n\n\t\t\tplugins.notify(me, 'afterDatasetDraw', [args]);\n\t\t},\n\n\t\t/**\n\t\t * Draws tooltip unless a plugin returns 
`false` to the `beforeTooltipDraw`\n\t\t * hook, in which case, plugins will not be called on `afterTooltipDraw`.\n\t\t * @private\n\t\t */\n\t\t_drawTooltip: function(easingValue) {\n\t\t\tvar me = this;\n\t\t\tvar tooltip = me.tooltip;\n\t\t\tvar args = {\n\t\t\t\ttooltip: tooltip,\n\t\t\t\teasingValue: easingValue\n\t\t\t};\n\n\t\t\tif (plugins.notify(me, 'beforeTooltipDraw', [args]) === false) {\n\t\t\t\treturn;\n\t\t\t}\n\n\t\t\ttooltip.draw();\n\n\t\t\tplugins.notify(me, 'afterTooltipDraw', [args]);\n\t\t},\n\n\t\t// Get the single element that was clicked on\n\t\t// @return : An object containing the dataset index and element index of the matching element. Also contains the rectangle that was draw\n\t\tgetElementAtEvent: function(e) {\n\t\t\treturn Interaction.modes.single(this, e);\n\t\t},\n\n\t\tgetElementsAtEvent: function(e) {\n\t\t\treturn Interaction.modes.label(this, e, {intersect: true});\n\t\t},\n\n\t\tgetElementsAtXAxis: function(e) {\n\t\t\treturn Interaction.modes['x-axis'](this, e, {intersect: true});\n\t\t},\n\n\t\tgetElementsAtEventForMode: function(e, mode, options) {\n\t\t\tvar method = Interaction.modes[mode];\n\t\t\tif (typeof method === 'function') {\n\t\t\t\treturn method(this, e, options);\n\t\t\t}\n\n\t\t\treturn [];\n\t\t},\n\n\t\tgetDatasetAtEvent: function(e) {\n\t\t\treturn Interaction.modes.dataset(this, e, {intersect: true});\n\t\t},\n\n\t\tgetDatasetMeta: function(datasetIndex) {\n\t\t\tvar me = this;\n\t\t\tvar dataset = me.data.datasets[datasetIndex];\n\t\t\tif (!dataset._meta) {\n\t\t\t\tdataset._meta = {};\n\t\t\t}\n\n\t\t\tvar meta = dataset._meta[me.id];\n\t\t\tif (!meta) {\n\t\t\t\tmeta = dataset._meta[me.id] = {\n\t\t\t\t\ttype: null,\n\t\t\t\t\tdata: [],\n\t\t\t\t\tdataset: null,\n\t\t\t\t\tcontroller: null,\n\t\t\t\t\thidden: null,\t\t\t// See isDatasetVisible() comment\n\t\t\t\t\txAxisID: null,\n\t\t\t\t\tyAxisID: null\n\t\t\t\t};\n\t\t\t}\n\n\t\t\treturn meta;\n\t\t},\n\n\t\tgetVisibleDatasetCount: function() 
{\n\t\t\tvar count = 0;\n\t\t\tfor (var i = 0, ilen = this.data.datasets.length; i < ilen; ++i) {\n\t\t\t\tif (this.isDatasetVisible(i)) {\n\t\t\t\t\tcount++;\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn count;\n\t\t},\n\n\t\tisDatasetVisible: function(datasetIndex) {\n\t\t\tvar meta = this.getDatasetMeta(datasetIndex);\n\n\t\t\t// meta.hidden is a per chart dataset hidden flag override with 3 states: if true or false,\n\t\t\t// the dataset.hidden value is ignored, else if null, the dataset hidden state is returned.\n\t\t\treturn typeof meta.hidden === 'boolean' ? !meta.hidden : !this.data.datasets[datasetIndex].hidden;\n\t\t},\n\n\t\tgenerateLegend: function() {\n\t\t\treturn this.options.legendCallback(this);\n\t\t},\n\n\t\t/**\n\t\t * @private\n\t\t */\n\t\tdestroyDatasetMeta: function(datasetIndex) {\n\t\t\tvar id = this.id;\n\t\t\tvar dataset = this.data.datasets[datasetIndex];\n\t\t\tvar meta = dataset._meta && dataset._meta[id];\n\n\t\t\tif (meta) {\n\t\t\t\tmeta.controller.destroy();\n\t\t\t\tdelete dataset._meta[id];\n\t\t\t}\n\t\t},\n\n\t\tdestroy: function() {\n\t\t\tvar me = this;\n\t\t\tvar canvas = me.canvas;\n\t\t\tvar i, ilen;\n\n\t\t\tme.stop();\n\n\t\t\t// dataset controllers need to cleanup associated data\n\t\t\tfor (i = 0, ilen = me.data.datasets.length; i < ilen; ++i) {\n\t\t\t\tme.destroyDatasetMeta(i);\n\t\t\t}\n\n\t\t\tif (canvas) {\n\t\t\t\tme.unbindEvents();\n\t\t\t\thelpers.canvas.clear(me);\n\t\t\t\tplatform.releaseContext(me.ctx);\n\t\t\t\tme.canvas = null;\n\t\t\t\tme.ctx = null;\n\t\t\t}\n\n\t\t\tplugins.notify(me, 'destroy');\n\n\t\t\tdelete Chart.instances[me.id];\n\t\t},\n\n\t\ttoBase64Image: function() {\n\t\t\treturn this.canvas.toDataURL.apply(this.canvas, arguments);\n\t\t},\n\n\t\tinitToolTip: function() {\n\t\t\tvar me = this;\n\t\t\tme.tooltip = new Chart.Tooltip({\n\t\t\t\t_chart: me,\n\t\t\t\t_chartInstance: me, // deprecated, backward compatibility\n\t\t\t\t_data: me.data,\n\t\t\t\t_options: me.options.tooltips\n\t\t\t}, 
me);\n\t\t},\n\n\t\t/**\n\t\t * @private\n\t\t */\n\t\tbindEvents: function() {\n\t\t\tvar me = this;\n\t\t\tvar listeners = me._listeners = {};\n\t\t\tvar listener = function() {\n\t\t\t\tme.eventHandler.apply(me, arguments);\n\t\t\t};\n\n\t\t\thelpers.each(me.options.events, function(type) {\n\t\t\t\tplatform.addEventListener(me, type, listener);\n\t\t\t\tlisteners[type] = listener;\n\t\t\t});\n\n\t\t\t// Elements used to detect size change should not be injected for non responsive charts.\n\t\t\t// See https://github.com/chartjs/Chart.js/issues/2210\n\t\t\tif (me.options.responsive) {\n\t\t\t\tlistener = function() {\n\t\t\t\t\tme.resize();\n\t\t\t\t};\n\n\t\t\t\tplatform.addEventListener(me, 'resize', listener);\n\t\t\t\tlisteners.resize = listener;\n\t\t\t}\n\t\t},\n\n\t\t/**\n\t\t * @private\n\t\t */\n\t\tunbindEvents: function() {\n\t\t\tvar me = this;\n\t\t\tvar listeners = me._listeners;\n\t\t\tif (!listeners) {\n\t\t\t\treturn;\n\t\t\t}\n\n\t\t\tdelete me._listeners;\n\t\t\thelpers.each(listeners, function(listener, type) {\n\t\t\t\tplatform.removeEventListener(me, type, listener);\n\t\t\t});\n\t\t},\n\n\t\tupdateHoverStyle: function(elements, mode, enabled) {\n\t\t\tvar method = enabled ? 
'setHoverStyle' : 'removeHoverStyle';\n\t\t\tvar element, i, ilen;\n\n\t\t\tfor (i = 0, ilen = elements.length; i < ilen; ++i) {\n\t\t\t\telement = elements[i];\n\t\t\t\tif (element) {\n\t\t\t\t\tthis.getDatasetMeta(element._datasetIndex).controller[method](element);\n\t\t\t\t}\n\t\t\t}\n\t\t},\n\n\t\t/**\n\t\t * @private\n\t\t */\n\t\teventHandler: function(e) {\n\t\t\tvar me = this;\n\t\t\tvar tooltip = me.tooltip;\n\n\t\t\tif (plugins.notify(me, 'beforeEvent', [e]) === false) {\n\t\t\t\treturn;\n\t\t\t}\n\n\t\t\t// Buffer any update calls so that renders do not occur\n\t\t\tme._bufferedRender = true;\n\t\t\tme._bufferedRequest = null;\n\n\t\t\tvar changed = me.handleEvent(e);\n\t\t\t// for smooth tooltip animations issue #4989\n\t\t\t// the tooltip should be the source of change\n\t\t\t// Animation check workaround:\n\t\t\t// tooltip._start will be null when tooltip isn't animating\n\t\t\tif (tooltip) {\n\t\t\t\tchanged = tooltip._start\n\t\t\t\t\t? tooltip.handleEvent(e)\n\t\t\t\t\t: changed | tooltip.handleEvent(e);\n\t\t\t}\n\n\t\t\tplugins.notify(me, 'afterEvent', [e]);\n\n\t\t\tvar bufferedRequest = me._bufferedRequest;\n\t\t\tif (bufferedRequest) {\n\t\t\t\t// If we have an update that was triggered, we need to do a normal render\n\t\t\t\tme.render(bufferedRequest);\n\t\t\t} else if (changed && !me.animating) {\n\t\t\t\t// If entering, leaving, or changing elements, animate the change via pivot\n\t\t\t\tme.stop();\n\n\t\t\t\t// We only need to render at this point. 
Updating will cause scales to be\n\t\t\t\t// recomputed generating flicker & using more memory than necessary.\n\t\t\t\tme.render(me.options.hover.animationDuration, true);\n\t\t\t}\n\n\t\t\tme._bufferedRender = false;\n\t\t\tme._bufferedRequest = null;\n\n\t\t\treturn me;\n\t\t},\n\n\t\t/**\n\t\t * Handle an event\n\t\t * @private\n\t\t * @param {IEvent} event the event to handle\n\t\t * @return {Boolean} true if the chart needs to re-render\n\t\t */\n\t\thandleEvent: function(e) {\n\t\t\tvar me = this;\n\t\t\tvar options = me.options || {};\n\t\t\tvar hoverOptions = options.hover;\n\t\t\tvar changed = false;\n\n\t\t\tme.lastActive = me.lastActive || [];\n\n\t\t\t// Find Active Elements for hover and tooltips\n\t\t\tif (e.type === 'mouseout') {\n\t\t\t\tme.active = [];\n\t\t\t} else {\n\t\t\t\tme.active = me.getElementsAtEventForMode(e, hoverOptions.mode, hoverOptions);\n\t\t\t}\n\n\t\t\t// Invoke onHover hook\n\t\t\t// Need to call with native event here to not break backwards compatibility\n\t\t\thelpers.callback(options.onHover || options.hover.onHover, [e.native, me.active], me);\n\n\t\t\tif (e.type === 'mouseup' || e.type === 'click') {\n\t\t\t\tif (options.onClick) {\n\t\t\t\t\t// Use e.native here for backwards compatibility\n\t\t\t\t\toptions.onClick.call(me, e.native, me.active);\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Remove styling for last active (even if it may still be active)\n\t\t\tif (me.lastActive.length) {\n\t\t\t\tme.updateHoverStyle(me.lastActive, hoverOptions.mode, false);\n\t\t\t}\n\n\t\t\t// Built in hover styling\n\t\t\tif (me.active.length && hoverOptions.mode) {\n\t\t\t\tme.updateHoverStyle(me.active, hoverOptions.mode, true);\n\t\t\t}\n\n\t\t\tchanged = !helpers.arrayEquals(me.active, me.lastActive);\n\n\t\t\t// Remember Last Actives\n\t\t\tme.lastActive = me.active;\n\n\t\t\treturn changed;\n\t\t}\n\t});\n\n\t/**\n\t * Provided for backward compatibility, use Chart instead.\n\t * @class Chart.Controller\n\t * @deprecated since version 
2.6.0\n\t * @todo remove at version 3\n\t * @private\n\t */\n\tChart.Controller = Chart;\n};\n\n},{\"25\":25,\"28\":28,\"30\":30,\"31\":31,\"45\":45,\"48\":48}],24:[function(require,module,exports){\n'use strict';\n\nvar helpers = require(45);\n\nmodule.exports = function(Chart) {\n\n\tvar arrayEvents = ['push', 'pop', 'shift', 'splice', 'unshift'];\n\n\t/**\n\t * Hooks the array methods that add or remove values ('push', pop', 'shift', 'splice',\n\t * 'unshift') and notify the listener AFTER the array has been altered. Listeners are\n\t * called on the 'onData*' callbacks (e.g. onDataPush, etc.) with same arguments.\n\t */\n\tfunction listenArrayEvents(array, listener) {\n\t\tif (array._chartjs) {\n\t\t\tarray._chartjs.listeners.push(listener);\n\t\t\treturn;\n\t\t}\n\n\t\tObject.defineProperty(array, '_chartjs', {\n\t\t\tconfigurable: true,\n\t\t\tenumerable: false,\n\t\t\tvalue: {\n\t\t\t\tlisteners: [listener]\n\t\t\t}\n\t\t});\n\n\t\tarrayEvents.forEach(function(key) {\n\t\t\tvar method = 'onData' + key.charAt(0).toUpperCase() + key.slice(1);\n\t\t\tvar base = array[key];\n\n\t\t\tObject.defineProperty(array, key, {\n\t\t\t\tconfigurable: true,\n\t\t\t\tenumerable: false,\n\t\t\t\tvalue: function() {\n\t\t\t\t\tvar args = Array.prototype.slice.call(arguments);\n\t\t\t\t\tvar res = base.apply(this, args);\n\n\t\t\t\t\thelpers.each(array._chartjs.listeners, function(object) {\n\t\t\t\t\t\tif (typeof object[method] === 'function') {\n\t\t\t\t\t\t\tobject[method].apply(object, args);\n\t\t\t\t\t\t}\n\t\t\t\t\t});\n\n\t\t\t\t\treturn res;\n\t\t\t\t}\n\t\t\t});\n\t\t});\n\t}\n\n\t/**\n\t * Removes the given array event listener and cleanup extra attached properties (such as\n\t * the _chartjs stub and overridden methods) if array doesn't have any more listeners.\n\t */\n\tfunction unlistenArrayEvents(array, listener) {\n\t\tvar stub = array._chartjs;\n\t\tif (!stub) {\n\t\t\treturn;\n\t\t}\n\n\t\tvar listeners = stub.listeners;\n\t\tvar index = 
listeners.indexOf(listener);\n\t\tif (index !== -1) {\n\t\t\tlisteners.splice(index, 1);\n\t\t}\n\n\t\tif (listeners.length > 0) {\n\t\t\treturn;\n\t\t}\n\n\t\tarrayEvents.forEach(function(key) {\n\t\t\tdelete array[key];\n\t\t});\n\n\t\tdelete array._chartjs;\n\t}\n\n\t// Base class for all dataset controllers (line, bar, etc)\n\tChart.DatasetController = function(chart, datasetIndex) {\n\t\tthis.initialize(chart, datasetIndex);\n\t};\n\n\thelpers.extend(Chart.DatasetController.prototype, {\n\n\t\t/**\n\t\t * Element type used to generate a meta dataset (e.g. Chart.element.Line).\n\t\t * @type {Chart.core.element}\n\t\t */\n\t\tdatasetElementType: null,\n\n\t\t/**\n\t\t * Element type used to generate a meta data (e.g. Chart.element.Point).\n\t\t * @type {Chart.core.element}\n\t\t */\n\t\tdataElementType: null,\n\n\t\tinitialize: function(chart, datasetIndex) {\n\t\t\tvar me = this;\n\t\t\tme.chart = chart;\n\t\t\tme.index = datasetIndex;\n\t\t\tme.linkScales();\n\t\t\tme.addElements();\n\t\t},\n\n\t\tupdateIndex: function(datasetIndex) {\n\t\t\tthis.index = datasetIndex;\n\t\t},\n\n\t\tlinkScales: function() {\n\t\t\tvar me = this;\n\t\t\tvar meta = me.getMeta();\n\t\t\tvar dataset = me.getDataset();\n\n\t\t\tif (meta.xAxisID === null || !(meta.xAxisID in me.chart.scales)) {\n\t\t\t\tmeta.xAxisID = dataset.xAxisID || me.chart.options.scales.xAxes[0].id;\n\t\t\t}\n\t\t\tif (meta.yAxisID === null || !(meta.yAxisID in me.chart.scales)) {\n\t\t\t\tmeta.yAxisID = dataset.yAxisID || me.chart.options.scales.yAxes[0].id;\n\t\t\t}\n\t\t},\n\n\t\tgetDataset: function() {\n\t\t\treturn this.chart.data.datasets[this.index];\n\t\t},\n\n\t\tgetMeta: function() {\n\t\t\treturn this.chart.getDatasetMeta(this.index);\n\t\t},\n\n\t\tgetScaleForId: function(scaleID) {\n\t\t\treturn this.chart.scales[scaleID];\n\t\t},\n\n\t\treset: function() {\n\t\t\tthis.update(true);\n\t\t},\n\n\t\t/**\n\t\t * @private\n\t\t */\n\t\tdestroy: function() {\n\t\t\tif (this._data) 
{\n\t\t\t\tunlistenArrayEvents(this._data, this);\n\t\t\t}\n\t\t},\n\n\t\tcreateMetaDataset: function() {\n\t\t\tvar me = this;\n\t\t\tvar type = me.datasetElementType;\n\t\t\treturn type && new type({\n\t\t\t\t_chart: me.chart,\n\t\t\t\t_datasetIndex: me.index\n\t\t\t});\n\t\t},\n\n\t\tcreateMetaData: function(index) {\n\t\t\tvar me = this;\n\t\t\tvar type = me.dataElementType;\n\t\t\treturn type && new type({\n\t\t\t\t_chart: me.chart,\n\t\t\t\t_datasetIndex: me.index,\n\t\t\t\t_index: index\n\t\t\t});\n\t\t},\n\n\t\taddElements: function() {\n\t\t\tvar me = this;\n\t\t\tvar meta = me.getMeta();\n\t\t\tvar data = me.getDataset().data || [];\n\t\t\tvar metaData = meta.data;\n\t\t\tvar i, ilen;\n\n\t\t\tfor (i = 0, ilen = data.length; i < ilen; ++i) {\n\t\t\t\tmetaData[i] = metaData[i] || me.createMetaData(i);\n\t\t\t}\n\n\t\t\tmeta.dataset = meta.dataset || me.createMetaDataset();\n\t\t},\n\n\t\taddElementAndReset: function(index) {\n\t\t\tvar element = this.createMetaData(index);\n\t\t\tthis.getMeta().data.splice(index, 0, element);\n\t\t\tthis.updateElement(element, index, true);\n\t\t},\n\n\t\tbuildOrUpdateElements: function() {\n\t\t\tvar me = this;\n\t\t\tvar dataset = me.getDataset();\n\t\t\tvar data = dataset.data || (dataset.data = []);\n\n\t\t\t// In order to correctly handle data addition/deletion animation (an thus simulate\n\t\t\t// real-time charts), we need to monitor these data modifications and synchronize\n\t\t\t// the internal meta data accordingly.\n\t\t\tif (me._data !== data) {\n\t\t\t\tif (me._data) {\n\t\t\t\t\t// This case happens when the user replaced the data array instance.\n\t\t\t\t\tunlistenArrayEvents(me._data, me);\n\t\t\t\t}\n\n\t\t\t\tlistenArrayEvents(data, me);\n\t\t\t\tme._data = data;\n\t\t\t}\n\n\t\t\t// Re-sync meta data in case the user replaced the data array or if we missed\n\t\t\t// any updates and so make sure that we handle number of datapoints changing.\n\t\t\tme.resyncElements();\n\t\t},\n\n\t\tupdate: 
helpers.noop,\n\n\t\ttransition: function(easingValue) {\n\t\t\tvar meta = this.getMeta();\n\t\t\tvar elements = meta.data || [];\n\t\t\tvar ilen = elements.length;\n\t\t\tvar i = 0;\n\n\t\t\tfor (; i < ilen; ++i) {\n\t\t\t\telements[i].transition(easingValue);\n\t\t\t}\n\n\t\t\tif (meta.dataset) {\n\t\t\t\tmeta.dataset.transition(easingValue);\n\t\t\t}\n\t\t},\n\n\t\tdraw: function() {\n\t\t\tvar meta = this.getMeta();\n\t\t\tvar elements = meta.data || [];\n\t\t\tvar ilen = elements.length;\n\t\t\tvar i = 0;\n\n\t\t\tif (meta.dataset) {\n\t\t\t\tmeta.dataset.draw();\n\t\t\t}\n\n\t\t\tfor (; i < ilen; ++i) {\n\t\t\t\telements[i].draw();\n\t\t\t}\n\t\t},\n\n\t\tremoveHoverStyle: function(element, elementOpts) {\n\t\t\tvar dataset = this.chart.data.datasets[element._datasetIndex];\n\t\t\tvar index = element._index;\n\t\t\tvar custom = element.custom || {};\n\t\t\tvar valueOrDefault = helpers.valueAtIndexOrDefault;\n\t\t\tvar model = element._model;\n\n\t\t\tmodel.backgroundColor = custom.backgroundColor ? custom.backgroundColor : valueOrDefault(dataset.backgroundColor, index, elementOpts.backgroundColor);\n\t\t\tmodel.borderColor = custom.borderColor ? custom.borderColor : valueOrDefault(dataset.borderColor, index, elementOpts.borderColor);\n\t\t\tmodel.borderWidth = custom.borderWidth ? custom.borderWidth : valueOrDefault(dataset.borderWidth, index, elementOpts.borderWidth);\n\t\t},\n\n\t\tsetHoverStyle: function(element) {\n\t\t\tvar dataset = this.chart.data.datasets[element._datasetIndex];\n\t\t\tvar index = element._index;\n\t\t\tvar custom = element.custom || {};\n\t\t\tvar valueOrDefault = helpers.valueAtIndexOrDefault;\n\t\t\tvar getHoverColor = helpers.getHoverColor;\n\t\t\tvar model = element._model;\n\n\t\t\tmodel.backgroundColor = custom.hoverBackgroundColor ? custom.hoverBackgroundColor : valueOrDefault(dataset.hoverBackgroundColor, index, getHoverColor(model.backgroundColor));\n\t\t\tmodel.borderColor = custom.hoverBorderColor ? 
custom.hoverBorderColor : valueOrDefault(dataset.hoverBorderColor, index, getHoverColor(model.borderColor));\n\t\t\tmodel.borderWidth = custom.hoverBorderWidth ? custom.hoverBorderWidth : valueOrDefault(dataset.hoverBorderWidth, index, model.borderWidth);\n\t\t},\n\n\t\t/**\n\t\t * @private\n\t\t */\n\t\tresyncElements: function() {\n\t\t\tvar me = this;\n\t\t\tvar meta = me.getMeta();\n\t\t\tvar data = me.getDataset().data;\n\t\t\tvar numMeta = meta.data.length;\n\t\t\tvar numData = data.length;\n\n\t\t\tif (numData < numMeta) {\n\t\t\t\tmeta.data.splice(numData, numMeta - numData);\n\t\t\t} else if (numData > numMeta) {\n\t\t\t\tme.insertElements(numMeta, numData - numMeta);\n\t\t\t}\n\t\t},\n\n\t\t/**\n\t\t * @private\n\t\t */\n\t\tinsertElements: function(start, count) {\n\t\t\tfor (var i = 0; i < count; ++i) {\n\t\t\t\tthis.addElementAndReset(start + i);\n\t\t\t}\n\t\t},\n\n\t\t/**\n\t\t * @private\n\t\t */\n\t\tonDataPush: function() {\n\t\t\tthis.insertElements(this.getDataset().data.length - 1, arguments.length);\n\t\t},\n\n\t\t/**\n\t\t * @private\n\t\t */\n\t\tonDataPop: function() {\n\t\t\tthis.getMeta().data.pop();\n\t\t},\n\n\t\t/**\n\t\t * @private\n\t\t */\n\t\tonDataShift: function() {\n\t\t\tthis.getMeta().data.shift();\n\t\t},\n\n\t\t/**\n\t\t * @private\n\t\t */\n\t\tonDataSplice: function(start, count) {\n\t\t\tthis.getMeta().data.splice(start, count);\n\t\t\tthis.insertElements(start, arguments.length - 2);\n\t\t},\n\n\t\t/**\n\t\t * @private\n\t\t */\n\t\tonDataUnshift: function() {\n\t\t\tthis.insertElements(0, arguments.length);\n\t\t}\n\t});\n\n\tChart.DatasetController.extend = helpers.inherits;\n};\n\n},{\"45\":45}],25:[function(require,module,exports){\n'use strict';\n\nvar helpers = require(45);\n\nmodule.exports = {\n\t/**\n\t * @private\n\t */\n\t_set: function(scope, values) {\n\t\treturn helpers.merge(this[scope] || (this[scope] = {}), values);\n\t}\n};\n\n},{\"45\":45}],26:[function(require,module,exports){\n'use strict';\n\nvar 
color = require(2);\nvar helpers = require(45);\n\nfunction interpolate(start, view, model, ease) {\n\tvar keys = Object.keys(model);\n\tvar i, ilen, key, actual, origin, target, type, c0, c1;\n\n\tfor (i = 0, ilen = keys.length; i < ilen; ++i) {\n\t\tkey = keys[i];\n\n\t\ttarget = model[key];\n\n\t\t// if a value is added to the model after pivot() has been called, the view\n\t\t// doesn't contain it, so let's initialize the view to the target value.\n\t\tif (!view.hasOwnProperty(key)) {\n\t\t\tview[key] = target;\n\t\t}\n\n\t\tactual = view[key];\n\n\t\tif (actual === target || key[0] === '_') {\n\t\t\tcontinue;\n\t\t}\n\n\t\tif (!start.hasOwnProperty(key)) {\n\t\t\tstart[key] = actual;\n\t\t}\n\n\t\torigin = start[key];\n\n\t\ttype = typeof target;\n\n\t\tif (type === typeof origin) {\n\t\t\tif (type === 'string') {\n\t\t\t\tc0 = color(origin);\n\t\t\t\tif (c0.valid) {\n\t\t\t\t\tc1 = color(target);\n\t\t\t\t\tif (c1.valid) {\n\t\t\t\t\t\tview[key] = c1.mix(c0, ease).rgbString();\n\t\t\t\t\t\tcontinue;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else if (type === 'number' && isFinite(origin) && isFinite(target)) {\n\t\t\t\tview[key] = origin + (target - origin) * ease;\n\t\t\t\tcontinue;\n\t\t\t}\n\t\t}\n\n\t\tview[key] = target;\n\t}\n}\n\nvar Element = function(configuration) {\n\thelpers.extend(this, configuration);\n\tthis.initialize.apply(this, arguments);\n};\n\nhelpers.extend(Element.prototype, {\n\n\tinitialize: function() {\n\t\tthis.hidden = false;\n\t},\n\n\tpivot: function() {\n\t\tvar me = this;\n\t\tif (!me._view) {\n\t\t\tme._view = helpers.clone(me._model);\n\t\t}\n\t\tme._start = {};\n\t\treturn me;\n\t},\n\n\ttransition: function(ease) {\n\t\tvar me = this;\n\t\tvar model = me._model;\n\t\tvar start = me._start;\n\t\tvar view = me._view;\n\n\t\t// No animation -> No Transition\n\t\tif (!model || ease === 1) {\n\t\t\tme._view = model;\n\t\t\tme._start = null;\n\t\t\treturn me;\n\t\t}\n\n\t\tif (!view) {\n\t\t\tview = me._view = {};\n\t\t}\n\n\t\tif 
(!start) {\n\t\t\tstart = me._start = {};\n\t\t}\n\n\t\tinterpolate(start, view, model, ease);\n\n\t\treturn me;\n\t},\n\n\ttooltipPosition: function() {\n\t\treturn {\n\t\t\tx: this._model.x,\n\t\t\ty: this._model.y\n\t\t};\n\t},\n\n\thasValue: function() {\n\t\treturn helpers.isNumber(this._model.x) && helpers.isNumber(this._model.y);\n\t}\n});\n\nElement.extend = helpers.inherits;\n\nmodule.exports = Element;\n\n},{\"2\":2,\"45\":45}],27:[function(require,module,exports){\n/* global window: false */\n/* global document: false */\n'use strict';\n\nvar color = require(2);\nvar defaults = require(25);\nvar helpers = require(45);\n\nmodule.exports = function(Chart) {\n\n\t// -- Basic js utility methods\n\n\thelpers.configMerge = function(/* objects ... */) {\n\t\treturn helpers.merge(helpers.clone(arguments[0]), [].slice.call(arguments, 1), {\n\t\t\tmerger: function(key, target, source, options) {\n\t\t\t\tvar tval = target[key] || {};\n\t\t\t\tvar sval = source[key];\n\n\t\t\t\tif (key === 'scales') {\n\t\t\t\t\t// scale config merging is complex. Add our own function here for that\n\t\t\t\t\ttarget[key] = helpers.scaleMerge(tval, sval);\n\t\t\t\t} else if (key === 'scale') {\n\t\t\t\t\t// used in polar area & radar charts since there is only one scale\n\t\t\t\t\ttarget[key] = helpers.merge(tval, [Chart.scaleService.getScaleDefaults(sval.type), sval]);\n\t\t\t\t} else {\n\t\t\t\t\thelpers._merger(key, target, source, options);\n\t\t\t\t}\n\t\t\t}\n\t\t});\n\t};\n\n\thelpers.scaleMerge = function(/* objects ... 
*/) {\n\t\treturn helpers.merge(helpers.clone(arguments[0]), [].slice.call(arguments, 1), {\n\t\t\tmerger: function(key, target, source, options) {\n\t\t\t\tif (key === 'xAxes' || key === 'yAxes') {\n\t\t\t\t\tvar slen = source[key].length;\n\t\t\t\t\tvar i, type, scale;\n\n\t\t\t\t\tif (!target[key]) {\n\t\t\t\t\t\ttarget[key] = [];\n\t\t\t\t\t}\n\n\t\t\t\t\tfor (i = 0; i < slen; ++i) {\n\t\t\t\t\t\tscale = source[key][i];\n\t\t\t\t\t\ttype = helpers.valueOrDefault(scale.type, key === 'xAxes' ? 'category' : 'linear');\n\n\t\t\t\t\t\tif (i >= target[key].length) {\n\t\t\t\t\t\t\ttarget[key].push({});\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tif (!target[key][i].type || (scale.type && scale.type !== target[key][i].type)) {\n\t\t\t\t\t\t\t// new/untyped scale or type changed: let's apply the new defaults\n\t\t\t\t\t\t\t// then merge source scale to correctly overwrite the defaults.\n\t\t\t\t\t\t\thelpers.merge(target[key][i], [Chart.scaleService.getScaleDefaults(type), scale]);\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t// scales type are the same\n\t\t\t\t\t\t\thelpers.merge(target[key][i], scale);\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\thelpers._merger(key, target, source, options);\n\t\t\t\t}\n\t\t\t}\n\t\t});\n\t};\n\n\thelpers.where = function(collection, filterCallback) {\n\t\tif (helpers.isArray(collection) && Array.prototype.filter) {\n\t\t\treturn collection.filter(filterCallback);\n\t\t}\n\t\tvar filtered = [];\n\n\t\thelpers.each(collection, function(item) {\n\t\t\tif (filterCallback(item)) {\n\t\t\t\tfiltered.push(item);\n\t\t\t}\n\t\t});\n\n\t\treturn filtered;\n\t};\n\thelpers.findIndex = Array.prototype.findIndex ?\n\t\tfunction(array, callback, scope) {\n\t\t\treturn array.findIndex(callback, scope);\n\t\t} :\n\t\tfunction(array, callback, scope) {\n\t\t\tscope = scope === undefined ? 
array : scope;\n\t\t\tfor (var i = 0, ilen = array.length; i < ilen; ++i) {\n\t\t\t\tif (callback.call(scope, array[i], i, array)) {\n\t\t\t\t\treturn i;\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn -1;\n\t\t};\n\thelpers.findNextWhere = function(arrayToSearch, filterCallback, startIndex) {\n\t\t// Default to start of the array\n\t\tif (helpers.isNullOrUndef(startIndex)) {\n\t\t\tstartIndex = -1;\n\t\t}\n\t\tfor (var i = startIndex + 1; i < arrayToSearch.length; i++) {\n\t\t\tvar currentItem = arrayToSearch[i];\n\t\t\tif (filterCallback(currentItem)) {\n\t\t\t\treturn currentItem;\n\t\t\t}\n\t\t}\n\t};\n\thelpers.findPreviousWhere = function(arrayToSearch, filterCallback, startIndex) {\n\t\t// Default to end of the array\n\t\tif (helpers.isNullOrUndef(startIndex)) {\n\t\t\tstartIndex = arrayToSearch.length;\n\t\t}\n\t\tfor (var i = startIndex - 1; i >= 0; i--) {\n\t\t\tvar currentItem = arrayToSearch[i];\n\t\t\tif (filterCallback(currentItem)) {\n\t\t\t\treturn currentItem;\n\t\t\t}\n\t\t}\n\t};\n\n\t// -- Math methods\n\thelpers.isNumber = function(n) {\n\t\treturn !isNaN(parseFloat(n)) && isFinite(n);\n\t};\n\thelpers.almostEquals = function(x, y, epsilon) {\n\t\treturn Math.abs(x - y) < epsilon;\n\t};\n\thelpers.almostWhole = function(x, epsilon) {\n\t\tvar rounded = Math.round(x);\n\t\treturn (((rounded - epsilon) < x) && ((rounded + epsilon) > x));\n\t};\n\thelpers.max = function(array) {\n\t\treturn array.reduce(function(max, value) {\n\t\t\tif (!isNaN(value)) {\n\t\t\t\treturn Math.max(max, value);\n\t\t\t}\n\t\t\treturn max;\n\t\t}, Number.NEGATIVE_INFINITY);\n\t};\n\thelpers.min = function(array) {\n\t\treturn array.reduce(function(min, value) {\n\t\t\tif (!isNaN(value)) {\n\t\t\t\treturn Math.min(min, value);\n\t\t\t}\n\t\t\treturn min;\n\t\t}, Number.POSITIVE_INFINITY);\n\t};\n\thelpers.sign = Math.sign ?\n\t\tfunction(x) {\n\t\t\treturn Math.sign(x);\n\t\t} :\n\t\tfunction(x) {\n\t\t\tx = +x; // convert to a number\n\t\t\tif (x === 0 || isNaN(x)) {\n\t\t\t\treturn 
x;\n\t\t\t}\n\t\t\treturn x > 0 ? 1 : -1;\n\t\t};\n\thelpers.log10 = Math.log10 ?\n\t\tfunction(x) {\n\t\t\treturn Math.log10(x);\n\t\t} :\n\t\tfunction(x) {\n\t\t\tvar exponent = Math.log(x) * Math.LOG10E; // Math.LOG10E = 1 / Math.LN10.\n\t\t\t// Check for whole powers of 10,\n\t\t\t// which due to floating point rounding error should be corrected.\n\t\t\tvar powerOf10 = Math.round(exponent);\n\t\t\tvar isPowerOf10 = x === Math.pow(10, powerOf10);\n\n\t\t\treturn isPowerOf10 ? powerOf10 : exponent;\n\t\t};\n\thelpers.toRadians = function(degrees) {\n\t\treturn degrees * (Math.PI / 180);\n\t};\n\thelpers.toDegrees = function(radians) {\n\t\treturn radians * (180 / Math.PI);\n\t};\n\t// Gets the angle from vertical upright to the point about a centre.\n\thelpers.getAngleFromPoint = function(centrePoint, anglePoint) {\n\t\tvar distanceFromXCenter = anglePoint.x - centrePoint.x;\n\t\tvar distanceFromYCenter = anglePoint.y - centrePoint.y;\n\t\tvar radialDistanceFromCenter = Math.sqrt(distanceFromXCenter * distanceFromXCenter + distanceFromYCenter * distanceFromYCenter);\n\n\t\tvar angle = Math.atan2(distanceFromYCenter, distanceFromXCenter);\n\n\t\tif (angle < (-0.5 * Math.PI)) {\n\t\t\tangle += 2.0 * Math.PI; // make sure the returned angle is in the range of (-PI/2, 3PI/2]\n\t\t}\n\n\t\treturn {\n\t\t\tangle: angle,\n\t\t\tdistance: radialDistanceFromCenter\n\t\t};\n\t};\n\thelpers.distanceBetweenPoints = function(pt1, pt2) {\n\t\treturn Math.sqrt(Math.pow(pt2.x - pt1.x, 2) + Math.pow(pt2.y - pt1.y, 2));\n\t};\n\thelpers.aliasPixel = function(pixelWidth) {\n\t\treturn (pixelWidth % 2 === 0) ? 0 : 0.5;\n\t};\n\thelpers.splineCurve = function(firstPoint, middlePoint, afterPoint, t) {\n\t\t// Props to Rob Spencer at scaled innovation for his post on splining between points\n\t\t// http://scaledinnovation.com/analytics/splines/aboutSplines.html\n\n\t\t// This function must also respect \"skipped\" points\n\n\t\tvar previous = firstPoint.skip ? 
middlePoint : firstPoint;\n\t\tvar current = middlePoint;\n\t\tvar next = afterPoint.skip ? middlePoint : afterPoint;\n\n\t\tvar d01 = Math.sqrt(Math.pow(current.x - previous.x, 2) + Math.pow(current.y - previous.y, 2));\n\t\tvar d12 = Math.sqrt(Math.pow(next.x - current.x, 2) + Math.pow(next.y - current.y, 2));\n\n\t\tvar s01 = d01 / (d01 + d12);\n\t\tvar s12 = d12 / (d01 + d12);\n\n\t\t// If all points are the same, s01 & s02 will be inf\n\t\ts01 = isNaN(s01) ? 0 : s01;\n\t\ts12 = isNaN(s12) ? 0 : s12;\n\n\t\tvar fa = t * s01; // scaling factor for triangle Ta\n\t\tvar fb = t * s12;\n\n\t\treturn {\n\t\t\tprevious: {\n\t\t\t\tx: current.x - fa * (next.x - previous.x),\n\t\t\t\ty: current.y - fa * (next.y - previous.y)\n\t\t\t},\n\t\t\tnext: {\n\t\t\t\tx: current.x + fb * (next.x - previous.x),\n\t\t\t\ty: current.y + fb * (next.y - previous.y)\n\t\t\t}\n\t\t};\n\t};\n\thelpers.EPSILON = Number.EPSILON || 1e-14;\n\thelpers.splineCurveMonotone = function(points) {\n\t\t// This function calculates Bézier control points in a similar way than |splineCurve|,\n\t\t// but preserves monotonicity of the provided data and ensures no local extremums are added\n\t\t// between the dataset discrete points due to the interpolation.\n\t\t// See : https://en.wikipedia.org/wiki/Monotone_cubic_interpolation\n\n\t\tvar pointsWithTangents = (points || []).map(function(point) {\n\t\t\treturn {\n\t\t\t\tmodel: point._model,\n\t\t\t\tdeltaK: 0,\n\t\t\t\tmK: 0\n\t\t\t};\n\t\t});\n\n\t\t// Calculate slopes (deltaK) and initialize tangents (mK)\n\t\tvar pointsLen = pointsWithTangents.length;\n\t\tvar i, pointBefore, pointCurrent, pointAfter;\n\t\tfor (i = 0; i < pointsLen; ++i) {\n\t\t\tpointCurrent = pointsWithTangents[i];\n\t\t\tif (pointCurrent.model.skip) {\n\t\t\t\tcontinue;\n\t\t\t}\n\n\t\t\tpointBefore = i > 0 ? pointsWithTangents[i - 1] : null;\n\t\t\tpointAfter = i < pointsLen - 1 ? 
pointsWithTangents[i + 1] : null;\n\t\t\tif (pointAfter && !pointAfter.model.skip) {\n\t\t\t\tvar slopeDeltaX = (pointAfter.model.x - pointCurrent.model.x);\n\n\t\t\t\t// In the case of two points that appear at the same x pixel, slopeDeltaX is 0\n\t\t\t\tpointCurrent.deltaK = slopeDeltaX !== 0 ? (pointAfter.model.y - pointCurrent.model.y) / slopeDeltaX : 0;\n\t\t\t}\n\n\t\t\tif (!pointBefore || pointBefore.model.skip) {\n\t\t\t\tpointCurrent.mK = pointCurrent.deltaK;\n\t\t\t} else if (!pointAfter || pointAfter.model.skip) {\n\t\t\t\tpointCurrent.mK = pointBefore.deltaK;\n\t\t\t} else if (this.sign(pointBefore.deltaK) !== this.sign(pointCurrent.deltaK)) {\n\t\t\t\tpointCurrent.mK = 0;\n\t\t\t} else {\n\t\t\t\tpointCurrent.mK = (pointBefore.deltaK + pointCurrent.deltaK) / 2;\n\t\t\t}\n\t\t}\n\n\t\t// Adjust tangents to ensure monotonic properties\n\t\tvar alphaK, betaK, tauK, squaredMagnitude;\n\t\tfor (i = 0; i < pointsLen - 1; ++i) {\n\t\t\tpointCurrent = pointsWithTangents[i];\n\t\t\tpointAfter = pointsWithTangents[i + 1];\n\t\t\tif (pointCurrent.model.skip || pointAfter.model.skip) {\n\t\t\t\tcontinue;\n\t\t\t}\n\n\t\t\tif (helpers.almostEquals(pointCurrent.deltaK, 0, this.EPSILON)) {\n\t\t\t\tpointCurrent.mK = pointAfter.mK = 0;\n\t\t\t\tcontinue;\n\t\t\t}\n\n\t\t\talphaK = pointCurrent.mK / pointCurrent.deltaK;\n\t\t\tbetaK = pointAfter.mK / pointCurrent.deltaK;\n\t\t\tsquaredMagnitude = Math.pow(alphaK, 2) + Math.pow(betaK, 2);\n\t\t\tif (squaredMagnitude <= 9) {\n\t\t\t\tcontinue;\n\t\t\t}\n\n\t\t\ttauK = 3 / Math.sqrt(squaredMagnitude);\n\t\t\tpointCurrent.mK = alphaK * tauK * pointCurrent.deltaK;\n\t\t\tpointAfter.mK = betaK * tauK * pointCurrent.deltaK;\n\t\t}\n\n\t\t// Compute control points\n\t\tvar deltaX;\n\t\tfor (i = 0; i < pointsLen; ++i) {\n\t\t\tpointCurrent = pointsWithTangents[i];\n\t\t\tif (pointCurrent.model.skip) {\n\t\t\t\tcontinue;\n\t\t\t}\n\n\t\t\tpointBefore = i > 0 ? 
pointsWithTangents[i - 1] : null;\n\t\t\tpointAfter = i < pointsLen - 1 ? pointsWithTangents[i + 1] : null;\n\t\t\tif (pointBefore && !pointBefore.model.skip) {\n\t\t\t\tdeltaX = (pointCurrent.model.x - pointBefore.model.x) / 3;\n\t\t\t\tpointCurrent.model.controlPointPreviousX = pointCurrent.model.x - deltaX;\n\t\t\t\tpointCurrent.model.controlPointPreviousY = pointCurrent.model.y - deltaX * pointCurrent.mK;\n\t\t\t}\n\t\t\tif (pointAfter && !pointAfter.model.skip) {\n\t\t\t\tdeltaX = (pointAfter.model.x - pointCurrent.model.x) / 3;\n\t\t\t\tpointCurrent.model.controlPointNextX = pointCurrent.model.x + deltaX;\n\t\t\t\tpointCurrent.model.controlPointNextY = pointCurrent.model.y + deltaX * pointCurrent.mK;\n\t\t\t}\n\t\t}\n\t};\n\thelpers.nextItem = function(collection, index, loop) {\n\t\tif (loop) {\n\t\t\treturn index >= collection.length - 1 ? collection[0] : collection[index + 1];\n\t\t}\n\t\treturn index >= collection.length - 1 ? collection[collection.length - 1] : collection[index + 1];\n\t};\n\thelpers.previousItem = function(collection, index, loop) {\n\t\tif (loop) {\n\t\t\treturn index <= 0 ? collection[collection.length - 1] : collection[index - 1];\n\t\t}\n\t\treturn index <= 0 ? 
collection[0] : collection[index - 1];\n\t};\n\t// Implementation of the nice number algorithm used in determining where axis labels will go\n\thelpers.niceNum = function(range, round) {\n\t\tvar exponent = Math.floor(helpers.log10(range));\n\t\tvar fraction = range / Math.pow(10, exponent);\n\t\tvar niceFraction;\n\n\t\tif (round) {\n\t\t\tif (fraction < 1.5) {\n\t\t\t\tniceFraction = 1;\n\t\t\t} else if (fraction < 3) {\n\t\t\t\tniceFraction = 2;\n\t\t\t} else if (fraction < 7) {\n\t\t\t\tniceFraction = 5;\n\t\t\t} else {\n\t\t\t\tniceFraction = 10;\n\t\t\t}\n\t\t} else if (fraction <= 1.0) {\n\t\t\tniceFraction = 1;\n\t\t} else if (fraction <= 2) {\n\t\t\tniceFraction = 2;\n\t\t} else if (fraction <= 5) {\n\t\t\tniceFraction = 5;\n\t\t} else {\n\t\t\tniceFraction = 10;\n\t\t}\n\n\t\treturn niceFraction * Math.pow(10, exponent);\n\t};\n\t// Request animation polyfill - http://www.paulirish.com/2011/requestanimationframe-for-smart-animating/\n\thelpers.requestAnimFrame = (function() {\n\t\tif (typeof window === 'undefined') {\n\t\t\treturn function(callback) {\n\t\t\t\tcallback();\n\t\t\t};\n\t\t}\n\t\treturn window.requestAnimationFrame ||\n\t\t\twindow.webkitRequestAnimationFrame ||\n\t\t\twindow.mozRequestAnimationFrame ||\n\t\t\twindow.oRequestAnimationFrame ||\n\t\t\twindow.msRequestAnimationFrame ||\n\t\t\tfunction(callback) {\n\t\t\t\treturn window.setTimeout(callback, 1000 / 60);\n\t\t\t};\n\t}());\n\t// -- DOM methods\n\thelpers.getRelativePosition = function(evt, chart) {\n\t\tvar mouseX, mouseY;\n\t\tvar e = evt.originalEvent || evt;\n\t\tvar canvas = evt.currentTarget || evt.srcElement;\n\t\tvar boundingRect = canvas.getBoundingClientRect();\n\n\t\tvar touches = e.touches;\n\t\tif (touches && touches.length > 0) {\n\t\t\tmouseX = touches[0].clientX;\n\t\t\tmouseY = touches[0].clientY;\n\n\t\t} else {\n\t\t\tmouseX = e.clientX;\n\t\t\tmouseY = e.clientY;\n\t\t}\n\n\t\t// Scale mouse coordinates into canvas coordinates\n\t\t// by following the pattern 
laid out by 'jerryj' in the comments of\n\t\t// http://www.html5canvastutorials.com/advanced/html5-canvas-mouse-coordinates/\n\t\tvar paddingLeft = parseFloat(helpers.getStyle(canvas, 'padding-left'));\n\t\tvar paddingTop = parseFloat(helpers.getStyle(canvas, 'padding-top'));\n\t\tvar paddingRight = parseFloat(helpers.getStyle(canvas, 'padding-right'));\n\t\tvar paddingBottom = parseFloat(helpers.getStyle(canvas, 'padding-bottom'));\n\t\tvar width = boundingRect.right - boundingRect.left - paddingLeft - paddingRight;\n\t\tvar height = boundingRect.bottom - boundingRect.top - paddingTop - paddingBottom;\n\n\t\t// We divide by the current device pixel ratio, because the canvas is scaled up by that amount in each direction. However\n\t\t// the backend model is in unscaled coordinates. Since we are going to deal with our model coordinates, we go back here\n\t\tmouseX = Math.round((mouseX - boundingRect.left - paddingLeft) / (width) * canvas.width / chart.currentDevicePixelRatio);\n\t\tmouseY = Math.round((mouseY - boundingRect.top - paddingTop) / (height) * canvas.height / chart.currentDevicePixelRatio);\n\n\t\treturn {\n\t\t\tx: mouseX,\n\t\t\ty: mouseY\n\t\t};\n\n\t};\n\n\t// Private helper function to convert max-width/max-height values that may be percentages into a number\n\tfunction parseMaxStyle(styleValue, node, parentProperty) {\n\t\tvar valueInPixels;\n\t\tif (typeof styleValue === 'string') {\n\t\t\tvalueInPixels = parseInt(styleValue, 10);\n\n\t\t\tif (styleValue.indexOf('%') !== -1) {\n\t\t\t\t// percentage * size in dimension\n\t\t\t\tvalueInPixels = valueInPixels / 100 * node.parentNode[parentProperty];\n\t\t\t}\n\t\t} else {\n\t\t\tvalueInPixels = styleValue;\n\t\t}\n\n\t\treturn valueInPixels;\n\t}\n\n\t/**\n\t * Returns if the given value contains an effective constraint.\n\t * @private\n\t */\n\tfunction isConstrainedValue(value) {\n\t\treturn value !== undefined && value !== null && value !== 'none';\n\t}\n\n\t// Private helper to get a constraint 
dimension\n\t// @param domNode : the node to check the constraint on\n\t// @param maxStyle : the style that defines the maximum for the direction we are using (maxWidth / maxHeight)\n\t// @param percentageProperty : property of parent to use when calculating width as a percentage\n\t// @see http://www.nathanaeljones.com/blog/2013/reading-max-width-cross-browser\n\tfunction getConstraintDimension(domNode, maxStyle, percentageProperty) {\n\t\tvar view = document.defaultView;\n\t\tvar parentNode = domNode.parentNode;\n\t\tvar constrainedNode = view.getComputedStyle(domNode)[maxStyle];\n\t\tvar constrainedContainer = view.getComputedStyle(parentNode)[maxStyle];\n\t\tvar hasCNode = isConstrainedValue(constrainedNode);\n\t\tvar hasCContainer = isConstrainedValue(constrainedContainer);\n\t\tvar infinity = Number.POSITIVE_INFINITY;\n\n\t\tif (hasCNode || hasCContainer) {\n\t\t\treturn Math.min(\n\t\t\t\thasCNode ? parseMaxStyle(constrainedNode, domNode, percentageProperty) : infinity,\n\t\t\t\thasCContainer ? parseMaxStyle(constrainedContainer, parentNode, percentageProperty) : infinity);\n\t\t}\n\n\t\treturn 'none';\n\t}\n\t// returns Number or undefined if no constraint\n\thelpers.getConstraintWidth = function(domNode) {\n\t\treturn getConstraintDimension(domNode, 'max-width', 'clientWidth');\n\t};\n\t// returns Number or undefined if no constraint\n\thelpers.getConstraintHeight = function(domNode) {\n\t\treturn getConstraintDimension(domNode, 'max-height', 'clientHeight');\n\t};\n\thelpers.getMaximumWidth = function(domNode) {\n\t\tvar container = domNode.parentNode;\n\t\tif (!container) {\n\t\t\treturn domNode.clientWidth;\n\t\t}\n\n\t\tvar paddingLeft = parseInt(helpers.getStyle(container, 'padding-left'), 10);\n\t\tvar paddingRight = parseInt(helpers.getStyle(container, 'padding-right'), 10);\n\t\tvar w = container.clientWidth - paddingLeft - paddingRight;\n\t\tvar cw = helpers.getConstraintWidth(domNode);\n\t\treturn isNaN(cw) ? 
w : Math.min(w, cw);\n\t};\n\thelpers.getMaximumHeight = function(domNode) {\n\t\tvar container = domNode.parentNode;\n\t\tif (!container) {\n\t\t\treturn domNode.clientHeight;\n\t\t}\n\n\t\tvar paddingTop = parseInt(helpers.getStyle(container, 'padding-top'), 10);\n\t\tvar paddingBottom = parseInt(helpers.getStyle(container, 'padding-bottom'), 10);\n\t\tvar h = container.clientHeight - paddingTop - paddingBottom;\n\t\tvar ch = helpers.getConstraintHeight(domNode);\n\t\treturn isNaN(ch) ? h : Math.min(h, ch);\n\t};\n\thelpers.getStyle = function(el, property) {\n\t\treturn el.currentStyle ?\n\t\t\tel.currentStyle[property] :\n\t\t\tdocument.defaultView.getComputedStyle(el, null).getPropertyValue(property);\n\t};\n\thelpers.retinaScale = function(chart, forceRatio) {\n\t\tvar pixelRatio = chart.currentDevicePixelRatio = forceRatio || window.devicePixelRatio || 1;\n\t\tif (pixelRatio === 1) {\n\t\t\treturn;\n\t\t}\n\n\t\tvar canvas = chart.canvas;\n\t\tvar height = chart.height;\n\t\tvar width = chart.width;\n\n\t\tcanvas.height = height * pixelRatio;\n\t\tcanvas.width = width * pixelRatio;\n\t\tchart.ctx.scale(pixelRatio, pixelRatio);\n\n\t\t// If no style has been set on the canvas, the render size is used as display size,\n\t\t// making the chart visually bigger, so let's enforce it to the \"correct\" values.\n\t\t// See https://github.com/chartjs/Chart.js/issues/3575\n\t\tif (!canvas.style.height && !canvas.style.width) {\n\t\t\tcanvas.style.height = height + 'px';\n\t\t\tcanvas.style.width = width + 'px';\n\t\t}\n\t};\n\t// -- Canvas methods\n\thelpers.fontString = function(pixelSize, fontStyle, fontFamily) {\n\t\treturn fontStyle + ' ' + pixelSize + 'px ' + fontFamily;\n\t};\n\thelpers.longestText = function(ctx, font, arrayOfThings, cache) {\n\t\tcache = cache || {};\n\t\tvar data = cache.data = cache.data || {};\n\t\tvar gc = cache.garbageCollect = cache.garbageCollect || [];\n\n\t\tif (cache.font !== font) {\n\t\t\tdata = cache.data = {};\n\t\t\tgc = 
cache.garbageCollect = [];\n\t\t\tcache.font = font;\n\t\t}\n\n\t\tctx.font = font;\n\t\tvar longest = 0;\n\t\thelpers.each(arrayOfThings, function(thing) {\n\t\t\t// Undefined strings and arrays should not be measured\n\t\t\tif (thing !== undefined && thing !== null && helpers.isArray(thing) !== true) {\n\t\t\t\tlongest = helpers.measureText(ctx, data, gc, longest, thing);\n\t\t\t} else if (helpers.isArray(thing)) {\n\t\t\t\t// if it is an array lets measure each element\n\t\t\t\t// to do maybe simplify this function a bit so we can do this more recursively?\n\t\t\t\thelpers.each(thing, function(nestedThing) {\n\t\t\t\t\t// Undefined strings and arrays should not be measured\n\t\t\t\t\tif (nestedThing !== undefined && nestedThing !== null && !helpers.isArray(nestedThing)) {\n\t\t\t\t\t\tlongest = helpers.measureText(ctx, data, gc, longest, nestedThing);\n\t\t\t\t\t}\n\t\t\t\t});\n\t\t\t}\n\t\t});\n\n\t\tvar gcLen = gc.length / 2;\n\t\tif (gcLen > arrayOfThings.length) {\n\t\t\tfor (var i = 0; i < gcLen; i++) {\n\t\t\t\tdelete data[gc[i]];\n\t\t\t}\n\t\t\tgc.splice(0, gcLen);\n\t\t}\n\t\treturn longest;\n\t};\n\thelpers.measureText = function(ctx, data, gc, longest, string) {\n\t\tvar textWidth = data[string];\n\t\tif (!textWidth) {\n\t\t\ttextWidth = data[string] = ctx.measureText(string).width;\n\t\t\tgc.push(string);\n\t\t}\n\t\tif (textWidth > longest) {\n\t\t\tlongest = textWidth;\n\t\t}\n\t\treturn longest;\n\t};\n\thelpers.numberOfLabelLines = function(arrayOfThings) {\n\t\tvar numberOfLines = 1;\n\t\thelpers.each(arrayOfThings, function(thing) {\n\t\t\tif (helpers.isArray(thing)) {\n\t\t\t\tif (thing.length > numberOfLines) {\n\t\t\t\t\tnumberOfLines = thing.length;\n\t\t\t\t}\n\t\t\t}\n\t\t});\n\t\treturn numberOfLines;\n\t};\n\n\thelpers.color = !color ?\n\t\tfunction(value) {\n\t\t\tconsole.error('Color.js not found!');\n\t\t\treturn value;\n\t\t} :\n\t\tfunction(value) {\n\t\t\t/* global CanvasGradient */\n\t\t\tif (value instanceof CanvasGradient) 
{\n\t\t\t\tvalue = defaults.global.defaultColor;\n\t\t\t}\n\n\t\t\treturn color(value);\n\t\t};\n\n\thelpers.getHoverColor = function(colorValue) {\n\t\t/* global CanvasPattern */\n\t\treturn (colorValue instanceof CanvasPattern) ?\n\t\t\tcolorValue :\n\t\t\thelpers.color(colorValue).saturate(0.5).darken(0.1).rgbString();\n\t};\n};\n\n},{\"2\":2,\"25\":25,\"45\":45}],28:[function(require,module,exports){\n'use strict';\n\nvar helpers = require(45);\n\n/**\n * Helper function to get relative position for an event\n * @param {Event|IEvent} event - The event to get the position for\n * @param {Chart} chart - The chart\n * @returns {Point} the event position\n */\nfunction getRelativePosition(e, chart) {\n\tif (e.native) {\n\t\treturn {\n\t\t\tx: e.x,\n\t\t\ty: e.y\n\t\t};\n\t}\n\n\treturn helpers.getRelativePosition(e, chart);\n}\n\n/**\n * Helper function to traverse all of the visible elements in the chart\n * @param chart {chart} the chart\n * @param handler {Function} the callback to execute for each visible item\n */\nfunction parseVisibleItems(chart, handler) {\n\tvar datasets = chart.data.datasets;\n\tvar meta, i, j, ilen, jlen;\n\n\tfor (i = 0, ilen = datasets.length; i < ilen; ++i) {\n\t\tif (!chart.isDatasetVisible(i)) {\n\t\t\tcontinue;\n\t\t}\n\n\t\tmeta = chart.getDatasetMeta(i);\n\t\tfor (j = 0, jlen = meta.data.length; j < jlen; ++j) {\n\t\t\tvar element = meta.data[j];\n\t\t\tif (!element._view.skip) {\n\t\t\t\thandler(element);\n\t\t\t}\n\t\t}\n\t}\n}\n\n/**\n * Helper function to get the items that intersect the event position\n * @param items {ChartElement[]} elements to filter\n * @param position {Point} the point to be nearest to\n * @return {ChartElement[]} the nearest items\n */\nfunction getIntersectItems(chart, position) {\n\tvar elements = [];\n\n\tparseVisibleItems(chart, function(element) {\n\t\tif (element.inRange(position.x, position.y)) {\n\t\t\telements.push(element);\n\t\t}\n\t});\n\n\treturn elements;\n}\n\n/**\n * Helper function to 
get the items nearest to the event position considering all visible items in teh chart\n * @param chart {Chart} the chart to look at elements from\n * @param position {Point} the point to be nearest to\n * @param intersect {Boolean} if true, only consider items that intersect the position\n * @param distanceMetric {Function} function to provide the distance between points\n * @return {ChartElement[]} the nearest items\n */\nfunction getNearestItems(chart, position, intersect, distanceMetric) {\n\tvar minDistance = Number.POSITIVE_INFINITY;\n\tvar nearestItems = [];\n\n\tparseVisibleItems(chart, function(element) {\n\t\tif (intersect && !element.inRange(position.x, position.y)) {\n\t\t\treturn;\n\t\t}\n\n\t\tvar center = element.getCenterPoint();\n\t\tvar distance = distanceMetric(position, center);\n\n\t\tif (distance < minDistance) {\n\t\t\tnearestItems = [element];\n\t\t\tminDistance = distance;\n\t\t} else if (distance === minDistance) {\n\t\t\t// Can have multiple items at the same distance in which case we sort by size\n\t\t\tnearestItems.push(element);\n\t\t}\n\t});\n\n\treturn nearestItems;\n}\n\n/**\n * Get a distance metric function for two points based on the\n * axis mode setting\n * @param {String} axis the axis mode. x|y|xy\n */\nfunction getDistanceMetricForAxis(axis) {\n\tvar useX = axis.indexOf('x') !== -1;\n\tvar useY = axis.indexOf('y') !== -1;\n\n\treturn function(pt1, pt2) {\n\t\tvar deltaX = useX ? Math.abs(pt1.x - pt2.x) : 0;\n\t\tvar deltaY = useY ? Math.abs(pt1.y - pt2.y) : 0;\n\t\treturn Math.sqrt(Math.pow(deltaX, 2) + Math.pow(deltaY, 2));\n\t};\n}\n\nfunction indexMode(chart, e, options) {\n\tvar position = getRelativePosition(e, chart);\n\t// Default axis for index mode is 'x' to match old behaviour\n\toptions.axis = options.axis || 'x';\n\tvar distanceMetric = getDistanceMetricForAxis(options.axis);\n\tvar items = options.intersect ? 
getIntersectItems(chart, position) : getNearestItems(chart, position, false, distanceMetric);\n\tvar elements = [];\n\n\tif (!items.length) {\n\t\treturn [];\n\t}\n\n\tchart.data.datasets.forEach(function(dataset, datasetIndex) {\n\t\tif (chart.isDatasetVisible(datasetIndex)) {\n\t\t\tvar meta = chart.getDatasetMeta(datasetIndex);\n\t\t\tvar element = meta.data[items[0]._index];\n\n\t\t\t// don't count items that are skipped (null data)\n\t\t\tif (element && !element._view.skip) {\n\t\t\t\telements.push(element);\n\t\t\t}\n\t\t}\n\t});\n\n\treturn elements;\n}\n\n/**\n * @interface IInteractionOptions\n */\n/**\n * If true, only consider items that intersect the point\n * @name IInterfaceOptions#boolean\n * @type Boolean\n */\n\n/**\n * Contains interaction related functions\n * @namespace Chart.Interaction\n */\nmodule.exports = {\n\t// Helper function for different modes\n\tmodes: {\n\t\tsingle: function(chart, e) {\n\t\t\tvar position = getRelativePosition(e, chart);\n\t\t\tvar elements = [];\n\n\t\t\tparseVisibleItems(chart, function(element) {\n\t\t\t\tif (element.inRange(position.x, position.y)) {\n\t\t\t\t\telements.push(element);\n\t\t\t\t\treturn elements;\n\t\t\t\t}\n\t\t\t});\n\n\t\t\treturn elements.slice(0, 1);\n\t\t},\n\n\t\t/**\n\t\t * @function Chart.Interaction.modes.label\n\t\t * @deprecated since version 2.4.0\n\t\t * @todo remove at version 3\n\t\t * @private\n\t\t */\n\t\tlabel: indexMode,\n\n\t\t/**\n\t\t * Returns items at the same index. 
If the options.intersect parameter is true, we only return items if we intersect something\n\t\t * If the options.intersect mode is false, we find the nearest item and return the items at the same index as that item\n\t\t * @function Chart.Interaction.modes.index\n\t\t * @since v2.4.0\n\t\t * @param chart {chart} the chart we are returning items from\n\t\t * @param e {Event} the event we are find things at\n\t\t * @param options {IInteractionOptions} options to use during interaction\n\t\t * @return {Chart.Element[]} Array of elements that are under the point. If none are found, an empty array is returned\n\t\t */\n\t\tindex: indexMode,\n\n\t\t/**\n\t\t * Returns items in the same dataset. If the options.intersect parameter is true, we only return items if we intersect something\n\t\t * If the options.intersect is false, we find the nearest item and return the items in that dataset\n\t\t * @function Chart.Interaction.modes.dataset\n\t\t * @param chart {chart} the chart we are returning items from\n\t\t * @param e {Event} the event we are find things at\n\t\t * @param options {IInteractionOptions} options to use during interaction\n\t\t * @return {Chart.Element[]} Array of elements that are under the point. If none are found, an empty array is returned\n\t\t */\n\t\tdataset: function(chart, e, options) {\n\t\t\tvar position = getRelativePosition(e, chart);\n\t\t\toptions.axis = options.axis || 'xy';\n\t\t\tvar distanceMetric = getDistanceMetricForAxis(options.axis);\n\t\t\tvar items = options.intersect ? getIntersectItems(chart, position) : getNearestItems(chart, position, false, distanceMetric);\n\n\t\t\tif (items.length > 0) {\n\t\t\t\titems = chart.getDatasetMeta(items[0]._datasetIndex).data;\n\t\t\t}\n\n\t\t\treturn items;\n\t\t},\n\n\t\t/**\n\t\t * @function Chart.Interaction.modes.x-axis\n\t\t * @deprecated since version 2.4.0. 
Use index mode and intersect == true\n\t\t * @todo remove at version 3\n\t\t * @private\n\t\t */\n\t\t'x-axis': function(chart, e) {\n\t\t\treturn indexMode(chart, e, {intersect: false});\n\t\t},\n\n\t\t/**\n\t\t * Point mode returns all elements that hit test based on the event position\n\t\t * of the event\n\t\t * @function Chart.Interaction.modes.intersect\n\t\t * @param chart {chart} the chart we are returning items from\n\t\t * @param e {Event} the event we are find things at\n\t\t * @return {Chart.Element[]} Array of elements that are under the point. If none are found, an empty array is returned\n\t\t */\n\t\tpoint: function(chart, e) {\n\t\t\tvar position = getRelativePosition(e, chart);\n\t\t\treturn getIntersectItems(chart, position);\n\t\t},\n\n\t\t/**\n\t\t * nearest mode returns the element closest to the point\n\t\t * @function Chart.Interaction.modes.intersect\n\t\t * @param chart {chart} the chart we are returning items from\n\t\t * @param e {Event} the event we are find things at\n\t\t * @param options {IInteractionOptions} options to use\n\t\t * @return {Chart.Element[]} Array of elements that are under the point. If none are found, an empty array is returned\n\t\t */\n\t\tnearest: function(chart, e, options) {\n\t\t\tvar position = getRelativePosition(e, chart);\n\t\t\toptions.axis = options.axis || 'xy';\n\t\t\tvar distanceMetric = getDistanceMetricForAxis(options.axis);\n\t\t\tvar nearestItems = getNearestItems(chart, position, options.intersect, distanceMetric);\n\n\t\t\t// We have multiple items at the same distance from the event. 
Now sort by smallest\n\t\t\tif (nearestItems.length > 1) {\n\t\t\t\tnearestItems.sort(function(a, b) {\n\t\t\t\t\tvar sizeA = a.getArea();\n\t\t\t\t\tvar sizeB = b.getArea();\n\t\t\t\t\tvar ret = sizeA - sizeB;\n\n\t\t\t\t\tif (ret === 0) {\n\t\t\t\t\t\t// if equal sort by dataset index\n\t\t\t\t\t\tret = a._datasetIndex - b._datasetIndex;\n\t\t\t\t\t}\n\n\t\t\t\t\treturn ret;\n\t\t\t\t});\n\t\t\t}\n\n\t\t\t// Return only 1 item\n\t\t\treturn nearestItems.slice(0, 1);\n\t\t},\n\n\t\t/**\n\t\t * x mode returns the elements that hit-test at the current x coordinate\n\t\t * @function Chart.Interaction.modes.x\n\t\t * @param chart {chart} the chart we are returning items from\n\t\t * @param e {Event} the event we are find things at\n\t\t * @param options {IInteractionOptions} options to use\n\t\t * @return {Chart.Element[]} Array of elements that are under the point. If none are found, an empty array is returned\n\t\t */\n\t\tx: function(chart, e, options) {\n\t\t\tvar position = getRelativePosition(e, chart);\n\t\t\tvar items = [];\n\t\t\tvar intersectsItem = false;\n\n\t\t\tparseVisibleItems(chart, function(element) {\n\t\t\t\tif (element.inXRange(position.x)) {\n\t\t\t\t\titems.push(element);\n\t\t\t\t}\n\n\t\t\t\tif (element.inRange(position.x, position.y)) {\n\t\t\t\t\tintersectsItem = true;\n\t\t\t\t}\n\t\t\t});\n\n\t\t\t// If we want to trigger on an intersect and we don't have any items\n\t\t\t// that intersect the position, return nothing\n\t\t\tif (options.intersect && !intersectsItem) {\n\t\t\t\titems = [];\n\t\t\t}\n\t\t\treturn items;\n\t\t},\n\n\t\t/**\n\t\t * y mode returns the elements that hit-test at the current y coordinate\n\t\t * @function Chart.Interaction.modes.y\n\t\t * @param chart {chart} the chart we are returning items from\n\t\t * @param e {Event} the event we are find things at\n\t\t * @param options {IInteractionOptions} options to use\n\t\t * @return {Chart.Element[]} Array of elements that are under the point. 
If none are found, an empty array is returned\n\t\t */\n\t\ty: function(chart, e, options) {\n\t\t\tvar position = getRelativePosition(e, chart);\n\t\t\tvar items = [];\n\t\t\tvar intersectsItem = false;\n\n\t\t\tparseVisibleItems(chart, function(element) {\n\t\t\t\tif (element.inYRange(position.y)) {\n\t\t\t\t\titems.push(element);\n\t\t\t\t}\n\n\t\t\t\tif (element.inRange(position.x, position.y)) {\n\t\t\t\t\tintersectsItem = true;\n\t\t\t\t}\n\t\t\t});\n\n\t\t\t// If we want to trigger on an intersect and we don't have any items\n\t\t\t// that intersect the position, return nothing\n\t\t\tif (options.intersect && !intersectsItem) {\n\t\t\t\titems = [];\n\t\t\t}\n\t\t\treturn items;\n\t\t}\n\t}\n};\n\n},{\"45\":45}],29:[function(require,module,exports){\n'use strict';\n\nvar defaults = require(25);\n\ndefaults._set('global', {\n\tresponsive: true,\n\tresponsiveAnimationDuration: 0,\n\tmaintainAspectRatio: true,\n\tevents: ['mousemove', 'mouseout', 'click', 'touchstart', 'touchmove'],\n\thover: {\n\t\tonHover: null,\n\t\tmode: 'nearest',\n\t\tintersect: true,\n\t\tanimationDuration: 400\n\t},\n\tonClick: null,\n\tdefaultColor: 'rgba(0,0,0,0.1)',\n\tdefaultFontColor: '#666',\n\tdefaultFontFamily: \"'Helvetica Neue', 'Helvetica', 'Arial', sans-serif\",\n\tdefaultFontSize: 12,\n\tdefaultFontStyle: 'normal',\n\tshowLines: true,\n\n\t// Element defaults defined in element extensions\n\telements: {},\n\n\t// Layout options such as padding\n\tlayout: {\n\t\tpadding: {\n\t\t\ttop: 0,\n\t\t\tright: 0,\n\t\t\tbottom: 0,\n\t\t\tleft: 0\n\t\t}\n\t}\n});\n\nmodule.exports = function() {\n\n\t// Occupy the global variable of Chart, and create a simple base class\n\tvar Chart = function(item, config) {\n\t\tthis.construct(item, config);\n\t\treturn this;\n\t};\n\n\tChart.Chart = Chart;\n\n\treturn Chart;\n};\n\n},{\"25\":25}],30:[function(require,module,exports){\n'use strict';\n\nvar helpers = require(45);\n\nfunction filterByPosition(array, position) {\n\treturn 
helpers.where(array, function(v) {\n\t\treturn v.position === position;\n\t});\n}\n\nfunction sortByWeight(array, reverse) {\n\tarray.forEach(function(v, i) {\n\t\tv._tmpIndex_ = i;\n\t\treturn v;\n\t});\n\tarray.sort(function(a, b) {\n\t\tvar v0 = reverse ? b : a;\n\t\tvar v1 = reverse ? a : b;\n\t\treturn v0.weight === v1.weight ?\n\t\t\tv0._tmpIndex_ - v1._tmpIndex_ :\n\t\t\tv0.weight - v1.weight;\n\t});\n\tarray.forEach(function(v) {\n\t\tdelete v._tmpIndex_;\n\t});\n}\n\n/**\n * @interface ILayoutItem\n * @prop {String} position - The position of the item in the chart layout. Possible values are\n * 'left', 'top', 'right', 'bottom', and 'chartArea'\n * @prop {Number} weight - The weight used to sort the item. Higher weights are further away from the chart area\n * @prop {Boolean} fullWidth - if true, and the item is horizontal, then push vertical boxes down\n * @prop {Function} isHorizontal - returns true if the layout item is horizontal (ie. top or bottom)\n * @prop {Function} update - Takes two parameters: width and height. Returns size of item\n * @prop {Function} getPadding -  Returns an object with padding on the edges\n * @prop {Number} width - Width of item. Must be valid after update()\n * @prop {Number} height - Height of item. Must be valid after update()\n * @prop {Number} left - Left edge of the item. Set by layout system and cannot be used in update\n * @prop {Number} top - Top edge of the item. Set by layout system and cannot be used in update\n * @prop {Number} right - Right edge of the item. Set by layout system and cannot be used in update\n * @prop {Number} bottom - Bottom edge of the item. Set by layout system and cannot be used in update\n */\n\n// The layout service is very self explanatory.  
It's responsible for the layout within a chart.\n// Scales, Legends and Plugins all rely on the layout service and can easily register to be placed anywhere they need\n// It is this service's responsibility of carrying out that layout.\nmodule.exports = {\n\tdefaults: {},\n\n\t/**\n\t * Register a box to a chart.\n\t * A box is simply a reference to an object that requires layout. eg. Scales, Legend, Title.\n\t * @param {Chart} chart - the chart to use\n\t * @param {ILayoutItem} item - the item to add to be layed out\n\t */\n\taddBox: function(chart, item) {\n\t\tif (!chart.boxes) {\n\t\t\tchart.boxes = [];\n\t\t}\n\n\t\t// initialize item with default values\n\t\titem.fullWidth = item.fullWidth || false;\n\t\titem.position = item.position || 'top';\n\t\titem.weight = item.weight || 0;\n\n\t\tchart.boxes.push(item);\n\t},\n\n\t/**\n\t * Remove a layoutItem from a chart\n\t * @param {Chart} chart - the chart to remove the box from\n\t * @param {Object} layoutItem - the item to remove from the layout\n\t */\n\tremoveBox: function(chart, layoutItem) {\n\t\tvar index = chart.boxes ? 
chart.boxes.indexOf(layoutItem) : -1;\n\t\tif (index !== -1) {\n\t\t\tchart.boxes.splice(index, 1);\n\t\t}\n\t},\n\n\t/**\n\t * Sets (or updates) options on the given `item`.\n\t * @param {Chart} chart - the chart in which the item lives (or will be added to)\n\t * @param {Object} item - the item to configure with the given options\n\t * @param {Object} options - the new item options.\n\t */\n\tconfigure: function(chart, item, options) {\n\t\tvar props = ['fullWidth', 'position', 'weight'];\n\t\tvar ilen = props.length;\n\t\tvar i = 0;\n\t\tvar prop;\n\n\t\tfor (; i < ilen; ++i) {\n\t\t\tprop = props[i];\n\t\t\tif (options.hasOwnProperty(prop)) {\n\t\t\t\titem[prop] = options[prop];\n\t\t\t}\n\t\t}\n\t},\n\n\t/**\n\t * Fits boxes of the given chart into the given size by having each box measure itself\n\t * then running a fitting algorithm\n\t * @param {Chart} chart - the chart\n\t * @param {Number} width - the width to fit into\n\t * @param {Number} height - the height to fit into\n\t */\n\tupdate: function(chart, width, height) {\n\t\tif (!chart) {\n\t\t\treturn;\n\t\t}\n\n\t\tvar layoutOptions = chart.options.layout || {};\n\t\tvar padding = helpers.options.toPadding(layoutOptions.padding);\n\t\tvar leftPadding = padding.left;\n\t\tvar rightPadding = padding.right;\n\t\tvar topPadding = padding.top;\n\t\tvar bottomPadding = padding.bottom;\n\n\t\tvar leftBoxes = filterByPosition(chart.boxes, 'left');\n\t\tvar rightBoxes = filterByPosition(chart.boxes, 'right');\n\t\tvar topBoxes = filterByPosition(chart.boxes, 'top');\n\t\tvar bottomBoxes = filterByPosition(chart.boxes, 'bottom');\n\t\tvar chartAreaBoxes = filterByPosition(chart.boxes, 'chartArea');\n\n\t\t// Sort boxes by weight. 
A higher weight is further away from the chart area\n\t\tsortByWeight(leftBoxes, true);\n\t\tsortByWeight(rightBoxes, false);\n\t\tsortByWeight(topBoxes, true);\n\t\tsortByWeight(bottomBoxes, false);\n\n\t\t// Essentially we now have any number of boxes on each of the 4 sides.\n\t\t// Our canvas looks like the following.\n\t\t// The areas L1 and L2 are the left axes. R1 is the right axis, T1 is the top axis and\n\t\t// B1 is the bottom axis\n\t\t// There are also 4 quadrant-like locations (left to right instead of clockwise) reserved for chart overlays\n\t\t// These locations are single-box locations only, when trying to register a chartArea location that is already taken,\n\t\t// an error will be thrown.\n\t\t//\n\t\t// |----------------------------------------------------|\n\t\t// |                  T1 (Full Width)                   |\n\t\t// |----------------------------------------------------|\n\t\t// |    |    |                 T2                  |    |\n\t\t// |    |----|-------------------------------------|----|\n\t\t// |    |    | C1 |                           | C2 |    |\n\t\t// |    |    |----|                           |----|    |\n\t\t// |    |    |                                     |    |\n\t\t// | L1 | L2 |           ChartArea (C0)            | R1 |\n\t\t// |    |    |                                     |    |\n\t\t// |    |    |----|                           |----|    |\n\t\t// |    |    | C3 |                           | C4 |    |\n\t\t// |    |----|-------------------------------------|----|\n\t\t// |    |    |                 B1                  |    |\n\t\t// |----------------------------------------------------|\n\t\t// |                  B2 (Full Width)                   |\n\t\t// |----------------------------------------------------|\n\t\t//\n\t\t// What we do to find the best sizing, we do the following\n\t\t// 1. Determine the minimum size of the chart area.\n\t\t// 2. 
Split the remaining width equally between each vertical axis\n\t\t// 3. Split the remaining height equally between each horizontal axis\n\t\t// 4. Give each layout the maximum size it can be. The layout will return it's minimum size\n\t\t// 5. Adjust the sizes of each axis based on it's minimum reported size.\n\t\t// 6. Refit each axis\n\t\t// 7. Position each axis in the final location\n\t\t// 8. Tell the chart the final location of the chart area\n\t\t// 9. Tell any axes that overlay the chart area the positions of the chart area\n\n\t\t// Step 1\n\t\tvar chartWidth = width - leftPadding - rightPadding;\n\t\tvar chartHeight = height - topPadding - bottomPadding;\n\t\tvar chartAreaWidth = chartWidth / 2; // min 50%\n\t\tvar chartAreaHeight = chartHeight / 2; // min 50%\n\n\t\t// Step 2\n\t\tvar verticalBoxWidth = (width - chartAreaWidth) / (leftBoxes.length + rightBoxes.length);\n\n\t\t// Step 3\n\t\tvar horizontalBoxHeight = (height - chartAreaHeight) / (topBoxes.length + bottomBoxes.length);\n\n\t\t// Step 4\n\t\tvar maxChartAreaWidth = chartWidth;\n\t\tvar maxChartAreaHeight = chartHeight;\n\t\tvar minBoxSizes = [];\n\n\t\tfunction getMinimumBoxSize(box) {\n\t\t\tvar minSize;\n\t\t\tvar isHorizontal = box.isHorizontal();\n\n\t\t\tif (isHorizontal) {\n\t\t\t\tminSize = box.update(box.fullWidth ? 
chartWidth : maxChartAreaWidth, horizontalBoxHeight);\n\t\t\t\tmaxChartAreaHeight -= minSize.height;\n\t\t\t} else {\n\t\t\t\tminSize = box.update(verticalBoxWidth, maxChartAreaHeight);\n\t\t\t\tmaxChartAreaWidth -= minSize.width;\n\t\t\t}\n\n\t\t\tminBoxSizes.push({\n\t\t\t\thorizontal: isHorizontal,\n\t\t\t\tminSize: minSize,\n\t\t\t\tbox: box,\n\t\t\t});\n\t\t}\n\n\t\thelpers.each(leftBoxes.concat(rightBoxes, topBoxes, bottomBoxes), getMinimumBoxSize);\n\n\t\t// If a horizontal box has padding, we move the left boxes over to avoid ugly charts (see issue #2478)\n\t\tvar maxHorizontalLeftPadding = 0;\n\t\tvar maxHorizontalRightPadding = 0;\n\t\tvar maxVerticalTopPadding = 0;\n\t\tvar maxVerticalBottomPadding = 0;\n\n\t\thelpers.each(topBoxes.concat(bottomBoxes), function(horizontalBox) {\n\t\t\tif (horizontalBox.getPadding) {\n\t\t\t\tvar boxPadding = horizontalBox.getPadding();\n\t\t\t\tmaxHorizontalLeftPadding = Math.max(maxHorizontalLeftPadding, boxPadding.left);\n\t\t\t\tmaxHorizontalRightPadding = Math.max(maxHorizontalRightPadding, boxPadding.right);\n\t\t\t}\n\t\t});\n\n\t\thelpers.each(leftBoxes.concat(rightBoxes), function(verticalBox) {\n\t\t\tif (verticalBox.getPadding) {\n\t\t\t\tvar boxPadding = verticalBox.getPadding();\n\t\t\t\tmaxVerticalTopPadding = Math.max(maxVerticalTopPadding, boxPadding.top);\n\t\t\t\tmaxVerticalBottomPadding = Math.max(maxVerticalBottomPadding, boxPadding.bottom);\n\t\t\t}\n\t\t});\n\n\t\t// At this point, maxChartAreaHeight and maxChartAreaWidth are the size the chart area could\n\t\t// be if the axes are drawn at their minimum sizes.\n\t\t// Steps 5 & 6\n\t\tvar totalLeftBoxesWidth = leftPadding;\n\t\tvar totalRightBoxesWidth = rightPadding;\n\t\tvar totalTopBoxesHeight = topPadding;\n\t\tvar totalBottomBoxesHeight = bottomPadding;\n\n\t\t// Function to fit a box\n\t\tfunction fitBox(box) {\n\t\t\tvar minBoxSize = helpers.findNextWhere(minBoxSizes, function(minBox) {\n\t\t\t\treturn minBox.box === 
box;\n\t\t\t});\n\n\t\t\tif (minBoxSize) {\n\t\t\t\tif (box.isHorizontal()) {\n\t\t\t\t\tvar scaleMargin = {\n\t\t\t\t\t\tleft: Math.max(totalLeftBoxesWidth, maxHorizontalLeftPadding),\n\t\t\t\t\t\tright: Math.max(totalRightBoxesWidth, maxHorizontalRightPadding),\n\t\t\t\t\t\ttop: 0,\n\t\t\t\t\t\tbottom: 0\n\t\t\t\t\t};\n\n\t\t\t\t\t// Don't use min size here because of label rotation. When the labels are rotated, their rotation highly depends\n\t\t\t\t\t// on the margin. Sometimes they need to increase in size slightly\n\t\t\t\t\tbox.update(box.fullWidth ? chartWidth : maxChartAreaWidth, chartHeight / 2, scaleMargin);\n\t\t\t\t} else {\n\t\t\t\t\tbox.update(minBoxSize.minSize.width, maxChartAreaHeight);\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// Update, and calculate the left and right margins for the horizontal boxes\n\t\thelpers.each(leftBoxes.concat(rightBoxes), fitBox);\n\n\t\thelpers.each(leftBoxes, function(box) {\n\t\t\ttotalLeftBoxesWidth += box.width;\n\t\t});\n\n\t\thelpers.each(rightBoxes, function(box) {\n\t\t\ttotalRightBoxesWidth += box.width;\n\t\t});\n\n\t\t// Set the Left and Right margins for the horizontal boxes\n\t\thelpers.each(topBoxes.concat(bottomBoxes), fitBox);\n\n\t\t// Figure out how much margin is on the top and bottom of the vertical boxes\n\t\thelpers.each(topBoxes, function(box) {\n\t\t\ttotalTopBoxesHeight += box.height;\n\t\t});\n\n\t\thelpers.each(bottomBoxes, function(box) {\n\t\t\ttotalBottomBoxesHeight += box.height;\n\t\t});\n\n\t\tfunction finalFitVerticalBox(box) {\n\t\t\tvar minBoxSize = helpers.findNextWhere(minBoxSizes, function(minSize) {\n\t\t\t\treturn minSize.box === box;\n\t\t\t});\n\n\t\t\tvar scaleMargin = {\n\t\t\t\tleft: 0,\n\t\t\t\tright: 0,\n\t\t\t\ttop: totalTopBoxesHeight,\n\t\t\t\tbottom: totalBottomBoxesHeight\n\t\t\t};\n\n\t\t\tif (minBoxSize) {\n\t\t\t\tbox.update(minBoxSize.minSize.width, maxChartAreaHeight, scaleMargin);\n\t\t\t}\n\t\t}\n\n\t\t// Let the left layout know the final 
margin\n\t\thelpers.each(leftBoxes.concat(rightBoxes), finalFitVerticalBox);\n\n\t\t// Recalculate because the size of each layout might have changed slightly due to the margins (label rotation for instance)\n\t\ttotalLeftBoxesWidth = leftPadding;\n\t\ttotalRightBoxesWidth = rightPadding;\n\t\ttotalTopBoxesHeight = topPadding;\n\t\ttotalBottomBoxesHeight = bottomPadding;\n\n\t\thelpers.each(leftBoxes, function(box) {\n\t\t\ttotalLeftBoxesWidth += box.width;\n\t\t});\n\n\t\thelpers.each(rightBoxes, function(box) {\n\t\t\ttotalRightBoxesWidth += box.width;\n\t\t});\n\n\t\thelpers.each(topBoxes, function(box) {\n\t\t\ttotalTopBoxesHeight += box.height;\n\t\t});\n\t\thelpers.each(bottomBoxes, function(box) {\n\t\t\ttotalBottomBoxesHeight += box.height;\n\t\t});\n\n\t\t// We may be adding some padding to account for rotated x axis labels\n\t\tvar leftPaddingAddition = Math.max(maxHorizontalLeftPadding - totalLeftBoxesWidth, 0);\n\t\ttotalLeftBoxesWidth += leftPaddingAddition;\n\t\ttotalRightBoxesWidth += Math.max(maxHorizontalRightPadding - totalRightBoxesWidth, 0);\n\n\t\tvar topPaddingAddition = Math.max(maxVerticalTopPadding - totalTopBoxesHeight, 0);\n\t\ttotalTopBoxesHeight += topPaddingAddition;\n\t\ttotalBottomBoxesHeight += Math.max(maxVerticalBottomPadding - totalBottomBoxesHeight, 0);\n\n\t\t// Figure out if our chart area changed. This would occur if the dataset layout label rotation\n\t\t// changed due to the application of the margins in step 6. 
Since we can only get bigger, this is safe to do\n\t\t// without calling `fit` again\n\t\tvar newMaxChartAreaHeight = height - totalTopBoxesHeight - totalBottomBoxesHeight;\n\t\tvar newMaxChartAreaWidth = width - totalLeftBoxesWidth - totalRightBoxesWidth;\n\n\t\tif (newMaxChartAreaWidth !== maxChartAreaWidth || newMaxChartAreaHeight !== maxChartAreaHeight) {\n\t\t\thelpers.each(leftBoxes, function(box) {\n\t\t\t\tbox.height = newMaxChartAreaHeight;\n\t\t\t});\n\n\t\t\thelpers.each(rightBoxes, function(box) {\n\t\t\t\tbox.height = newMaxChartAreaHeight;\n\t\t\t});\n\n\t\t\thelpers.each(topBoxes, function(box) {\n\t\t\t\tif (!box.fullWidth) {\n\t\t\t\t\tbox.width = newMaxChartAreaWidth;\n\t\t\t\t}\n\t\t\t});\n\n\t\t\thelpers.each(bottomBoxes, function(box) {\n\t\t\t\tif (!box.fullWidth) {\n\t\t\t\t\tbox.width = newMaxChartAreaWidth;\n\t\t\t\t}\n\t\t\t});\n\n\t\t\tmaxChartAreaHeight = newMaxChartAreaHeight;\n\t\t\tmaxChartAreaWidth = newMaxChartAreaWidth;\n\t\t}\n\n\t\t// Step 7 - Position the boxes\n\t\tvar left = leftPadding + leftPaddingAddition;\n\t\tvar top = topPadding + topPaddingAddition;\n\n\t\tfunction placeBox(box) {\n\t\t\tif (box.isHorizontal()) {\n\t\t\t\tbox.left = box.fullWidth ? leftPadding : totalLeftBoxesWidth;\n\t\t\t\tbox.right = box.fullWidth ? 
width - rightPadding : totalLeftBoxesWidth + maxChartAreaWidth;\n\t\t\t\tbox.top = top;\n\t\t\t\tbox.bottom = top + box.height;\n\n\t\t\t\t// Move to next point\n\t\t\t\ttop = box.bottom;\n\n\t\t\t} else {\n\n\t\t\t\tbox.left = left;\n\t\t\t\tbox.right = left + box.width;\n\t\t\t\tbox.top = totalTopBoxesHeight;\n\t\t\t\tbox.bottom = totalTopBoxesHeight + maxChartAreaHeight;\n\n\t\t\t\t// Move to next point\n\t\t\t\tleft = box.right;\n\t\t\t}\n\t\t}\n\n\t\thelpers.each(leftBoxes.concat(topBoxes), placeBox);\n\n\t\t// Account for chart width and height\n\t\tleft += maxChartAreaWidth;\n\t\ttop += maxChartAreaHeight;\n\n\t\thelpers.each(rightBoxes, placeBox);\n\t\thelpers.each(bottomBoxes, placeBox);\n\n\t\t// Step 8\n\t\tchart.chartArea = {\n\t\t\tleft: totalLeftBoxesWidth,\n\t\t\ttop: totalTopBoxesHeight,\n\t\t\tright: totalLeftBoxesWidth + maxChartAreaWidth,\n\t\t\tbottom: totalTopBoxesHeight + maxChartAreaHeight\n\t\t};\n\n\t\t// Step 9\n\t\thelpers.each(chartAreaBoxes, function(box) {\n\t\t\tbox.left = chart.chartArea.left;\n\t\t\tbox.top = chart.chartArea.top;\n\t\t\tbox.right = chart.chartArea.right;\n\t\t\tbox.bottom = chart.chartArea.bottom;\n\n\t\t\tbox.update(maxChartAreaWidth, maxChartAreaHeight);\n\t\t});\n\t}\n};\n\n},{\"45\":45}],31:[function(require,module,exports){\n'use strict';\n\nvar defaults = require(25);\nvar helpers = require(45);\n\ndefaults._set('global', {\n\tplugins: {}\n});\n\n/**\n * The plugin service singleton\n * @namespace Chart.plugins\n * @since 2.1.0\n */\nmodule.exports = {\n\t/**\n\t * Globally registered plugins.\n\t * @private\n\t */\n\t_plugins: [],\n\n\t/**\n\t * This identifier is used to invalidate the descriptors cache attached to each chart\n\t * when a global plugin is registered or unregistered. 
In this case, the cache ID is\n\t * incremented and descriptors are regenerated during following API calls.\n\t * @private\n\t */\n\t_cacheId: 0,\n\n\t/**\n\t * Registers the given plugin(s) if not already registered.\n\t * @param {Array|Object} plugins plugin instance(s).\n\t */\n\tregister: function(plugins) {\n\t\tvar p = this._plugins;\n\t\t([]).concat(plugins).forEach(function(plugin) {\n\t\t\tif (p.indexOf(plugin) === -1) {\n\t\t\t\tp.push(plugin);\n\t\t\t}\n\t\t});\n\n\t\tthis._cacheId++;\n\t},\n\n\t/**\n\t * Unregisters the given plugin(s) only if registered.\n\t * @param {Array|Object} plugins plugin instance(s).\n\t */\n\tunregister: function(plugins) {\n\t\tvar p = this._plugins;\n\t\t([]).concat(plugins).forEach(function(plugin) {\n\t\t\tvar idx = p.indexOf(plugin);\n\t\t\tif (idx !== -1) {\n\t\t\t\tp.splice(idx, 1);\n\t\t\t}\n\t\t});\n\n\t\tthis._cacheId++;\n\t},\n\n\t/**\n\t * Remove all registered plugins.\n\t * @since 2.1.5\n\t */\n\tclear: function() {\n\t\tthis._plugins = [];\n\t\tthis._cacheId++;\n\t},\n\n\t/**\n\t * Returns the number of registered plugins?\n\t * @returns {Number}\n\t * @since 2.1.5\n\t */\n\tcount: function() {\n\t\treturn this._plugins.length;\n\t},\n\n\t/**\n\t * Returns all registered plugin instances.\n\t * @returns {Array} array of plugin objects.\n\t * @since 2.1.5\n\t */\n\tgetAll: function() {\n\t\treturn this._plugins;\n\t},\n\n\t/**\n\t * Calls enabled plugins for `chart` on the specified hook and with the given args.\n\t * This method immediately returns as soon as a plugin explicitly returns false. The\n\t * returned value can be used, for instance, to interrupt the current action.\n\t * @param {Object} chart - The chart instance for which plugins should be called.\n\t * @param {String} hook - The name of the plugin method to call (e.g. 
'beforeUpdate').\n\t * @param {Array} [args] - Extra arguments to apply to the hook call.\n\t * @returns {Boolean} false if any of the plugins return false, else returns true.\n\t */\n\tnotify: function(chart, hook, args) {\n\t\tvar descriptors = this.descriptors(chart);\n\t\tvar ilen = descriptors.length;\n\t\tvar i, descriptor, plugin, params, method;\n\n\t\tfor (i = 0; i < ilen; ++i) {\n\t\t\tdescriptor = descriptors[i];\n\t\t\tplugin = descriptor.plugin;\n\t\t\tmethod = plugin[hook];\n\t\t\tif (typeof method === 'function') {\n\t\t\t\tparams = [chart].concat(args || []);\n\t\t\t\tparams.push(descriptor.options);\n\t\t\t\tif (method.apply(plugin, params) === false) {\n\t\t\t\t\treturn false;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\treturn true;\n\t},\n\n\t/**\n\t * Returns descriptors of enabled plugins for the given chart.\n\t * @returns {Array} [{ plugin, options }]\n\t * @private\n\t */\n\tdescriptors: function(chart) {\n\t\tvar cache = chart.$plugins || (chart.$plugins = {});\n\t\tif (cache.id === this._cacheId) {\n\t\t\treturn cache.descriptors;\n\t\t}\n\n\t\tvar plugins = [];\n\t\tvar descriptors = [];\n\t\tvar config = (chart && chart.config) || {};\n\t\tvar options = (config.options && config.options.plugins) || {};\n\n\t\tthis._plugins.concat(config.plugins || []).forEach(function(plugin) {\n\t\t\tvar idx = plugins.indexOf(plugin);\n\t\t\tif (idx !== -1) {\n\t\t\t\treturn;\n\t\t\t}\n\n\t\t\tvar id = plugin.id;\n\t\t\tvar opts = options[id];\n\t\t\tif (opts === false) {\n\t\t\t\treturn;\n\t\t\t}\n\n\t\t\tif (opts === true) {\n\t\t\t\topts = helpers.clone(defaults.global.plugins[id]);\n\t\t\t}\n\n\t\t\tplugins.push(plugin);\n\t\t\tdescriptors.push({\n\t\t\t\tplugin: plugin,\n\t\t\t\toptions: opts || {}\n\t\t\t});\n\t\t});\n\n\t\tcache.descriptors = descriptors;\n\t\tcache.id = this._cacheId;\n\t\treturn descriptors;\n\t},\n\n\t/**\n\t * Invalidates cache for the given chart: descriptors hold a reference on plugin option,\n\t * but in some cases, this reference 
can be changed by the user when updating options.\n\t * https://github.com/chartjs/Chart.js/issues/5111#issuecomment-355934167\n\t * @private\n\t */\n\t_invalidate: function(chart) {\n\t\tdelete chart.$plugins;\n\t}\n};\n\n/**\n * Plugin extension hooks.\n * @interface IPlugin\n * @since 2.1.0\n */\n/**\n * @method IPlugin#beforeInit\n * @desc Called before initializing `chart`.\n * @param {Chart.Controller} chart - The chart instance.\n * @param {Object} options - The plugin options.\n */\n/**\n * @method IPlugin#afterInit\n * @desc Called after `chart` has been initialized and before the first update.\n * @param {Chart.Controller} chart - The chart instance.\n * @param {Object} options - The plugin options.\n */\n/**\n * @method IPlugin#beforeUpdate\n * @desc Called before updating `chart`. If any plugin returns `false`, the update\n * is cancelled (and thus subsequent render(s)) until another `update` is triggered.\n * @param {Chart.Controller} chart - The chart instance.\n * @param {Object} options - The plugin options.\n * @returns {Boolean} `false` to cancel the chart update.\n */\n/**\n * @method IPlugin#afterUpdate\n * @desc Called after `chart` has been updated and before rendering. Note that this\n * hook will not be called if the chart update has been previously cancelled.\n * @param {Chart.Controller} chart - The chart instance.\n * @param {Object} options - The plugin options.\n */\n/**\n * @method IPlugin#beforeDatasetsUpdate\n * @desc Called before updating the `chart` datasets. If any plugin returns `false`,\n * the datasets update is cancelled until another `update` is triggered.\n * @param {Chart.Controller} chart - The chart instance.\n * @param {Object} options - The plugin options.\n * @returns {Boolean} false to cancel the datasets update.\n * @since version 2.1.5\n*/\n/**\n * @method IPlugin#afterDatasetsUpdate\n * @desc Called after the `chart` datasets have been updated. 
Note that this hook\n * will not be called if the datasets update has been previously cancelled.\n * @param {Chart.Controller} chart - The chart instance.\n * @param {Object} options - The plugin options.\n * @since version 2.1.5\n */\n/**\n * @method IPlugin#beforeDatasetUpdate\n * @desc Called before updating the `chart` dataset at the given `args.index`. If any plugin\n * returns `false`, the datasets update is cancelled until another `update` is triggered.\n * @param {Chart} chart - The chart instance.\n * @param {Object} args - The call arguments.\n * @param {Number} args.index - The dataset index.\n * @param {Object} args.meta - The dataset metadata.\n * @param {Object} options - The plugin options.\n * @returns {Boolean} `false` to cancel the chart datasets drawing.\n */\n/**\n * @method IPlugin#afterDatasetUpdate\n * @desc Called after the `chart` datasets at the given `args.index` has been updated. Note\n * that this hook will not be called if the datasets update has been previously cancelled.\n * @param {Chart} chart - The chart instance.\n * @param {Object} args - The call arguments.\n * @param {Number} args.index - The dataset index.\n * @param {Object} args.meta - The dataset metadata.\n * @param {Object} options - The plugin options.\n */\n/**\n * @method IPlugin#beforeLayout\n * @desc Called before laying out `chart`. If any plugin returns `false`,\n * the layout update is cancelled until another `update` is triggered.\n * @param {Chart.Controller} chart - The chart instance.\n * @param {Object} options - The plugin options.\n * @returns {Boolean} `false` to cancel the chart layout.\n */\n/**\n * @method IPlugin#afterLayout\n * @desc Called after the `chart` has been layed out. 
Note that this hook will not\n * be called if the layout update has been previously cancelled.\n * @param {Chart.Controller} chart - The chart instance.\n * @param {Object} options - The plugin options.\n */\n/**\n * @method IPlugin#beforeRender\n * @desc Called before rendering `chart`. If any plugin returns `false`,\n * the rendering is cancelled until another `render` is triggered.\n * @param {Chart.Controller} chart - The chart instance.\n * @param {Object} options - The plugin options.\n * @returns {Boolean} `false` to cancel the chart rendering.\n */\n/**\n * @method IPlugin#afterRender\n * @desc Called after the `chart` has been fully rendered (and animation completed). Note\n * that this hook will not be called if the rendering has been previously cancelled.\n * @param {Chart.Controller} chart - The chart instance.\n * @param {Object} options - The plugin options.\n */\n/**\n * @method IPlugin#beforeDraw\n * @desc Called before drawing `chart` at every animation frame specified by the given\n * easing value. If any plugin returns `false`, the frame drawing is cancelled until\n * another `render` is triggered.\n * @param {Chart.Controller} chart - The chart instance.\n * @param {Number} easingValue - The current animation value, between 0.0 and 1.0.\n * @param {Object} options - The plugin options.\n * @returns {Boolean} `false` to cancel the chart drawing.\n */\n/**\n * @method IPlugin#afterDraw\n * @desc Called after the `chart` has been drawn for the specific easing value. Note\n * that this hook will not be called if the drawing has been previously cancelled.\n * @param {Chart.Controller} chart - The chart instance.\n * @param {Number} easingValue - The current animation value, between 0.0 and 1.0.\n * @param {Object} options - The plugin options.\n */\n/**\n * @method IPlugin#beforeDatasetsDraw\n * @desc Called before drawing the `chart` datasets. 
If any plugin returns `false`,\n * the datasets drawing is cancelled until another `render` is triggered.\n * @param {Chart.Controller} chart - The chart instance.\n * @param {Number} easingValue - The current animation value, between 0.0 and 1.0.\n * @param {Object} options - The plugin options.\n * @returns {Boolean} `false` to cancel the chart datasets drawing.\n */\n/**\n * @method IPlugin#afterDatasetsDraw\n * @desc Called after the `chart` datasets have been drawn. Note that this hook\n * will not be called if the datasets drawing has been previously cancelled.\n * @param {Chart.Controller} chart - The chart instance.\n * @param {Number} easingValue - The current animation value, between 0.0 and 1.0.\n * @param {Object} options - The plugin options.\n */\n/**\n * @method IPlugin#beforeDatasetDraw\n * @desc Called before drawing the `chart` dataset at the given `args.index` (datasets\n * are drawn in the reverse order). If any plugin returns `false`, the datasets drawing\n * is cancelled until another `render` is triggered.\n * @param {Chart} chart - The chart instance.\n * @param {Object} args - The call arguments.\n * @param {Number} args.index - The dataset index.\n * @param {Object} args.meta - The dataset metadata.\n * @param {Number} args.easingValue - The current animation value, between 0.0 and 1.0.\n * @param {Object} options - The plugin options.\n * @returns {Boolean} `false` to cancel the chart datasets drawing.\n */\n/**\n * @method IPlugin#afterDatasetDraw\n * @desc Called after the `chart` datasets at the given `args.index` have been drawn\n * (datasets are drawn in the reverse order). 
Note that this hook will not be called\n * if the datasets drawing has been previously cancelled.\n * @param {Chart} chart - The chart instance.\n * @param {Object} args - The call arguments.\n * @param {Number} args.index - The dataset index.\n * @param {Object} args.meta - The dataset metadata.\n * @param {Number} args.easingValue - The current animation value, between 0.0 and 1.0.\n * @param {Object} options - The plugin options.\n */\n/**\n * @method IPlugin#beforeTooltipDraw\n * @desc Called before drawing the `tooltip`. If any plugin returns `false`,\n * the tooltip drawing is cancelled until another `render` is triggered.\n * @param {Chart} chart - The chart instance.\n * @param {Object} args - The call arguments.\n * @param {Object} args.tooltip - The tooltip.\n * @param {Number} args.easingValue - The current animation value, between 0.0 and 1.0.\n * @param {Object} options - The plugin options.\n * @returns {Boolean} `false` to cancel the chart tooltip drawing.\n */\n/**\n * @method IPlugin#afterTooltipDraw\n * @desc Called after drawing the `tooltip`. Note that this hook will not\n * be called if the tooltip drawing has been previously cancelled.\n * @param {Chart} chart - The chart instance.\n * @param {Object} args - The call arguments.\n * @param {Object} args.tooltip - The tooltip.\n * @param {Number} args.easingValue - The current animation value, between 0.0 and 1.0.\n * @param {Object} options - The plugin options.\n */\n/**\n * @method IPlugin#beforeEvent\n * @desc Called before processing the specified `event`. If any plugin returns `false`,\n * the event will be discarded.\n * @param {Chart.Controller} chart - The chart instance.\n * @param {IEvent} event - The event object.\n * @param {Object} options - The plugin options.\n */\n/**\n * @method IPlugin#afterEvent\n * @desc Called after the `event` has been consumed. 
Note that this hook\n * will not be called if the `event` has been previously discarded.\n * @param {Chart.Controller} chart - The chart instance.\n * @param {IEvent} event - The event object.\n * @param {Object} options - The plugin options.\n */\n/**\n * @method IPlugin#resize\n * @desc Called after the chart as been resized.\n * @param {Chart.Controller} chart - The chart instance.\n * @param {Number} size - The new canvas display size (eq. canvas.style width & height).\n * @param {Object} options - The plugin options.\n */\n/**\n * @method IPlugin#destroy\n * @desc Called after the chart as been destroyed.\n * @param {Chart.Controller} chart - The chart instance.\n * @param {Object} options - The plugin options.\n */\n\n},{\"25\":25,\"45\":45}],32:[function(require,module,exports){\n'use strict';\n\nvar defaults = require(25);\nvar Element = require(26);\nvar helpers = require(45);\nvar Ticks = require(34);\n\ndefaults._set('scale', {\n\tdisplay: true,\n\tposition: 'left',\n\toffset: false,\n\n\t// grid line settings\n\tgridLines: {\n\t\tdisplay: true,\n\t\tcolor: 'rgba(0, 0, 0, 0.1)',\n\t\tlineWidth: 1,\n\t\tdrawBorder: true,\n\t\tdrawOnChartArea: true,\n\t\tdrawTicks: true,\n\t\ttickMarkLength: 10,\n\t\tzeroLineWidth: 1,\n\t\tzeroLineColor: 'rgba(0,0,0,0.25)',\n\t\tzeroLineBorderDash: [],\n\t\tzeroLineBorderDashOffset: 0.0,\n\t\toffsetGridLines: false,\n\t\tborderDash: [],\n\t\tborderDashOffset: 0.0\n\t},\n\n\t// scale label\n\tscaleLabel: {\n\t\t// display property\n\t\tdisplay: false,\n\n\t\t// actual label\n\t\tlabelString: '',\n\n\t\t// line height\n\t\tlineHeight: 1.2,\n\n\t\t// top/bottom padding\n\t\tpadding: {\n\t\t\ttop: 4,\n\t\t\tbottom: 4\n\t\t}\n\t},\n\n\t// label settings\n\tticks: {\n\t\tbeginAtZero: false,\n\t\tminRotation: 0,\n\t\tmaxRotation: 50,\n\t\tmirror: false,\n\t\tpadding: 0,\n\t\treverse: false,\n\t\tdisplay: true,\n\t\tautoSkip: true,\n\t\tautoSkipPadding: 0,\n\t\tlabelOffset: 0,\n\t\t// We pass through arrays to be rendered as 
multiline labels, we convert Others to strings here.\n\t\tcallback: Ticks.formatters.values,\n\t\tminor: {},\n\t\tmajor: {}\n\t}\n});\n\nfunction labelsFromTicks(ticks) {\n\tvar labels = [];\n\tvar i, ilen;\n\n\tfor (i = 0, ilen = ticks.length; i < ilen; ++i) {\n\t\tlabels.push(ticks[i].label);\n\t}\n\n\treturn labels;\n}\n\nfunction getLineValue(scale, index, offsetGridLines) {\n\tvar lineValue = scale.getPixelForTick(index);\n\n\tif (offsetGridLines) {\n\t\tif (index === 0) {\n\t\t\tlineValue -= (scale.getPixelForTick(1) - lineValue) / 2;\n\t\t} else {\n\t\t\tlineValue -= (lineValue - scale.getPixelForTick(index - 1)) / 2;\n\t\t}\n\t}\n\treturn lineValue;\n}\n\nmodule.exports = function(Chart) {\n\n\tfunction computeTextSize(context, tick, font) {\n\t\treturn helpers.isArray(tick) ?\n\t\t\thelpers.longestText(context, font, tick) :\n\t\t\tcontext.measureText(tick).width;\n\t}\n\n\tfunction parseFontOptions(options) {\n\t\tvar valueOrDefault = helpers.valueOrDefault;\n\t\tvar globalDefaults = defaults.global;\n\t\tvar size = valueOrDefault(options.fontSize, globalDefaults.defaultFontSize);\n\t\tvar style = valueOrDefault(options.fontStyle, globalDefaults.defaultFontStyle);\n\t\tvar family = valueOrDefault(options.fontFamily, globalDefaults.defaultFontFamily);\n\n\t\treturn {\n\t\t\tsize: size,\n\t\t\tstyle: style,\n\t\t\tfamily: family,\n\t\t\tfont: helpers.fontString(size, style, family)\n\t\t};\n\t}\n\n\tfunction parseLineHeight(options) {\n\t\treturn helpers.options.toLineHeight(\n\t\t\thelpers.valueOrDefault(options.lineHeight, 1.2),\n\t\t\thelpers.valueOrDefault(options.fontSize, defaults.global.defaultFontSize));\n\t}\n\n\tChart.Scale = Element.extend({\n\t\t/**\n\t\t * Get the padding needed for the scale\n\t\t * @method getPadding\n\t\t * @private\n\t\t * @returns {Padding} the necessary padding\n\t\t */\n\t\tgetPadding: function() {\n\t\t\tvar me = this;\n\t\t\treturn {\n\t\t\t\tleft: me.paddingLeft || 0,\n\t\t\t\ttop: me.paddingTop || 0,\n\t\t\t\tright: 
me.paddingRight || 0,\n\t\t\t\tbottom: me.paddingBottom || 0\n\t\t\t};\n\t\t},\n\n\t\t/**\n\t\t * Returns the scale tick objects ({label, major})\n\t\t * @since 2.7\n\t\t */\n\t\tgetTicks: function() {\n\t\t\treturn this._ticks;\n\t\t},\n\n\t\t// These methods are ordered by lifecyle. Utilities then follow.\n\t\t// Any function defined here is inherited by all scale types.\n\t\t// Any function can be extended by the scale type\n\n\t\tmergeTicksOptions: function() {\n\t\t\tvar ticks = this.options.ticks;\n\t\t\tif (ticks.minor === false) {\n\t\t\t\tticks.minor = {\n\t\t\t\t\tdisplay: false\n\t\t\t\t};\n\t\t\t}\n\t\t\tif (ticks.major === false) {\n\t\t\t\tticks.major = {\n\t\t\t\t\tdisplay: false\n\t\t\t\t};\n\t\t\t}\n\t\t\tfor (var key in ticks) {\n\t\t\t\tif (key !== 'major' && key !== 'minor') {\n\t\t\t\t\tif (typeof ticks.minor[key] === 'undefined') {\n\t\t\t\t\t\tticks.minor[key] = ticks[key];\n\t\t\t\t\t}\n\t\t\t\t\tif (typeof ticks.major[key] === 'undefined') {\n\t\t\t\t\t\tticks.major[key] = ticks[key];\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t},\n\t\tbeforeUpdate: function() {\n\t\t\thelpers.callback(this.options.beforeUpdate, [this]);\n\t\t},\n\t\tupdate: function(maxWidth, maxHeight, margins) {\n\t\t\tvar me = this;\n\t\t\tvar i, ilen, labels, label, ticks, tick;\n\n\t\t\t// Update Lifecycle - Probably don't want to ever extend or overwrite this function ;)\n\t\t\tme.beforeUpdate();\n\n\t\t\t// Absorb the master measurements\n\t\t\tme.maxWidth = maxWidth;\n\t\t\tme.maxHeight = maxHeight;\n\t\t\tme.margins = helpers.extend({\n\t\t\t\tleft: 0,\n\t\t\t\tright: 0,\n\t\t\t\ttop: 0,\n\t\t\t\tbottom: 0\n\t\t\t}, margins);\n\t\t\tme.longestTextCache = me.longestTextCache || {};\n\n\t\t\t// Dimensions\n\t\t\tme.beforeSetDimensions();\n\t\t\tme.setDimensions();\n\t\t\tme.afterSetDimensions();\n\n\t\t\t// Data min/max\n\t\t\tme.beforeDataLimits();\n\t\t\tme.determineDataLimits();\n\t\t\tme.afterDataLimits();\n\n\t\t\t// Ticks - `this.ticks` is now DEPRECATED!\n\t\t\t// 
Internal ticks are now stored as objects in the PRIVATE `this._ticks` member\n\t\t\t// and must not be accessed directly from outside this class. `this.ticks` being\n\t\t\t// around for long time and not marked as private, we can't change its structure\n\t\t\t// without unexpected breaking changes. If you need to access the scale ticks,\n\t\t\t// use scale.getTicks() instead.\n\n\t\t\tme.beforeBuildTicks();\n\n\t\t\t// New implementations should return an array of objects but for BACKWARD COMPAT,\n\t\t\t// we still support no return (`this.ticks` internally set by calling this method).\n\t\t\tticks = me.buildTicks() || [];\n\n\t\t\tme.afterBuildTicks();\n\n\t\t\tme.beforeTickToLabelConversion();\n\n\t\t\t// New implementations should return the formatted tick labels but for BACKWARD\n\t\t\t// COMPAT, we still support no return (`this.ticks` internally changed by calling\n\t\t\t// this method and supposed to contain only string values).\n\t\t\tlabels = me.convertTicksToLabels(ticks) || me.ticks;\n\n\t\t\tme.afterTickToLabelConversion();\n\n\t\t\tme.ticks = labels;   // BACKWARD COMPATIBILITY\n\n\t\t\t// IMPORTANT: from this point, we consider that `this.ticks` will NEVER change!\n\n\t\t\t// BACKWARD COMPAT: synchronize `_ticks` with labels (so potentially `this.ticks`)\n\t\t\tfor (i = 0, ilen = labels.length; i < ilen; ++i) {\n\t\t\t\tlabel = labels[i];\n\t\t\t\ttick = ticks[i];\n\t\t\t\tif (!tick) {\n\t\t\t\t\tticks.push(tick = {\n\t\t\t\t\t\tlabel: label,\n\t\t\t\t\t\tmajor: false\n\t\t\t\t\t});\n\t\t\t\t} else {\n\t\t\t\t\ttick.label = label;\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tme._ticks = ticks;\n\n\t\t\t// Tick Rotation\n\t\t\tme.beforeCalculateTickRotation();\n\t\t\tme.calculateTickRotation();\n\t\t\tme.afterCalculateTickRotation();\n\t\t\t// Fit\n\t\t\tme.beforeFit();\n\t\t\tme.fit();\n\t\t\tme.afterFit();\n\t\t\t//\n\t\t\tme.afterUpdate();\n\n\t\t\treturn me.minSize;\n\n\t\t},\n\t\tafterUpdate: function() {\n\t\t\thelpers.callback(this.options.afterUpdate, 
[this]);\n\t\t},\n\n\t\t//\n\n\t\tbeforeSetDimensions: function() {\n\t\t\thelpers.callback(this.options.beforeSetDimensions, [this]);\n\t\t},\n\t\tsetDimensions: function() {\n\t\t\tvar me = this;\n\t\t\t// Set the unconstrained dimension before label rotation\n\t\t\tif (me.isHorizontal()) {\n\t\t\t\t// Reset position before calculating rotation\n\t\t\t\tme.width = me.maxWidth;\n\t\t\t\tme.left = 0;\n\t\t\t\tme.right = me.width;\n\t\t\t} else {\n\t\t\t\tme.height = me.maxHeight;\n\n\t\t\t\t// Reset position before calculating rotation\n\t\t\t\tme.top = 0;\n\t\t\t\tme.bottom = me.height;\n\t\t\t}\n\n\t\t\t// Reset padding\n\t\t\tme.paddingLeft = 0;\n\t\t\tme.paddingTop = 0;\n\t\t\tme.paddingRight = 0;\n\t\t\tme.paddingBottom = 0;\n\t\t},\n\t\tafterSetDimensions: function() {\n\t\t\thelpers.callback(this.options.afterSetDimensions, [this]);\n\t\t},\n\n\t\t// Data limits\n\t\tbeforeDataLimits: function() {\n\t\t\thelpers.callback(this.options.beforeDataLimits, [this]);\n\t\t},\n\t\tdetermineDataLimits: helpers.noop,\n\t\tafterDataLimits: function() {\n\t\t\thelpers.callback(this.options.afterDataLimits, [this]);\n\t\t},\n\n\t\t//\n\t\tbeforeBuildTicks: function() {\n\t\t\thelpers.callback(this.options.beforeBuildTicks, [this]);\n\t\t},\n\t\tbuildTicks: helpers.noop,\n\t\tafterBuildTicks: function() {\n\t\t\thelpers.callback(this.options.afterBuildTicks, [this]);\n\t\t},\n\n\t\tbeforeTickToLabelConversion: function() {\n\t\t\thelpers.callback(this.options.beforeTickToLabelConversion, [this]);\n\t\t},\n\t\tconvertTicksToLabels: function() {\n\t\t\tvar me = this;\n\t\t\t// Convert ticks to strings\n\t\t\tvar tickOpts = me.options.ticks;\n\t\t\tme.ticks = me.ticks.map(tickOpts.userCallback || tickOpts.callback, this);\n\t\t},\n\t\tafterTickToLabelConversion: function() {\n\t\t\thelpers.callback(this.options.afterTickToLabelConversion, [this]);\n\t\t},\n\n\t\t//\n\n\t\tbeforeCalculateTickRotation: function() 
{\n\t\t\thelpers.callback(this.options.beforeCalculateTickRotation, [this]);\n\t\t},\n\t\tcalculateTickRotation: function() {\n\t\t\tvar me = this;\n\t\t\tvar context = me.ctx;\n\t\t\tvar tickOpts = me.options.ticks;\n\t\t\tvar labels = labelsFromTicks(me._ticks);\n\n\t\t\t// Get the width of each grid by calculating the difference\n\t\t\t// between x offsets between 0 and 1.\n\t\t\tvar tickFont = parseFontOptions(tickOpts);\n\t\t\tcontext.font = tickFont.font;\n\n\t\t\tvar labelRotation = tickOpts.minRotation || 0;\n\n\t\t\tif (labels.length && me.options.display && me.isHorizontal()) {\n\t\t\t\tvar originalLabelWidth = helpers.longestText(context, tickFont.font, labels, me.longestTextCache);\n\t\t\t\tvar labelWidth = originalLabelWidth;\n\t\t\t\tvar cosRotation, sinRotation;\n\n\t\t\t\t// Allow 3 pixels x2 padding either side for label readability\n\t\t\t\tvar tickWidth = me.getPixelForTick(1) - me.getPixelForTick(0) - 6;\n\n\t\t\t\t// Max label rotation can be set or default to 90 - also act as a loop counter\n\t\t\t\twhile (labelWidth > tickWidth && labelRotation < tickOpts.maxRotation) {\n\t\t\t\t\tvar angleRadians = helpers.toRadians(labelRotation);\n\t\t\t\t\tcosRotation = Math.cos(angleRadians);\n\t\t\t\t\tsinRotation = Math.sin(angleRadians);\n\n\t\t\t\t\tif (sinRotation * originalLabelWidth > me.maxHeight) {\n\t\t\t\t\t\t// go back one step\n\t\t\t\t\t\tlabelRotation--;\n\t\t\t\t\t\tbreak;\n\t\t\t\t\t}\n\n\t\t\t\t\tlabelRotation++;\n\t\t\t\t\tlabelWidth = cosRotation * originalLabelWidth;\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tme.labelRotation = labelRotation;\n\t\t},\n\t\tafterCalculateTickRotation: function() {\n\t\t\thelpers.callback(this.options.afterCalculateTickRotation, [this]);\n\t\t},\n\n\t\t//\n\n\t\tbeforeFit: function() {\n\t\t\thelpers.callback(this.options.beforeFit, [this]);\n\t\t},\n\t\tfit: function() {\n\t\t\tvar me = this;\n\t\t\t// Reset\n\t\t\tvar minSize = me.minSize = {\n\t\t\t\twidth: 0,\n\t\t\t\theight: 0\n\t\t\t};\n\n\t\t\tvar labels = 
labelsFromTicks(me._ticks);\n\n\t\t\tvar opts = me.options;\n\t\t\tvar tickOpts = opts.ticks;\n\t\t\tvar scaleLabelOpts = opts.scaleLabel;\n\t\t\tvar gridLineOpts = opts.gridLines;\n\t\t\tvar display = opts.display;\n\t\t\tvar isHorizontal = me.isHorizontal();\n\n\t\t\tvar tickFont = parseFontOptions(tickOpts);\n\t\t\tvar tickMarkLength = opts.gridLines.tickMarkLength;\n\n\t\t\t// Width\n\t\t\tif (isHorizontal) {\n\t\t\t\t// subtract the margins to line up with the chartArea if we are a full width scale\n\t\t\t\tminSize.width = me.isFullWidth() ? me.maxWidth - me.margins.left - me.margins.right : me.maxWidth;\n\t\t\t} else {\n\t\t\t\tminSize.width = display && gridLineOpts.drawTicks ? tickMarkLength : 0;\n\t\t\t}\n\n\t\t\t// height\n\t\t\tif (isHorizontal) {\n\t\t\t\tminSize.height = display && gridLineOpts.drawTicks ? tickMarkLength : 0;\n\t\t\t} else {\n\t\t\t\tminSize.height = me.maxHeight; // fill all the height\n\t\t\t}\n\n\t\t\t// Are we showing a title for the scale?\n\t\t\tif (scaleLabelOpts.display && display) {\n\t\t\t\tvar scaleLabelLineHeight = parseLineHeight(scaleLabelOpts);\n\t\t\t\tvar scaleLabelPadding = helpers.options.toPadding(scaleLabelOpts.padding);\n\t\t\t\tvar deltaHeight = scaleLabelLineHeight + scaleLabelPadding.height;\n\n\t\t\t\tif (isHorizontal) {\n\t\t\t\t\tminSize.height += deltaHeight;\n\t\t\t\t} else {\n\t\t\t\t\tminSize.width += deltaHeight;\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Don't bother fitting the ticks if we are not showing them\n\t\t\tif (tickOpts.display && display) {\n\t\t\t\tvar largestTextWidth = helpers.longestText(me.ctx, tickFont.font, labels, me.longestTextCache);\n\t\t\t\tvar tallestLabelHeightInLines = helpers.numberOfLabelLines(labels);\n\t\t\t\tvar lineSpace = tickFont.size * 0.5;\n\t\t\t\tvar tickPadding = me.options.ticks.padding;\n\n\t\t\t\tif (isHorizontal) {\n\t\t\t\t\t// A horizontal axis is more constrained by the height.\n\t\t\t\t\tme.longestLabelWidth = largestTextWidth;\n\n\t\t\t\t\tvar angleRadians = 
helpers.toRadians(me.labelRotation);\n\t\t\t\t\tvar cosRotation = Math.cos(angleRadians);\n\t\t\t\t\tvar sinRotation = Math.sin(angleRadians);\n\n\t\t\t\t\t// TODO - improve this calculation\n\t\t\t\t\tvar labelHeight = (sinRotation * largestTextWidth)\n\t\t\t\t\t\t+ (tickFont.size * tallestLabelHeightInLines)\n\t\t\t\t\t\t+ (lineSpace * (tallestLabelHeightInLines - 1))\n\t\t\t\t\t\t+ lineSpace; // padding\n\n\t\t\t\t\tminSize.height = Math.min(me.maxHeight, minSize.height + labelHeight + tickPadding);\n\n\t\t\t\t\tme.ctx.font = tickFont.font;\n\t\t\t\t\tvar firstLabelWidth = computeTextSize(me.ctx, labels[0], tickFont.font);\n\t\t\t\t\tvar lastLabelWidth = computeTextSize(me.ctx, labels[labels.length - 1], tickFont.font);\n\n\t\t\t\t\t// Ensure that our ticks are always inside the canvas. When rotated, ticks are right aligned\n\t\t\t\t\t// which means that the right padding is dominated by the font height\n\t\t\t\t\tif (me.labelRotation !== 0) {\n\t\t\t\t\t\tme.paddingLeft = opts.position === 'bottom' ? (cosRotation * firstLabelWidth) + 3 : (cosRotation * lineSpace) + 3; // add 3 px to move away from canvas edges\n\t\t\t\t\t\tme.paddingRight = opts.position === 'bottom' ? (cosRotation * lineSpace) + 3 : (cosRotation * lastLabelWidth) + 3;\n\t\t\t\t\t} else {\n\t\t\t\t\t\tme.paddingLeft = firstLabelWidth / 2 + 3; // add 3 px to move away from canvas edges\n\t\t\t\t\t\tme.paddingRight = lastLabelWidth / 2 + 3;\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\t// A vertical axis is more constrained by the width. 
Labels are the\n\t\t\t\t\t// dominant factor here, so get that length first and account for padding\n\t\t\t\t\tif (tickOpts.mirror) {\n\t\t\t\t\t\tlargestTextWidth = 0;\n\t\t\t\t\t} else {\n\t\t\t\t\t\t// use lineSpace for consistency with horizontal axis\n\t\t\t\t\t\t// tickPadding is not implemented for horizontal\n\t\t\t\t\t\tlargestTextWidth += tickPadding + lineSpace;\n\t\t\t\t\t}\n\n\t\t\t\t\tminSize.width = Math.min(me.maxWidth, minSize.width + largestTextWidth);\n\n\t\t\t\t\tme.paddingTop = tickFont.size / 2;\n\t\t\t\t\tme.paddingBottom = tickFont.size / 2;\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tme.handleMargins();\n\n\t\t\tme.width = minSize.width;\n\t\t\tme.height = minSize.height;\n\t\t},\n\n\t\t/**\n\t\t * Handle margins and padding interactions\n\t\t * @private\n\t\t */\n\t\thandleMargins: function() {\n\t\t\tvar me = this;\n\t\t\tif (me.margins) {\n\t\t\t\tme.paddingLeft = Math.max(me.paddingLeft - me.margins.left, 0);\n\t\t\t\tme.paddingTop = Math.max(me.paddingTop - me.margins.top, 0);\n\t\t\t\tme.paddingRight = Math.max(me.paddingRight - me.margins.right, 0);\n\t\t\t\tme.paddingBottom = Math.max(me.paddingBottom - me.margins.bottom, 0);\n\t\t\t}\n\t\t},\n\n\t\tafterFit: function() {\n\t\t\thelpers.callback(this.options.afterFit, [this]);\n\t\t},\n\n\t\t// Shared Methods\n\t\tisHorizontal: function() {\n\t\t\treturn this.options.position === 'top' || this.options.position === 'bottom';\n\t\t},\n\t\tisFullWidth: function() {\n\t\t\treturn (this.options.fullWidth);\n\t\t},\n\n\t\t// Get the correct value. 
NaN bad inputs, If the value type is object get the x or y based on whether we are horizontal or not\n\t\tgetRightValue: function(rawValue) {\n\t\t\t// Null and undefined values first\n\t\t\tif (helpers.isNullOrUndef(rawValue)) {\n\t\t\t\treturn NaN;\n\t\t\t}\n\t\t\t// isNaN(object) returns true, so make sure NaN is checking for a number; Discard Infinite values\n\t\t\tif (typeof rawValue === 'number' && !isFinite(rawValue)) {\n\t\t\t\treturn NaN;\n\t\t\t}\n\t\t\t// If it is in fact an object, dive in one more level\n\t\t\tif (rawValue) {\n\t\t\t\tif (this.isHorizontal()) {\n\t\t\t\t\tif (rawValue.x !== undefined) {\n\t\t\t\t\t\treturn this.getRightValue(rawValue.x);\n\t\t\t\t\t}\n\t\t\t\t} else if (rawValue.y !== undefined) {\n\t\t\t\t\treturn this.getRightValue(rawValue.y);\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Value is good, return it\n\t\t\treturn rawValue;\n\t\t},\n\n\t\t/**\n\t\t * Used to get the value to display in the tooltip for the data at the given index\n\t\t * @param index\n\t\t * @param datasetIndex\n\t\t */\n\t\tgetLabelForIndex: helpers.noop,\n\n\t\t/**\n\t\t * Returns the location of the given data point. Value can either be an index or a numerical value\n\t\t * The coordinate (0, 0) is at the upper-left corner of the canvas\n\t\t * @param value\n\t\t * @param index\n\t\t * @param datasetIndex\n\t\t */\n\t\tgetPixelForValue: helpers.noop,\n\n\t\t/**\n\t\t * Used to get the data value from a given pixel. 
This is the inverse of getPixelForValue\n\t\t * The coordinate (0, 0) is at the upper-left corner of the canvas\n\t\t * @param pixel\n\t\t */\n\t\tgetValueForPixel: helpers.noop,\n\n\t\t/**\n\t\t * Returns the location of the tick at the given index\n\t\t * The coordinate (0, 0) is at the upper-left corner of the canvas\n\t\t */\n\t\tgetPixelForTick: function(index) {\n\t\t\tvar me = this;\n\t\t\tvar offset = me.options.offset;\n\t\t\tif (me.isHorizontal()) {\n\t\t\t\tvar innerWidth = me.width - (me.paddingLeft + me.paddingRight);\n\t\t\t\tvar tickWidth = innerWidth / Math.max((me._ticks.length - (offset ? 0 : 1)), 1);\n\t\t\t\tvar pixel = (tickWidth * index) + me.paddingLeft;\n\n\t\t\t\tif (offset) {\n\t\t\t\t\tpixel += tickWidth / 2;\n\t\t\t\t}\n\n\t\t\t\tvar finalVal = me.left + Math.round(pixel);\n\t\t\t\tfinalVal += me.isFullWidth() ? me.margins.left : 0;\n\t\t\t\treturn finalVal;\n\t\t\t}\n\t\t\tvar innerHeight = me.height - (me.paddingTop + me.paddingBottom);\n\t\t\treturn me.top + (index * (innerHeight / (me._ticks.length - 1)));\n\t\t},\n\n\t\t/**\n\t\t * Utility for getting the pixel location of a percentage of scale\n\t\t * The coordinate (0, 0) is at the upper-left corner of the canvas\n\t\t */\n\t\tgetPixelForDecimal: function(decimal) {\n\t\t\tvar me = this;\n\t\t\tif (me.isHorizontal()) {\n\t\t\t\tvar innerWidth = me.width - (me.paddingLeft + me.paddingRight);\n\t\t\t\tvar valueOffset = (innerWidth * decimal) + me.paddingLeft;\n\n\t\t\t\tvar finalVal = me.left + Math.round(valueOffset);\n\t\t\t\tfinalVal += me.isFullWidth() ? 
me.margins.left : 0;\n\t\t\t\treturn finalVal;\n\t\t\t}\n\t\t\treturn me.top + (decimal * me.height);\n\t\t},\n\n\t\t/**\n\t\t * Returns the pixel for the minimum chart value\n\t\t * The coordinate (0, 0) is at the upper-left corner of the canvas\n\t\t */\n\t\tgetBasePixel: function() {\n\t\t\treturn this.getPixelForValue(this.getBaseValue());\n\t\t},\n\n\t\tgetBaseValue: function() {\n\t\t\tvar me = this;\n\t\t\tvar min = me.min;\n\t\t\tvar max = me.max;\n\n\t\t\treturn me.beginAtZero ? 0 :\n\t\t\t\tmin < 0 && max < 0 ? max :\n\t\t\t\tmin > 0 && max > 0 ? min :\n\t\t\t\t0;\n\t\t},\n\n\t\t/**\n\t\t * Returns a subset of ticks to be plotted to avoid overlapping labels.\n\t\t * @private\n\t\t */\n\t\t_autoSkip: function(ticks) {\n\t\t\tvar skipRatio;\n\t\t\tvar me = this;\n\t\t\tvar isHorizontal = me.isHorizontal();\n\t\t\tvar optionTicks = me.options.ticks.minor;\n\t\t\tvar tickCount = ticks.length;\n\t\t\tvar labelRotationRadians = helpers.toRadians(me.labelRotation);\n\t\t\tvar cosRotation = Math.cos(labelRotationRadians);\n\t\t\tvar longestRotatedLabel = me.longestLabelWidth * cosRotation;\n\t\t\tvar result = [];\n\t\t\tvar i, tick, shouldSkip;\n\n\t\t\t// figure out the maximum number of gridlines to show\n\t\t\tvar maxTicks;\n\t\t\tif (optionTicks.maxTicksLimit) {\n\t\t\t\tmaxTicks = optionTicks.maxTicksLimit;\n\t\t\t}\n\n\t\t\tif (isHorizontal) {\n\t\t\t\tskipRatio = false;\n\n\t\t\t\tif ((longestRotatedLabel + optionTicks.autoSkipPadding) * tickCount > (me.width - (me.paddingLeft + me.paddingRight))) {\n\t\t\t\t\tskipRatio = 1 + Math.floor(((longestRotatedLabel + optionTicks.autoSkipPadding) * tickCount) / (me.width - (me.paddingLeft + me.paddingRight)));\n\t\t\t\t}\n\n\t\t\t\t// if they defined a max number of optionTicks,\n\t\t\t\t// increase skipRatio until that number is met\n\t\t\t\tif (maxTicks && tickCount > maxTicks) {\n\t\t\t\t\tskipRatio = Math.max(skipRatio, Math.floor(tickCount / maxTicks));\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tfor (i = 0; i < tickCount; 
i++) {\n\t\t\t\ttick = ticks[i];\n\n\t\t\t\t// Since we always show the last tick,we need may need to hide the last shown one before\n\t\t\t\tshouldSkip = (skipRatio > 1 && i % skipRatio > 0) || (i % skipRatio === 0 && i + skipRatio >= tickCount);\n\t\t\t\tif (shouldSkip && i !== tickCount - 1) {\n\t\t\t\t\t// leave tick in place but make sure it's not displayed (#4635)\n\t\t\t\t\tdelete tick.label;\n\t\t\t\t}\n\t\t\t\tresult.push(tick);\n\t\t\t}\n\t\t\treturn result;\n\t\t},\n\n\t\t// Actually draw the scale on the canvas\n\t\t// @param {rectangle} chartArea : the area of the chart to draw full grid lines on\n\t\tdraw: function(chartArea) {\n\t\t\tvar me = this;\n\t\t\tvar options = me.options;\n\t\t\tif (!options.display) {\n\t\t\t\treturn;\n\t\t\t}\n\n\t\t\tvar context = me.ctx;\n\t\t\tvar globalDefaults = defaults.global;\n\t\t\tvar optionTicks = options.ticks.minor;\n\t\t\tvar optionMajorTicks = options.ticks.major || optionTicks;\n\t\t\tvar gridLines = options.gridLines;\n\t\t\tvar scaleLabel = options.scaleLabel;\n\n\t\t\tvar isRotated = me.labelRotation !== 0;\n\t\t\tvar isHorizontal = me.isHorizontal();\n\n\t\t\tvar ticks = optionTicks.autoSkip ? me._autoSkip(me.getTicks()) : me.getTicks();\n\t\t\tvar tickFontColor = helpers.valueOrDefault(optionTicks.fontColor, globalDefaults.defaultFontColor);\n\t\t\tvar tickFont = parseFontOptions(optionTicks);\n\t\t\tvar majorTickFontColor = helpers.valueOrDefault(optionMajorTicks.fontColor, globalDefaults.defaultFontColor);\n\t\t\tvar majorTickFont = parseFontOptions(optionMajorTicks);\n\n\t\t\tvar tl = gridLines.drawTicks ? 
gridLines.tickMarkLength : 0;\n\n\t\t\tvar scaleLabelFontColor = helpers.valueOrDefault(scaleLabel.fontColor, globalDefaults.defaultFontColor);\n\t\t\tvar scaleLabelFont = parseFontOptions(scaleLabel);\n\t\t\tvar scaleLabelPadding = helpers.options.toPadding(scaleLabel.padding);\n\t\t\tvar labelRotationRadians = helpers.toRadians(me.labelRotation);\n\n\t\t\tvar itemsToDraw = [];\n\n\t\t\tvar axisWidth = me.options.gridLines.lineWidth;\n\t\t\tvar xTickStart = options.position === 'right' ? me.right : me.right - axisWidth - tl;\n\t\t\tvar xTickEnd = options.position === 'right' ? me.right + tl : me.right;\n\t\t\tvar yTickStart = options.position === 'bottom' ? me.top + axisWidth : me.bottom - tl - axisWidth;\n\t\t\tvar yTickEnd = options.position === 'bottom' ? me.top + axisWidth + tl : me.bottom + axisWidth;\n\n\t\t\thelpers.each(ticks, function(tick, index) {\n\t\t\t\t// autoskipper skipped this tick (#4635)\n\t\t\t\tif (helpers.isNullOrUndef(tick.label)) {\n\t\t\t\t\treturn;\n\t\t\t\t}\n\n\t\t\t\tvar label = tick.label;\n\t\t\t\tvar lineWidth, lineColor, borderDash, borderDashOffset;\n\t\t\t\tif (index === me.zeroLineIndex && options.offset === gridLines.offsetGridLines) {\n\t\t\t\t\t// Draw the first index specially\n\t\t\t\t\tlineWidth = gridLines.zeroLineWidth;\n\t\t\t\t\tlineColor = gridLines.zeroLineColor;\n\t\t\t\t\tborderDash = gridLines.zeroLineBorderDash;\n\t\t\t\t\tborderDashOffset = gridLines.zeroLineBorderDashOffset;\n\t\t\t\t} else {\n\t\t\t\t\tlineWidth = helpers.valueAtIndexOrDefault(gridLines.lineWidth, index);\n\t\t\t\t\tlineColor = helpers.valueAtIndexOrDefault(gridLines.color, index);\n\t\t\t\t\tborderDash = helpers.valueOrDefault(gridLines.borderDash, globalDefaults.borderDash);\n\t\t\t\t\tborderDashOffset = helpers.valueOrDefault(gridLines.borderDashOffset, globalDefaults.borderDashOffset);\n\t\t\t\t}\n\n\t\t\t\t// Common properties\n\t\t\t\tvar tx1, ty1, tx2, ty2, x1, y1, x2, y2, labelX, labelY;\n\t\t\t\tvar textAlign = 'middle';\n\t\t\t\tvar 
textBaseline = 'middle';\n\t\t\t\tvar tickPadding = optionTicks.padding;\n\n\t\t\t\tif (isHorizontal) {\n\t\t\t\t\tvar labelYOffset = tl + tickPadding;\n\n\t\t\t\t\tif (options.position === 'bottom') {\n\t\t\t\t\t\t// bottom\n\t\t\t\t\t\ttextBaseline = !isRotated ? 'top' : 'middle';\n\t\t\t\t\t\ttextAlign = !isRotated ? 'center' : 'right';\n\t\t\t\t\t\tlabelY = me.top + labelYOffset;\n\t\t\t\t\t} else {\n\t\t\t\t\t\t// top\n\t\t\t\t\t\ttextBaseline = !isRotated ? 'bottom' : 'middle';\n\t\t\t\t\t\ttextAlign = !isRotated ? 'center' : 'left';\n\t\t\t\t\t\tlabelY = me.bottom - labelYOffset;\n\t\t\t\t\t}\n\n\t\t\t\t\tvar xLineValue = getLineValue(me, index, gridLines.offsetGridLines && ticks.length > 1);\n\t\t\t\t\tif (xLineValue < me.left) {\n\t\t\t\t\t\tlineColor = 'rgba(0,0,0,0)';\n\t\t\t\t\t}\n\t\t\t\t\txLineValue += helpers.aliasPixel(lineWidth);\n\n\t\t\t\t\tlabelX = me.getPixelForTick(index) + optionTicks.labelOffset; // x values for optionTicks (need to consider offsetLabel option)\n\n\t\t\t\t\ttx1 = tx2 = x1 = x2 = xLineValue;\n\t\t\t\t\tty1 = yTickStart;\n\t\t\t\t\tty2 = yTickEnd;\n\t\t\t\t\ty1 = chartArea.top;\n\t\t\t\t\ty2 = chartArea.bottom + axisWidth;\n\t\t\t\t} else {\n\t\t\t\t\tvar isLeft = options.position === 'left';\n\t\t\t\t\tvar labelXOffset;\n\n\t\t\t\t\tif (optionTicks.mirror) {\n\t\t\t\t\t\ttextAlign = isLeft ? 'left' : 'right';\n\t\t\t\t\t\tlabelXOffset = tickPadding;\n\t\t\t\t\t} else {\n\t\t\t\t\t\ttextAlign = isLeft ? 'right' : 'left';\n\t\t\t\t\t\tlabelXOffset = tl + tickPadding;\n\t\t\t\t\t}\n\n\t\t\t\t\tlabelX = isLeft ? 
me.right - labelXOffset : me.left + labelXOffset;\n\n\t\t\t\t\tvar yLineValue = getLineValue(me, index, gridLines.offsetGridLines && ticks.length > 1);\n\t\t\t\t\tif (yLineValue < me.top) {\n\t\t\t\t\t\tlineColor = 'rgba(0,0,0,0)';\n\t\t\t\t\t}\n\t\t\t\t\tyLineValue += helpers.aliasPixel(lineWidth);\n\n\t\t\t\t\tlabelY = me.getPixelForTick(index) + optionTicks.labelOffset;\n\n\t\t\t\t\ttx1 = xTickStart;\n\t\t\t\t\ttx2 = xTickEnd;\n\t\t\t\t\tx1 = chartArea.left;\n\t\t\t\t\tx2 = chartArea.right + axisWidth;\n\t\t\t\t\tty1 = ty2 = y1 = y2 = yLineValue;\n\t\t\t\t}\n\n\t\t\t\titemsToDraw.push({\n\t\t\t\t\ttx1: tx1,\n\t\t\t\t\tty1: ty1,\n\t\t\t\t\ttx2: tx2,\n\t\t\t\t\tty2: ty2,\n\t\t\t\t\tx1: x1,\n\t\t\t\t\ty1: y1,\n\t\t\t\t\tx2: x2,\n\t\t\t\t\ty2: y2,\n\t\t\t\t\tlabelX: labelX,\n\t\t\t\t\tlabelY: labelY,\n\t\t\t\t\tglWidth: lineWidth,\n\t\t\t\t\tglColor: lineColor,\n\t\t\t\t\tglBorderDash: borderDash,\n\t\t\t\t\tglBorderDashOffset: borderDashOffset,\n\t\t\t\t\trotation: -1 * labelRotationRadians,\n\t\t\t\t\tlabel: label,\n\t\t\t\t\tmajor: tick.major,\n\t\t\t\t\ttextBaseline: textBaseline,\n\t\t\t\t\ttextAlign: textAlign\n\t\t\t\t});\n\t\t\t});\n\n\t\t\t// Draw all of the tick labels, tick marks, and grid lines at the correct places\n\t\t\thelpers.each(itemsToDraw, function(itemToDraw) {\n\t\t\t\tif (gridLines.display) {\n\t\t\t\t\tcontext.save();\n\t\t\t\t\tcontext.lineWidth = itemToDraw.glWidth;\n\t\t\t\t\tcontext.strokeStyle = itemToDraw.glColor;\n\t\t\t\t\tif (context.setLineDash) {\n\t\t\t\t\t\tcontext.setLineDash(itemToDraw.glBorderDash);\n\t\t\t\t\t\tcontext.lineDashOffset = itemToDraw.glBorderDashOffset;\n\t\t\t\t\t}\n\n\t\t\t\t\tcontext.beginPath();\n\n\t\t\t\t\tif (gridLines.drawTicks) {\n\t\t\t\t\t\tcontext.moveTo(itemToDraw.tx1, itemToDraw.ty1);\n\t\t\t\t\t\tcontext.lineTo(itemToDraw.tx2, itemToDraw.ty2);\n\t\t\t\t\t}\n\n\t\t\t\t\tif (gridLines.drawOnChartArea) {\n\t\t\t\t\t\tcontext.moveTo(itemToDraw.x1, 
itemToDraw.y1);\n\t\t\t\t\t\tcontext.lineTo(itemToDraw.x2, itemToDraw.y2);\n\t\t\t\t\t}\n\n\t\t\t\t\tcontext.stroke();\n\t\t\t\t\tcontext.restore();\n\t\t\t\t}\n\n\t\t\t\tif (optionTicks.display) {\n\t\t\t\t\t// Make sure we draw text in the correct color and font\n\t\t\t\t\tcontext.save();\n\t\t\t\t\tcontext.translate(itemToDraw.labelX, itemToDraw.labelY);\n\t\t\t\t\tcontext.rotate(itemToDraw.rotation);\n\t\t\t\t\tcontext.font = itemToDraw.major ? majorTickFont.font : tickFont.font;\n\t\t\t\t\tcontext.fillStyle = itemToDraw.major ? majorTickFontColor : tickFontColor;\n\t\t\t\t\tcontext.textBaseline = itemToDraw.textBaseline;\n\t\t\t\t\tcontext.textAlign = itemToDraw.textAlign;\n\n\t\t\t\t\tvar label = itemToDraw.label;\n\t\t\t\t\tif (helpers.isArray(label)) {\n\t\t\t\t\t\tvar lineCount = label.length;\n\t\t\t\t\t\tvar lineHeight = tickFont.size * 1.5;\n\t\t\t\t\t\tvar y = me.isHorizontal() ? 0 : -lineHeight * (lineCount - 1) / 2;\n\n\t\t\t\t\t\tfor (var i = 0; i < lineCount; ++i) {\n\t\t\t\t\t\t\t// We just make sure the multiline element is a string here..\n\t\t\t\t\t\t\tcontext.fillText('' + label[i], 0, y);\n\t\t\t\t\t\t\t// apply same lineSpacing as calculated @ L#320\n\t\t\t\t\t\t\ty += lineHeight;\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\tcontext.fillText(label, 0, 0);\n\t\t\t\t\t}\n\t\t\t\t\tcontext.restore();\n\t\t\t\t}\n\t\t\t});\n\n\t\t\tif (scaleLabel.display) {\n\t\t\t\t// Draw the scale label\n\t\t\t\tvar scaleLabelX;\n\t\t\t\tvar scaleLabelY;\n\t\t\t\tvar rotation = 0;\n\t\t\t\tvar halfLineHeight = parseLineHeight(scaleLabel) / 2;\n\n\t\t\t\tif (isHorizontal) {\n\t\t\t\t\tscaleLabelX = me.left + ((me.right - me.left) / 2); // midpoint of the width\n\t\t\t\t\tscaleLabelY = options.position === 'bottom'\n\t\t\t\t\t\t? 
me.bottom - halfLineHeight - scaleLabelPadding.bottom\n\t\t\t\t\t\t: me.top + halfLineHeight + scaleLabelPadding.top;\n\t\t\t\t} else {\n\t\t\t\t\tvar isLeft = options.position === 'left';\n\t\t\t\t\tscaleLabelX = isLeft\n\t\t\t\t\t\t? me.left + halfLineHeight + scaleLabelPadding.top\n\t\t\t\t\t\t: me.right - halfLineHeight - scaleLabelPadding.top;\n\t\t\t\t\tscaleLabelY = me.top + ((me.bottom - me.top) / 2);\n\t\t\t\t\trotation = isLeft ? -0.5 * Math.PI : 0.5 * Math.PI;\n\t\t\t\t}\n\n\t\t\t\tcontext.save();\n\t\t\t\tcontext.translate(scaleLabelX, scaleLabelY);\n\t\t\t\tcontext.rotate(rotation);\n\t\t\t\tcontext.textAlign = 'center';\n\t\t\t\tcontext.textBaseline = 'middle';\n\t\t\t\tcontext.fillStyle = scaleLabelFontColor; // render in correct colour\n\t\t\t\tcontext.font = scaleLabelFont.font;\n\t\t\t\tcontext.fillText(scaleLabel.labelString, 0, 0);\n\t\t\t\tcontext.restore();\n\t\t\t}\n\n\t\t\tif (gridLines.drawBorder) {\n\t\t\t\t// Draw the line at the edge of the axis\n\t\t\t\tcontext.lineWidth = helpers.valueAtIndexOrDefault(gridLines.lineWidth, 0);\n\t\t\t\tcontext.strokeStyle = helpers.valueAtIndexOrDefault(gridLines.color, 0);\n\t\t\t\tvar x1 = me.left;\n\t\t\t\tvar x2 = me.right + axisWidth;\n\t\t\t\tvar y1 = me.top;\n\t\t\t\tvar y2 = me.bottom + axisWidth;\n\n\t\t\t\tvar aliasPixel = helpers.aliasPixel(context.lineWidth);\n\t\t\t\tif (isHorizontal) {\n\t\t\t\t\ty1 = y2 = options.position === 'top' ? me.bottom : me.top;\n\t\t\t\t\ty1 += aliasPixel;\n\t\t\t\t\ty2 += aliasPixel;\n\t\t\t\t} else {\n\t\t\t\t\tx1 = x2 = options.position === 'left' ? 
me.right : me.left;\n\t\t\t\t\tx1 += aliasPixel;\n\t\t\t\t\tx2 += aliasPixel;\n\t\t\t\t}\n\n\t\t\t\tcontext.beginPath();\n\t\t\t\tcontext.moveTo(x1, y1);\n\t\t\t\tcontext.lineTo(x2, y2);\n\t\t\t\tcontext.stroke();\n\t\t\t}\n\t\t}\n\t});\n};\n\n},{\"25\":25,\"26\":26,\"34\":34,\"45\":45}],33:[function(require,module,exports){\n'use strict';\n\nvar defaults = require(25);\nvar helpers = require(45);\nvar layouts = require(30);\n\nmodule.exports = function(Chart) {\n\n\tChart.scaleService = {\n\t\t// Scale registration object. Extensions can register new scale types (such as log or DB scales) and then\n\t\t// use the new chart options to grab the correct scale\n\t\tconstructors: {},\n\t\t// Use a registration function so that we can move to an ES6 map when we no longer need to support\n\t\t// old browsers\n\n\t\t// Scale config defaults\n\t\tdefaults: {},\n\t\tregisterScaleType: function(type, scaleConstructor, scaleDefaults) {\n\t\t\tthis.constructors[type] = scaleConstructor;\n\t\t\tthis.defaults[type] = helpers.clone(scaleDefaults);\n\t\t},\n\t\tgetScaleConstructor: function(type) {\n\t\t\treturn this.constructors.hasOwnProperty(type) ? this.constructors[type] : undefined;\n\t\t},\n\t\tgetScaleDefaults: function(type) {\n\t\t\t// Return the scale defaults merged with the global settings so that we always use the latest ones\n\t\t\treturn this.defaults.hasOwnProperty(type) ? 
helpers.merge({}, [defaults.scale, this.defaults[type]]) : {};\n\t\t},\n\t\tupdateScaleDefaults: function(type, additions) {\n\t\t\tvar me = this;\n\t\t\tif (me.defaults.hasOwnProperty(type)) {\n\t\t\t\tme.defaults[type] = helpers.extend(me.defaults[type], additions);\n\t\t\t}\n\t\t},\n\t\taddScalesToLayout: function(chart) {\n\t\t\t// Adds each scale to the chart.boxes array to be sized accordingly\n\t\t\thelpers.each(chart.scales, function(scale) {\n\t\t\t\t// Set ILayoutItem parameters for backwards compatibility\n\t\t\t\tscale.fullWidth = scale.options.fullWidth;\n\t\t\t\tscale.position = scale.options.position;\n\t\t\t\tscale.weight = scale.options.weight;\n\t\t\t\tlayouts.addBox(chart, scale);\n\t\t\t});\n\t\t}\n\t};\n};\n\n},{\"25\":25,\"30\":30,\"45\":45}],34:[function(require,module,exports){\n'use strict';\n\nvar helpers = require(45);\n\n/**\n * Namespace to hold static tick generation functions\n * @namespace Chart.Ticks\n */\nmodule.exports = {\n\t/**\n\t * Namespace to hold formatters for different types of ticks\n\t * @namespace Chart.Ticks.formatters\n\t */\n\tformatters: {\n\t\t/**\n\t\t * Formatter for value labels\n\t\t * @method Chart.Ticks.formatters.values\n\t\t * @param value the value to display\n\t\t * @return {String|Array} the label to display\n\t\t */\n\t\tvalues: function(value) {\n\t\t\treturn helpers.isArray(value) ? value : '' + value;\n\t\t},\n\n\t\t/**\n\t\t * Formatter for linear numeric ticks\n\t\t * @method Chart.Ticks.formatters.linear\n\t\t * @param tickValue {Number} the value to be formatted\n\t\t * @param index {Number} the position of the tickValue parameter in the ticks array\n\t\t * @param ticks {Array<Number>} the list of ticks being converted\n\t\t * @return {String} string representation of the tickValue parameter\n\t\t */\n\t\tlinear: function(tickValue, index, ticks) {\n\t\t\t// If we have lots of ticks, don't use the ones\n\t\t\tvar delta = ticks.length > 3 ? 
ticks[2] - ticks[1] : ticks[1] - ticks[0];\n\n\t\t\t// If we have a number like 2.5 as the delta, figure out how many decimal places we need\n\t\t\tif (Math.abs(delta) > 1) {\n\t\t\t\tif (tickValue !== Math.floor(tickValue)) {\n\t\t\t\t\t// not an integer\n\t\t\t\t\tdelta = tickValue - Math.floor(tickValue);\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tvar logDelta = helpers.log10(Math.abs(delta));\n\t\t\tvar tickString = '';\n\n\t\t\tif (tickValue !== 0) {\n\t\t\t\tvar numDecimal = -1 * Math.floor(logDelta);\n\t\t\t\tnumDecimal = Math.max(Math.min(numDecimal, 20), 0); // toFixed has a max of 20 decimal places\n\t\t\t\ttickString = tickValue.toFixed(numDecimal);\n\t\t\t} else {\n\t\t\t\ttickString = '0'; // never show decimal places for 0\n\t\t\t}\n\n\t\t\treturn tickString;\n\t\t},\n\n\t\tlogarithmic: function(tickValue, index, ticks) {\n\t\t\tvar remain = tickValue / (Math.pow(10, Math.floor(helpers.log10(tickValue))));\n\n\t\t\tif (tickValue === 0) {\n\t\t\t\treturn '0';\n\t\t\t} else if (remain === 1 || remain === 2 || remain === 5 || index === 0 || index === ticks.length - 1) {\n\t\t\t\treturn tickValue.toExponential();\n\t\t\t}\n\t\t\treturn '';\n\t\t}\n\t}\n};\n\n},{\"45\":45}],35:[function(require,module,exports){\n'use strict';\n\nvar defaults = require(25);\nvar Element = require(26);\nvar helpers = require(45);\n\ndefaults._set('global', {\n\ttooltips: {\n\t\tenabled: true,\n\t\tcustom: null,\n\t\tmode: 'nearest',\n\t\tposition: 'average',\n\t\tintersect: true,\n\t\tbackgroundColor: 'rgba(0,0,0,0.8)',\n\t\ttitleFontStyle: 'bold',\n\t\ttitleSpacing: 2,\n\t\ttitleMarginBottom: 6,\n\t\ttitleFontColor: '#fff',\n\t\ttitleAlign: 'left',\n\t\tbodySpacing: 2,\n\t\tbodyFontColor: '#fff',\n\t\tbodyAlign: 'left',\n\t\tfooterFontStyle: 'bold',\n\t\tfooterSpacing: 2,\n\t\tfooterMarginTop: 6,\n\t\tfooterFontColor: '#fff',\n\t\tfooterAlign: 'left',\n\t\tyPadding: 6,\n\t\txPadding: 6,\n\t\tcaretPadding: 2,\n\t\tcaretSize: 5,\n\t\tcornerRadius: 6,\n\t\tmultiKeyBackground: 
'#fff',\n\t\tdisplayColors: true,\n\t\tborderColor: 'rgba(0,0,0,0)',\n\t\tborderWidth: 0,\n\t\tcallbacks: {\n\t\t\t// Args are: (tooltipItems, data)\n\t\t\tbeforeTitle: helpers.noop,\n\t\t\ttitle: function(tooltipItems, data) {\n\t\t\t\t// Pick first xLabel for now\n\t\t\t\tvar title = '';\n\t\t\t\tvar labels = data.labels;\n\t\t\t\tvar labelCount = labels ? labels.length : 0;\n\n\t\t\t\tif (tooltipItems.length > 0) {\n\t\t\t\t\tvar item = tooltipItems[0];\n\n\t\t\t\t\tif (item.xLabel) {\n\t\t\t\t\t\ttitle = item.xLabel;\n\t\t\t\t\t} else if (labelCount > 0 && item.index < labelCount) {\n\t\t\t\t\t\ttitle = labels[item.index];\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\treturn title;\n\t\t\t},\n\t\t\tafterTitle: helpers.noop,\n\n\t\t\t// Args are: (tooltipItems, data)\n\t\t\tbeforeBody: helpers.noop,\n\n\t\t\t// Args are: (tooltipItem, data)\n\t\t\tbeforeLabel: helpers.noop,\n\t\t\tlabel: function(tooltipItem, data) {\n\t\t\t\tvar label = data.datasets[tooltipItem.datasetIndex].label || '';\n\n\t\t\t\tif (label) {\n\t\t\t\t\tlabel += ': ';\n\t\t\t\t}\n\t\t\t\tlabel += tooltipItem.yLabel;\n\t\t\t\treturn label;\n\t\t\t},\n\t\t\tlabelColor: function(tooltipItem, chart) {\n\t\t\t\tvar meta = chart.getDatasetMeta(tooltipItem.datasetIndex);\n\t\t\t\tvar activeElement = meta.data[tooltipItem.index];\n\t\t\t\tvar view = activeElement._view;\n\t\t\t\treturn {\n\t\t\t\t\tborderColor: view.borderColor,\n\t\t\t\t\tbackgroundColor: view.backgroundColor\n\t\t\t\t};\n\t\t\t},\n\t\t\tlabelTextColor: function() {\n\t\t\t\treturn this._options.bodyFontColor;\n\t\t\t},\n\t\t\tafterLabel: helpers.noop,\n\n\t\t\t// Args are: (tooltipItems, data)\n\t\t\tafterBody: helpers.noop,\n\n\t\t\t// Args are: (tooltipItems, data)\n\t\t\tbeforeFooter: helpers.noop,\n\t\t\tfooter: helpers.noop,\n\t\t\tafterFooter: helpers.noop\n\t\t}\n\t}\n});\n\nmodule.exports = function(Chart) {\n\n\t/**\n \t * Helper method to merge the opacity into a color\n \t */\n\tfunction mergeOpacity(colorString, opacity) 
{\n\t\tvar color = helpers.color(colorString);\n\t\treturn color.alpha(opacity * color.alpha()).rgbaString();\n\t}\n\n\t// Helper to push or concat based on if the 2nd parameter is an array or not\n\tfunction pushOrConcat(base, toPush) {\n\t\tif (toPush) {\n\t\t\tif (helpers.isArray(toPush)) {\n\t\t\t\t// base = base.concat(toPush);\n\t\t\t\tArray.prototype.push.apply(base, toPush);\n\t\t\t} else {\n\t\t\t\tbase.push(toPush);\n\t\t\t}\n\t\t}\n\n\t\treturn base;\n\t}\n\n\t// Private helper to create a tooltip item model\n\t// @param element : the chart element (point, arc, bar) to create the tooltip item for\n\t// @return : new tooltip item\n\tfunction createTooltipItem(element) {\n\t\tvar xScale = element._xScale;\n\t\tvar yScale = element._yScale || element._scale; // handle radar || polarArea charts\n\t\tvar index = element._index;\n\t\tvar datasetIndex = element._datasetIndex;\n\n\t\treturn {\n\t\t\txLabel: xScale ? xScale.getLabelForIndex(index, datasetIndex) : '',\n\t\t\tyLabel: yScale ? 
yScale.getLabelForIndex(index, datasetIndex) : '',\n\t\t\tindex: index,\n\t\t\tdatasetIndex: datasetIndex,\n\t\t\tx: element._model.x,\n\t\t\ty: element._model.y\n\t\t};\n\t}\n\n\t/**\n\t * Helper to get the reset model for the tooltip\n\t * @param tooltipOpts {Object} the tooltip options\n\t */\n\tfunction getBaseModel(tooltipOpts) {\n\t\tvar globalDefaults = defaults.global;\n\t\tvar valueOrDefault = helpers.valueOrDefault;\n\n\t\treturn {\n\t\t\t// Positioning\n\t\t\txPadding: tooltipOpts.xPadding,\n\t\t\tyPadding: tooltipOpts.yPadding,\n\t\t\txAlign: tooltipOpts.xAlign,\n\t\t\tyAlign: tooltipOpts.yAlign,\n\n\t\t\t// Body\n\t\t\tbodyFontColor: tooltipOpts.bodyFontColor,\n\t\t\t_bodyFontFamily: valueOrDefault(tooltipOpts.bodyFontFamily, globalDefaults.defaultFontFamily),\n\t\t\t_bodyFontStyle: valueOrDefault(tooltipOpts.bodyFontStyle, globalDefaults.defaultFontStyle),\n\t\t\t_bodyAlign: tooltipOpts.bodyAlign,\n\t\t\tbodyFontSize: valueOrDefault(tooltipOpts.bodyFontSize, globalDefaults.defaultFontSize),\n\t\t\tbodySpacing: tooltipOpts.bodySpacing,\n\n\t\t\t// Title\n\t\t\ttitleFontColor: tooltipOpts.titleFontColor,\n\t\t\t_titleFontFamily: valueOrDefault(tooltipOpts.titleFontFamily, globalDefaults.defaultFontFamily),\n\t\t\t_titleFontStyle: valueOrDefault(tooltipOpts.titleFontStyle, globalDefaults.defaultFontStyle),\n\t\t\ttitleFontSize: valueOrDefault(tooltipOpts.titleFontSize, globalDefaults.defaultFontSize),\n\t\t\t_titleAlign: tooltipOpts.titleAlign,\n\t\t\ttitleSpacing: tooltipOpts.titleSpacing,\n\t\t\ttitleMarginBottom: tooltipOpts.titleMarginBottom,\n\n\t\t\t// Footer\n\t\t\tfooterFontColor: tooltipOpts.footerFontColor,\n\t\t\t_footerFontFamily: valueOrDefault(tooltipOpts.footerFontFamily, globalDefaults.defaultFontFamily),\n\t\t\t_footerFontStyle: valueOrDefault(tooltipOpts.footerFontStyle, globalDefaults.defaultFontStyle),\n\t\t\tfooterFontSize: valueOrDefault(tooltipOpts.footerFontSize, globalDefaults.defaultFontSize),\n\t\t\t_footerAlign: 
tooltipOpts.footerAlign,\n\t\t\tfooterSpacing: tooltipOpts.footerSpacing,\n\t\t\tfooterMarginTop: tooltipOpts.footerMarginTop,\n\n\t\t\t// Appearance\n\t\t\tcaretSize: tooltipOpts.caretSize,\n\t\t\tcornerRadius: tooltipOpts.cornerRadius,\n\t\t\tbackgroundColor: tooltipOpts.backgroundColor,\n\t\t\topacity: 0,\n\t\t\tlegendColorBackground: tooltipOpts.multiKeyBackground,\n\t\t\tdisplayColors: tooltipOpts.displayColors,\n\t\t\tborderColor: tooltipOpts.borderColor,\n\t\t\tborderWidth: tooltipOpts.borderWidth\n\t\t};\n\t}\n\n\t/**\n\t * Get the size of the tooltip\n\t */\n\tfunction getTooltipSize(tooltip, model) {\n\t\tvar ctx = tooltip._chart.ctx;\n\n\t\tvar height = model.yPadding * 2; // Tooltip Padding\n\t\tvar width = 0;\n\n\t\t// Count of all lines in the body\n\t\tvar body = model.body;\n\t\tvar combinedBodyLength = body.reduce(function(count, bodyItem) {\n\t\t\treturn count + bodyItem.before.length + bodyItem.lines.length + bodyItem.after.length;\n\t\t}, 0);\n\t\tcombinedBodyLength += model.beforeBody.length + model.afterBody.length;\n\n\t\tvar titleLineCount = model.title.length;\n\t\tvar footerLineCount = model.footer.length;\n\t\tvar titleFontSize = model.titleFontSize;\n\t\tvar bodyFontSize = model.bodyFontSize;\n\t\tvar footerFontSize = model.footerFontSize;\n\n\t\theight += titleLineCount * titleFontSize; // Title Lines\n\t\theight += titleLineCount ? (titleLineCount - 1) * model.titleSpacing : 0; // Title Line Spacing\n\t\theight += titleLineCount ? model.titleMarginBottom : 0; // Title's bottom Margin\n\t\theight += combinedBodyLength * bodyFontSize; // Body Lines\n\t\theight += combinedBodyLength ? (combinedBodyLength - 1) * model.bodySpacing : 0; // Body Line Spacing\n\t\theight += footerLineCount ? model.footerMarginTop : 0; // Footer Margin\n\t\theight += footerLineCount * (footerFontSize); // Footer Lines\n\t\theight += footerLineCount ? 
(footerLineCount - 1) * model.footerSpacing : 0; // Footer Line Spacing\n\n\t\t// Title width\n\t\tvar widthPadding = 0;\n\t\tvar maxLineWidth = function(line) {\n\t\t\twidth = Math.max(width, ctx.measureText(line).width + widthPadding);\n\t\t};\n\n\t\tctx.font = helpers.fontString(titleFontSize, model._titleFontStyle, model._titleFontFamily);\n\t\thelpers.each(model.title, maxLineWidth);\n\n\t\t// Body width\n\t\tctx.font = helpers.fontString(bodyFontSize, model._bodyFontStyle, model._bodyFontFamily);\n\t\thelpers.each(model.beforeBody.concat(model.afterBody), maxLineWidth);\n\n\t\t// Body lines may include some extra width due to the color box\n\t\twidthPadding = model.displayColors ? (bodyFontSize + 2) : 0;\n\t\thelpers.each(body, function(bodyItem) {\n\t\t\thelpers.each(bodyItem.before, maxLineWidth);\n\t\t\thelpers.each(bodyItem.lines, maxLineWidth);\n\t\t\thelpers.each(bodyItem.after, maxLineWidth);\n\t\t});\n\n\t\t// Reset back to 0\n\t\twidthPadding = 0;\n\n\t\t// Footer width\n\t\tctx.font = helpers.fontString(footerFontSize, model._footerFontStyle, model._footerFontFamily);\n\t\thelpers.each(model.footer, maxLineWidth);\n\n\t\t// Add padding\n\t\twidth += 2 * model.xPadding;\n\n\t\treturn {\n\t\t\twidth: width,\n\t\t\theight: height\n\t\t};\n\t}\n\n\t/**\n\t * Helper to get the alignment of a tooltip given the size\n\t */\n\tfunction determineAlignment(tooltip, size) {\n\t\tvar model = tooltip._model;\n\t\tvar chart = tooltip._chart;\n\t\tvar chartArea = tooltip._chart.chartArea;\n\t\tvar xAlign = 'center';\n\t\tvar yAlign = 'center';\n\n\t\tif (model.y < size.height) {\n\t\t\tyAlign = 'top';\n\t\t} else if (model.y > (chart.height - size.height)) {\n\t\t\tyAlign = 'bottom';\n\t\t}\n\n\t\tvar lf, rf; // functions to determine left, right alignment\n\t\tvar olf, orf; // functions to determine if left/right alignment causes tooltip to go outside chart\n\t\tvar yf; // function to get the y alignment if the tooltip goes outside of the left or right 
edges\n\t\tvar midX = (chartArea.left + chartArea.right) / 2;\n\t\tvar midY = (chartArea.top + chartArea.bottom) / 2;\n\n\t\tif (yAlign === 'center') {\n\t\t\tlf = function(x) {\n\t\t\t\treturn x <= midX;\n\t\t\t};\n\t\t\trf = function(x) {\n\t\t\t\treturn x > midX;\n\t\t\t};\n\t\t} else {\n\t\t\tlf = function(x) {\n\t\t\t\treturn x <= (size.width / 2);\n\t\t\t};\n\t\t\trf = function(x) {\n\t\t\t\treturn x >= (chart.width - (size.width / 2));\n\t\t\t};\n\t\t}\n\n\t\tolf = function(x) {\n\t\t\treturn x + size.width + model.caretSize + model.caretPadding > chart.width;\n\t\t};\n\t\torf = function(x) {\n\t\t\treturn x - size.width - model.caretSize - model.caretPadding < 0;\n\t\t};\n\t\tyf = function(y) {\n\t\t\treturn y <= midY ? 'top' : 'bottom';\n\t\t};\n\n\t\tif (lf(model.x)) {\n\t\t\txAlign = 'left';\n\n\t\t\t// Is tooltip too wide and goes over the right side of the chart.?\n\t\t\tif (olf(model.x)) {\n\t\t\t\txAlign = 'center';\n\t\t\t\tyAlign = yf(model.y);\n\t\t\t}\n\t\t} else if (rf(model.x)) {\n\t\t\txAlign = 'right';\n\n\t\t\t// Is tooltip too wide and goes outside left edge of canvas?\n\t\t\tif (orf(model.x)) {\n\t\t\t\txAlign = 'center';\n\t\t\t\tyAlign = yf(model.y);\n\t\t\t}\n\t\t}\n\n\t\tvar opts = tooltip._options;\n\t\treturn {\n\t\t\txAlign: opts.xAlign ? opts.xAlign : xAlign,\n\t\t\tyAlign: opts.yAlign ? 
opts.yAlign : yAlign\n\t\t};\n\t}\n\n\t/**\n\t * @Helper to get the location a tooltip needs to be placed at given the initial position (via the vm) and the size and alignment\n\t */\n\tfunction getBackgroundPoint(vm, size, alignment, chart) {\n\t\t// Background Position\n\t\tvar x = vm.x;\n\t\tvar y = vm.y;\n\n\t\tvar caretSize = vm.caretSize;\n\t\tvar caretPadding = vm.caretPadding;\n\t\tvar cornerRadius = vm.cornerRadius;\n\t\tvar xAlign = alignment.xAlign;\n\t\tvar yAlign = alignment.yAlign;\n\t\tvar paddingAndSize = caretSize + caretPadding;\n\t\tvar radiusAndPadding = cornerRadius + caretPadding;\n\n\t\tif (xAlign === 'right') {\n\t\t\tx -= size.width;\n\t\t} else if (xAlign === 'center') {\n\t\t\tx -= (size.width / 2);\n\t\t\tif (x + size.width > chart.width) {\n\t\t\t\tx = chart.width - size.width;\n\t\t\t}\n\t\t\tif (x < 0) {\n\t\t\t\tx = 0;\n\t\t\t}\n\t\t}\n\n\t\tif (yAlign === 'top') {\n\t\t\ty += paddingAndSize;\n\t\t} else if (yAlign === 'bottom') {\n\t\t\ty -= size.height + paddingAndSize;\n\t\t} else {\n\t\t\ty -= (size.height / 2);\n\t\t}\n\n\t\tif (yAlign === 'center') {\n\t\t\tif (xAlign === 'left') {\n\t\t\t\tx += paddingAndSize;\n\t\t\t} else if (xAlign === 'right') {\n\t\t\t\tx -= paddingAndSize;\n\t\t\t}\n\t\t} else if (xAlign === 'left') {\n\t\t\tx -= radiusAndPadding;\n\t\t} else if (xAlign === 'right') {\n\t\t\tx += radiusAndPadding;\n\t\t}\n\n\t\treturn {\n\t\t\tx: x,\n\t\t\ty: y\n\t\t};\n\t}\n\n\tChart.Tooltip = Element.extend({\n\t\tinitialize: function() {\n\t\t\tthis._model = getBaseModel(this._options);\n\t\t\tthis._lastActive = [];\n\t\t},\n\n\t\t// Get the title\n\t\t// Args are: (tooltipItem, data)\n\t\tgetTitle: function() {\n\t\t\tvar me = this;\n\t\t\tvar opts = me._options;\n\t\t\tvar callbacks = opts.callbacks;\n\n\t\t\tvar beforeTitle = callbacks.beforeTitle.apply(me, arguments);\n\t\t\tvar title = callbacks.title.apply(me, arguments);\n\t\t\tvar afterTitle = callbacks.afterTitle.apply(me, arguments);\n\n\t\t\tvar lines = 
[];\n\t\t\tlines = pushOrConcat(lines, beforeTitle);\n\t\t\tlines = pushOrConcat(lines, title);\n\t\t\tlines = pushOrConcat(lines, afterTitle);\n\n\t\t\treturn lines;\n\t\t},\n\n\t\t// Args are: (tooltipItem, data)\n\t\tgetBeforeBody: function() {\n\t\t\tvar lines = this._options.callbacks.beforeBody.apply(this, arguments);\n\t\t\treturn helpers.isArray(lines) ? lines : lines !== undefined ? [lines] : [];\n\t\t},\n\n\t\t// Args are: (tooltipItem, data)\n\t\tgetBody: function(tooltipItems, data) {\n\t\t\tvar me = this;\n\t\t\tvar callbacks = me._options.callbacks;\n\t\t\tvar bodyItems = [];\n\n\t\t\thelpers.each(tooltipItems, function(tooltipItem) {\n\t\t\t\tvar bodyItem = {\n\t\t\t\t\tbefore: [],\n\t\t\t\t\tlines: [],\n\t\t\t\t\tafter: []\n\t\t\t\t};\n\t\t\t\tpushOrConcat(bodyItem.before, callbacks.beforeLabel.call(me, tooltipItem, data));\n\t\t\t\tpushOrConcat(bodyItem.lines, callbacks.label.call(me, tooltipItem, data));\n\t\t\t\tpushOrConcat(bodyItem.after, callbacks.afterLabel.call(me, tooltipItem, data));\n\n\t\t\t\tbodyItems.push(bodyItem);\n\t\t\t});\n\n\t\t\treturn bodyItems;\n\t\t},\n\n\t\t// Args are: (tooltipItem, data)\n\t\tgetAfterBody: function() {\n\t\t\tvar lines = this._options.callbacks.afterBody.apply(this, arguments);\n\t\t\treturn helpers.isArray(lines) ? lines : lines !== undefined ? 
[lines] : [];\n\t\t},\n\n\t\t// Get the footer and beforeFooter and afterFooter lines\n\t\t// Args are: (tooltipItem, data)\n\t\tgetFooter: function() {\n\t\t\tvar me = this;\n\t\t\tvar callbacks = me._options.callbacks;\n\n\t\t\tvar beforeFooter = callbacks.beforeFooter.apply(me, arguments);\n\t\t\tvar footer = callbacks.footer.apply(me, arguments);\n\t\t\tvar afterFooter = callbacks.afterFooter.apply(me, arguments);\n\n\t\t\tvar lines = [];\n\t\t\tlines = pushOrConcat(lines, beforeFooter);\n\t\t\tlines = pushOrConcat(lines, footer);\n\t\t\tlines = pushOrConcat(lines, afterFooter);\n\n\t\t\treturn lines;\n\t\t},\n\n\t\tupdate: function(changed) {\n\t\t\tvar me = this;\n\t\t\tvar opts = me._options;\n\n\t\t\t// Need to regenerate the model because its faster than using extend and it is necessary due to the optimization in Chart.Element.transition\n\t\t\t// that does _view = _model if ease === 1. This causes the 2nd tooltip update to set properties in both the view and model at the same time\n\t\t\t// which breaks any animations.\n\t\t\tvar existingModel = me._model;\n\t\t\tvar model = me._model = getBaseModel(opts);\n\t\t\tvar active = me._active;\n\n\t\t\tvar data = me._data;\n\n\t\t\t// In the case where active.length === 0 we need to keep these at existing values for good animations\n\t\t\tvar alignment = {\n\t\t\t\txAlign: existingModel.xAlign,\n\t\t\t\tyAlign: existingModel.yAlign\n\t\t\t};\n\t\t\tvar backgroundPoint = {\n\t\t\t\tx: existingModel.x,\n\t\t\t\ty: existingModel.y\n\t\t\t};\n\t\t\tvar tooltipSize = {\n\t\t\t\twidth: existingModel.width,\n\t\t\t\theight: existingModel.height\n\t\t\t};\n\t\t\tvar tooltipPosition = {\n\t\t\t\tx: existingModel.caretX,\n\t\t\t\ty: existingModel.caretY\n\t\t\t};\n\n\t\t\tvar i, len;\n\n\t\t\tif (active.length) {\n\t\t\t\tmodel.opacity = 1;\n\n\t\t\t\tvar labelColors = [];\n\t\t\t\tvar labelTextColors = [];\n\t\t\t\ttooltipPosition = Chart.Tooltip.positioners[opts.position].call(me, active, 
me._eventPosition);\n\n\t\t\t\tvar tooltipItems = [];\n\t\t\t\tfor (i = 0, len = active.length; i < len; ++i) {\n\t\t\t\t\ttooltipItems.push(createTooltipItem(active[i]));\n\t\t\t\t}\n\n\t\t\t\t// If the user provided a filter function, use it to modify the tooltip items\n\t\t\t\tif (opts.filter) {\n\t\t\t\t\ttooltipItems = tooltipItems.filter(function(a) {\n\t\t\t\t\t\treturn opts.filter(a, data);\n\t\t\t\t\t});\n\t\t\t\t}\n\n\t\t\t\t// If the user provided a sorting function, use it to modify the tooltip items\n\t\t\t\tif (opts.itemSort) {\n\t\t\t\t\ttooltipItems = tooltipItems.sort(function(a, b) {\n\t\t\t\t\t\treturn opts.itemSort(a, b, data);\n\t\t\t\t\t});\n\t\t\t\t}\n\n\t\t\t\t// Determine colors for boxes\n\t\t\t\thelpers.each(tooltipItems, function(tooltipItem) {\n\t\t\t\t\tlabelColors.push(opts.callbacks.labelColor.call(me, tooltipItem, me._chart));\n\t\t\t\t\tlabelTextColors.push(opts.callbacks.labelTextColor.call(me, tooltipItem, me._chart));\n\t\t\t\t});\n\n\n\t\t\t\t// Build the Text Lines\n\t\t\t\tmodel.title = me.getTitle(tooltipItems, data);\n\t\t\t\tmodel.beforeBody = me.getBeforeBody(tooltipItems, data);\n\t\t\t\tmodel.body = me.getBody(tooltipItems, data);\n\t\t\t\tmodel.afterBody = me.getAfterBody(tooltipItems, data);\n\t\t\t\tmodel.footer = me.getFooter(tooltipItems, data);\n\n\t\t\t\t// Initial positioning and colors\n\t\t\t\tmodel.x = Math.round(tooltipPosition.x);\n\t\t\t\tmodel.y = Math.round(tooltipPosition.y);\n\t\t\t\tmodel.caretPadding = opts.caretPadding;\n\t\t\t\tmodel.labelColors = labelColors;\n\t\t\t\tmodel.labelTextColors = labelTextColors;\n\n\t\t\t\t// data points\n\t\t\t\tmodel.dataPoints = tooltipItems;\n\n\t\t\t\t// We need to determine alignment of the tooltip\n\t\t\t\ttooltipSize = getTooltipSize(this, model);\n\t\t\t\talignment = determineAlignment(this, tooltipSize);\n\t\t\t\t// Final Size and Position\n\t\t\t\tbackgroundPoint = getBackgroundPoint(model, tooltipSize, alignment, me._chart);\n\t\t\t} else 
{\n\t\t\t\tmodel.opacity = 0;\n\t\t\t}\n\n\t\t\tmodel.xAlign = alignment.xAlign;\n\t\t\tmodel.yAlign = alignment.yAlign;\n\t\t\tmodel.x = backgroundPoint.x;\n\t\t\tmodel.y = backgroundPoint.y;\n\t\t\tmodel.width = tooltipSize.width;\n\t\t\tmodel.height = tooltipSize.height;\n\n\t\t\t// Point where the caret on the tooltip points to\n\t\t\tmodel.caretX = tooltipPosition.x;\n\t\t\tmodel.caretY = tooltipPosition.y;\n\n\t\t\tme._model = model;\n\n\t\t\tif (changed && opts.custom) {\n\t\t\t\topts.custom.call(me, model);\n\t\t\t}\n\n\t\t\treturn me;\n\t\t},\n\t\tdrawCaret: function(tooltipPoint, size) {\n\t\t\tvar ctx = this._chart.ctx;\n\t\t\tvar vm = this._view;\n\t\t\tvar caretPosition = this.getCaretPosition(tooltipPoint, size, vm);\n\n\t\t\tctx.lineTo(caretPosition.x1, caretPosition.y1);\n\t\t\tctx.lineTo(caretPosition.x2, caretPosition.y2);\n\t\t\tctx.lineTo(caretPosition.x3, caretPosition.y3);\n\t\t},\n\t\tgetCaretPosition: function(tooltipPoint, size, vm) {\n\t\t\tvar x1, x2, x3, y1, y2, y3;\n\t\t\tvar caretSize = vm.caretSize;\n\t\t\tvar cornerRadius = vm.cornerRadius;\n\t\t\tvar xAlign = vm.xAlign;\n\t\t\tvar yAlign = vm.yAlign;\n\t\t\tvar ptX = tooltipPoint.x;\n\t\t\tvar ptY = tooltipPoint.y;\n\t\t\tvar width = size.width;\n\t\t\tvar height = size.height;\n\n\t\t\tif (yAlign === 'center') {\n\t\t\t\ty2 = ptY + (height / 2);\n\n\t\t\t\tif (xAlign === 'left') {\n\t\t\t\t\tx1 = ptX;\n\t\t\t\t\tx2 = x1 - caretSize;\n\t\t\t\t\tx3 = x1;\n\n\t\t\t\t\ty1 = y2 + caretSize;\n\t\t\t\t\ty3 = y2 - caretSize;\n\t\t\t\t} else {\n\t\t\t\t\tx1 = ptX + width;\n\t\t\t\t\tx2 = x1 + caretSize;\n\t\t\t\t\tx3 = x1;\n\n\t\t\t\t\ty1 = y2 - caretSize;\n\t\t\t\t\ty3 = y2 + caretSize;\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tif (xAlign === 'left') {\n\t\t\t\t\tx2 = ptX + cornerRadius + (caretSize);\n\t\t\t\t\tx1 = x2 - caretSize;\n\t\t\t\t\tx3 = x2 + caretSize;\n\t\t\t\t} else if (xAlign === 'right') {\n\t\t\t\t\tx2 = ptX + width - cornerRadius - caretSize;\n\t\t\t\t\tx1 = x2 - 
caretSize;\n\t\t\t\t\tx3 = x2 + caretSize;\n\t\t\t\t} else {\n\t\t\t\t\tx2 = vm.caretX;\n\t\t\t\t\tx1 = x2 - caretSize;\n\t\t\t\t\tx3 = x2 + caretSize;\n\t\t\t\t}\n\t\t\t\tif (yAlign === 'top') {\n\t\t\t\t\ty1 = ptY;\n\t\t\t\t\ty2 = y1 - caretSize;\n\t\t\t\t\ty3 = y1;\n\t\t\t\t} else {\n\t\t\t\t\ty1 = ptY + height;\n\t\t\t\t\ty2 = y1 + caretSize;\n\t\t\t\t\ty3 = y1;\n\t\t\t\t\t// invert drawing order\n\t\t\t\t\tvar tmp = x3;\n\t\t\t\t\tx3 = x1;\n\t\t\t\t\tx1 = tmp;\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn {x1: x1, x2: x2, x3: x3, y1: y1, y2: y2, y3: y3};\n\t\t},\n\t\tdrawTitle: function(pt, vm, ctx, opacity) {\n\t\t\tvar title = vm.title;\n\n\t\t\tif (title.length) {\n\t\t\t\tctx.textAlign = vm._titleAlign;\n\t\t\t\tctx.textBaseline = 'top';\n\n\t\t\t\tvar titleFontSize = vm.titleFontSize;\n\t\t\t\tvar titleSpacing = vm.titleSpacing;\n\n\t\t\t\tctx.fillStyle = mergeOpacity(vm.titleFontColor, opacity);\n\t\t\t\tctx.font = helpers.fontString(titleFontSize, vm._titleFontStyle, vm._titleFontFamily);\n\n\t\t\t\tvar i, len;\n\t\t\t\tfor (i = 0, len = title.length; i < len; ++i) {\n\t\t\t\t\tctx.fillText(title[i], pt.x, pt.y);\n\t\t\t\t\tpt.y += titleFontSize + titleSpacing; // Line Height and spacing\n\n\t\t\t\t\tif (i + 1 === title.length) {\n\t\t\t\t\t\tpt.y += vm.titleMarginBottom - titleSpacing; // If Last, add margin, remove spacing\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t},\n\t\tdrawBody: function(pt, vm, ctx, opacity) {\n\t\t\tvar bodyFontSize = vm.bodyFontSize;\n\t\t\tvar bodySpacing = vm.bodySpacing;\n\t\t\tvar body = vm.body;\n\n\t\t\tctx.textAlign = vm._bodyAlign;\n\t\t\tctx.textBaseline = 'top';\n\t\t\tctx.font = helpers.fontString(bodyFontSize, vm._bodyFontStyle, vm._bodyFontFamily);\n\n\t\t\t// Before Body\n\t\t\tvar xLinePadding = 0;\n\t\t\tvar fillLineOfText = function(line) {\n\t\t\t\tctx.fillText(line, pt.x + xLinePadding, pt.y);\n\t\t\t\tpt.y += bodyFontSize + bodySpacing;\n\t\t\t};\n\n\t\t\t// Before body lines\n\t\t\tctx.fillStyle = 
mergeOpacity(vm.bodyFontColor, opacity);\n\t\t\thelpers.each(vm.beforeBody, fillLineOfText);\n\n\t\t\tvar drawColorBoxes = vm.displayColors;\n\t\t\txLinePadding = drawColorBoxes ? (bodyFontSize + 2) : 0;\n\n\t\t\t// Draw body lines now\n\t\t\thelpers.each(body, function(bodyItem, i) {\n\t\t\t\tvar textColor = mergeOpacity(vm.labelTextColors[i], opacity);\n\t\t\t\tctx.fillStyle = textColor;\n\t\t\t\thelpers.each(bodyItem.before, fillLineOfText);\n\n\t\t\t\thelpers.each(bodyItem.lines, function(line) {\n\t\t\t\t\t// Draw Legend-like boxes if needed\n\t\t\t\t\tif (drawColorBoxes) {\n\t\t\t\t\t\t// Fill a white rect so that colours merge nicely if the opacity is < 1\n\t\t\t\t\t\tctx.fillStyle = mergeOpacity(vm.legendColorBackground, opacity);\n\t\t\t\t\t\tctx.fillRect(pt.x, pt.y, bodyFontSize, bodyFontSize);\n\n\t\t\t\t\t\t// Border\n\t\t\t\t\t\tctx.lineWidth = 1;\n\t\t\t\t\t\tctx.strokeStyle = mergeOpacity(vm.labelColors[i].borderColor, opacity);\n\t\t\t\t\t\tctx.strokeRect(pt.x, pt.y, bodyFontSize, bodyFontSize);\n\n\t\t\t\t\t\t// Inner square\n\t\t\t\t\t\tctx.fillStyle = mergeOpacity(vm.labelColors[i].backgroundColor, opacity);\n\t\t\t\t\t\tctx.fillRect(pt.x + 1, pt.y + 1, bodyFontSize - 2, bodyFontSize - 2);\n\t\t\t\t\t\tctx.fillStyle = textColor;\n\t\t\t\t\t}\n\n\t\t\t\t\tfillLineOfText(line);\n\t\t\t\t});\n\n\t\t\t\thelpers.each(bodyItem.after, fillLineOfText);\n\t\t\t});\n\n\t\t\t// Reset back to 0 for after body\n\t\t\txLinePadding = 0;\n\n\t\t\t// After body lines\n\t\t\thelpers.each(vm.afterBody, fillLineOfText);\n\t\t\tpt.y -= bodySpacing; // Remove last body spacing\n\t\t},\n\t\tdrawFooter: function(pt, vm, ctx, opacity) {\n\t\t\tvar footer = vm.footer;\n\n\t\t\tif (footer.length) {\n\t\t\t\tpt.y += vm.footerMarginTop;\n\n\t\t\t\tctx.textAlign = vm._footerAlign;\n\t\t\t\tctx.textBaseline = 'top';\n\n\t\t\t\tctx.fillStyle = mergeOpacity(vm.footerFontColor, opacity);\n\t\t\t\tctx.font = helpers.fontString(vm.footerFontSize, vm._footerFontStyle, 
vm._footerFontFamily);\n\n\t\t\t\thelpers.each(footer, function(line) {\n\t\t\t\t\tctx.fillText(line, pt.x, pt.y);\n\t\t\t\t\tpt.y += vm.footerFontSize + vm.footerSpacing;\n\t\t\t\t});\n\t\t\t}\n\t\t},\n\t\tdrawBackground: function(pt, vm, ctx, tooltipSize, opacity) {\n\t\t\tctx.fillStyle = mergeOpacity(vm.backgroundColor, opacity);\n\t\t\tctx.strokeStyle = mergeOpacity(vm.borderColor, opacity);\n\t\t\tctx.lineWidth = vm.borderWidth;\n\t\t\tvar xAlign = vm.xAlign;\n\t\t\tvar yAlign = vm.yAlign;\n\t\t\tvar x = pt.x;\n\t\t\tvar y = pt.y;\n\t\t\tvar width = tooltipSize.width;\n\t\t\tvar height = tooltipSize.height;\n\t\t\tvar radius = vm.cornerRadius;\n\n\t\t\tctx.beginPath();\n\t\t\tctx.moveTo(x + radius, y);\n\t\t\tif (yAlign === 'top') {\n\t\t\t\tthis.drawCaret(pt, tooltipSize);\n\t\t\t}\n\t\t\tctx.lineTo(x + width - radius, y);\n\t\t\tctx.quadraticCurveTo(x + width, y, x + width, y + radius);\n\t\t\tif (yAlign === 'center' && xAlign === 'right') {\n\t\t\t\tthis.drawCaret(pt, tooltipSize);\n\t\t\t}\n\t\t\tctx.lineTo(x + width, y + height - radius);\n\t\t\tctx.quadraticCurveTo(x + width, y + height, x + width - radius, y + height);\n\t\t\tif (yAlign === 'bottom') {\n\t\t\t\tthis.drawCaret(pt, tooltipSize);\n\t\t\t}\n\t\t\tctx.lineTo(x + radius, y + height);\n\t\t\tctx.quadraticCurveTo(x, y + height, x, y + height - radius);\n\t\t\tif (yAlign === 'center' && xAlign === 'left') {\n\t\t\t\tthis.drawCaret(pt, tooltipSize);\n\t\t\t}\n\t\t\tctx.lineTo(x, y + radius);\n\t\t\tctx.quadraticCurveTo(x, y, x + radius, y);\n\t\t\tctx.closePath();\n\n\t\t\tctx.fill();\n\n\t\t\tif (vm.borderWidth > 0) {\n\t\t\t\tctx.stroke();\n\t\t\t}\n\t\t},\n\t\tdraw: function() {\n\t\t\tvar ctx = this._chart.ctx;\n\t\t\tvar vm = this._view;\n\n\t\t\tif (vm.opacity === 0) {\n\t\t\t\treturn;\n\t\t\t}\n\n\t\t\tvar tooltipSize = {\n\t\t\t\twidth: vm.width,\n\t\t\t\theight: vm.height\n\t\t\t};\n\t\t\tvar pt = {\n\t\t\t\tx: vm.x,\n\t\t\t\ty: vm.y\n\t\t\t};\n\n\t\t\t// IE11/Edge does not like very 
small opacities, so snap to 0\n\t\t\tvar opacity = Math.abs(vm.opacity < 1e-3) ? 0 : vm.opacity;\n\n\t\t\t// Truthy/falsey value for empty tooltip\n\t\t\tvar hasTooltipContent = vm.title.length || vm.beforeBody.length || vm.body.length || vm.afterBody.length || vm.footer.length;\n\n\t\t\tif (this._options.enabled && hasTooltipContent) {\n\t\t\t\t// Draw Background\n\t\t\t\tthis.drawBackground(pt, vm, ctx, tooltipSize, opacity);\n\n\t\t\t\t// Draw Title, Body, and Footer\n\t\t\t\tpt.x += vm.xPadding;\n\t\t\t\tpt.y += vm.yPadding;\n\n\t\t\t\t// Titles\n\t\t\t\tthis.drawTitle(pt, vm, ctx, opacity);\n\n\t\t\t\t// Body\n\t\t\t\tthis.drawBody(pt, vm, ctx, opacity);\n\n\t\t\t\t// Footer\n\t\t\t\tthis.drawFooter(pt, vm, ctx, opacity);\n\t\t\t}\n\t\t},\n\n\t\t/**\n\t\t * Handle an event\n\t\t * @private\n\t\t * @param {IEvent} event - The event to handle\n\t\t * @returns {Boolean} true if the tooltip changed\n\t\t */\n\t\thandleEvent: function(e) {\n\t\t\tvar me = this;\n\t\t\tvar options = me._options;\n\t\t\tvar changed = false;\n\n\t\t\tme._lastActive = me._lastActive || [];\n\n\t\t\t// Find Active Elements for tooltips\n\t\t\tif (e.type === 'mouseout') {\n\t\t\t\tme._active = [];\n\t\t\t} else {\n\t\t\t\tme._active = me._chart.getElementsAtEventForMode(e, options.mode, options);\n\t\t\t}\n\n\t\t\t// Remember Last Actives\n\t\t\tchanged = !helpers.arrayEquals(me._active, me._lastActive);\n\n\t\t\t// Only handle target event on tooltip change\n\t\t\tif (changed) {\n\t\t\t\tme._lastActive = me._active;\n\n\t\t\t\tif (options.enabled || options.custom) {\n\t\t\t\t\tme._eventPosition = {\n\t\t\t\t\t\tx: e.x,\n\t\t\t\t\t\ty: e.y\n\t\t\t\t\t};\n\n\t\t\t\t\tme.update(true);\n\t\t\t\t\tme.pivot();\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn changed;\n\t\t}\n\t});\n\n\t/**\n\t * @namespace Chart.Tooltip.positioners\n\t */\n\tChart.Tooltip.positioners = {\n\t\t/**\n\t\t * Average mode places the tooltip at the average position of the elements shown\n\t\t * @function 
Chart.Tooltip.positioners.average\n\t\t * @param elements {ChartElement[]} the elements being displayed in the tooltip\n\t\t * @returns {Point} tooltip position\n\t\t */\n\t\taverage: function(elements) {\n\t\t\tif (!elements.length) {\n\t\t\t\treturn false;\n\t\t\t}\n\n\t\t\tvar i, len;\n\t\t\tvar x = 0;\n\t\t\tvar y = 0;\n\t\t\tvar count = 0;\n\n\t\t\tfor (i = 0, len = elements.length; i < len; ++i) {\n\t\t\t\tvar el = elements[i];\n\t\t\t\tif (el && el.hasValue()) {\n\t\t\t\t\tvar pos = el.tooltipPosition();\n\t\t\t\t\tx += pos.x;\n\t\t\t\t\ty += pos.y;\n\t\t\t\t\t++count;\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn {\n\t\t\t\tx: Math.round(x / count),\n\t\t\t\ty: Math.round(y / count)\n\t\t\t};\n\t\t},\n\n\t\t/**\n\t\t * Gets the tooltip position nearest of the item nearest to the event position\n\t\t * @function Chart.Tooltip.positioners.nearest\n\t\t * @param elements {Chart.Element[]} the tooltip elements\n\t\t * @param eventPosition {Point} the position of the event in canvas coordinates\n\t\t * @returns {Point} the tooltip position\n\t\t */\n\t\tnearest: function(elements, eventPosition) {\n\t\t\tvar x = eventPosition.x;\n\t\t\tvar y = eventPosition.y;\n\t\t\tvar minDistance = Number.POSITIVE_INFINITY;\n\t\t\tvar i, len, nearestElement;\n\n\t\t\tfor (i = 0, len = elements.length; i < len; ++i) {\n\t\t\t\tvar el = elements[i];\n\t\t\t\tif (el && el.hasValue()) {\n\t\t\t\t\tvar center = el.getCenterPoint();\n\t\t\t\t\tvar d = helpers.distanceBetweenPoints(eventPosition, center);\n\n\t\t\t\t\tif (d < minDistance) {\n\t\t\t\t\t\tminDistance = d;\n\t\t\t\t\t\tnearestElement = el;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif (nearestElement) {\n\t\t\t\tvar tp = nearestElement.tooltipPosition();\n\t\t\t\tx = tp.x;\n\t\t\t\ty = tp.y;\n\t\t\t}\n\n\t\t\treturn {\n\t\t\t\tx: x,\n\t\t\t\ty: y\n\t\t\t};\n\t\t}\n\t};\n};\n\n},{\"25\":25,\"26\":26,\"45\":45}],36:[function(require,module,exports){\n'use strict';\n\nvar defaults = require(25);\nvar Element = require(26);\nvar 
helpers = require(45);\n\ndefaults._set('global', {\n\telements: {\n\t\tarc: {\n\t\t\tbackgroundColor: defaults.global.defaultColor,\n\t\t\tborderColor: '#fff',\n\t\t\tborderWidth: 2\n\t\t}\n\t}\n});\n\nmodule.exports = Element.extend({\n\tinLabelRange: function(mouseX) {\n\t\tvar vm = this._view;\n\n\t\tif (vm) {\n\t\t\treturn (Math.pow(mouseX - vm.x, 2) < Math.pow(vm.radius + vm.hoverRadius, 2));\n\t\t}\n\t\treturn false;\n\t},\n\n\tinRange: function(chartX, chartY) {\n\t\tvar vm = this._view;\n\n\t\tif (vm) {\n\t\t\tvar pointRelativePosition = helpers.getAngleFromPoint(vm, {x: chartX, y: chartY});\n\t\t\tvar\tangle = pointRelativePosition.angle;\n\t\t\tvar distance = pointRelativePosition.distance;\n\n\t\t\t// Sanitise angle range\n\t\t\tvar startAngle = vm.startAngle;\n\t\t\tvar endAngle = vm.endAngle;\n\t\t\twhile (endAngle < startAngle) {\n\t\t\t\tendAngle += 2.0 * Math.PI;\n\t\t\t}\n\t\t\twhile (angle > endAngle) {\n\t\t\t\tangle -= 2.0 * Math.PI;\n\t\t\t}\n\t\t\twhile (angle < startAngle) {\n\t\t\t\tangle += 2.0 * Math.PI;\n\t\t\t}\n\n\t\t\t// Check if within the range of the open/close angle\n\t\t\tvar betweenAngles = (angle >= startAngle && angle <= endAngle);\n\t\t\tvar withinRadius = (distance >= vm.innerRadius && distance <= vm.outerRadius);\n\n\t\t\treturn (betweenAngles && withinRadius);\n\t\t}\n\t\treturn false;\n\t},\n\n\tgetCenterPoint: function() {\n\t\tvar vm = this._view;\n\t\tvar halfAngle = (vm.startAngle + vm.endAngle) / 2;\n\t\tvar halfRadius = (vm.innerRadius + vm.outerRadius) / 2;\n\t\treturn {\n\t\t\tx: vm.x + Math.cos(halfAngle) * halfRadius,\n\t\t\ty: vm.y + Math.sin(halfAngle) * halfRadius\n\t\t};\n\t},\n\n\tgetArea: function() {\n\t\tvar vm = this._view;\n\t\treturn Math.PI * ((vm.endAngle - vm.startAngle) / (2 * Math.PI)) * (Math.pow(vm.outerRadius, 2) - Math.pow(vm.innerRadius, 2));\n\t},\n\n\ttooltipPosition: function() {\n\t\tvar vm = this._view;\n\t\tvar centreAngle = vm.startAngle + ((vm.endAngle - vm.startAngle) / 2);\n\t\tvar 
rangeFromCentre = (vm.outerRadius - vm.innerRadius) / 2 + vm.innerRadius;\n\n\t\treturn {\n\t\t\tx: vm.x + (Math.cos(centreAngle) * rangeFromCentre),\n\t\t\ty: vm.y + (Math.sin(centreAngle) * rangeFromCentre)\n\t\t};\n\t},\n\n\tdraw: function() {\n\t\tvar ctx = this._chart.ctx;\n\t\tvar vm = this._view;\n\t\tvar sA = vm.startAngle;\n\t\tvar eA = vm.endAngle;\n\n\t\tctx.beginPath();\n\n\t\tctx.arc(vm.x, vm.y, vm.outerRadius, sA, eA);\n\t\tctx.arc(vm.x, vm.y, vm.innerRadius, eA, sA, true);\n\n\t\tctx.closePath();\n\t\tctx.strokeStyle = vm.borderColor;\n\t\tctx.lineWidth = vm.borderWidth;\n\n\t\tctx.fillStyle = vm.backgroundColor;\n\n\t\tctx.fill();\n\t\tctx.lineJoin = 'bevel';\n\n\t\tif (vm.borderWidth) {\n\t\t\tctx.stroke();\n\t\t}\n\t}\n});\n\n},{\"25\":25,\"26\":26,\"45\":45}],37:[function(require,module,exports){\n'use strict';\n\nvar defaults = require(25);\nvar Element = require(26);\nvar helpers = require(45);\n\nvar globalDefaults = defaults.global;\n\ndefaults._set('global', {\n\telements: {\n\t\tline: {\n\t\t\ttension: 0.4,\n\t\t\tbackgroundColor: globalDefaults.defaultColor,\n\t\t\tborderWidth: 3,\n\t\t\tborderColor: globalDefaults.defaultColor,\n\t\t\tborderCapStyle: 'butt',\n\t\t\tborderDash: [],\n\t\t\tborderDashOffset: 0.0,\n\t\t\tborderJoinStyle: 'miter',\n\t\t\tcapBezierPoints: true,\n\t\t\tfill: true, // do we fill in the area between the line and its base axis\n\t\t}\n\t}\n});\n\nmodule.exports = Element.extend({\n\tdraw: function() {\n\t\tvar me = this;\n\t\tvar vm = me._view;\n\t\tvar ctx = me._chart.ctx;\n\t\tvar spanGaps = vm.spanGaps;\n\t\tvar points = me._children.slice(); // clone array\n\t\tvar globalOptionLineElements = globalDefaults.elements.line;\n\t\tvar lastDrawnIndex = -1;\n\t\tvar index, current, previous, currentVM;\n\n\t\t// If we are looping, adding the first point again\n\t\tif (me._loop && points.length) {\n\t\t\tpoints.push(points[0]);\n\t\t}\n\n\t\tctx.save();\n\n\t\t// Stroke Line Options\n\t\tctx.lineCap = vm.borderCapStyle 
|| globalOptionLineElements.borderCapStyle;\n\n\t\t// IE 9 and 10 do not support line dash\n\t\tif (ctx.setLineDash) {\n\t\t\tctx.setLineDash(vm.borderDash || globalOptionLineElements.borderDash);\n\t\t}\n\n\t\tctx.lineDashOffset = vm.borderDashOffset || globalOptionLineElements.borderDashOffset;\n\t\tctx.lineJoin = vm.borderJoinStyle || globalOptionLineElements.borderJoinStyle;\n\t\tctx.lineWidth = vm.borderWidth || globalOptionLineElements.borderWidth;\n\t\tctx.strokeStyle = vm.borderColor || globalDefaults.defaultColor;\n\n\t\t// Stroke Line\n\t\tctx.beginPath();\n\t\tlastDrawnIndex = -1;\n\n\t\tfor (index = 0; index < points.length; ++index) {\n\t\t\tcurrent = points[index];\n\t\t\tprevious = helpers.previousItem(points, index);\n\t\t\tcurrentVM = current._view;\n\n\t\t\t// First point moves to it's starting position no matter what\n\t\t\tif (index === 0) {\n\t\t\t\tif (!currentVM.skip) {\n\t\t\t\t\tctx.moveTo(currentVM.x, currentVM.y);\n\t\t\t\t\tlastDrawnIndex = index;\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tprevious = lastDrawnIndex === -1 ? 
previous : points[lastDrawnIndex];\n\n\t\t\t\tif (!currentVM.skip) {\n\t\t\t\t\tif ((lastDrawnIndex !== (index - 1) && !spanGaps) || lastDrawnIndex === -1) {\n\t\t\t\t\t\t// There was a gap and this is the first point after the gap\n\t\t\t\t\t\tctx.moveTo(currentVM.x, currentVM.y);\n\t\t\t\t\t} else {\n\t\t\t\t\t\t// Line to next point\n\t\t\t\t\t\thelpers.canvas.lineTo(ctx, previous._view, current._view);\n\t\t\t\t\t}\n\t\t\t\t\tlastDrawnIndex = index;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tctx.stroke();\n\t\tctx.restore();\n\t}\n});\n\n},{\"25\":25,\"26\":26,\"45\":45}],38:[function(require,module,exports){\n'use strict';\n\nvar defaults = require(25);\nvar Element = require(26);\nvar helpers = require(45);\n\nvar defaultColor = defaults.global.defaultColor;\n\ndefaults._set('global', {\n\telements: {\n\t\tpoint: {\n\t\t\tradius: 3,\n\t\t\tpointStyle: 'circle',\n\t\t\tbackgroundColor: defaultColor,\n\t\t\tborderColor: defaultColor,\n\t\t\tborderWidth: 1,\n\t\t\t// Hover\n\t\t\thitRadius: 1,\n\t\t\thoverRadius: 4,\n\t\t\thoverBorderWidth: 1\n\t\t}\n\t}\n});\n\nfunction xRange(mouseX) {\n\tvar vm = this._view;\n\treturn vm ? (Math.abs(mouseX - vm.x) < vm.radius + vm.hitRadius) : false;\n}\n\nfunction yRange(mouseY) {\n\tvar vm = this._view;\n\treturn vm ? (Math.abs(mouseY - vm.y) < vm.radius + vm.hitRadius) : false;\n}\n\nmodule.exports = Element.extend({\n\tinRange: function(mouseX, mouseY) {\n\t\tvar vm = this._view;\n\t\treturn vm ? 
((Math.pow(mouseX - vm.x, 2) + Math.pow(mouseY - vm.y, 2)) < Math.pow(vm.hitRadius + vm.radius, 2)) : false;\n\t},\n\n\tinLabelRange: xRange,\n\tinXRange: xRange,\n\tinYRange: yRange,\n\n\tgetCenterPoint: function() {\n\t\tvar vm = this._view;\n\t\treturn {\n\t\t\tx: vm.x,\n\t\t\ty: vm.y\n\t\t};\n\t},\n\n\tgetArea: function() {\n\t\treturn Math.PI * Math.pow(this._view.radius, 2);\n\t},\n\n\ttooltipPosition: function() {\n\t\tvar vm = this._view;\n\t\treturn {\n\t\t\tx: vm.x,\n\t\t\ty: vm.y,\n\t\t\tpadding: vm.radius + vm.borderWidth\n\t\t};\n\t},\n\n\tdraw: function(chartArea) {\n\t\tvar vm = this._view;\n\t\tvar model = this._model;\n\t\tvar ctx = this._chart.ctx;\n\t\tvar pointStyle = vm.pointStyle;\n\t\tvar radius = vm.radius;\n\t\tvar x = vm.x;\n\t\tvar y = vm.y;\n\t\tvar color = helpers.color;\n\t\tvar errMargin = 1.01; // 1.01 is margin for Accumulated error. (Especially Edge, IE.)\n\t\tvar ratio = 0;\n\n\t\tif (vm.skip) {\n\t\t\treturn;\n\t\t}\n\n\t\tctx.strokeStyle = vm.borderColor || defaultColor;\n\t\tctx.lineWidth = helpers.valueOrDefault(vm.borderWidth, defaults.global.elements.point.borderWidth);\n\t\tctx.fillStyle = vm.backgroundColor || defaultColor;\n\n\t\t// Cliping for Points.\n\t\t// going out from inner charArea?\n\t\tif ((chartArea !== undefined) && ((model.x < chartArea.left) || (chartArea.right * errMargin < model.x) || (model.y < chartArea.top) || (chartArea.bottom * errMargin < model.y))) {\n\t\t\t// Point fade out\n\t\t\tif (model.x < chartArea.left) {\n\t\t\t\tratio = (x - model.x) / (chartArea.left - model.x);\n\t\t\t} else if (chartArea.right * errMargin < model.x) {\n\t\t\t\tratio = (model.x - x) / (model.x - chartArea.right);\n\t\t\t} else if (model.y < chartArea.top) {\n\t\t\t\tratio = (y - model.y) / (chartArea.top - model.y);\n\t\t\t} else if (chartArea.bottom * errMargin < model.y) {\n\t\t\t\tratio = (model.y - y) / (model.y - chartArea.bottom);\n\t\t\t}\n\t\t\tratio = Math.round(ratio * 100) / 100;\n\t\t\tctx.strokeStyle = 
color(ctx.strokeStyle).alpha(ratio).rgbString();\n\t\t\tctx.fillStyle = color(ctx.fillStyle).alpha(ratio).rgbString();\n\t\t}\n\n\t\thelpers.canvas.drawPoint(ctx, pointStyle, radius, x, y);\n\t}\n});\n\n},{\"25\":25,\"26\":26,\"45\":45}],39:[function(require,module,exports){\n'use strict';\n\nvar defaults = require(25);\nvar Element = require(26);\n\ndefaults._set('global', {\n\telements: {\n\t\trectangle: {\n\t\t\tbackgroundColor: defaults.global.defaultColor,\n\t\t\tborderColor: defaults.global.defaultColor,\n\t\t\tborderSkipped: 'bottom',\n\t\t\tborderWidth: 0\n\t\t}\n\t}\n});\n\nfunction isVertical(bar) {\n\treturn bar._view.width !== undefined;\n}\n\n/**\n * Helper function to get the bounds of the bar regardless of the orientation\n * @param bar {Chart.Element.Rectangle} the bar\n * @return {Bounds} bounds of the bar\n * @private\n */\nfunction getBarBounds(bar) {\n\tvar vm = bar._view;\n\tvar x1, x2, y1, y2;\n\n\tif (isVertical(bar)) {\n\t\t// vertical\n\t\tvar halfWidth = vm.width / 2;\n\t\tx1 = vm.x - halfWidth;\n\t\tx2 = vm.x + halfWidth;\n\t\ty1 = Math.min(vm.y, vm.base);\n\t\ty2 = Math.max(vm.y, vm.base);\n\t} else {\n\t\t// horizontal bar\n\t\tvar halfHeight = vm.height / 2;\n\t\tx1 = Math.min(vm.x, vm.base);\n\t\tx2 = Math.max(vm.x, vm.base);\n\t\ty1 = vm.y - halfHeight;\n\t\ty2 = vm.y + halfHeight;\n\t}\n\n\treturn {\n\t\tleft: x1,\n\t\ttop: y1,\n\t\tright: x2,\n\t\tbottom: y2\n\t};\n}\n\nmodule.exports = Element.extend({\n\tdraw: function() {\n\t\tvar ctx = this._chart.ctx;\n\t\tvar vm = this._view;\n\t\tvar left, right, top, bottom, signX, signY, borderSkipped;\n\t\tvar borderWidth = vm.borderWidth;\n\n\t\tif (!vm.horizontal) {\n\t\t\t// bar\n\t\t\tleft = vm.x - vm.width / 2;\n\t\t\tright = vm.x + vm.width / 2;\n\t\t\ttop = vm.y;\n\t\t\tbottom = vm.base;\n\t\t\tsignX = 1;\n\t\t\tsignY = bottom > top ? 
1 : -1;\n\t\t\tborderSkipped = vm.borderSkipped || 'bottom';\n\t\t} else {\n\t\t\t// horizontal bar\n\t\t\tleft = vm.base;\n\t\t\tright = vm.x;\n\t\t\ttop = vm.y - vm.height / 2;\n\t\t\tbottom = vm.y + vm.height / 2;\n\t\t\tsignX = right > left ? 1 : -1;\n\t\t\tsignY = 1;\n\t\t\tborderSkipped = vm.borderSkipped || 'left';\n\t\t}\n\n\t\t// Canvas doesn't allow us to stroke inside the width so we can\n\t\t// adjust the sizes to fit if we're setting a stroke on the line\n\t\tif (borderWidth) {\n\t\t\t// borderWidth shold be less than bar width and bar height.\n\t\t\tvar barSize = Math.min(Math.abs(left - right), Math.abs(top - bottom));\n\t\t\tborderWidth = borderWidth > barSize ? barSize : borderWidth;\n\t\t\tvar halfStroke = borderWidth / 2;\n\t\t\t// Adjust borderWidth when bar top position is near vm.base(zero).\n\t\t\tvar borderLeft = left + (borderSkipped !== 'left' ? halfStroke * signX : 0);\n\t\t\tvar borderRight = right + (borderSkipped !== 'right' ? -halfStroke * signX : 0);\n\t\t\tvar borderTop = top + (borderSkipped !== 'top' ? halfStroke * signY : 0);\n\t\t\tvar borderBottom = bottom + (borderSkipped !== 'bottom' ? 
-halfStroke * signY : 0);\n\t\t\t// not become a vertical line?\n\t\t\tif (borderLeft !== borderRight) {\n\t\t\t\ttop = borderTop;\n\t\t\t\tbottom = borderBottom;\n\t\t\t}\n\t\t\t// not become a horizontal line?\n\t\t\tif (borderTop !== borderBottom) {\n\t\t\t\tleft = borderLeft;\n\t\t\t\tright = borderRight;\n\t\t\t}\n\t\t}\n\n\t\tctx.beginPath();\n\t\tctx.fillStyle = vm.backgroundColor;\n\t\tctx.strokeStyle = vm.borderColor;\n\t\tctx.lineWidth = borderWidth;\n\n\t\t// Corner points, from bottom-left to bottom-right clockwise\n\t\t// | 1 2 |\n\t\t// | 0 3 |\n\t\tvar corners = [\n\t\t\t[left, bottom],\n\t\t\t[left, top],\n\t\t\t[right, top],\n\t\t\t[right, bottom]\n\t\t];\n\n\t\t// Find first (starting) corner with fallback to 'bottom'\n\t\tvar borders = ['bottom', 'left', 'top', 'right'];\n\t\tvar startCorner = borders.indexOf(borderSkipped, 0);\n\t\tif (startCorner === -1) {\n\t\t\tstartCorner = 0;\n\t\t}\n\n\t\tfunction cornerAt(index) {\n\t\t\treturn corners[(startCorner + index) % 4];\n\t\t}\n\n\t\t// Draw rectangle from 'startCorner'\n\t\tvar corner = cornerAt(0);\n\t\tctx.moveTo(corner[0], corner[1]);\n\n\t\tfor (var i = 1; i < 4; i++) {\n\t\t\tcorner = cornerAt(i);\n\t\t\tctx.lineTo(corner[0], corner[1]);\n\t\t}\n\n\t\tctx.fill();\n\t\tif (borderWidth) {\n\t\t\tctx.stroke();\n\t\t}\n\t},\n\n\theight: function() {\n\t\tvar vm = this._view;\n\t\treturn vm.base - vm.y;\n\t},\n\n\tinRange: function(mouseX, mouseY) {\n\t\tvar inRange = false;\n\n\t\tif (this._view) {\n\t\t\tvar bounds = getBarBounds(this);\n\t\t\tinRange = mouseX >= bounds.left && mouseX <= bounds.right && mouseY >= bounds.top && mouseY <= bounds.bottom;\n\t\t}\n\n\t\treturn inRange;\n\t},\n\n\tinLabelRange: function(mouseX, mouseY) {\n\t\tvar me = this;\n\t\tif (!me._view) {\n\t\t\treturn false;\n\t\t}\n\n\t\tvar inRange = false;\n\t\tvar bounds = getBarBounds(me);\n\n\t\tif (isVertical(me)) {\n\t\t\tinRange = mouseX >= bounds.left && mouseX <= bounds.right;\n\t\t} else {\n\t\t\tinRange = 
mouseY >= bounds.top && mouseY <= bounds.bottom;\n\t\t}\n\n\t\treturn inRange;\n\t},\n\n\tinXRange: function(mouseX) {\n\t\tvar bounds = getBarBounds(this);\n\t\treturn mouseX >= bounds.left && mouseX <= bounds.right;\n\t},\n\n\tinYRange: function(mouseY) {\n\t\tvar bounds = getBarBounds(this);\n\t\treturn mouseY >= bounds.top && mouseY <= bounds.bottom;\n\t},\n\n\tgetCenterPoint: function() {\n\t\tvar vm = this._view;\n\t\tvar x, y;\n\t\tif (isVertical(this)) {\n\t\t\tx = vm.x;\n\t\t\ty = (vm.y + vm.base) / 2;\n\t\t} else {\n\t\t\tx = (vm.x + vm.base) / 2;\n\t\t\ty = vm.y;\n\t\t}\n\n\t\treturn {x: x, y: y};\n\t},\n\n\tgetArea: function() {\n\t\tvar vm = this._view;\n\t\treturn vm.width * Math.abs(vm.y - vm.base);\n\t},\n\n\ttooltipPosition: function() {\n\t\tvar vm = this._view;\n\t\treturn {\n\t\t\tx: vm.x,\n\t\t\ty: vm.y\n\t\t};\n\t}\n});\n\n},{\"25\":25,\"26\":26}],40:[function(require,module,exports){\n'use strict';\n\nmodule.exports = {};\nmodule.exports.Arc = require(36);\nmodule.exports.Line = require(37);\nmodule.exports.Point = require(38);\nmodule.exports.Rectangle = require(39);\n\n},{\"36\":36,\"37\":37,\"38\":38,\"39\":39}],41:[function(require,module,exports){\n'use strict';\n\nvar helpers = require(42);\n\n/**\n * @namespace Chart.helpers.canvas\n */\nvar exports = module.exports = {\n\t/**\n\t * Clears the entire canvas associated to the given `chart`.\n\t * @param {Chart} chart - The chart for which to clear the canvas.\n\t */\n\tclear: function(chart) {\n\t\tchart.ctx.clearRect(0, 0, chart.width, chart.height);\n\t},\n\n\t/**\n\t * Creates a \"path\" for a rectangle with rounded corners at position (x, y) with a\n\t * given size (width, height) and the same `radius` for all corners.\n\t * @param {CanvasRenderingContext2D} ctx - The canvas 2D Context.\n\t * @param {Number} x - The x axis of the coordinate for the rectangle starting point.\n\t * @param {Number} y - The y axis of the coordinate for the rectangle starting point.\n\t * @param {Number} 
width - The rectangle's width.\n\t * @param {Number} height - The rectangle's height.\n\t * @param {Number} radius - The rounded amount (in pixels) for the four corners.\n\t * @todo handle `radius` as top-left, top-right, bottom-right, bottom-left array/object?\n\t */\n\troundedRect: function(ctx, x, y, width, height, radius) {\n\t\tif (radius) {\n\t\t\tvar rx = Math.min(radius, width / 2);\n\t\t\tvar ry = Math.min(radius, height / 2);\n\n\t\t\tctx.moveTo(x + rx, y);\n\t\t\tctx.lineTo(x + width - rx, y);\n\t\t\tctx.quadraticCurveTo(x + width, y, x + width, y + ry);\n\t\t\tctx.lineTo(x + width, y + height - ry);\n\t\t\tctx.quadraticCurveTo(x + width, y + height, x + width - rx, y + height);\n\t\t\tctx.lineTo(x + rx, y + height);\n\t\t\tctx.quadraticCurveTo(x, y + height, x, y + height - ry);\n\t\t\tctx.lineTo(x, y + ry);\n\t\t\tctx.quadraticCurveTo(x, y, x + rx, y);\n\t\t} else {\n\t\t\tctx.rect(x, y, width, height);\n\t\t}\n\t},\n\n\tdrawPoint: function(ctx, style, radius, x, y) {\n\t\tvar type, edgeLength, xOffset, yOffset, height, size;\n\n\t\tif (style && typeof style === 'object') {\n\t\t\ttype = style.toString();\n\t\t\tif (type === '[object HTMLImageElement]' || type === '[object HTMLCanvasElement]') {\n\t\t\t\tctx.drawImage(style, x - style.width / 2, y - style.height / 2, style.width, style.height);\n\t\t\t\treturn;\n\t\t\t}\n\t\t}\n\n\t\tif (isNaN(radius) || radius <= 0) {\n\t\t\treturn;\n\t\t}\n\n\t\tswitch (style) {\n\t\t// Default includes circle\n\t\tdefault:\n\t\t\tctx.beginPath();\n\t\t\tctx.arc(x, y, radius, 0, Math.PI * 2);\n\t\t\tctx.closePath();\n\t\t\tctx.fill();\n\t\t\tbreak;\n\t\tcase 'triangle':\n\t\t\tctx.beginPath();\n\t\t\tedgeLength = 3 * radius / Math.sqrt(3);\n\t\t\theight = edgeLength * Math.sqrt(3) / 2;\n\t\t\tctx.moveTo(x - edgeLength / 2, y + height / 3);\n\t\t\tctx.lineTo(x + edgeLength / 2, y + height / 3);\n\t\t\tctx.lineTo(x, y - 2 * height / 3);\n\t\t\tctx.closePath();\n\t\t\tctx.fill();\n\t\t\tbreak;\n\t\tcase 
'rect':\n\t\t\tsize = 1 / Math.SQRT2 * radius;\n\t\t\tctx.beginPath();\n\t\t\tctx.fillRect(x - size, y - size, 2 * size, 2 * size);\n\t\t\tctx.strokeRect(x - size, y - size, 2 * size, 2 * size);\n\t\t\tbreak;\n\t\tcase 'rectRounded':\n\t\t\tvar offset = radius / Math.SQRT2;\n\t\t\tvar leftX = x - offset;\n\t\t\tvar topY = y - offset;\n\t\t\tvar sideSize = Math.SQRT2 * radius;\n\t\t\tctx.beginPath();\n\t\t\tthis.roundedRect(ctx, leftX, topY, sideSize, sideSize, radius / 2);\n\t\t\tctx.closePath();\n\t\t\tctx.fill();\n\t\t\tbreak;\n\t\tcase 'rectRot':\n\t\t\tsize = 1 / Math.SQRT2 * radius;\n\t\t\tctx.beginPath();\n\t\t\tctx.moveTo(x - size, y);\n\t\t\tctx.lineTo(x, y + size);\n\t\t\tctx.lineTo(x + size, y);\n\t\t\tctx.lineTo(x, y - size);\n\t\t\tctx.closePath();\n\t\t\tctx.fill();\n\t\t\tbreak;\n\t\tcase 'cross':\n\t\t\tctx.beginPath();\n\t\t\tctx.moveTo(x, y + radius);\n\t\t\tctx.lineTo(x, y - radius);\n\t\t\tctx.moveTo(x - radius, y);\n\t\t\tctx.lineTo(x + radius, y);\n\t\t\tctx.closePath();\n\t\t\tbreak;\n\t\tcase 'crossRot':\n\t\t\tctx.beginPath();\n\t\t\txOffset = Math.cos(Math.PI / 4) * radius;\n\t\t\tyOffset = Math.sin(Math.PI / 4) * radius;\n\t\t\tctx.moveTo(x - xOffset, y - yOffset);\n\t\t\tctx.lineTo(x + xOffset, y + yOffset);\n\t\t\tctx.moveTo(x - xOffset, y + yOffset);\n\t\t\tctx.lineTo(x + xOffset, y - yOffset);\n\t\t\tctx.closePath();\n\t\t\tbreak;\n\t\tcase 'star':\n\t\t\tctx.beginPath();\n\t\t\tctx.moveTo(x, y + radius);\n\t\t\tctx.lineTo(x, y - radius);\n\t\t\tctx.moveTo(x - radius, y);\n\t\t\tctx.lineTo(x + radius, y);\n\t\t\txOffset = Math.cos(Math.PI / 4) * radius;\n\t\t\tyOffset = Math.sin(Math.PI / 4) * radius;\n\t\t\tctx.moveTo(x - xOffset, y - yOffset);\n\t\t\tctx.lineTo(x + xOffset, y + yOffset);\n\t\t\tctx.moveTo(x - xOffset, y + yOffset);\n\t\t\tctx.lineTo(x + xOffset, y - yOffset);\n\t\t\tctx.closePath();\n\t\t\tbreak;\n\t\tcase 'line':\n\t\t\tctx.beginPath();\n\t\t\tctx.moveTo(x - radius, y);\n\t\t\tctx.lineTo(x + radius, 
y);\n\t\t\tctx.closePath();\n\t\t\tbreak;\n\t\tcase 'dash':\n\t\t\tctx.beginPath();\n\t\t\tctx.moveTo(x, y);\n\t\t\tctx.lineTo(x + radius, y);\n\t\t\tctx.closePath();\n\t\t\tbreak;\n\t\t}\n\n\t\tctx.stroke();\n\t},\n\n\tclipArea: function(ctx, area) {\n\t\tctx.save();\n\t\tctx.beginPath();\n\t\tctx.rect(area.left, area.top, area.right - area.left, area.bottom - area.top);\n\t\tctx.clip();\n\t},\n\n\tunclipArea: function(ctx) {\n\t\tctx.restore();\n\t},\n\n\tlineTo: function(ctx, previous, target, flip) {\n\t\tif (target.steppedLine) {\n\t\t\tif ((target.steppedLine === 'after' && !flip) || (target.steppedLine !== 'after' && flip)) {\n\t\t\t\tctx.lineTo(previous.x, target.y);\n\t\t\t} else {\n\t\t\t\tctx.lineTo(target.x, previous.y);\n\t\t\t}\n\t\t\tctx.lineTo(target.x, target.y);\n\t\t\treturn;\n\t\t}\n\n\t\tif (!target.tension) {\n\t\t\tctx.lineTo(target.x, target.y);\n\t\t\treturn;\n\t\t}\n\n\t\tctx.bezierCurveTo(\n\t\t\tflip ? previous.controlPointPreviousX : previous.controlPointNextX,\n\t\t\tflip ? previous.controlPointPreviousY : previous.controlPointNextY,\n\t\t\tflip ? target.controlPointNextX : target.controlPointPreviousX,\n\t\t\tflip ? 
target.controlPointNextY : target.controlPointPreviousY,\n\t\t\ttarget.x,\n\t\t\ttarget.y);\n\t}\n};\n\n// DEPRECATIONS\n\n/**\n * Provided for backward compatibility, use Chart.helpers.canvas.clear instead.\n * @namespace Chart.helpers.clear\n * @deprecated since version 2.7.0\n * @todo remove at version 3\n * @private\n */\nhelpers.clear = exports.clear;\n\n/**\n * Provided for backward compatibility, use Chart.helpers.canvas.roundedRect instead.\n * @namespace Chart.helpers.drawRoundedRectangle\n * @deprecated since version 2.7.0\n * @todo remove at version 3\n * @private\n */\nhelpers.drawRoundedRectangle = function(ctx) {\n\tctx.beginPath();\n\texports.roundedRect.apply(exports, arguments);\n\tctx.closePath();\n};\n\n},{\"42\":42}],42:[function(require,module,exports){\n'use strict';\n\n/**\n * @namespace Chart.helpers\n */\nvar helpers = {\n\t/**\n\t * An empty function that can be used, for example, for optional callback.\n\t */\n\tnoop: function() {},\n\n\t/**\n\t * Returns a unique id, sequentially generated from a global variable.\n\t * @returns {Number}\n\t * @function\n\t */\n\tuid: (function() {\n\t\tvar id = 0;\n\t\treturn function() {\n\t\t\treturn id++;\n\t\t};\n\t}()),\n\n\t/**\n\t * Returns true if `value` is neither null nor undefined, else returns false.\n\t * @param {*} value - The value to test.\n\t * @returns {Boolean}\n\t * @since 2.7.0\n\t */\n\tisNullOrUndef: function(value) {\n\t\treturn value === null || typeof value === 'undefined';\n\t},\n\n\t/**\n\t * Returns true if `value` is an array, else returns false.\n\t * @param {*} value - The value to test.\n\t * @returns {Boolean}\n\t * @function\n\t */\n\tisArray: Array.isArray ? 
Array.isArray : function(value) {\n\t\treturn Object.prototype.toString.call(value) === '[object Array]';\n\t},\n\n\t/**\n\t * Returns true if `value` is an object (excluding null), else returns false.\n\t * @param {*} value - The value to test.\n\t * @returns {Boolean}\n\t * @since 2.7.0\n\t */\n\tisObject: function(value) {\n\t\treturn value !== null && Object.prototype.toString.call(value) === '[object Object]';\n\t},\n\n\t/**\n\t * Returns `value` if defined, else returns `defaultValue`.\n\t * @param {*} value - The value to return if defined.\n\t * @param {*} defaultValue - The value to return if `value` is undefined.\n\t * @returns {*}\n\t */\n\tvalueOrDefault: function(value, defaultValue) {\n\t\treturn typeof value === 'undefined' ? defaultValue : value;\n\t},\n\n\t/**\n\t * Returns value at the given `index` in array if defined, else returns `defaultValue`.\n\t * @param {Array} value - The array to lookup for value at `index`.\n\t * @param {Number} index - The index in `value` to lookup for value.\n\t * @param {*} defaultValue - The value to return if `value[index]` is undefined.\n\t * @returns {*}\n\t */\n\tvalueAtIndexOrDefault: function(value, index, defaultValue) {\n\t\treturn helpers.valueOrDefault(helpers.isArray(value) ? value[index] : value, defaultValue);\n\t},\n\n\t/**\n\t * Calls `fn` with the given `args` in the scope defined by `thisArg` and returns the\n\t * value returned by `fn`. 
If `fn` is not a function, this method returns undefined.\n\t * @param {Function} fn - The function to call.\n\t * @param {Array|undefined|null} args - The arguments with which `fn` should be called.\n\t * @param {Object} [thisArg] - The value of `this` provided for the call to `fn`.\n\t * @returns {*}\n\t */\n\tcallback: function(fn, args, thisArg) {\n\t\tif (fn && typeof fn.call === 'function') {\n\t\t\treturn fn.apply(thisArg, args);\n\t\t}\n\t},\n\n\t/**\n\t * Note(SB) for performance sake, this method should only be used when loopable type\n\t * is unknown or in none intensive code (not called often and small loopable). Else\n\t * it's preferable to use a regular for() loop and save extra function calls.\n\t * @param {Object|Array} loopable - The object or array to be iterated.\n\t * @param {Function} fn - The function to call for each item.\n\t * @param {Object} [thisArg] - The value of `this` provided for the call to `fn`.\n\t * @param {Boolean} [reverse] - If true, iterates backward on the loopable.\n\t */\n\teach: function(loopable, fn, thisArg, reverse) {\n\t\tvar i, len, keys;\n\t\tif (helpers.isArray(loopable)) {\n\t\t\tlen = loopable.length;\n\t\t\tif (reverse) {\n\t\t\t\tfor (i = len - 1; i >= 0; i--) {\n\t\t\t\t\tfn.call(thisArg, loopable[i], i);\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tfor (i = 0; i < len; i++) {\n\t\t\t\t\tfn.call(thisArg, loopable[i], i);\n\t\t\t\t}\n\t\t\t}\n\t\t} else if (helpers.isObject(loopable)) {\n\t\t\tkeys = Object.keys(loopable);\n\t\t\tlen = keys.length;\n\t\t\tfor (i = 0; i < len; i++) {\n\t\t\t\tfn.call(thisArg, loopable[keys[i]], keys[i]);\n\t\t\t}\n\t\t}\n\t},\n\n\t/**\n\t * Returns true if the `a0` and `a1` arrays have the same content, else returns false.\n\t * @see http://stackoverflow.com/a/14853974\n\t * @param {Array} a0 - The array to compare\n\t * @param {Array} a1 - The array to compare\n\t * @returns {Boolean}\n\t */\n\tarrayEquals: function(a0, a1) {\n\t\tvar i, ilen, v0, v1;\n\n\t\tif (!a0 || !a1 || a0.length 
!== a1.length) {\n\t\t\treturn false;\n\t\t}\n\n\t\tfor (i = 0, ilen = a0.length; i < ilen; ++i) {\n\t\t\tv0 = a0[i];\n\t\t\tv1 = a1[i];\n\n\t\t\tif (v0 instanceof Array && v1 instanceof Array) {\n\t\t\t\tif (!helpers.arrayEquals(v0, v1)) {\n\t\t\t\t\treturn false;\n\t\t\t\t}\n\t\t\t} else if (v0 !== v1) {\n\t\t\t\t// NOTE: two different object instances will never be equal: {x:20} != {x:20}\n\t\t\t\treturn false;\n\t\t\t}\n\t\t}\n\n\t\treturn true;\n\t},\n\n\t/**\n\t * Returns a deep copy of `source` without keeping references on objects and arrays.\n\t * @param {*} source - The value to clone.\n\t * @returns {*}\n\t */\n\tclone: function(source) {\n\t\tif (helpers.isArray(source)) {\n\t\t\treturn source.map(helpers.clone);\n\t\t}\n\n\t\tif (helpers.isObject(source)) {\n\t\t\tvar target = {};\n\t\t\tvar keys = Object.keys(source);\n\t\t\tvar klen = keys.length;\n\t\t\tvar k = 0;\n\n\t\t\tfor (; k < klen; ++k) {\n\t\t\t\ttarget[keys[k]] = helpers.clone(source[keys[k]]);\n\t\t\t}\n\n\t\t\treturn target;\n\t\t}\n\n\t\treturn source;\n\t},\n\n\t/**\n\t * The default merger when Chart.helpers.merge is called without merger option.\n\t * Note(SB): this method is also used by configMerge and scaleMerge as fallback.\n\t * @private\n\t */\n\t_merger: function(key, target, source, options) {\n\t\tvar tval = target[key];\n\t\tvar sval = source[key];\n\n\t\tif (helpers.isObject(tval) && helpers.isObject(sval)) {\n\t\t\thelpers.merge(tval, sval, options);\n\t\t} else {\n\t\t\ttarget[key] = helpers.clone(sval);\n\t\t}\n\t},\n\n\t/**\n\t * Merges source[key] in target[key] only if target[key] is undefined.\n\t * @private\n\t */\n\t_mergerIf: function(key, target, source) {\n\t\tvar tval = target[key];\n\t\tvar sval = source[key];\n\n\t\tif (helpers.isObject(tval) && helpers.isObject(sval)) {\n\t\t\thelpers.mergeIf(tval, sval);\n\t\t} else if (!target.hasOwnProperty(key)) {\n\t\t\ttarget[key] = helpers.clone(sval);\n\t\t}\n\t},\n\n\t/**\n\t * Recursively deep copies `source` 
properties into `target` with the given `options`.\n\t * IMPORTANT: `target` is not cloned and will be updated with `source` properties.\n\t * @param {Object} target - The target object in which all sources are merged into.\n\t * @param {Object|Array(Object)} source - Object(s) to merge into `target`.\n\t * @param {Object} [options] - Merging options:\n\t * @param {Function} [options.merger] - The merge method (key, target, source, options)\n\t * @returns {Object} The `target` object.\n\t */\n\tmerge: function(target, source, options) {\n\t\tvar sources = helpers.isArray(source) ? source : [source];\n\t\tvar ilen = sources.length;\n\t\tvar merge, i, keys, klen, k;\n\n\t\tif (!helpers.isObject(target)) {\n\t\t\treturn target;\n\t\t}\n\n\t\toptions = options || {};\n\t\tmerge = options.merger || helpers._merger;\n\n\t\tfor (i = 0; i < ilen; ++i) {\n\t\t\tsource = sources[i];\n\t\t\tif (!helpers.isObject(source)) {\n\t\t\t\tcontinue;\n\t\t\t}\n\n\t\t\tkeys = Object.keys(source);\n\t\t\tfor (k = 0, klen = keys.length; k < klen; ++k) {\n\t\t\t\tmerge(keys[k], target, source, options);\n\t\t\t}\n\t\t}\n\n\t\treturn target;\n\t},\n\n\t/**\n\t * Recursively deep copies `source` properties into `target` *only* if not defined in target.\n\t * IMPORTANT: `target` is not cloned and will be updated with `source` properties.\n\t * @param {Object} target - The target object in which all sources are merged into.\n\t * @param {Object|Array(Object)} source - Object(s) to merge into `target`.\n\t * @returns {Object} The `target` object.\n\t */\n\tmergeIf: function(target, source) {\n\t\treturn helpers.merge(target, source, {merger: helpers._mergerIf});\n\t},\n\n\t/**\n\t * Applies the contents of two or more objects together into the first object.\n\t * @param {Object} target - The target object in which all objects are merged into.\n\t * @param {Object} arg1 - Object containing additional properties to merge in target.\n\t * @param {Object} argN - Additional objects containing 
properties to merge in target.\n\t * @returns {Object} The `target` object.\n\t */\n\textend: function(target) {\n\t\tvar setFn = function(value, key) {\n\t\t\ttarget[key] = value;\n\t\t};\n\t\tfor (var i = 1, ilen = arguments.length; i < ilen; ++i) {\n\t\t\thelpers.each(arguments[i], setFn);\n\t\t}\n\t\treturn target;\n\t},\n\n\t/**\n\t * Basic javascript inheritance based on the model created in Backbone.js\n\t */\n\tinherits: function(extensions) {\n\t\tvar me = this;\n\t\tvar ChartElement = (extensions && extensions.hasOwnProperty('constructor')) ? extensions.constructor : function() {\n\t\t\treturn me.apply(this, arguments);\n\t\t};\n\n\t\tvar Surrogate = function() {\n\t\t\tthis.constructor = ChartElement;\n\t\t};\n\n\t\tSurrogate.prototype = me.prototype;\n\t\tChartElement.prototype = new Surrogate();\n\t\tChartElement.extend = helpers.inherits;\n\n\t\tif (extensions) {\n\t\t\thelpers.extend(ChartElement.prototype, extensions);\n\t\t}\n\n\t\tChartElement.__super__ = me.prototype;\n\t\treturn ChartElement;\n\t}\n};\n\nmodule.exports = helpers;\n\n// DEPRECATIONS\n\n/**\n * Provided for backward compatibility, use Chart.helpers.callback instead.\n * @function Chart.helpers.callCallback\n * @deprecated since version 2.6.0\n * @todo remove at version 3\n * @private\n */\nhelpers.callCallback = helpers.callback;\n\n/**\n * Provided for backward compatibility, use Array.prototype.indexOf instead.\n * Array.prototype.indexOf compatibility: Chrome, Opera, Safari, FF1.5+, IE9+\n * @function Chart.helpers.indexOf\n * @deprecated since version 2.7.0\n * @todo remove at version 3\n * @private\n */\nhelpers.indexOf = function(array, item, fromIndex) {\n\treturn Array.prototype.indexOf.call(array, item, fromIndex);\n};\n\n/**\n * Provided for backward compatibility, use Chart.helpers.valueOrDefault instead.\n * @function Chart.helpers.getValueOrDefault\n * @deprecated since version 2.7.0\n * @todo remove at version 3\n * @private\n */\nhelpers.getValueOrDefault = 
helpers.valueOrDefault;\n\n/**\n * Provided for backward compatibility, use Chart.helpers.valueAtIndexOrDefault instead.\n * @function Chart.helpers.getValueAtIndexOrDefault\n * @deprecated since version 2.7.0\n * @todo remove at version 3\n * @private\n */\nhelpers.getValueAtIndexOrDefault = helpers.valueAtIndexOrDefault;\n\n},{}],43:[function(require,module,exports){\n'use strict';\n\nvar helpers = require(42);\n\n/**\n * Easing functions adapted from Robert Penner's easing equations.\n * @namespace Chart.helpers.easingEffects\n * @see http://www.robertpenner.com/easing/\n */\nvar effects = {\n\tlinear: function(t) {\n\t\treturn t;\n\t},\n\n\teaseInQuad: function(t) {\n\t\treturn t * t;\n\t},\n\n\teaseOutQuad: function(t) {\n\t\treturn -t * (t - 2);\n\t},\n\n\teaseInOutQuad: function(t) {\n\t\tif ((t /= 0.5) < 1) {\n\t\t\treturn 0.5 * t * t;\n\t\t}\n\t\treturn -0.5 * ((--t) * (t - 2) - 1);\n\t},\n\n\teaseInCubic: function(t) {\n\t\treturn t * t * t;\n\t},\n\n\teaseOutCubic: function(t) {\n\t\treturn (t = t - 1) * t * t + 1;\n\t},\n\n\teaseInOutCubic: function(t) {\n\t\tif ((t /= 0.5) < 1) {\n\t\t\treturn 0.5 * t * t * t;\n\t\t}\n\t\treturn 0.5 * ((t -= 2) * t * t + 2);\n\t},\n\n\teaseInQuart: function(t) {\n\t\treturn t * t * t * t;\n\t},\n\n\teaseOutQuart: function(t) {\n\t\treturn -((t = t - 1) * t * t * t - 1);\n\t},\n\n\teaseInOutQuart: function(t) {\n\t\tif ((t /= 0.5) < 1) {\n\t\t\treturn 0.5 * t * t * t * t;\n\t\t}\n\t\treturn -0.5 * ((t -= 2) * t * t * t - 2);\n\t},\n\n\teaseInQuint: function(t) {\n\t\treturn t * t * t * t * t;\n\t},\n\n\teaseOutQuint: function(t) {\n\t\treturn (t = t - 1) * t * t * t * t + 1;\n\t},\n\n\teaseInOutQuint: function(t) {\n\t\tif ((t /= 0.5) < 1) {\n\t\t\treturn 0.5 * t * t * t * t * t;\n\t\t}\n\t\treturn 0.5 * ((t -= 2) * t * t * t * t + 2);\n\t},\n\n\teaseInSine: function(t) {\n\t\treturn -Math.cos(t * (Math.PI / 2)) + 1;\n\t},\n\n\teaseOutSine: function(t) {\n\t\treturn Math.sin(t * (Math.PI / 2));\n\t},\n\n\teaseInOutSine: 
function(t) {\n\t\treturn -0.5 * (Math.cos(Math.PI * t) - 1);\n\t},\n\n\teaseInExpo: function(t) {\n\t\treturn (t === 0) ? 0 : Math.pow(2, 10 * (t - 1));\n\t},\n\n\teaseOutExpo: function(t) {\n\t\treturn (t === 1) ? 1 : -Math.pow(2, -10 * t) + 1;\n\t},\n\n\teaseInOutExpo: function(t) {\n\t\tif (t === 0) {\n\t\t\treturn 0;\n\t\t}\n\t\tif (t === 1) {\n\t\t\treturn 1;\n\t\t}\n\t\tif ((t /= 0.5) < 1) {\n\t\t\treturn 0.5 * Math.pow(2, 10 * (t - 1));\n\t\t}\n\t\treturn 0.5 * (-Math.pow(2, -10 * --t) + 2);\n\t},\n\n\teaseInCirc: function(t) {\n\t\tif (t >= 1) {\n\t\t\treturn t;\n\t\t}\n\t\treturn -(Math.sqrt(1 - t * t) - 1);\n\t},\n\n\teaseOutCirc: function(t) {\n\t\treturn Math.sqrt(1 - (t = t - 1) * t);\n\t},\n\n\teaseInOutCirc: function(t) {\n\t\tif ((t /= 0.5) < 1) {\n\t\t\treturn -0.5 * (Math.sqrt(1 - t * t) - 1);\n\t\t}\n\t\treturn 0.5 * (Math.sqrt(1 - (t -= 2) * t) + 1);\n\t},\n\n\teaseInElastic: function(t) {\n\t\tvar s = 1.70158;\n\t\tvar p = 0;\n\t\tvar a = 1;\n\t\tif (t === 0) {\n\t\t\treturn 0;\n\t\t}\n\t\tif (t === 1) {\n\t\t\treturn 1;\n\t\t}\n\t\tif (!p) {\n\t\t\tp = 0.3;\n\t\t}\n\t\tif (a < 1) {\n\t\t\ta = 1;\n\t\t\ts = p / 4;\n\t\t} else {\n\t\t\ts = p / (2 * Math.PI) * Math.asin(1 / a);\n\t\t}\n\t\treturn -(a * Math.pow(2, 10 * (t -= 1)) * Math.sin((t - s) * (2 * Math.PI) / p));\n\t},\n\n\teaseOutElastic: function(t) {\n\t\tvar s = 1.70158;\n\t\tvar p = 0;\n\t\tvar a = 1;\n\t\tif (t === 0) {\n\t\t\treturn 0;\n\t\t}\n\t\tif (t === 1) {\n\t\t\treturn 1;\n\t\t}\n\t\tif (!p) {\n\t\t\tp = 0.3;\n\t\t}\n\t\tif (a < 1) {\n\t\t\ta = 1;\n\t\t\ts = p / 4;\n\t\t} else {\n\t\t\ts = p / (2 * Math.PI) * Math.asin(1 / a);\n\t\t}\n\t\treturn a * Math.pow(2, -10 * t) * Math.sin((t - s) * (2 * Math.PI) / p) + 1;\n\t},\n\n\teaseInOutElastic: function(t) {\n\t\tvar s = 1.70158;\n\t\tvar p = 0;\n\t\tvar a = 1;\n\t\tif (t === 0) {\n\t\t\treturn 0;\n\t\t}\n\t\tif ((t /= 0.5) === 2) {\n\t\t\treturn 1;\n\t\t}\n\t\tif (!p) {\n\t\t\tp = 0.45;\n\t\t}\n\t\tif (a < 1) {\n\t\t\ta = 
1;\n\t\t\ts = p / 4;\n\t\t} else {\n\t\t\ts = p / (2 * Math.PI) * Math.asin(1 / a);\n\t\t}\n\t\tif (t < 1) {\n\t\t\treturn -0.5 * (a * Math.pow(2, 10 * (t -= 1)) * Math.sin((t - s) * (2 * Math.PI) / p));\n\t\t}\n\t\treturn a * Math.pow(2, -10 * (t -= 1)) * Math.sin((t - s) * (2 * Math.PI) / p) * 0.5 + 1;\n\t},\n\teaseInBack: function(t) {\n\t\tvar s = 1.70158;\n\t\treturn t * t * ((s + 1) * t - s);\n\t},\n\n\teaseOutBack: function(t) {\n\t\tvar s = 1.70158;\n\t\treturn (t = t - 1) * t * ((s + 1) * t + s) + 1;\n\t},\n\n\teaseInOutBack: function(t) {\n\t\tvar s = 1.70158;\n\t\tif ((t /= 0.5) < 1) {\n\t\t\treturn 0.5 * (t * t * (((s *= (1.525)) + 1) * t - s));\n\t\t}\n\t\treturn 0.5 * ((t -= 2) * t * (((s *= (1.525)) + 1) * t + s) + 2);\n\t},\n\n\teaseInBounce: function(t) {\n\t\treturn 1 - effects.easeOutBounce(1 - t);\n\t},\n\n\teaseOutBounce: function(t) {\n\t\tif (t < (1 / 2.75)) {\n\t\t\treturn 7.5625 * t * t;\n\t\t}\n\t\tif (t < (2 / 2.75)) {\n\t\t\treturn 7.5625 * (t -= (1.5 / 2.75)) * t + 0.75;\n\t\t}\n\t\tif (t < (2.5 / 2.75)) {\n\t\t\treturn 7.5625 * (t -= (2.25 / 2.75)) * t + 0.9375;\n\t\t}\n\t\treturn 7.5625 * (t -= (2.625 / 2.75)) * t + 0.984375;\n\t},\n\n\teaseInOutBounce: function(t) {\n\t\tif (t < 0.5) {\n\t\t\treturn effects.easeInBounce(t * 2) * 0.5;\n\t\t}\n\t\treturn effects.easeOutBounce(t * 2 - 1) * 0.5 + 0.5;\n\t}\n};\n\nmodule.exports = {\n\teffects: effects\n};\n\n// DEPRECATIONS\n\n/**\n * Provided for backward compatibility, use Chart.helpers.easing.effects instead.\n * @function Chart.helpers.easingEffects\n * @deprecated since version 2.7.0\n * @todo remove at version 3\n * @private\n */\nhelpers.easingEffects = effects;\n\n},{\"42\":42}],44:[function(require,module,exports){\n'use strict';\n\nvar helpers = require(42);\n\n/**\n * @alias Chart.helpers.options\n * @namespace\n */\nmodule.exports = {\n\t/**\n\t * Converts the given line height `value` in pixels for a specific font `size`.\n\t * @param {Number|String} value - The lineHeight 
to parse (eg. 1.6, '14px', '75%', '1.6em').\n\t * @param {Number} size - The font size (in pixels) used to resolve relative `value`.\n\t * @returns {Number} The effective line height in pixels (size * 1.2 if value is invalid).\n\t * @see https://developer.mozilla.org/en-US/docs/Web/CSS/line-height\n\t * @since 2.7.0\n\t */\n\ttoLineHeight: function(value, size) {\n\t\tvar matches = ('' + value).match(/^(normal|(\\d+(?:\\.\\d+)?)(px|em|%)?)$/);\n\t\tif (!matches || matches[1] === 'normal') {\n\t\t\treturn size * 1.2;\n\t\t}\n\n\t\tvalue = +matches[2];\n\n\t\tswitch (matches[3]) {\n\t\tcase 'px':\n\t\t\treturn value;\n\t\tcase '%':\n\t\t\tvalue /= 100;\n\t\t\tbreak;\n\t\tdefault:\n\t\t\tbreak;\n\t\t}\n\n\t\treturn size * value;\n\t},\n\n\t/**\n\t * Converts the given value into a padding object with pre-computed width/height.\n\t * @param {Number|Object} value - If a number, set the value to all TRBL component,\n\t *  else, if and object, use defined properties and sets undefined ones to 0.\n\t * @returns {Object} The padding values (top, right, bottom, left, width, height)\n\t * @since 2.7.0\n\t */\n\ttoPadding: function(value) {\n\t\tvar t, r, b, l;\n\n\t\tif (helpers.isObject(value)) {\n\t\t\tt = +value.top || 0;\n\t\t\tr = +value.right || 0;\n\t\t\tb = +value.bottom || 0;\n\t\t\tl = +value.left || 0;\n\t\t} else {\n\t\t\tt = r = b = l = +value || 0;\n\t\t}\n\n\t\treturn {\n\t\t\ttop: t,\n\t\t\tright: r,\n\t\t\tbottom: b,\n\t\t\tleft: l,\n\t\t\theight: t + b,\n\t\t\twidth: l + r\n\t\t};\n\t},\n\n\t/**\n\t * Evaluates the given `inputs` sequentially and returns the first defined value.\n\t * @param {Array[]} inputs - An array of values, falling back to the last value.\n\t * @param {Object} [context] - If defined and the current value is a function, the value\n\t * is called with `context` as first argument and the result becomes the new input.\n\t * @param {Number} [index] - If defined and the current value is an array, the value\n\t * at `index` become the new 
input.\n\t * @since 2.7.0\n\t */\n\tresolve: function(inputs, context, index) {\n\t\tvar i, ilen, value;\n\n\t\tfor (i = 0, ilen = inputs.length; i < ilen; ++i) {\n\t\t\tvalue = inputs[i];\n\t\t\tif (value === undefined) {\n\t\t\t\tcontinue;\n\t\t\t}\n\t\t\tif (context !== undefined && typeof value === 'function') {\n\t\t\t\tvalue = value(context);\n\t\t\t}\n\t\t\tif (index !== undefined && helpers.isArray(value)) {\n\t\t\t\tvalue = value[index];\n\t\t\t}\n\t\t\tif (value !== undefined) {\n\t\t\t\treturn value;\n\t\t\t}\n\t\t}\n\t}\n};\n\n},{\"42\":42}],45:[function(require,module,exports){\n'use strict';\n\nmodule.exports = require(42);\nmodule.exports.easing = require(43);\nmodule.exports.canvas = require(41);\nmodule.exports.options = require(44);\n\n},{\"41\":41,\"42\":42,\"43\":43,\"44\":44}],46:[function(require,module,exports){\n/**\n * Platform fallback implementation (minimal).\n * @see https://github.com/chartjs/Chart.js/pull/4591#issuecomment-319575939\n */\n\nmodule.exports = {\n\tacquireContext: function(item) {\n\t\tif (item && item.canvas) {\n\t\t\t// Support for any object associated to a canvas (including a context2d)\n\t\t\titem = item.canvas;\n\t\t}\n\n\t\treturn item && item.getContext('2d') || null;\n\t}\n};\n\n},{}],47:[function(require,module,exports){\n/**\n * Chart.Platform implementation for targeting a web browser\n */\n\n'use strict';\n\nvar helpers = require(45);\n\nvar EXPANDO_KEY = '$chartjs';\nvar CSS_PREFIX = 'chartjs-';\nvar CSS_RENDER_MONITOR = CSS_PREFIX + 'render-monitor';\nvar CSS_RENDER_ANIMATION = CSS_PREFIX + 'render-animation';\nvar ANIMATION_START_EVENTS = ['animationstart', 'webkitAnimationStart'];\n\n/**\n * DOM event types -> Chart.js event types.\n * Note: only events with different types are mapped.\n * @see https://developer.mozilla.org/en-US/docs/Web/Events\n */\nvar EVENT_TYPES = {\n\ttouchstart: 'mousedown',\n\ttouchmove: 'mousemove',\n\ttouchend: 'mouseup',\n\tpointerenter: 'mouseenter',\n\tpointerdown: 
'mousedown',\n\tpointermove: 'mousemove',\n\tpointerup: 'mouseup',\n\tpointerleave: 'mouseout',\n\tpointerout: 'mouseout'\n};\n\n/**\n * The \"used\" size is the final value of a dimension property after all calculations have\n * been performed. This method uses the computed style of `element` but returns undefined\n * if the computed style is not expressed in pixels. That can happen in some cases where\n * `element` has a size relative to its parent and this last one is not yet displayed,\n * for example because of `display: none` on a parent node.\n * @see https://developer.mozilla.org/en-US/docs/Web/CSS/used_value\n * @returns {Number} Size in pixels or undefined if unknown.\n */\nfunction readUsedSize(element, property) {\n\tvar value = helpers.getStyle(element, property);\n\tvar matches = value && value.match(/^(\\d+)(\\.\\d+)?px$/);\n\treturn matches ? Number(matches[1]) : undefined;\n}\n\n/**\n * Initializes the canvas style and render size without modifying the canvas display size,\n * since responsiveness is handled by the controller.resize() method. 
The config is used\n * to determine the aspect ratio to apply in case no explicit height has been specified.\n */\nfunction initCanvas(canvas, config) {\n\tvar style = canvas.style;\n\n\t// NOTE(SB) canvas.getAttribute('width') !== canvas.width: in the first case it\n\t// returns null or '' if no explicit value has been set to the canvas attribute.\n\tvar renderHeight = canvas.getAttribute('height');\n\tvar renderWidth = canvas.getAttribute('width');\n\n\t// Chart.js modifies some canvas values that we want to restore on destroy\n\tcanvas[EXPANDO_KEY] = {\n\t\tinitial: {\n\t\t\theight: renderHeight,\n\t\t\twidth: renderWidth,\n\t\t\tstyle: {\n\t\t\t\tdisplay: style.display,\n\t\t\t\theight: style.height,\n\t\t\t\twidth: style.width\n\t\t\t}\n\t\t}\n\t};\n\n\t// Force canvas to display as block to avoid extra space caused by inline\n\t// elements, which would interfere with the responsive resize process.\n\t// https://github.com/chartjs/Chart.js/issues/2538\n\tstyle.display = style.display || 'block';\n\n\tif (renderWidth === null || renderWidth === '') {\n\t\tvar displayWidth = readUsedSize(canvas, 'width');\n\t\tif (displayWidth !== undefined) {\n\t\t\tcanvas.width = displayWidth;\n\t\t}\n\t}\n\n\tif (renderHeight === null || renderHeight === '') {\n\t\tif (canvas.style.height === '') {\n\t\t\t// If no explicit render height and style height, let's apply the aspect ratio,\n\t\t\t// which one can be specified by the user but also by charts as default option\n\t\t\t// (i.e. options.aspectRatio). 
If not specified, use canvas aspect ratio of 2.\n\t\t\tcanvas.height = canvas.width / (config.options.aspectRatio || 2);\n\t\t} else {\n\t\t\tvar displayHeight = readUsedSize(canvas, 'height');\n\t\t\tif (displayWidth !== undefined) {\n\t\t\t\tcanvas.height = displayHeight;\n\t\t\t}\n\t\t}\n\t}\n\n\treturn canvas;\n}\n\n/**\n * Detects support for options object argument in addEventListener.\n * https://developer.mozilla.org/en-US/docs/Web/API/EventTarget/addEventListener#Safely_detecting_option_support\n * @private\n */\nvar supportsEventListenerOptions = (function() {\n\tvar supports = false;\n\ttry {\n\t\tvar options = Object.defineProperty({}, 'passive', {\n\t\t\tget: function() {\n\t\t\t\tsupports = true;\n\t\t\t}\n\t\t});\n\t\twindow.addEventListener('e', null, options);\n\t} catch (e) {\n\t\t// continue regardless of error\n\t}\n\treturn supports;\n}());\n\n// Default passive to true as expected by Chrome for 'touchstart' and 'touchend' events.\n// https://github.com/chartjs/Chart.js/issues/4287\nvar eventListenerOptions = supportsEventListenerOptions ? {passive: true} : false;\n\nfunction addEventListener(node, type, listener) {\n\tnode.addEventListener(type, listener, eventListenerOptions);\n}\n\nfunction removeEventListener(node, type, listener) {\n\tnode.removeEventListener(type, listener, eventListenerOptions);\n}\n\nfunction createEvent(type, chart, x, y, nativeEvent) {\n\treturn {\n\t\ttype: type,\n\t\tchart: chart,\n\t\tnative: nativeEvent || null,\n\t\tx: x !== undefined ? x : null,\n\t\ty: y !== undefined ? 
y : null,\n\t};\n}\n\nfunction fromNativeEvent(event, chart) {\n\tvar type = EVENT_TYPES[event.type] || event.type;\n\tvar pos = helpers.getRelativePosition(event, chart);\n\treturn createEvent(type, chart, pos.x, pos.y, event);\n}\n\nfunction throttled(fn, thisArg) {\n\tvar ticking = false;\n\tvar args = [];\n\n\treturn function() {\n\t\targs = Array.prototype.slice.call(arguments);\n\t\tthisArg = thisArg || this;\n\n\t\tif (!ticking) {\n\t\t\tticking = true;\n\t\t\thelpers.requestAnimFrame.call(window, function() {\n\t\t\t\tticking = false;\n\t\t\t\tfn.apply(thisArg, args);\n\t\t\t});\n\t\t}\n\t};\n}\n\n// Implementation based on https://github.com/marcj/css-element-queries\nfunction createResizer(handler) {\n\tvar resizer = document.createElement('div');\n\tvar cls = CSS_PREFIX + 'size-monitor';\n\tvar maxSize = 1000000;\n\tvar style =\n\t\t'position:absolute;' +\n\t\t'left:0;' +\n\t\t'top:0;' +\n\t\t'right:0;' +\n\t\t'bottom:0;' +\n\t\t'overflow:hidden;' +\n\t\t'pointer-events:none;' +\n\t\t'visibility:hidden;' +\n\t\t'z-index:-1;';\n\n\tresizer.style.cssText = style;\n\tresizer.className = cls;\n\tresizer.innerHTML =\n\t\t'<div class=\"' + cls + '-expand\" style=\"' + style + '\">' +\n\t\t\t'<div style=\"' +\n\t\t\t\t'position:absolute;' +\n\t\t\t\t'width:' + maxSize + 'px;' +\n\t\t\t\t'height:' + maxSize + 'px;' +\n\t\t\t\t'left:0;' +\n\t\t\t\t'top:0\">' +\n\t\t\t'</div>' +\n\t\t'</div>' +\n\t\t'<div class=\"' + cls + '-shrink\" style=\"' + style + '\">' +\n\t\t\t'<div style=\"' +\n\t\t\t\t'position:absolute;' +\n\t\t\t\t'width:200%;' +\n\t\t\t\t'height:200%;' +\n\t\t\t\t'left:0; ' +\n\t\t\t\t'top:0\">' +\n\t\t\t'</div>' +\n\t\t'</div>';\n\n\tvar expand = resizer.childNodes[0];\n\tvar shrink = resizer.childNodes[1];\n\n\tresizer._reset = function() {\n\t\texpand.scrollLeft = maxSize;\n\t\texpand.scrollTop = maxSize;\n\t\tshrink.scrollLeft = maxSize;\n\t\tshrink.scrollTop = maxSize;\n\t};\n\tvar onScroll = function() 
{\n\t\tresizer._reset();\n\t\thandler();\n\t};\n\n\taddEventListener(expand, 'scroll', onScroll.bind(expand, 'expand'));\n\taddEventListener(shrink, 'scroll', onScroll.bind(shrink, 'shrink'));\n\n\treturn resizer;\n}\n\n// https://davidwalsh.name/detect-node-insertion\nfunction watchForRender(node, handler) {\n\tvar expando = node[EXPANDO_KEY] || (node[EXPANDO_KEY] = {});\n\tvar proxy = expando.renderProxy = function(e) {\n\t\tif (e.animationName === CSS_RENDER_ANIMATION) {\n\t\t\thandler();\n\t\t}\n\t};\n\n\thelpers.each(ANIMATION_START_EVENTS, function(type) {\n\t\taddEventListener(node, type, proxy);\n\t});\n\n\t// #4737: Chrome might skip the CSS animation when the CSS_RENDER_MONITOR class\n\t// is removed then added back immediately (same animation frame?). Accessing the\n\t// `offsetParent` property will force a reflow and re-evaluate the CSS animation.\n\t// https://gist.github.com/paulirish/5d52fb081b3570c81e3a#box-metrics\n\t// https://github.com/chartjs/Chart.js/issues/4737\n\texpando.reflow = !!node.offsetParent;\n\n\tnode.classList.add(CSS_RENDER_MONITOR);\n}\n\nfunction unwatchForRender(node) {\n\tvar expando = node[EXPANDO_KEY] || {};\n\tvar proxy = expando.renderProxy;\n\n\tif (proxy) {\n\t\thelpers.each(ANIMATION_START_EVENTS, function(type) {\n\t\t\tremoveEventListener(node, type, proxy);\n\t\t});\n\n\t\tdelete expando.renderProxy;\n\t}\n\n\tnode.classList.remove(CSS_RENDER_MONITOR);\n}\n\nfunction addResizeListener(node, listener, chart) {\n\tvar expando = node[EXPANDO_KEY] || (node[EXPANDO_KEY] = {});\n\n\t// Let's keep track of this added resizer and thus avoid DOM query when removing it.\n\tvar resizer = expando.resizer = createResizer(throttled(function() {\n\t\tif (expando.resizer) {\n\t\t\treturn listener(createEvent('resize', chart));\n\t\t}\n\t}));\n\n\t// The resizer needs to be attached to the node parent, so we first need to be\n\t// sure that `node` is attached to the DOM before injecting the resizer element.\n\twatchForRender(node, 
function() {\n\t\tif (expando.resizer) {\n\t\t\tvar container = node.parentNode;\n\t\t\tif (container && container !== resizer.parentNode) {\n\t\t\t\tcontainer.insertBefore(resizer, container.firstChild);\n\t\t\t}\n\n\t\t\t// The container size might have changed, let's reset the resizer state.\n\t\t\tresizer._reset();\n\t\t}\n\t});\n}\n\nfunction removeResizeListener(node) {\n\tvar expando = node[EXPANDO_KEY] || {};\n\tvar resizer = expando.resizer;\n\n\tdelete expando.resizer;\n\tunwatchForRender(node);\n\n\tif (resizer && resizer.parentNode) {\n\t\tresizer.parentNode.removeChild(resizer);\n\t}\n}\n\nfunction injectCSS(platform, css) {\n\t// http://stackoverflow.com/q/3922139\n\tvar style = platform._style || document.createElement('style');\n\tif (!platform._style) {\n\t\tplatform._style = style;\n\t\tcss = '/* Chart.js */\\n' + css;\n\t\tstyle.setAttribute('type', 'text/css');\n\t\tdocument.getElementsByTagName('head')[0].appendChild(style);\n\t}\n\n\tstyle.appendChild(document.createTextNode(css));\n}\n\nmodule.exports = {\n\t/**\n\t * This property holds whether this platform is enabled for the current environment.\n\t * Currently used by platform.js to select the proper implementation.\n\t * @private\n\t */\n\t_enabled: typeof window !== 'undefined' && typeof document !== 'undefined',\n\n\tinitialize: function() {\n\t\tvar keyframes = 'from{opacity:0.99}to{opacity:1}';\n\n\t\tinjectCSS(this,\n\t\t\t// DOM rendering detection\n\t\t\t// https://davidwalsh.name/detect-node-insertion\n\t\t\t'@-webkit-keyframes ' + CSS_RENDER_ANIMATION + '{' + keyframes + '}' +\n\t\t\t'@keyframes ' + CSS_RENDER_ANIMATION + '{' + keyframes + '}' +\n\t\t\t'.' 
+ CSS_RENDER_MONITOR + '{' +\n\t\t\t\t'-webkit-animation:' + CSS_RENDER_ANIMATION + ' 0.001s;' +\n\t\t\t\t'animation:' + CSS_RENDER_ANIMATION + ' 0.001s;' +\n\t\t\t'}'\n\t\t);\n\t},\n\n\tacquireContext: function(item, config) {\n\t\tif (typeof item === 'string') {\n\t\t\titem = document.getElementById(item);\n\t\t} else if (item.length) {\n\t\t\t// Support for array based queries (such as jQuery)\n\t\t\titem = item[0];\n\t\t}\n\n\t\tif (item && item.canvas) {\n\t\t\t// Support for any object associated to a canvas (including a context2d)\n\t\t\titem = item.canvas;\n\t\t}\n\n\t\t// To prevent canvas fingerprinting, some add-ons undefine the getContext\n\t\t// method, for example: https://github.com/kkapsner/CanvasBlocker\n\t\t// https://github.com/chartjs/Chart.js/issues/2807\n\t\tvar context = item && item.getContext && item.getContext('2d');\n\n\t\t// `instanceof HTMLCanvasElement/CanvasRenderingContext2D` fails when the item is\n\t\t// inside an iframe or when running in a protected environment. 
We could guess the\n\t\t// types from their toString() value but let's keep things flexible and assume it's\n\t\t// a sufficient condition if the item has a context2D which has item as `canvas`.\n\t\t// https://github.com/chartjs/Chart.js/issues/3887\n\t\t// https://github.com/chartjs/Chart.js/issues/4102\n\t\t// https://github.com/chartjs/Chart.js/issues/4152\n\t\tif (context && context.canvas === item) {\n\t\t\tinitCanvas(item, config);\n\t\t\treturn context;\n\t\t}\n\n\t\treturn null;\n\t},\n\n\treleaseContext: function(context) {\n\t\tvar canvas = context.canvas;\n\t\tif (!canvas[EXPANDO_KEY]) {\n\t\t\treturn;\n\t\t}\n\n\t\tvar initial = canvas[EXPANDO_KEY].initial;\n\t\t['height', 'width'].forEach(function(prop) {\n\t\t\tvar value = initial[prop];\n\t\t\tif (helpers.isNullOrUndef(value)) {\n\t\t\t\tcanvas.removeAttribute(prop);\n\t\t\t} else {\n\t\t\t\tcanvas.setAttribute(prop, value);\n\t\t\t}\n\t\t});\n\n\t\thelpers.each(initial.style || {}, function(value, key) {\n\t\t\tcanvas.style[key] = value;\n\t\t});\n\n\t\t// The canvas render size might have been changed (and thus the state stack discarded),\n\t\t// we can't use save() and restore() to restore the initial state. 
So make sure that at\n\t\t// least the canvas context is reset to the default state by setting the canvas width.\n\t\t// https://www.w3.org/TR/2011/WD-html5-20110525/the-canvas-element.html\n\t\tcanvas.width = canvas.width;\n\n\t\tdelete canvas[EXPANDO_KEY];\n\t},\n\n\taddEventListener: function(chart, type, listener) {\n\t\tvar canvas = chart.canvas;\n\t\tif (type === 'resize') {\n\t\t\t// Note: the resize event is not supported on all browsers.\n\t\t\taddResizeListener(canvas, listener, chart);\n\t\t\treturn;\n\t\t}\n\n\t\tvar expando = listener[EXPANDO_KEY] || (listener[EXPANDO_KEY] = {});\n\t\tvar proxies = expando.proxies || (expando.proxies = {});\n\t\tvar proxy = proxies[chart.id + '_' + type] = function(event) {\n\t\t\tlistener(fromNativeEvent(event, chart));\n\t\t};\n\n\t\taddEventListener(canvas, type, proxy);\n\t},\n\n\tremoveEventListener: function(chart, type, listener) {\n\t\tvar canvas = chart.canvas;\n\t\tif (type === 'resize') {\n\t\t\t// Note: the resize event is not supported on all browsers.\n\t\t\tremoveResizeListener(canvas, listener);\n\t\t\treturn;\n\t\t}\n\n\t\tvar expando = listener[EXPANDO_KEY] || {};\n\t\tvar proxies = expando.proxies || {};\n\t\tvar proxy = proxies[chart.id + '_' + type];\n\t\tif (!proxy) {\n\t\t\treturn;\n\t\t}\n\n\t\tremoveEventListener(canvas, type, proxy);\n\t}\n};\n\n// DEPRECATIONS\n\n/**\n * Provided for backward compatibility, use EventTarget.addEventListener instead.\n * EventTarget.addEventListener compatibility: Chrome, Opera 7, Safari, FF1.5+, IE9+\n * @see https://developer.mozilla.org/en-US/docs/Web/API/EventTarget/addEventListener\n * @function Chart.helpers.addEvent\n * @deprecated since version 2.7.0\n * @todo remove at version 3\n * @private\n */\nhelpers.addEvent = addEventListener;\n\n/**\n * Provided for backward compatibility, use EventTarget.removeEventListener instead.\n * EventTarget.removeEventListener compatibility: Chrome, Opera 7, Safari, FF1.5+, IE9+\n * @see 
https://developer.mozilla.org/en-US/docs/Web/API/EventTarget/removeEventListener\n * @function Chart.helpers.removeEvent\n * @deprecated since version 2.7.0\n * @todo remove at version 3\n * @private\n */\nhelpers.removeEvent = removeEventListener;\n\n},{\"45\":45}],48:[function(require,module,exports){\n'use strict';\n\nvar helpers = require(45);\nvar basic = require(46);\nvar dom = require(47);\n\n// @TODO Make possible to select another platform at build time.\nvar implementation = dom._enabled ? dom : basic;\n\n/**\n * @namespace Chart.platform\n * @see https://chartjs.gitbooks.io/proposals/content/Platform.html\n * @since 2.4.0\n */\nmodule.exports = helpers.extend({\n\t/**\n\t * @since 2.7.0\n\t */\n\tinitialize: function() {},\n\n\t/**\n\t * Called at chart construction time, returns a context2d instance implementing\n\t * the [W3C Canvas 2D Context API standard]{@link https://www.w3.org/TR/2dcontext/}.\n\t * @param {*} item - The native item from which to acquire context (platform specific)\n\t * @param {Object} options - The chart options\n\t * @returns {CanvasRenderingContext2D} context2d instance\n\t */\n\tacquireContext: function() {},\n\n\t/**\n\t * Called at chart destruction time, releases any resources associated to the context\n\t * previously returned by the acquireContext() method.\n\t * @param {CanvasRenderingContext2D} context - The context2d instance\n\t * @returns {Boolean} true if the method succeeded, else false\n\t */\n\treleaseContext: function() {},\n\n\t/**\n\t * Registers the specified listener on the given chart.\n\t * @param {Chart} chart - Chart from which to listen for event\n\t * @param {String} type - The ({@link IEvent}) type to listen for\n\t * @param {Function} listener - Receives a notification (an object that implements\n\t * the {@link IEvent} interface) when an event of the specified type occurs.\n\t */\n\taddEventListener: function() {},\n\n\t/**\n\t * Removes the specified listener previously registered with 
addEventListener.\n\t * @param {Chart} chart -Chart from which to remove the listener\n\t * @param {String} type - The ({@link IEvent}) type to remove\n\t * @param {Function} listener - The listener function to remove from the event target.\n\t */\n\tremoveEventListener: function() {}\n\n}, implementation);\n\n/**\n * @interface IPlatform\n * Allows abstracting platform dependencies away from the chart\n * @borrows Chart.platform.acquireContext as acquireContext\n * @borrows Chart.platform.releaseContext as releaseContext\n * @borrows Chart.platform.addEventListener as addEventListener\n * @borrows Chart.platform.removeEventListener as removeEventListener\n */\n\n/**\n * @interface IEvent\n * @prop {String} type - The event type name, possible values are:\n * 'contextmenu', 'mouseenter', 'mousedown', 'mousemove', 'mouseup', 'mouseout',\n * 'click', 'dblclick', 'keydown', 'keypress', 'keyup' and 'resize'\n * @prop {*} native - The original native event (null for emulated events, e.g. 'resize')\n * @prop {Number} x - The mouse x position, relative to the canvas (null for incompatible events)\n * @prop {Number} y - The mouse y position, relative to the canvas (null for incompatible events)\n */\n\n},{\"45\":45,\"46\":46,\"47\":47}],49:[function(require,module,exports){\n'use strict';\n\nmodule.exports = {};\nmodule.exports.filler = require(50);\nmodule.exports.legend = require(51);\nmodule.exports.title = require(52);\n\n},{\"50\":50,\"51\":51,\"52\":52}],50:[function(require,module,exports){\n/**\n * Plugin based on discussion from the following Chart.js issues:\n * @see https://github.com/chartjs/Chart.js/issues/2380#issuecomment-279961569\n * @see https://github.com/chartjs/Chart.js/issues/2440#issuecomment-256461897\n */\n\n'use strict';\n\nvar defaults = require(25);\nvar elements = require(40);\nvar helpers = require(45);\n\ndefaults._set('global', {\n\tplugins: {\n\t\tfiller: {\n\t\t\tpropagate: true\n\t\t}\n\t}\n});\n\nvar mappers = {\n\tdataset: 
function(source) {\n\t\tvar index = source.fill;\n\t\tvar chart = source.chart;\n\t\tvar meta = chart.getDatasetMeta(index);\n\t\tvar visible = meta && chart.isDatasetVisible(index);\n\t\tvar points = (visible && meta.dataset._children) || [];\n\t\tvar length = points.length || 0;\n\n\t\treturn !length ? null : function(point, i) {\n\t\t\treturn (i < length && points[i]._view) || null;\n\t\t};\n\t},\n\n\tboundary: function(source) {\n\t\tvar boundary = source.boundary;\n\t\tvar x = boundary ? boundary.x : null;\n\t\tvar y = boundary ? boundary.y : null;\n\n\t\treturn function(point) {\n\t\t\treturn {\n\t\t\t\tx: x === null ? point.x : x,\n\t\t\t\ty: y === null ? point.y : y,\n\t\t\t};\n\t\t};\n\t}\n};\n\n// @todo if (fill[0] === '#')\nfunction decodeFill(el, index, count) {\n\tvar model = el._model || {};\n\tvar fill = model.fill;\n\tvar target;\n\n\tif (fill === undefined) {\n\t\tfill = !!model.backgroundColor;\n\t}\n\n\tif (fill === false || fill === null) {\n\t\treturn false;\n\t}\n\n\tif (fill === true) {\n\t\treturn 'origin';\n\t}\n\n\ttarget = parseFloat(fill, 10);\n\tif (isFinite(target) && Math.floor(target) === target) {\n\t\tif (fill[0] === '-' || fill[0] === '+') {\n\t\t\ttarget = index + target;\n\t\t}\n\n\t\tif (target === index || target < 0 || target >= count) {\n\t\t\treturn false;\n\t\t}\n\n\t\treturn target;\n\t}\n\n\tswitch (fill) {\n\t// compatibility\n\tcase 'bottom':\n\t\treturn 'start';\n\tcase 'top':\n\t\treturn 'end';\n\tcase 'zero':\n\t\treturn 'origin';\n\t// supported boundaries\n\tcase 'origin':\n\tcase 'start':\n\tcase 'end':\n\t\treturn fill;\n\t// invalid fill values\n\tdefault:\n\t\treturn false;\n\t}\n}\n\nfunction computeBoundary(source) {\n\tvar model = source.el._model || {};\n\tvar scale = source.el._scale || {};\n\tvar fill = source.fill;\n\tvar target = null;\n\tvar horizontal;\n\n\tif (isFinite(fill)) {\n\t\treturn null;\n\t}\n\n\t// Backward compatibility: until v3, we still need to support boundary values set on\n\t// the 
model (scaleTop, scaleBottom and scaleZero) because some external plugins and\n\t// controllers might still use it (e.g. the Smith chart).\n\n\tif (fill === 'start') {\n\t\ttarget = model.scaleBottom === undefined ? scale.bottom : model.scaleBottom;\n\t} else if (fill === 'end') {\n\t\ttarget = model.scaleTop === undefined ? scale.top : model.scaleTop;\n\t} else if (model.scaleZero !== undefined) {\n\t\ttarget = model.scaleZero;\n\t} else if (scale.getBasePosition) {\n\t\ttarget = scale.getBasePosition();\n\t} else if (scale.getBasePixel) {\n\t\ttarget = scale.getBasePixel();\n\t}\n\n\tif (target !== undefined && target !== null) {\n\t\tif (target.x !== undefined && target.y !== undefined) {\n\t\t\treturn target;\n\t\t}\n\n\t\tif (typeof target === 'number' && isFinite(target)) {\n\t\t\thorizontal = scale.isHorizontal();\n\t\t\treturn {\n\t\t\t\tx: horizontal ? target : null,\n\t\t\t\ty: horizontal ? null : target\n\t\t\t};\n\t\t}\n\t}\n\n\treturn null;\n}\n\nfunction resolveTarget(sources, index, propagate) {\n\tvar source = sources[index];\n\tvar fill = source.fill;\n\tvar visited = [index];\n\tvar target;\n\n\tif (!propagate) {\n\t\treturn fill;\n\t}\n\n\twhile (fill !== false && visited.indexOf(fill) === -1) {\n\t\tif (!isFinite(fill)) {\n\t\t\treturn fill;\n\t\t}\n\n\t\ttarget = sources[fill];\n\t\tif (!target) {\n\t\t\treturn false;\n\t\t}\n\n\t\tif (target.visible) {\n\t\t\treturn fill;\n\t\t}\n\n\t\tvisited.push(fill);\n\t\tfill = target.fill;\n\t}\n\n\treturn false;\n}\n\nfunction createMapper(source) {\n\tvar fill = source.fill;\n\tvar type = 'dataset';\n\n\tif (fill === false) {\n\t\treturn null;\n\t}\n\n\tif (!isFinite(fill)) {\n\t\ttype = 'boundary';\n\t}\n\n\treturn mappers[type](source);\n}\n\nfunction isDrawable(point) {\n\treturn point && !point.skip;\n}\n\nfunction drawArea(ctx, curve0, curve1, len0, len1) {\n\tvar i;\n\n\tif (!len0 || !len1) {\n\t\treturn;\n\t}\n\n\t// building first area curve (normal)\n\tctx.moveTo(curve0[0].x, 
curve0[0].y);\n\tfor (i = 1; i < len0; ++i) {\n\t\thelpers.canvas.lineTo(ctx, curve0[i - 1], curve0[i]);\n\t}\n\n\t// joining the two area curves\n\tctx.lineTo(curve1[len1 - 1].x, curve1[len1 - 1].y);\n\n\t// building opposite area curve (reverse)\n\tfor (i = len1 - 1; i > 0; --i) {\n\t\thelpers.canvas.lineTo(ctx, curve1[i], curve1[i - 1], true);\n\t}\n}\n\nfunction doFill(ctx, points, mapper, view, color, loop) {\n\tvar count = points.length;\n\tvar span = view.spanGaps;\n\tvar curve0 = [];\n\tvar curve1 = [];\n\tvar len0 = 0;\n\tvar len1 = 0;\n\tvar i, ilen, index, p0, p1, d0, d1;\n\n\tctx.beginPath();\n\n\tfor (i = 0, ilen = (count + !!loop); i < ilen; ++i) {\n\t\tindex = i % count;\n\t\tp0 = points[index]._view;\n\t\tp1 = mapper(p0, index, view);\n\t\td0 = isDrawable(p0);\n\t\td1 = isDrawable(p1);\n\n\t\tif (d0 && d1) {\n\t\t\tlen0 = curve0.push(p0);\n\t\t\tlen1 = curve1.push(p1);\n\t\t} else if (len0 && len1) {\n\t\t\tif (!span) {\n\t\t\t\tdrawArea(ctx, curve0, curve1, len0, len1);\n\t\t\t\tlen0 = len1 = 0;\n\t\t\t\tcurve0 = [];\n\t\t\t\tcurve1 = [];\n\t\t\t} else {\n\t\t\t\tif (d0) {\n\t\t\t\t\tcurve0.push(p0);\n\t\t\t\t}\n\t\t\t\tif (d1) {\n\t\t\t\t\tcurve1.push(p1);\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tdrawArea(ctx, curve0, curve1, len0, len1);\n\n\tctx.closePath();\n\tctx.fillStyle = color;\n\tctx.fill();\n}\n\nmodule.exports = {\n\tid: 'filler',\n\n\tafterDatasetsUpdate: function(chart, options) {\n\t\tvar count = (chart.data.datasets || []).length;\n\t\tvar propagate = options.propagate;\n\t\tvar sources = [];\n\t\tvar meta, i, el, source;\n\n\t\tfor (i = 0; i < count; ++i) {\n\t\t\tmeta = chart.getDatasetMeta(i);\n\t\t\tel = meta.dataset;\n\t\t\tsource = null;\n\n\t\t\tif (el && el._model && el instanceof elements.Line) {\n\t\t\t\tsource = {\n\t\t\t\t\tvisible: chart.isDatasetVisible(i),\n\t\t\t\t\tfill: decodeFill(el, i, count),\n\t\t\t\t\tchart: chart,\n\t\t\t\t\tel: el\n\t\t\t\t};\n\t\t\t}\n\n\t\t\tmeta.$filler = 
source;\n\t\t\tsources.push(source);\n\t\t}\n\n\t\tfor (i = 0; i < count; ++i) {\n\t\t\tsource = sources[i];\n\t\t\tif (!source) {\n\t\t\t\tcontinue;\n\t\t\t}\n\n\t\t\tsource.fill = resolveTarget(sources, i, propagate);\n\t\t\tsource.boundary = computeBoundary(source);\n\t\t\tsource.mapper = createMapper(source);\n\t\t}\n\t},\n\n\tbeforeDatasetDraw: function(chart, args) {\n\t\tvar meta = args.meta.$filler;\n\t\tif (!meta) {\n\t\t\treturn;\n\t\t}\n\n\t\tvar ctx = chart.ctx;\n\t\tvar el = meta.el;\n\t\tvar view = el._view;\n\t\tvar points = el._children || [];\n\t\tvar mapper = meta.mapper;\n\t\tvar color = view.backgroundColor || defaults.global.defaultColor;\n\n\t\tif (mapper && color && points.length) {\n\t\t\thelpers.canvas.clipArea(ctx, chart.chartArea);\n\t\t\tdoFill(ctx, points, mapper, view, color, el._loop);\n\t\t\thelpers.canvas.unclipArea(ctx);\n\t\t}\n\t}\n};\n\n},{\"25\":25,\"40\":40,\"45\":45}],51:[function(require,module,exports){\n'use strict';\n\nvar defaults = require(25);\nvar Element = require(26);\nvar helpers = require(45);\nvar layouts = require(30);\n\nvar noop = helpers.noop;\n\ndefaults._set('global', {\n\tlegend: {\n\t\tdisplay: true,\n\t\tposition: 'top',\n\t\tfullWidth: true,\n\t\treverse: false,\n\t\tweight: 1000,\n\n\t\t// a callback that will handle\n\t\tonClick: function(e, legendItem) {\n\t\t\tvar index = legendItem.datasetIndex;\n\t\t\tvar ci = this.chart;\n\t\t\tvar meta = ci.getDatasetMeta(index);\n\n\t\t\t// See controller.isDatasetVisible comment\n\t\t\tmeta.hidden = meta.hidden === null ? !ci.data.datasets[index].hidden : null;\n\n\t\t\t// We hid a dataset ... 
rerender the chart\n\t\t\tci.update();\n\t\t},\n\n\t\tonHover: null,\n\n\t\tlabels: {\n\t\t\tboxWidth: 40,\n\t\t\tpadding: 10,\n\t\t\t// Generates labels shown in the legend\n\t\t\t// Valid properties to return:\n\t\t\t// text : text to display\n\t\t\t// fillStyle : fill of coloured box\n\t\t\t// strokeStyle: stroke of coloured box\n\t\t\t// hidden : if this legend item refers to a hidden item\n\t\t\t// lineCap : cap style for line\n\t\t\t// lineDash\n\t\t\t// lineDashOffset :\n\t\t\t// lineJoin :\n\t\t\t// lineWidth :\n\t\t\tgenerateLabels: function(chart) {\n\t\t\t\tvar data = chart.data;\n\t\t\t\treturn helpers.isArray(data.datasets) ? data.datasets.map(function(dataset, i) {\n\t\t\t\t\treturn {\n\t\t\t\t\t\ttext: dataset.label,\n\t\t\t\t\t\tfillStyle: (!helpers.isArray(dataset.backgroundColor) ? dataset.backgroundColor : dataset.backgroundColor[0]),\n\t\t\t\t\t\thidden: !chart.isDatasetVisible(i),\n\t\t\t\t\t\tlineCap: dataset.borderCapStyle,\n\t\t\t\t\t\tlineDash: dataset.borderDash,\n\t\t\t\t\t\tlineDashOffset: dataset.borderDashOffset,\n\t\t\t\t\t\tlineJoin: dataset.borderJoinStyle,\n\t\t\t\t\t\tlineWidth: dataset.borderWidth,\n\t\t\t\t\t\tstrokeStyle: dataset.borderColor,\n\t\t\t\t\t\tpointStyle: dataset.pointStyle,\n\n\t\t\t\t\t\t// Below is extra data used for toggling the datasets\n\t\t\t\t\t\tdatasetIndex: i\n\t\t\t\t\t};\n\t\t\t\t}, this) : [];\n\t\t\t}\n\t\t}\n\t},\n\n\tlegendCallback: function(chart) {\n\t\tvar text = [];\n\t\ttext.push('<ul class=\"' + chart.id + '-legend\">');\n\t\tfor (var i = 0; i < chart.data.datasets.length; i++) {\n\t\t\ttext.push('<li><span style=\"background-color:' + chart.data.datasets[i].backgroundColor + '\"></span>');\n\t\t\tif (chart.data.datasets[i].label) {\n\t\t\t\ttext.push(chart.data.datasets[i].label);\n\t\t\t}\n\t\t\ttext.push('</li>');\n\t\t}\n\t\ttext.push('</ul>');\n\t\treturn text.join('');\n\t}\n});\n\n/**\n * Helper function to get the box width based on the usePointStyle option\n * @param labelopts 
{Object} the label options on the legend\n * @param fontSize {Number} the label font size\n * @return {Number} width of the color box area\n */\nfunction getBoxWidth(labelOpts, fontSize) {\n\treturn labelOpts.usePointStyle ?\n\t\tfontSize * Math.SQRT2 :\n\t\tlabelOpts.boxWidth;\n}\n\n/**\n * IMPORTANT: this class is exposed publicly as Chart.Legend, backward compatibility required!\n */\nvar Legend = Element.extend({\n\n\tinitialize: function(config) {\n\t\thelpers.extend(this, config);\n\n\t\t// Contains hit boxes for each dataset (in dataset order)\n\t\tthis.legendHitBoxes = [];\n\n\t\t// Are we in doughnut mode which has a different data type\n\t\tthis.doughnutMode = false;\n\t},\n\n\t// These methods are ordered by lifecycle. Utilities then follow.\n\t// Any function defined here is inherited by all legend types.\n\t// Any function can be extended by the legend type\n\n\tbeforeUpdate: noop,\n\tupdate: function(maxWidth, maxHeight, margins) {\n\t\tvar me = this;\n\n\t\t// Update Lifecycle - Probably don't want to ever extend or overwrite this function ;)\n\t\tme.beforeUpdate();\n\n\t\t// Absorb the master measurements\n\t\tme.maxWidth = maxWidth;\n\t\tme.maxHeight = maxHeight;\n\t\tme.margins = margins;\n\n\t\t// Dimensions\n\t\tme.beforeSetDimensions();\n\t\tme.setDimensions();\n\t\tme.afterSetDimensions();\n\t\t// Labels\n\t\tme.beforeBuildLabels();\n\t\tme.buildLabels();\n\t\tme.afterBuildLabels();\n\n\t\t// Fit\n\t\tme.beforeFit();\n\t\tme.fit();\n\t\tme.afterFit();\n\t\t//\n\t\tme.afterUpdate();\n\n\t\treturn me.minSize;\n\t},\n\tafterUpdate: noop,\n\n\t//\n\n\tbeforeSetDimensions: noop,\n\tsetDimensions: function() {\n\t\tvar me = this;\n\t\t// Set the unconstrained dimension before label rotation\n\t\tif (me.isHorizontal()) {\n\t\t\t// Reset position before calculating rotation\n\t\t\tme.width = me.maxWidth;\n\t\t\tme.left = 0;\n\t\t\tme.right = me.width;\n\t\t} else {\n\t\t\tme.height = me.maxHeight;\n\n\t\t\t// Reset position before calculating 
rotation\n\t\t\tme.top = 0;\n\t\t\tme.bottom = me.height;\n\t\t}\n\n\t\t// Reset padding\n\t\tme.paddingLeft = 0;\n\t\tme.paddingTop = 0;\n\t\tme.paddingRight = 0;\n\t\tme.paddingBottom = 0;\n\n\t\t// Reset minSize\n\t\tme.minSize = {\n\t\t\twidth: 0,\n\t\t\theight: 0\n\t\t};\n\t},\n\tafterSetDimensions: noop,\n\n\t//\n\n\tbeforeBuildLabels: noop,\n\tbuildLabels: function() {\n\t\tvar me = this;\n\t\tvar labelOpts = me.options.labels || {};\n\t\tvar legendItems = helpers.callback(labelOpts.generateLabels, [me.chart], me) || [];\n\n\t\tif (labelOpts.filter) {\n\t\t\tlegendItems = legendItems.filter(function(item) {\n\t\t\t\treturn labelOpts.filter(item, me.chart.data);\n\t\t\t});\n\t\t}\n\n\t\tif (me.options.reverse) {\n\t\t\tlegendItems.reverse();\n\t\t}\n\n\t\tme.legendItems = legendItems;\n\t},\n\tafterBuildLabels: noop,\n\n\t//\n\n\tbeforeFit: noop,\n\tfit: function() {\n\t\tvar me = this;\n\t\tvar opts = me.options;\n\t\tvar labelOpts = opts.labels;\n\t\tvar display = opts.display;\n\n\t\tvar ctx = me.ctx;\n\n\t\tvar globalDefault = defaults.global;\n\t\tvar valueOrDefault = helpers.valueOrDefault;\n\t\tvar fontSize = valueOrDefault(labelOpts.fontSize, globalDefault.defaultFontSize);\n\t\tvar fontStyle = valueOrDefault(labelOpts.fontStyle, globalDefault.defaultFontStyle);\n\t\tvar fontFamily = valueOrDefault(labelOpts.fontFamily, globalDefault.defaultFontFamily);\n\t\tvar labelFont = helpers.fontString(fontSize, fontStyle, fontFamily);\n\n\t\t// Reset hit boxes\n\t\tvar hitboxes = me.legendHitBoxes = [];\n\n\t\tvar minSize = me.minSize;\n\t\tvar isHorizontal = me.isHorizontal();\n\n\t\tif (isHorizontal) {\n\t\t\tminSize.width = me.maxWidth; // fill all the width\n\t\t\tminSize.height = display ? 10 : 0;\n\t\t} else {\n\t\t\tminSize.width = display ? 
10 : 0;\n\t\t\tminSize.height = me.maxHeight; // fill all the height\n\t\t}\n\n\t\t// Increase sizes here\n\t\tif (display) {\n\t\t\tctx.font = labelFont;\n\n\t\t\tif (isHorizontal) {\n\t\t\t\t// Labels\n\n\t\t\t\t// Width of each line of legend boxes. Labels wrap onto multiple lines when there are too many to fit on one\n\t\t\t\tvar lineWidths = me.lineWidths = [0];\n\t\t\t\tvar totalHeight = me.legendItems.length ? fontSize + (labelOpts.padding) : 0;\n\n\t\t\t\tctx.textAlign = 'left';\n\t\t\t\tctx.textBaseline = 'top';\n\n\t\t\t\thelpers.each(me.legendItems, function(legendItem, i) {\n\t\t\t\t\tvar boxWidth = getBoxWidth(labelOpts, fontSize);\n\t\t\t\t\tvar width = boxWidth + (fontSize / 2) + ctx.measureText(legendItem.text).width;\n\n\t\t\t\t\tif (lineWidths[lineWidths.length - 1] + width + labelOpts.padding >= me.width) {\n\t\t\t\t\t\ttotalHeight += fontSize + (labelOpts.padding);\n\t\t\t\t\t\tlineWidths[lineWidths.length] = me.left;\n\t\t\t\t\t}\n\n\t\t\t\t\t// Store the hitbox width and height here. 
Final position will be updated in `draw`\n\t\t\t\t\thitboxes[i] = {\n\t\t\t\t\t\tleft: 0,\n\t\t\t\t\t\ttop: 0,\n\t\t\t\t\t\twidth: width,\n\t\t\t\t\t\theight: fontSize\n\t\t\t\t\t};\n\n\t\t\t\t\tlineWidths[lineWidths.length - 1] += width + labelOpts.padding;\n\t\t\t\t});\n\n\t\t\t\tminSize.height += totalHeight;\n\n\t\t\t} else {\n\t\t\t\tvar vPadding = labelOpts.padding;\n\t\t\t\tvar columnWidths = me.columnWidths = [];\n\t\t\t\tvar totalWidth = labelOpts.padding;\n\t\t\t\tvar currentColWidth = 0;\n\t\t\t\tvar currentColHeight = 0;\n\t\t\t\tvar itemHeight = fontSize + vPadding;\n\n\t\t\t\thelpers.each(me.legendItems, function(legendItem, i) {\n\t\t\t\t\tvar boxWidth = getBoxWidth(labelOpts, fontSize);\n\t\t\t\t\tvar itemWidth = boxWidth + (fontSize / 2) + ctx.measureText(legendItem.text).width;\n\n\t\t\t\t\t// If too tall, go to new column\n\t\t\t\t\tif (currentColHeight + itemHeight > minSize.height) {\n\t\t\t\t\t\ttotalWidth += currentColWidth + labelOpts.padding;\n\t\t\t\t\t\tcolumnWidths.push(currentColWidth); // previous column width\n\n\t\t\t\t\t\tcurrentColWidth = 0;\n\t\t\t\t\t\tcurrentColHeight = 0;\n\t\t\t\t\t}\n\n\t\t\t\t\t// Get max width\n\t\t\t\t\tcurrentColWidth = Math.max(currentColWidth, itemWidth);\n\t\t\t\t\tcurrentColHeight += itemHeight;\n\n\t\t\t\t\t// Store the hitbox width and height here. 
Final position will be updated in `draw`\n\t\t\t\t\thitboxes[i] = {\n\t\t\t\t\t\tleft: 0,\n\t\t\t\t\t\ttop: 0,\n\t\t\t\t\t\twidth: itemWidth,\n\t\t\t\t\t\theight: fontSize\n\t\t\t\t\t};\n\t\t\t\t});\n\n\t\t\t\ttotalWidth += currentColWidth;\n\t\t\t\tcolumnWidths.push(currentColWidth);\n\t\t\t\tminSize.width += totalWidth;\n\t\t\t}\n\t\t}\n\n\t\tme.width = minSize.width;\n\t\tme.height = minSize.height;\n\t},\n\tafterFit: noop,\n\n\t// Shared Methods\n\tisHorizontal: function() {\n\t\treturn this.options.position === 'top' || this.options.position === 'bottom';\n\t},\n\n\t// Actually draw the legend on the canvas\n\tdraw: function() {\n\t\tvar me = this;\n\t\tvar opts = me.options;\n\t\tvar labelOpts = opts.labels;\n\t\tvar globalDefault = defaults.global;\n\t\tvar lineDefault = globalDefault.elements.line;\n\t\tvar legendWidth = me.width;\n\t\tvar lineWidths = me.lineWidths;\n\n\t\tif (opts.display) {\n\t\t\tvar ctx = me.ctx;\n\t\t\tvar valueOrDefault = helpers.valueOrDefault;\n\t\t\tvar fontColor = valueOrDefault(labelOpts.fontColor, globalDefault.defaultFontColor);\n\t\t\tvar fontSize = valueOrDefault(labelOpts.fontSize, globalDefault.defaultFontSize);\n\t\t\tvar fontStyle = valueOrDefault(labelOpts.fontStyle, globalDefault.defaultFontStyle);\n\t\t\tvar fontFamily = valueOrDefault(labelOpts.fontFamily, globalDefault.defaultFontFamily);\n\t\t\tvar labelFont = helpers.fontString(fontSize, fontStyle, fontFamily);\n\t\t\tvar cursor;\n\n\t\t\t// Canvas setup\n\t\t\tctx.textAlign = 'left';\n\t\t\tctx.textBaseline = 'middle';\n\t\t\tctx.lineWidth = 0.5;\n\t\t\tctx.strokeStyle = fontColor; // for strikethrough effect\n\t\t\tctx.fillStyle = fontColor; // render in correct colour\n\t\t\tctx.font = labelFont;\n\n\t\t\tvar boxWidth = getBoxWidth(labelOpts, fontSize);\n\t\t\tvar hitboxes = me.legendHitBoxes;\n\n\t\t\t// current position\n\t\t\tvar drawLegendBox = function(x, y, legendItem) {\n\t\t\t\tif (isNaN(boxWidth) || boxWidth <= 0) 
{\n\t\t\t\t\treturn;\n\t\t\t\t}\n\n\t\t\t\t// Set the ctx for the box\n\t\t\t\tctx.save();\n\n\t\t\t\tctx.fillStyle = valueOrDefault(legendItem.fillStyle, globalDefault.defaultColor);\n\t\t\t\tctx.lineCap = valueOrDefault(legendItem.lineCap, lineDefault.borderCapStyle);\n\t\t\t\tctx.lineDashOffset = valueOrDefault(legendItem.lineDashOffset, lineDefault.borderDashOffset);\n\t\t\t\tctx.lineJoin = valueOrDefault(legendItem.lineJoin, lineDefault.borderJoinStyle);\n\t\t\t\tctx.lineWidth = valueOrDefault(legendItem.lineWidth, lineDefault.borderWidth);\n\t\t\t\tctx.strokeStyle = valueOrDefault(legendItem.strokeStyle, globalDefault.defaultColor);\n\t\t\t\tvar isLineWidthZero = (valueOrDefault(legendItem.lineWidth, lineDefault.borderWidth) === 0);\n\n\t\t\t\tif (ctx.setLineDash) {\n\t\t\t\t\t// IE 9 and 10 do not support line dash\n\t\t\t\t\tctx.setLineDash(valueOrDefault(legendItem.lineDash, lineDefault.borderDash));\n\t\t\t\t}\n\n\t\t\t\tif (opts.labels && opts.labels.usePointStyle) {\n\t\t\t\t\t// Recalculate x and y for drawPoint() because its expecting\n\t\t\t\t\t// x and y to be center of figure (instead of top left)\n\t\t\t\t\tvar radius = fontSize * Math.SQRT2 / 2;\n\t\t\t\t\tvar offSet = radius / Math.SQRT2;\n\t\t\t\t\tvar centerX = x + offSet;\n\t\t\t\t\tvar centerY = y + offSet;\n\n\t\t\t\t\t// Draw pointStyle as legend symbol\n\t\t\t\t\thelpers.canvas.drawPoint(ctx, legendItem.pointStyle, radius, centerX, centerY);\n\t\t\t\t} else {\n\t\t\t\t\t// Draw box as legend symbol\n\t\t\t\t\tif (!isLineWidthZero) {\n\t\t\t\t\t\tctx.strokeRect(x, y, boxWidth, fontSize);\n\t\t\t\t\t}\n\t\t\t\t\tctx.fillRect(x, y, boxWidth, fontSize);\n\t\t\t\t}\n\n\t\t\t\tctx.restore();\n\t\t\t};\n\t\t\tvar fillText = function(x, y, legendItem, textWidth) {\n\t\t\t\tvar halfFontSize = fontSize / 2;\n\t\t\t\tvar xLeft = boxWidth + halfFontSize + x;\n\t\t\t\tvar yMiddle = y + halfFontSize;\n\n\t\t\t\tctx.fillText(legendItem.text, xLeft, yMiddle);\n\n\t\t\t\tif (legendItem.hidden) 
{\n\t\t\t\t\t// Strikethrough the text if hidden\n\t\t\t\t\tctx.beginPath();\n\t\t\t\t\tctx.lineWidth = 2;\n\t\t\t\t\tctx.moveTo(xLeft, yMiddle);\n\t\t\t\t\tctx.lineTo(xLeft + textWidth, yMiddle);\n\t\t\t\t\tctx.stroke();\n\t\t\t\t}\n\t\t\t};\n\n\t\t\t// Horizontal\n\t\t\tvar isHorizontal = me.isHorizontal();\n\t\t\tif (isHorizontal) {\n\t\t\t\tcursor = {\n\t\t\t\t\tx: me.left + ((legendWidth - lineWidths[0]) / 2),\n\t\t\t\t\ty: me.top + labelOpts.padding,\n\t\t\t\t\tline: 0\n\t\t\t\t};\n\t\t\t} else {\n\t\t\t\tcursor = {\n\t\t\t\t\tx: me.left + labelOpts.padding,\n\t\t\t\t\ty: me.top + labelOpts.padding,\n\t\t\t\t\tline: 0\n\t\t\t\t};\n\t\t\t}\n\n\t\t\tvar itemHeight = fontSize + labelOpts.padding;\n\t\t\thelpers.each(me.legendItems, function(legendItem, i) {\n\t\t\t\tvar textWidth = ctx.measureText(legendItem.text).width;\n\t\t\t\tvar width = boxWidth + (fontSize / 2) + textWidth;\n\t\t\t\tvar x = cursor.x;\n\t\t\t\tvar y = cursor.y;\n\n\t\t\t\tif (isHorizontal) {\n\t\t\t\t\tif (x + width >= legendWidth) {\n\t\t\t\t\t\ty = cursor.y += itemHeight;\n\t\t\t\t\t\tcursor.line++;\n\t\t\t\t\t\tx = cursor.x = me.left + ((legendWidth - lineWidths[cursor.line]) / 2);\n\t\t\t\t\t}\n\t\t\t\t} else if (y + itemHeight > me.bottom) {\n\t\t\t\t\tx = cursor.x = x + me.columnWidths[cursor.line] + labelOpts.padding;\n\t\t\t\t\ty = cursor.y = me.top + labelOpts.padding;\n\t\t\t\t\tcursor.line++;\n\t\t\t\t}\n\n\t\t\t\tdrawLegendBox(x, y, legendItem);\n\n\t\t\t\thitboxes[i].left = x;\n\t\t\t\thitboxes[i].top = y;\n\n\t\t\t\t// Fill the actual label\n\t\t\t\tfillText(x, y, legendItem, textWidth);\n\n\t\t\t\tif (isHorizontal) {\n\t\t\t\t\tcursor.x += width + (labelOpts.padding);\n\t\t\t\t} else {\n\t\t\t\t\tcursor.y += itemHeight;\n\t\t\t\t}\n\n\t\t\t});\n\t\t}\n\t},\n\n\t/**\n\t * Handle an event\n\t * @private\n\t * @param {IEvent} event - The event to handle\n\t * @return {Boolean} true if a change occured\n\t */\n\thandleEvent: function(e) {\n\t\tvar me = this;\n\t\tvar opts = 
me.options;\n\t\tvar type = e.type === 'mouseup' ? 'click' : e.type;\n\t\tvar changed = false;\n\n\t\tif (type === 'mousemove') {\n\t\t\tif (!opts.onHover) {\n\t\t\t\treturn;\n\t\t\t}\n\t\t} else if (type === 'click') {\n\t\t\tif (!opts.onClick) {\n\t\t\t\treturn;\n\t\t\t}\n\t\t} else {\n\t\t\treturn;\n\t\t}\n\n\t\t// Chart event already has relative position in it\n\t\tvar x = e.x;\n\t\tvar y = e.y;\n\n\t\tif (x >= me.left && x <= me.right && y >= me.top && y <= me.bottom) {\n\t\t\t// See if we are touching one of the dataset boxes\n\t\t\tvar lh = me.legendHitBoxes;\n\t\t\tfor (var i = 0; i < lh.length; ++i) {\n\t\t\t\tvar hitBox = lh[i];\n\n\t\t\t\tif (x >= hitBox.left && x <= hitBox.left + hitBox.width && y >= hitBox.top && y <= hitBox.top + hitBox.height) {\n\t\t\t\t\t// Touching an element\n\t\t\t\t\tif (type === 'click') {\n\t\t\t\t\t\t// use e.native for backwards compatibility\n\t\t\t\t\t\topts.onClick.call(me, e.native, me.legendItems[i]);\n\t\t\t\t\t\tchanged = true;\n\t\t\t\t\t\tbreak;\n\t\t\t\t\t} else if (type === 'mousemove') {\n\t\t\t\t\t\t// use e.native for backwards compatibility\n\t\t\t\t\t\topts.onHover.call(me, e.native, me.legendItems[i]);\n\t\t\t\t\t\tchanged = true;\n\t\t\t\t\t\tbreak;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\treturn changed;\n\t}\n});\n\nfunction createNewLegendAndAttach(chart, legendOpts) {\n\tvar legend = new Legend({\n\t\tctx: chart.ctx,\n\t\toptions: legendOpts,\n\t\tchart: chart\n\t});\n\n\tlayouts.configure(chart, legend, legendOpts);\n\tlayouts.addBox(chart, legend);\n\tchart.legend = legend;\n}\n\nmodule.exports = {\n\tid: 'legend',\n\n\t/**\n\t * Backward compatibility: since 2.1.5, the legend is registered as a plugin, making\n\t * Chart.Legend obsolete. 
To avoid a breaking change, we export the Legend as part of\n\t * the plugin, which one will be re-exposed in the chart.js file.\n\t * https://github.com/chartjs/Chart.js/pull/2640\n\t * @private\n\t */\n\t_element: Legend,\n\n\tbeforeInit: function(chart) {\n\t\tvar legendOpts = chart.options.legend;\n\n\t\tif (legendOpts) {\n\t\t\tcreateNewLegendAndAttach(chart, legendOpts);\n\t\t}\n\t},\n\n\tbeforeUpdate: function(chart) {\n\t\tvar legendOpts = chart.options.legend;\n\t\tvar legend = chart.legend;\n\n\t\tif (legendOpts) {\n\t\t\thelpers.mergeIf(legendOpts, defaults.global.legend);\n\n\t\t\tif (legend) {\n\t\t\t\tlayouts.configure(chart, legend, legendOpts);\n\t\t\t\tlegend.options = legendOpts;\n\t\t\t} else {\n\t\t\t\tcreateNewLegendAndAttach(chart, legendOpts);\n\t\t\t}\n\t\t} else if (legend) {\n\t\t\tlayouts.removeBox(chart, legend);\n\t\t\tdelete chart.legend;\n\t\t}\n\t},\n\n\tafterEvent: function(chart, e) {\n\t\tvar legend = chart.legend;\n\t\tif (legend) {\n\t\t\tlegend.handleEvent(e);\n\t\t}\n\t}\n};\n\n},{\"25\":25,\"26\":26,\"30\":30,\"45\":45}],52:[function(require,module,exports){\n'use strict';\n\nvar defaults = require(25);\nvar Element = require(26);\nvar helpers = require(45);\nvar layouts = require(30);\n\nvar noop = helpers.noop;\n\ndefaults._set('global', {\n\ttitle: {\n\t\tdisplay: false,\n\t\tfontStyle: 'bold',\n\t\tfullWidth: true,\n\t\tlineHeight: 1.2,\n\t\tpadding: 10,\n\t\tposition: 'top',\n\t\ttext: '',\n\t\tweight: 2000         // by default greater than legend (1000) to be above\n\t}\n});\n\n/**\n * IMPORTANT: this class is exposed publicly as Chart.Legend, backward compatibility required!\n */\nvar Title = Element.extend({\n\tinitialize: function(config) {\n\t\tvar me = this;\n\t\thelpers.extend(me, config);\n\n\t\t// Contains hit boxes for each dataset (in dataset order)\n\t\tme.legendHitBoxes = [];\n\t},\n\n\t// These methods are ordered by lifecycle. 
Utilities then follow.\n\n\tbeforeUpdate: noop,\n\tupdate: function(maxWidth, maxHeight, margins) {\n\t\tvar me = this;\n\n\t\t// Update Lifecycle - Probably don't want to ever extend or overwrite this function ;)\n\t\tme.beforeUpdate();\n\n\t\t// Absorb the master measurements\n\t\tme.maxWidth = maxWidth;\n\t\tme.maxHeight = maxHeight;\n\t\tme.margins = margins;\n\n\t\t// Dimensions\n\t\tme.beforeSetDimensions();\n\t\tme.setDimensions();\n\t\tme.afterSetDimensions();\n\t\t// Labels\n\t\tme.beforeBuildLabels();\n\t\tme.buildLabels();\n\t\tme.afterBuildLabels();\n\n\t\t// Fit\n\t\tme.beforeFit();\n\t\tme.fit();\n\t\tme.afterFit();\n\t\t//\n\t\tme.afterUpdate();\n\n\t\treturn me.minSize;\n\n\t},\n\tafterUpdate: noop,\n\n\t//\n\n\tbeforeSetDimensions: noop,\n\tsetDimensions: function() {\n\t\tvar me = this;\n\t\t// Set the unconstrained dimension before label rotation\n\t\tif (me.isHorizontal()) {\n\t\t\t// Reset position before calculating rotation\n\t\t\tme.width = me.maxWidth;\n\t\t\tme.left = 0;\n\t\t\tme.right = me.width;\n\t\t} else {\n\t\t\tme.height = me.maxHeight;\n\n\t\t\t// Reset position before calculating rotation\n\t\t\tme.top = 0;\n\t\t\tme.bottom = me.height;\n\t\t}\n\n\t\t// Reset padding\n\t\tme.paddingLeft = 0;\n\t\tme.paddingTop = 0;\n\t\tme.paddingRight = 0;\n\t\tme.paddingBottom = 0;\n\n\t\t// Reset minSize\n\t\tme.minSize = {\n\t\t\twidth: 0,\n\t\t\theight: 0\n\t\t};\n\t},\n\tafterSetDimensions: noop,\n\n\t//\n\n\tbeforeBuildLabels: noop,\n\tbuildLabels: noop,\n\tafterBuildLabels: noop,\n\n\t//\n\n\tbeforeFit: noop,\n\tfit: function() {\n\t\tvar me = this;\n\t\tvar valueOrDefault = helpers.valueOrDefault;\n\t\tvar opts = me.options;\n\t\tvar display = opts.display;\n\t\tvar fontSize = valueOrDefault(opts.fontSize, defaults.global.defaultFontSize);\n\t\tvar minSize = me.minSize;\n\t\tvar lineCount = helpers.isArray(opts.text) ? 
opts.text.length : 1;\n\t\tvar lineHeight = helpers.options.toLineHeight(opts.lineHeight, fontSize);\n\t\tvar textSize = display ? (lineCount * lineHeight) + (opts.padding * 2) : 0;\n\n\t\tif (me.isHorizontal()) {\n\t\t\tminSize.width = me.maxWidth; // fill all the width\n\t\t\tminSize.height = textSize;\n\t\t} else {\n\t\t\tminSize.width = textSize;\n\t\t\tminSize.height = me.maxHeight; // fill all the height\n\t\t}\n\n\t\tme.width = minSize.width;\n\t\tme.height = minSize.height;\n\n\t},\n\tafterFit: noop,\n\n\t// Shared Methods\n\tisHorizontal: function() {\n\t\tvar pos = this.options.position;\n\t\treturn pos === 'top' || pos === 'bottom';\n\t},\n\n\t// Actually draw the title block on the canvas\n\tdraw: function() {\n\t\tvar me = this;\n\t\tvar ctx = me.ctx;\n\t\tvar valueOrDefault = helpers.valueOrDefault;\n\t\tvar opts = me.options;\n\t\tvar globalDefaults = defaults.global;\n\n\t\tif (opts.display) {\n\t\t\tvar fontSize = valueOrDefault(opts.fontSize, globalDefaults.defaultFontSize);\n\t\t\tvar fontStyle = valueOrDefault(opts.fontStyle, globalDefaults.defaultFontStyle);\n\t\t\tvar fontFamily = valueOrDefault(opts.fontFamily, globalDefaults.defaultFontFamily);\n\t\t\tvar titleFont = helpers.fontString(fontSize, fontStyle, fontFamily);\n\t\t\tvar lineHeight = helpers.options.toLineHeight(opts.lineHeight, fontSize);\n\t\t\tvar offset = lineHeight / 2 + opts.padding;\n\t\t\tvar rotation = 0;\n\t\t\tvar top = me.top;\n\t\t\tvar left = me.left;\n\t\t\tvar bottom = me.bottom;\n\t\t\tvar right = me.right;\n\t\t\tvar maxWidth, titleX, titleY;\n\n\t\t\tctx.fillStyle = valueOrDefault(opts.fontColor, globalDefaults.defaultFontColor); // render in correct colour\n\t\t\tctx.font = titleFont;\n\n\t\t\t// Horizontal\n\t\t\tif (me.isHorizontal()) {\n\t\t\t\ttitleX = left + ((right - left) / 2); // midpoint of the width\n\t\t\t\ttitleY = top + offset;\n\t\t\t\tmaxWidth = right - left;\n\t\t\t} else {\n\t\t\t\ttitleX = opts.position === 'left' ? 
left + offset : right - offset;\n\t\t\t\ttitleY = top + ((bottom - top) / 2);\n\t\t\t\tmaxWidth = bottom - top;\n\t\t\t\trotation = Math.PI * (opts.position === 'left' ? -0.5 : 0.5);\n\t\t\t}\n\n\t\t\tctx.save();\n\t\t\tctx.translate(titleX, titleY);\n\t\t\tctx.rotate(rotation);\n\t\t\tctx.textAlign = 'center';\n\t\t\tctx.textBaseline = 'middle';\n\n\t\t\tvar text = opts.text;\n\t\t\tif (helpers.isArray(text)) {\n\t\t\t\tvar y = 0;\n\t\t\t\tfor (var i = 0; i < text.length; ++i) {\n\t\t\t\t\tctx.fillText(text[i], 0, y, maxWidth);\n\t\t\t\t\ty += lineHeight;\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tctx.fillText(text, 0, 0, maxWidth);\n\t\t\t}\n\n\t\t\tctx.restore();\n\t\t}\n\t}\n});\n\nfunction createNewTitleBlockAndAttach(chart, titleOpts) {\n\tvar title = new Title({\n\t\tctx: chart.ctx,\n\t\toptions: titleOpts,\n\t\tchart: chart\n\t});\n\n\tlayouts.configure(chart, title, titleOpts);\n\tlayouts.addBox(chart, title);\n\tchart.titleBlock = title;\n}\n\nmodule.exports = {\n\tid: 'title',\n\n\t/**\n\t * Backward compatibility: since 2.1.5, the title is registered as a plugin, making\n\t * Chart.Title obsolete. 
To avoid a breaking change, we export the Title as part of\n\t * the plugin, which one will be re-exposed in the chart.js file.\n\t * https://github.com/chartjs/Chart.js/pull/2640\n\t * @private\n\t */\n\t_element: Title,\n\n\tbeforeInit: function(chart) {\n\t\tvar titleOpts = chart.options.title;\n\n\t\tif (titleOpts) {\n\t\t\tcreateNewTitleBlockAndAttach(chart, titleOpts);\n\t\t}\n\t},\n\n\tbeforeUpdate: function(chart) {\n\t\tvar titleOpts = chart.options.title;\n\t\tvar titleBlock = chart.titleBlock;\n\n\t\tif (titleOpts) {\n\t\t\thelpers.mergeIf(titleOpts, defaults.global.title);\n\n\t\t\tif (titleBlock) {\n\t\t\t\tlayouts.configure(chart, titleBlock, titleOpts);\n\t\t\t\ttitleBlock.options = titleOpts;\n\t\t\t} else {\n\t\t\t\tcreateNewTitleBlockAndAttach(chart, titleOpts);\n\t\t\t}\n\t\t} else if (titleBlock) {\n\t\t\tlayouts.removeBox(chart, titleBlock);\n\t\t\tdelete chart.titleBlock;\n\t\t}\n\t}\n};\n\n},{\"25\":25,\"26\":26,\"30\":30,\"45\":45}],53:[function(require,module,exports){\n'use strict';\n\nmodule.exports = function(Chart) {\n\n\t// Default config for a category scale\n\tvar defaultConfig = {\n\t\tposition: 'bottom'\n\t};\n\n\tvar DatasetScale = Chart.Scale.extend({\n\t\t/**\n\t\t* Internal function to get the correct labels. If data.xLabels or data.yLabels are defined, use those\n\t\t* else fall back to data.labels\n\t\t* @private\n\t\t*/\n\t\tgetLabels: function() {\n\t\t\tvar data = this.chart.data;\n\t\t\treturn this.options.labels || (this.isHorizontal() ? data.xLabels : data.yLabels) || data.labels;\n\t\t},\n\n\t\tdetermineDataLimits: function() {\n\t\t\tvar me = this;\n\t\t\tvar labels = me.getLabels();\n\t\t\tme.minIndex = 0;\n\t\t\tme.maxIndex = labels.length - 1;\n\t\t\tvar findIndex;\n\n\t\t\tif (me.options.ticks.min !== undefined) {\n\t\t\t\t// user specified min value\n\t\t\t\tfindIndex = labels.indexOf(me.options.ticks.min);\n\t\t\t\tme.minIndex = findIndex !== -1 ? 
findIndex : me.minIndex;\n\t\t\t}\n\n\t\t\tif (me.options.ticks.max !== undefined) {\n\t\t\t\t// user specified max value\n\t\t\t\tfindIndex = labels.indexOf(me.options.ticks.max);\n\t\t\t\tme.maxIndex = findIndex !== -1 ? findIndex : me.maxIndex;\n\t\t\t}\n\n\t\t\tme.min = labels[me.minIndex];\n\t\t\tme.max = labels[me.maxIndex];\n\t\t},\n\n\t\tbuildTicks: function() {\n\t\t\tvar me = this;\n\t\t\tvar labels = me.getLabels();\n\t\t\t// If we are viewing some subset of labels, slice the original array\n\t\t\tme.ticks = (me.minIndex === 0 && me.maxIndex === labels.length - 1) ? labels : labels.slice(me.minIndex, me.maxIndex + 1);\n\t\t},\n\n\t\tgetLabelForIndex: function(index, datasetIndex) {\n\t\t\tvar me = this;\n\t\t\tvar data = me.chart.data;\n\t\t\tvar isHorizontal = me.isHorizontal();\n\n\t\t\tif (data.yLabels && !isHorizontal) {\n\t\t\t\treturn me.getRightValue(data.datasets[datasetIndex].data[index]);\n\t\t\t}\n\t\t\treturn me.ticks[index - me.minIndex];\n\t\t},\n\n\t\t// Used to get data value locations.  Value can either be an index or a numerical value\n\t\tgetPixelForValue: function(value, index) {\n\t\t\tvar me = this;\n\t\t\tvar offset = me.options.offset;\n\t\t\t// 1 is added because we need the length but we have the indexes\n\t\t\tvar offsetAmt = Math.max((me.maxIndex + 1 - me.minIndex - (offset ? 0 : 1)), 1);\n\n\t\t\t// If value is a data object, then index is the index in the data array,\n\t\t\t// not the index of the scale. We need to change that.\n\t\t\tvar valueCategory;\n\t\t\tif (value !== undefined && value !== null) {\n\t\t\t\tvalueCategory = me.isHorizontal() ? value.x : value.y;\n\t\t\t}\n\t\t\tif (valueCategory !== undefined || (value !== undefined && isNaN(index))) {\n\t\t\t\tvar labels = me.getLabels();\n\t\t\t\tvalue = valueCategory || value;\n\t\t\t\tvar idx = labels.indexOf(value);\n\t\t\t\tindex = idx !== -1 ? 
idx : index;\n\t\t\t}\n\n\t\t\tif (me.isHorizontal()) {\n\t\t\t\tvar valueWidth = me.width / offsetAmt;\n\t\t\t\tvar widthOffset = (valueWidth * (index - me.minIndex));\n\n\t\t\t\tif (offset) {\n\t\t\t\t\twidthOffset += (valueWidth / 2);\n\t\t\t\t}\n\n\t\t\t\treturn me.left + Math.round(widthOffset);\n\t\t\t}\n\t\t\tvar valueHeight = me.height / offsetAmt;\n\t\t\tvar heightOffset = (valueHeight * (index - me.minIndex));\n\n\t\t\tif (offset) {\n\t\t\t\theightOffset += (valueHeight / 2);\n\t\t\t}\n\n\t\t\treturn me.top + Math.round(heightOffset);\n\t\t},\n\t\tgetPixelForTick: function(index) {\n\t\t\treturn this.getPixelForValue(this.ticks[index], index + this.minIndex, null);\n\t\t},\n\t\tgetValueForPixel: function(pixel) {\n\t\t\tvar me = this;\n\t\t\tvar offset = me.options.offset;\n\t\t\tvar value;\n\t\t\tvar offsetAmt = Math.max((me._ticks.length - (offset ? 0 : 1)), 1);\n\t\t\tvar horz = me.isHorizontal();\n\t\t\tvar valueDimension = (horz ? me.width : me.height) / offsetAmt;\n\n\t\t\tpixel -= horz ? 
me.left : me.top;\n\n\t\t\tif (offset) {\n\t\t\t\tpixel -= (valueDimension / 2);\n\t\t\t}\n\n\t\t\tif (pixel <= 0) {\n\t\t\t\tvalue = 0;\n\t\t\t} else {\n\t\t\t\tvalue = Math.round(pixel / valueDimension);\n\t\t\t}\n\n\t\t\treturn value + me.minIndex;\n\t\t},\n\t\tgetBasePixel: function() {\n\t\t\treturn this.bottom;\n\t\t}\n\t});\n\n\tChart.scaleService.registerScaleType('category', DatasetScale, defaultConfig);\n\n};\n\n},{}],54:[function(require,module,exports){\n'use strict';\n\nvar defaults = require(25);\nvar helpers = require(45);\nvar Ticks = require(34);\n\nmodule.exports = function(Chart) {\n\n\tvar defaultConfig = {\n\t\tposition: 'left',\n\t\tticks: {\n\t\t\tcallback: Ticks.formatters.linear\n\t\t}\n\t};\n\n\tvar LinearScale = Chart.LinearScaleBase.extend({\n\n\t\tdetermineDataLimits: function() {\n\t\t\tvar me = this;\n\t\t\tvar opts = me.options;\n\t\t\tvar chart = me.chart;\n\t\t\tvar data = chart.data;\n\t\t\tvar datasets = data.datasets;\n\t\t\tvar isHorizontal = me.isHorizontal();\n\t\t\tvar DEFAULT_MIN = 0;\n\t\t\tvar DEFAULT_MAX = 1;\n\n\t\t\tfunction IDMatches(meta) {\n\t\t\t\treturn isHorizontal ? 
meta.xAxisID === me.id : meta.yAxisID === me.id;\n\t\t\t}\n\n\t\t\t// First Calculate the range\n\t\t\tme.min = null;\n\t\t\tme.max = null;\n\n\t\t\tvar hasStacks = opts.stacked;\n\t\t\tif (hasStacks === undefined) {\n\t\t\t\thelpers.each(datasets, function(dataset, datasetIndex) {\n\t\t\t\t\tif (hasStacks) {\n\t\t\t\t\t\treturn;\n\t\t\t\t\t}\n\n\t\t\t\t\tvar meta = chart.getDatasetMeta(datasetIndex);\n\t\t\t\t\tif (chart.isDatasetVisible(datasetIndex) && IDMatches(meta) &&\n\t\t\t\t\t\tmeta.stack !== undefined) {\n\t\t\t\t\t\thasStacks = true;\n\t\t\t\t\t}\n\t\t\t\t});\n\t\t\t}\n\n\t\t\tif (opts.stacked || hasStacks) {\n\t\t\t\tvar valuesPerStack = {};\n\n\t\t\t\thelpers.each(datasets, function(dataset, datasetIndex) {\n\t\t\t\t\tvar meta = chart.getDatasetMeta(datasetIndex);\n\t\t\t\t\tvar key = [\n\t\t\t\t\t\tmeta.type,\n\t\t\t\t\t\t// we have a separate stack for stack=undefined datasets when the opts.stacked is undefined\n\t\t\t\t\t\t((opts.stacked === undefined && meta.stack === undefined) ? 
datasetIndex : ''),\n\t\t\t\t\t\tmeta.stack\n\t\t\t\t\t].join('.');\n\n\t\t\t\t\tif (valuesPerStack[key] === undefined) {\n\t\t\t\t\t\tvaluesPerStack[key] = {\n\t\t\t\t\t\t\tpositiveValues: [],\n\t\t\t\t\t\t\tnegativeValues: []\n\t\t\t\t\t\t};\n\t\t\t\t\t}\n\n\t\t\t\t\t// Store these per type\n\t\t\t\t\tvar positiveValues = valuesPerStack[key].positiveValues;\n\t\t\t\t\tvar negativeValues = valuesPerStack[key].negativeValues;\n\n\t\t\t\t\tif (chart.isDatasetVisible(datasetIndex) && IDMatches(meta)) {\n\t\t\t\t\t\thelpers.each(dataset.data, function(rawValue, index) {\n\t\t\t\t\t\t\tvar value = +me.getRightValue(rawValue);\n\t\t\t\t\t\t\tif (isNaN(value) || meta.data[index].hidden) {\n\t\t\t\t\t\t\t\treturn;\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tpositiveValues[index] = positiveValues[index] || 0;\n\t\t\t\t\t\t\tnegativeValues[index] = negativeValues[index] || 0;\n\n\t\t\t\t\t\t\tif (opts.relativePoints) {\n\t\t\t\t\t\t\t\tpositiveValues[index] = 100;\n\t\t\t\t\t\t\t} else if (value < 0) {\n\t\t\t\t\t\t\t\tnegativeValues[index] += value;\n\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\tpositiveValues[index] += value;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t});\n\t\t\t\t\t}\n\t\t\t\t});\n\n\t\t\t\thelpers.each(valuesPerStack, function(valuesForType) {\n\t\t\t\t\tvar values = valuesForType.positiveValues.concat(valuesForType.negativeValues);\n\t\t\t\t\tvar minVal = helpers.min(values);\n\t\t\t\t\tvar maxVal = helpers.max(values);\n\t\t\t\t\tme.min = me.min === null ? minVal : Math.min(me.min, minVal);\n\t\t\t\t\tme.max = me.max === null ? 
maxVal : Math.max(me.max, maxVal);\n\t\t\t\t});\n\n\t\t\t} else {\n\t\t\t\thelpers.each(datasets, function(dataset, datasetIndex) {\n\t\t\t\t\tvar meta = chart.getDatasetMeta(datasetIndex);\n\t\t\t\t\tif (chart.isDatasetVisible(datasetIndex) && IDMatches(meta)) {\n\t\t\t\t\t\thelpers.each(dataset.data, function(rawValue, index) {\n\t\t\t\t\t\t\tvar value = +me.getRightValue(rawValue);\n\t\t\t\t\t\t\tif (isNaN(value) || meta.data[index].hidden) {\n\t\t\t\t\t\t\t\treturn;\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tif (me.min === null) {\n\t\t\t\t\t\t\t\tme.min = value;\n\t\t\t\t\t\t\t} else if (value < me.min) {\n\t\t\t\t\t\t\t\tme.min = value;\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tif (me.max === null) {\n\t\t\t\t\t\t\t\tme.max = value;\n\t\t\t\t\t\t\t} else if (value > me.max) {\n\t\t\t\t\t\t\t\tme.max = value;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t});\n\t\t\t\t\t}\n\t\t\t\t});\n\t\t\t}\n\n\t\t\tme.min = isFinite(me.min) && !isNaN(me.min) ? me.min : DEFAULT_MIN;\n\t\t\tme.max = isFinite(me.max) && !isNaN(me.max) ? me.max : DEFAULT_MAX;\n\n\t\t\t// Common base implementation to handle ticks.min, ticks.max, ticks.beginAtZero\n\t\t\tthis.handleTickRangeOptions();\n\t\t},\n\t\tgetTickLimit: function() {\n\t\t\tvar maxTicks;\n\t\t\tvar me = this;\n\t\t\tvar tickOpts = me.options.ticks;\n\n\t\t\tif (me.isHorizontal()) {\n\t\t\t\tmaxTicks = Math.min(tickOpts.maxTicksLimit ? tickOpts.maxTicksLimit : 11, Math.ceil(me.width / 50));\n\t\t\t} else {\n\t\t\t\t// The factor of 2 used to scale the font size has been experimentally determined.\n\t\t\t\tvar tickFontSize = helpers.valueOrDefault(tickOpts.fontSize, defaults.global.defaultFontSize);\n\t\t\t\tmaxTicks = Math.min(tickOpts.maxTicksLimit ? tickOpts.maxTicksLimit : 11, Math.ceil(me.height / (2 * tickFontSize)));\n\t\t\t}\n\n\t\t\treturn maxTicks;\n\t\t},\n\t\t// Called after the ticks are built. We need\n\t\thandleDirectionalChanges: function() {\n\t\t\tif (!this.isHorizontal()) {\n\t\t\t\t// We are in a vertical orientation. 
The top value is the highest. So reverse the array\n\t\t\t\tthis.ticks.reverse();\n\t\t\t}\n\t\t},\n\t\tgetLabelForIndex: function(index, datasetIndex) {\n\t\t\treturn +this.getRightValue(this.chart.data.datasets[datasetIndex].data[index]);\n\t\t},\n\t\t// Utils\n\t\tgetPixelForValue: function(value) {\n\t\t\t// This must be called after fit has been run so that\n\t\t\t// this.left, this.top, this.right, and this.bottom have been defined\n\t\t\tvar me = this;\n\t\t\tvar start = me.start;\n\n\t\t\tvar rightValue = +me.getRightValue(value);\n\t\t\tvar pixel;\n\t\t\tvar range = me.end - start;\n\n\t\t\tif (me.isHorizontal()) {\n\t\t\t\tpixel = me.left + (me.width / range * (rightValue - start));\n\t\t\t} else {\n\t\t\t\tpixel = me.bottom - (me.height / range * (rightValue - start));\n\t\t\t}\n\t\t\treturn pixel;\n\t\t},\n\t\tgetValueForPixel: function(pixel) {\n\t\t\tvar me = this;\n\t\t\tvar isHorizontal = me.isHorizontal();\n\t\t\tvar innerDimension = isHorizontal ? me.width : me.height;\n\t\t\tvar offset = (isHorizontal ? pixel - me.left : me.bottom - pixel) / innerDimension;\n\t\t\treturn me.start + ((me.end - me.start) * offset);\n\t\t},\n\t\tgetPixelForTick: function(index) {\n\t\t\treturn this.getPixelForValue(this.ticksAsNumbers[index]);\n\t\t}\n\t});\n\tChart.scaleService.registerScaleType('linear', LinearScale, defaultConfig);\n\n};\n\n},{\"25\":25,\"34\":34,\"45\":45}],55:[function(require,module,exports){\n'use strict';\n\nvar helpers = require(45);\n\n/**\n * Generate a set of linear ticks\n * @param generationOptions the options used to generate the ticks\n * @param dataRange the range of the data\n * @returns {Array<Number>} array of tick values\n */\nfunction generateTicks(generationOptions, dataRange) {\n\tvar ticks = [];\n\t// To get a \"nice\" value for the tick spacing, we will use the appropriately named\n\t// \"nice number\" algorithm. 
See http://stackoverflow.com/questions/8506881/nice-label-algorithm-for-charts-with-minimum-ticks\n\t// for details.\n\n\tvar spacing;\n\tif (generationOptions.stepSize && generationOptions.stepSize > 0) {\n\t\tspacing = generationOptions.stepSize;\n\t} else {\n\t\tvar niceRange = helpers.niceNum(dataRange.max - dataRange.min, false);\n\t\tspacing = helpers.niceNum(niceRange / (generationOptions.maxTicks - 1), true);\n\t}\n\tvar niceMin = Math.floor(dataRange.min / spacing) * spacing;\n\tvar niceMax = Math.ceil(dataRange.max / spacing) * spacing;\n\n\t// If min, max and stepSize is set and they make an evenly spaced scale use it.\n\tif (generationOptions.min && generationOptions.max && generationOptions.stepSize) {\n\t\t// If very close to our whole number, use it.\n\t\tif (helpers.almostWhole((generationOptions.max - generationOptions.min) / generationOptions.stepSize, spacing / 1000)) {\n\t\t\tniceMin = generationOptions.min;\n\t\t\tniceMax = generationOptions.max;\n\t\t}\n\t}\n\n\tvar numSpaces = (niceMax - niceMin) / spacing;\n\t// If very close to our rounded value, use it.\n\tif (helpers.almostEquals(numSpaces, Math.round(numSpaces), spacing / 1000)) {\n\t\tnumSpaces = Math.round(numSpaces);\n\t} else {\n\t\tnumSpaces = Math.ceil(numSpaces);\n\t}\n\n\tvar precision = 1;\n\tif (spacing < 1) {\n\t\tprecision = Math.pow(10, spacing.toString().length - 2);\n\t\tniceMin = Math.round(niceMin * precision) / precision;\n\t\tniceMax = Math.round(niceMax * precision) / precision;\n\t}\n\tticks.push(generationOptions.min !== undefined ? generationOptions.min : niceMin);\n\tfor (var j = 1; j < numSpaces; ++j) {\n\t\tticks.push(Math.round((niceMin + j * spacing) * precision) / precision);\n\t}\n\tticks.push(generationOptions.max !== undefined ? 
generationOptions.max : niceMax);\n\n\treturn ticks;\n}\n\n\nmodule.exports = function(Chart) {\n\n\tvar noop = helpers.noop;\n\n\tChart.LinearScaleBase = Chart.Scale.extend({\n\t\tgetRightValue: function(value) {\n\t\t\tif (typeof value === 'string') {\n\t\t\t\treturn +value;\n\t\t\t}\n\t\t\treturn Chart.Scale.prototype.getRightValue.call(this, value);\n\t\t},\n\n\t\thandleTickRangeOptions: function() {\n\t\t\tvar me = this;\n\t\t\tvar opts = me.options;\n\t\t\tvar tickOpts = opts.ticks;\n\n\t\t\t// If we are forcing it to begin at 0, but 0 will already be rendered on the chart,\n\t\t\t// do nothing since that would make the chart weird. If the user really wants a weird chart\n\t\t\t// axis, they can manually override it\n\t\t\tif (tickOpts.beginAtZero) {\n\t\t\t\tvar minSign = helpers.sign(me.min);\n\t\t\t\tvar maxSign = helpers.sign(me.max);\n\n\t\t\t\tif (minSign < 0 && maxSign < 0) {\n\t\t\t\t\t// move the top up to 0\n\t\t\t\t\tme.max = 0;\n\t\t\t\t} else if (minSign > 0 && maxSign > 0) {\n\t\t\t\t\t// move the bottom down to 0\n\t\t\t\t\tme.min = 0;\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tvar setMin = tickOpts.min !== undefined || tickOpts.suggestedMin !== undefined;\n\t\t\tvar setMax = tickOpts.max !== undefined || tickOpts.suggestedMax !== undefined;\n\n\t\t\tif (tickOpts.min !== undefined) {\n\t\t\t\tme.min = tickOpts.min;\n\t\t\t} else if (tickOpts.suggestedMin !== undefined) {\n\t\t\t\tif (me.min === null) {\n\t\t\t\t\tme.min = tickOpts.suggestedMin;\n\t\t\t\t} else {\n\t\t\t\t\tme.min = Math.min(me.min, tickOpts.suggestedMin);\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif (tickOpts.max !== undefined) {\n\t\t\t\tme.max = tickOpts.max;\n\t\t\t} else if (tickOpts.suggestedMax !== undefined) {\n\t\t\t\tif (me.max === null) {\n\t\t\t\t\tme.max = tickOpts.suggestedMax;\n\t\t\t\t} else {\n\t\t\t\t\tme.max = Math.max(me.max, tickOpts.suggestedMax);\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif (setMin !== setMax) {\n\t\t\t\t// We set the min or the max but not both.\n\t\t\t\t// So ensure that our 
range is good\n\t\t\t\t// Inverted or 0 length range can happen when\n\t\t\t\t// ticks.min is set, and no datasets are visible\n\t\t\t\tif (me.min >= me.max) {\n\t\t\t\t\tif (setMin) {\n\t\t\t\t\t\tme.max = me.min + 1;\n\t\t\t\t\t} else {\n\t\t\t\t\t\tme.min = me.max - 1;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif (me.min === me.max) {\n\t\t\t\tme.max++;\n\n\t\t\t\tif (!tickOpts.beginAtZero) {\n\t\t\t\t\tme.min--;\n\t\t\t\t}\n\t\t\t}\n\t\t},\n\t\tgetTickLimit: noop,\n\t\thandleDirectionalChanges: noop,\n\n\t\tbuildTicks: function() {\n\t\t\tvar me = this;\n\t\t\tvar opts = me.options;\n\t\t\tvar tickOpts = opts.ticks;\n\n\t\t\t// Figure out what the max number of ticks we can support it is based on the size of\n\t\t\t// the axis area. For now, we say that the minimum tick spacing in pixels must be 50\n\t\t\t// We also limit the maximum number of ticks to 11 which gives a nice 10 squares on\n\t\t\t// the graph. Make sure we always have at least 2 ticks\n\t\t\tvar maxTicks = me.getTickLimit();\n\t\t\tmaxTicks = Math.max(2, maxTicks);\n\n\t\t\tvar numericGeneratorOptions = {\n\t\t\t\tmaxTicks: maxTicks,\n\t\t\t\tmin: tickOpts.min,\n\t\t\t\tmax: tickOpts.max,\n\t\t\t\tstepSize: helpers.valueOrDefault(tickOpts.fixedStepSize, tickOpts.stepSize)\n\t\t\t};\n\t\t\tvar ticks = me.ticks = generateTicks(numericGeneratorOptions, me);\n\n\t\t\tme.handleDirectionalChanges();\n\n\t\t\t// At this point, we need to update our max and min given the tick values since we have expanded the\n\t\t\t// range of the scale\n\t\t\tme.max = helpers.max(ticks);\n\t\t\tme.min = helpers.min(ticks);\n\n\t\t\tif (tickOpts.reverse) {\n\t\t\t\tticks.reverse();\n\n\t\t\t\tme.start = me.max;\n\t\t\t\tme.end = me.min;\n\t\t\t} else {\n\t\t\t\tme.start = me.min;\n\t\t\t\tme.end = me.max;\n\t\t\t}\n\t\t},\n\t\tconvertTicksToLabels: function() {\n\t\t\tvar me = this;\n\t\t\tme.ticksAsNumbers = me.ticks.slice();\n\t\t\tme.zeroLineIndex = 
me.ticks.indexOf(0);\n\n\t\t\tChart.Scale.prototype.convertTicksToLabels.call(me);\n\t\t}\n\t});\n};\n\n},{\"45\":45}],56:[function(require,module,exports){\n'use strict';\n\nvar helpers = require(45);\nvar Ticks = require(34);\n\n/**\n * Generate a set of logarithmic ticks\n * @param generationOptions the options used to generate the ticks\n * @param dataRange the range of the data\n * @returns {Array<Number>} array of tick values\n */\nfunction generateTicks(generationOptions, dataRange) {\n\tvar ticks = [];\n\tvar valueOrDefault = helpers.valueOrDefault;\n\n\t// Figure out what the max number of ticks we can support it is based on the size of\n\t// the axis area. For now, we say that the minimum tick spacing in pixels must be 50\n\t// We also limit the maximum number of ticks to 11 which gives a nice 10 squares on\n\t// the graph\n\tvar tickVal = valueOrDefault(generationOptions.min, Math.pow(10, Math.floor(helpers.log10(dataRange.min))));\n\n\tvar endExp = Math.floor(helpers.log10(dataRange.max));\n\tvar endSignificand = Math.ceil(dataRange.max / Math.pow(10, endExp));\n\tvar exp, significand;\n\n\tif (tickVal === 0) {\n\t\texp = Math.floor(helpers.log10(dataRange.minNotZero));\n\t\tsignificand = Math.floor(dataRange.minNotZero / Math.pow(10, exp));\n\n\t\tticks.push(tickVal);\n\t\ttickVal = significand * Math.pow(10, exp);\n\t} else {\n\t\texp = Math.floor(helpers.log10(tickVal));\n\t\tsignificand = Math.floor(tickVal / Math.pow(10, exp));\n\t}\n\tvar precision = exp < 0 ? Math.pow(10, Math.abs(exp)) : 1;\n\n\tdo {\n\t\tticks.push(tickVal);\n\n\t\t++significand;\n\t\tif (significand === 10) {\n\t\t\tsignificand = 1;\n\t\t\t++exp;\n\t\t\tprecision = exp >= 0 ? 
1 : precision;\n\t\t}\n\n\t\ttickVal = Math.round(significand * Math.pow(10, exp) * precision) / precision;\n\t} while (exp < endExp || (exp === endExp && significand < endSignificand));\n\n\tvar lastTick = valueOrDefault(generationOptions.max, tickVal);\n\tticks.push(lastTick);\n\n\treturn ticks;\n}\n\n\nmodule.exports = function(Chart) {\n\n\tvar defaultConfig = {\n\t\tposition: 'left',\n\n\t\t// label settings\n\t\tticks: {\n\t\t\tcallback: Ticks.formatters.logarithmic\n\t\t}\n\t};\n\n\tvar LogarithmicScale = Chart.Scale.extend({\n\t\tdetermineDataLimits: function() {\n\t\t\tvar me = this;\n\t\t\tvar opts = me.options;\n\t\t\tvar chart = me.chart;\n\t\t\tvar data = chart.data;\n\t\t\tvar datasets = data.datasets;\n\t\t\tvar isHorizontal = me.isHorizontal();\n\t\t\tfunction IDMatches(meta) {\n\t\t\t\treturn isHorizontal ? meta.xAxisID === me.id : meta.yAxisID === me.id;\n\t\t\t}\n\n\t\t\t// Calculate Range\n\t\t\tme.min = null;\n\t\t\tme.max = null;\n\t\t\tme.minNotZero = null;\n\n\t\t\tvar hasStacks = opts.stacked;\n\t\t\tif (hasStacks === undefined) {\n\t\t\t\thelpers.each(datasets, function(dataset, datasetIndex) {\n\t\t\t\t\tif (hasStacks) {\n\t\t\t\t\t\treturn;\n\t\t\t\t\t}\n\n\t\t\t\t\tvar meta = chart.getDatasetMeta(datasetIndex);\n\t\t\t\t\tif (chart.isDatasetVisible(datasetIndex) && IDMatches(meta) &&\n\t\t\t\t\t\tmeta.stack !== undefined) {\n\t\t\t\t\t\thasStacks = true;\n\t\t\t\t\t}\n\t\t\t\t});\n\t\t\t}\n\n\t\t\tif (opts.stacked || hasStacks) {\n\t\t\t\tvar valuesPerStack = {};\n\n\t\t\t\thelpers.each(datasets, function(dataset, datasetIndex) {\n\t\t\t\t\tvar meta = chart.getDatasetMeta(datasetIndex);\n\t\t\t\t\tvar key = [\n\t\t\t\t\t\tmeta.type,\n\t\t\t\t\t\t// we have a separate stack for stack=undefined datasets when the opts.stacked is undefined\n\t\t\t\t\t\t((opts.stacked === undefined && meta.stack === undefined) ? 
datasetIndex : ''),\n\t\t\t\t\t\tmeta.stack\n\t\t\t\t\t].join('.');\n\n\t\t\t\t\tif (chart.isDatasetVisible(datasetIndex) && IDMatches(meta)) {\n\t\t\t\t\t\tif (valuesPerStack[key] === undefined) {\n\t\t\t\t\t\t\tvaluesPerStack[key] = [];\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\thelpers.each(dataset.data, function(rawValue, index) {\n\t\t\t\t\t\t\tvar values = valuesPerStack[key];\n\t\t\t\t\t\t\tvar value = +me.getRightValue(rawValue);\n\t\t\t\t\t\t\t// invalid, hidden and negative values are ignored\n\t\t\t\t\t\t\tif (isNaN(value) || meta.data[index].hidden || value < 0) {\n\t\t\t\t\t\t\t\treturn;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tvalues[index] = values[index] || 0;\n\t\t\t\t\t\t\tvalues[index] += value;\n\t\t\t\t\t\t});\n\t\t\t\t\t}\n\t\t\t\t});\n\n\t\t\t\thelpers.each(valuesPerStack, function(valuesForType) {\n\t\t\t\t\tif (valuesForType.length > 0) {\n\t\t\t\t\t\tvar minVal = helpers.min(valuesForType);\n\t\t\t\t\t\tvar maxVal = helpers.max(valuesForType);\n\t\t\t\t\t\tme.min = me.min === null ? minVal : Math.min(me.min, minVal);\n\t\t\t\t\t\tme.max = me.max === null ? 
maxVal : Math.max(me.max, maxVal);\n\t\t\t\t\t}\n\t\t\t\t});\n\n\t\t\t} else {\n\t\t\t\thelpers.each(datasets, function(dataset, datasetIndex) {\n\t\t\t\t\tvar meta = chart.getDatasetMeta(datasetIndex);\n\t\t\t\t\tif (chart.isDatasetVisible(datasetIndex) && IDMatches(meta)) {\n\t\t\t\t\t\thelpers.each(dataset.data, function(rawValue, index) {\n\t\t\t\t\t\t\tvar value = +me.getRightValue(rawValue);\n\t\t\t\t\t\t\t// invalid, hidden and negative values are ignored\n\t\t\t\t\t\t\tif (isNaN(value) || meta.data[index].hidden || value < 0) {\n\t\t\t\t\t\t\t\treturn;\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tif (me.min === null) {\n\t\t\t\t\t\t\t\tme.min = value;\n\t\t\t\t\t\t\t} else if (value < me.min) {\n\t\t\t\t\t\t\t\tme.min = value;\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tif (me.max === null) {\n\t\t\t\t\t\t\t\tme.max = value;\n\t\t\t\t\t\t\t} else if (value > me.max) {\n\t\t\t\t\t\t\t\tme.max = value;\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tif (value !== 0 && (me.minNotZero === null || value < me.minNotZero)) {\n\t\t\t\t\t\t\t\tme.minNotZero = value;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t});\n\t\t\t\t\t}\n\t\t\t\t});\n\t\t\t}\n\n\t\t\t// Common base implementation to handle ticks.min, ticks.max\n\t\t\tthis.handleTickRangeOptions();\n\t\t},\n\t\thandleTickRangeOptions: function() {\n\t\t\tvar me = this;\n\t\t\tvar opts = me.options;\n\t\t\tvar tickOpts = opts.ticks;\n\t\t\tvar valueOrDefault = helpers.valueOrDefault;\n\t\t\tvar DEFAULT_MIN = 1;\n\t\t\tvar DEFAULT_MAX = 10;\n\n\t\t\tme.min = valueOrDefault(tickOpts.min, me.min);\n\t\t\tme.max = valueOrDefault(tickOpts.max, me.max);\n\n\t\t\tif (me.min === me.max) {\n\t\t\t\tif (me.min !== 0 && me.min !== null) {\n\t\t\t\t\tme.min = Math.pow(10, Math.floor(helpers.log10(me.min)) - 1);\n\t\t\t\t\tme.max = Math.pow(10, Math.floor(helpers.log10(me.max)) + 1);\n\t\t\t\t} else {\n\t\t\t\t\tme.min = DEFAULT_MIN;\n\t\t\t\t\tme.max = DEFAULT_MAX;\n\t\t\t\t}\n\t\t\t}\n\t\t\tif (me.min === null) {\n\t\t\t\tme.min = Math.pow(10, 
Math.floor(helpers.log10(me.max)) - 1);\n\t\t\t}\n\t\t\tif (me.max === null) {\n\t\t\t\tme.max = me.min !== 0\n\t\t\t\t\t? Math.pow(10, Math.floor(helpers.log10(me.min)) + 1)\n\t\t\t\t\t: DEFAULT_MAX;\n\t\t\t}\n\t\t\tif (me.minNotZero === null) {\n\t\t\t\tif (me.min > 0) {\n\t\t\t\t\tme.minNotZero = me.min;\n\t\t\t\t} else if (me.max < 1) {\n\t\t\t\t\tme.minNotZero = Math.pow(10, Math.floor(helpers.log10(me.max)));\n\t\t\t\t} else {\n\t\t\t\t\tme.minNotZero = DEFAULT_MIN;\n\t\t\t\t}\n\t\t\t}\n\t\t},\n\t\tbuildTicks: function() {\n\t\t\tvar me = this;\n\t\t\tvar opts = me.options;\n\t\t\tvar tickOpts = opts.ticks;\n\t\t\tvar reverse = !me.isHorizontal();\n\n\t\t\tvar generationOptions = {\n\t\t\t\tmin: tickOpts.min,\n\t\t\t\tmax: tickOpts.max\n\t\t\t};\n\t\t\tvar ticks = me.ticks = generateTicks(generationOptions, me);\n\n\t\t\t// At this point, we need to update our max and min given the tick values since we have expanded the\n\t\t\t// range of the scale\n\t\t\tme.max = helpers.max(ticks);\n\t\t\tme.min = helpers.min(ticks);\n\n\t\t\tif (tickOpts.reverse) {\n\t\t\t\treverse = !reverse;\n\t\t\t\tme.start = me.max;\n\t\t\t\tme.end = me.min;\n\t\t\t} else {\n\t\t\t\tme.start = me.min;\n\t\t\t\tme.end = me.max;\n\t\t\t}\n\t\t\tif (reverse) {\n\t\t\t\tticks.reverse();\n\t\t\t}\n\t\t},\n\t\tconvertTicksToLabels: function() {\n\t\t\tthis.tickValues = this.ticks.slice();\n\n\t\t\tChart.Scale.prototype.convertTicksToLabels.call(this);\n\t\t},\n\t\t// Get the correct tooltip label\n\t\tgetLabelForIndex: function(index, datasetIndex) {\n\t\t\treturn +this.getRightValue(this.chart.data.datasets[datasetIndex].data[index]);\n\t\t},\n\t\tgetPixelForTick: function(index) {\n\t\t\treturn this.getPixelForValue(this.tickValues[index]);\n\t\t},\n\t\t/**\n\t\t * Returns the value of the first tick.\n\t\t * @param {Number} value - The minimum not zero value.\n\t\t * @return {Number} The first tick value.\n\t\t * @private\n\t\t */\n\t\t_getFirstTickValue: function(value) {\n\t\t\tvar exp 
= Math.floor(helpers.log10(value));\n\t\t\tvar significand = Math.floor(value / Math.pow(10, exp));\n\n\t\t\treturn significand * Math.pow(10, exp);\n\t\t},\n\t\tgetPixelForValue: function(value) {\n\t\t\tvar me = this;\n\t\t\tvar reverse = me.options.ticks.reverse;\n\t\t\tvar log10 = helpers.log10;\n\t\t\tvar firstTickValue = me._getFirstTickValue(me.minNotZero);\n\t\t\tvar offset = 0;\n\t\t\tvar innerDimension, pixel, start, end, sign;\n\n\t\t\tvalue = +me.getRightValue(value);\n\t\t\tif (reverse) {\n\t\t\t\tstart = me.end;\n\t\t\t\tend = me.start;\n\t\t\t\tsign = -1;\n\t\t\t} else {\n\t\t\t\tstart = me.start;\n\t\t\t\tend = me.end;\n\t\t\t\tsign = 1;\n\t\t\t}\n\t\t\tif (me.isHorizontal()) {\n\t\t\t\tinnerDimension = me.width;\n\t\t\t\tpixel = reverse ? me.right : me.left;\n\t\t\t} else {\n\t\t\t\tinnerDimension = me.height;\n\t\t\t\tsign *= -1; // invert, since the upper-left corner of the canvas is at pixel (0, 0)\n\t\t\t\tpixel = reverse ? me.top : me.bottom;\n\t\t\t}\n\t\t\tif (value !== start) {\n\t\t\t\tif (start === 0) { // include zero tick\n\t\t\t\t\toffset = helpers.getValueOrDefault(\n\t\t\t\t\t\tme.options.ticks.fontSize,\n\t\t\t\t\t\tChart.defaults.global.defaultFontSize\n\t\t\t\t\t);\n\t\t\t\t\tinnerDimension -= offset;\n\t\t\t\t\tstart = firstTickValue;\n\t\t\t\t}\n\t\t\t\tif (value !== 0) {\n\t\t\t\t\toffset += innerDimension / (log10(end) - log10(start)) * (log10(value) - log10(start));\n\t\t\t\t}\n\t\t\t\tpixel += sign * offset;\n\t\t\t}\n\t\t\treturn pixel;\n\t\t},\n\t\tgetValueForPixel: function(pixel) {\n\t\t\tvar me = this;\n\t\t\tvar reverse = me.options.ticks.reverse;\n\t\t\tvar log10 = helpers.log10;\n\t\t\tvar firstTickValue = me._getFirstTickValue(me.minNotZero);\n\t\t\tvar innerDimension, start, end, value;\n\n\t\t\tif (reverse) {\n\t\t\t\tstart = me.end;\n\t\t\t\tend = me.start;\n\t\t\t} else {\n\t\t\t\tstart = me.start;\n\t\t\t\tend = me.end;\n\t\t\t}\n\t\t\tif (me.isHorizontal()) {\n\t\t\t\tinnerDimension = me.width;\n\t\t\t\tvalue 
= reverse ? me.right - pixel : pixel - me.left;\n\t\t\t} else {\n\t\t\t\tinnerDimension = me.height;\n\t\t\t\tvalue = reverse ? pixel - me.top : me.bottom - pixel;\n\t\t\t}\n\t\t\tif (value !== start) {\n\t\t\t\tif (start === 0) { // include zero tick\n\t\t\t\t\tvar offset = helpers.getValueOrDefault(\n\t\t\t\t\t\tme.options.ticks.fontSize,\n\t\t\t\t\t\tChart.defaults.global.defaultFontSize\n\t\t\t\t\t);\n\t\t\t\t\tvalue -= offset;\n\t\t\t\t\tinnerDimension -= offset;\n\t\t\t\t\tstart = firstTickValue;\n\t\t\t\t}\n\t\t\t\tvalue *= log10(end) - log10(start);\n\t\t\t\tvalue /= innerDimension;\n\t\t\t\tvalue = Math.pow(10, log10(start) + value);\n\t\t\t}\n\t\t\treturn value;\n\t\t}\n\t});\n\tChart.scaleService.registerScaleType('logarithmic', LogarithmicScale, defaultConfig);\n\n};\n\n},{\"34\":34,\"45\":45}],57:[function(require,module,exports){\n'use strict';\n\nvar defaults = require(25);\nvar helpers = require(45);\nvar Ticks = require(34);\n\nmodule.exports = function(Chart) {\n\n\tvar globalDefaults = defaults.global;\n\n\tvar defaultConfig = {\n\t\tdisplay: true,\n\n\t\t// Boolean - Whether to animate scaling the chart from the centre\n\t\tanimate: true,\n\t\tposition: 'chartArea',\n\n\t\tangleLines: {\n\t\t\tdisplay: true,\n\t\t\tcolor: 'rgba(0, 0, 0, 0.1)',\n\t\t\tlineWidth: 1\n\t\t},\n\n\t\tgridLines: {\n\t\t\tcircular: false\n\t\t},\n\n\t\t// label settings\n\t\tticks: {\n\t\t\t// Boolean - Show a backdrop to the scale label\n\t\t\tshowLabelBackdrop: true,\n\n\t\t\t// String - The colour of the label backdrop\n\t\t\tbackdropColor: 'rgba(255,255,255,0.75)',\n\n\t\t\t// Number - The backdrop padding above & below the label in pixels\n\t\t\tbackdropPaddingY: 2,\n\n\t\t\t// Number - The backdrop padding to the side of the label in pixels\n\t\t\tbackdropPaddingX: 2,\n\n\t\t\tcallback: Ticks.formatters.linear\n\t\t},\n\n\t\tpointLabels: {\n\t\t\t// Boolean - if true, show point labels\n\t\t\tdisplay: true,\n\n\t\t\t// Number - Point label font size in 
pixels\n\t\t\tfontSize: 10,\n\n\t\t\t// Function - Used to convert point labels\n\t\t\tcallback: function(label) {\n\t\t\t\treturn label;\n\t\t\t}\n\t\t}\n\t};\n\n\tfunction getValueCount(scale) {\n\t\tvar opts = scale.options;\n\t\treturn opts.angleLines.display || opts.pointLabels.display ? scale.chart.data.labels.length : 0;\n\t}\n\n\tfunction getPointLabelFontOptions(scale) {\n\t\tvar pointLabelOptions = scale.options.pointLabels;\n\t\tvar fontSize = helpers.valueOrDefault(pointLabelOptions.fontSize, globalDefaults.defaultFontSize);\n\t\tvar fontStyle = helpers.valueOrDefault(pointLabelOptions.fontStyle, globalDefaults.defaultFontStyle);\n\t\tvar fontFamily = helpers.valueOrDefault(pointLabelOptions.fontFamily, globalDefaults.defaultFontFamily);\n\t\tvar font = helpers.fontString(fontSize, fontStyle, fontFamily);\n\n\t\treturn {\n\t\t\tsize: fontSize,\n\t\t\tstyle: fontStyle,\n\t\t\tfamily: fontFamily,\n\t\t\tfont: font\n\t\t};\n\t}\n\n\tfunction measureLabelSize(ctx, fontSize, label) {\n\t\tif (helpers.isArray(label)) {\n\t\t\treturn {\n\t\t\t\tw: helpers.longestText(ctx, ctx.font, label),\n\t\t\t\th: (label.length * fontSize) + ((label.length - 1) * 1.5 * fontSize)\n\t\t\t};\n\t\t}\n\n\t\treturn {\n\t\t\tw: ctx.measureText(label).width,\n\t\t\th: fontSize\n\t\t};\n\t}\n\n\tfunction determineLimits(angle, pos, size, min, max) {\n\t\tif (angle === min || angle === max) {\n\t\t\treturn {\n\t\t\t\tstart: pos - (size / 2),\n\t\t\t\tend: pos + (size / 2)\n\t\t\t};\n\t\t} else if (angle < min || angle > max) {\n\t\t\treturn {\n\t\t\t\tstart: pos - size - 5,\n\t\t\t\tend: pos\n\t\t\t};\n\t\t}\n\n\t\treturn {\n\t\t\tstart: pos,\n\t\t\tend: pos + size + 5\n\t\t};\n\t}\n\n\t/**\n\t * Helper function to fit a radial linear scale with point labels\n\t */\n\tfunction fitWithPointLabels(scale) {\n\t\t/*\n\t\t * Right, this is really confusing and there is a lot of maths going on here\n\t\t * The gist of the problem is here: 
https://gist.github.com/nnnick/696cc9c55f4b0beb8fe9\n\t\t *\n\t\t * Reaction: https://dl.dropboxusercontent.com/u/34601363/toomuchscience.gif\n\t\t *\n\t\t * Solution:\n\t\t *\n\t\t * We assume the radius of the polygon is half the size of the canvas at first\n\t\t * at each index we check if the text overlaps.\n\t\t *\n\t\t * Where it does, we store that angle and that index.\n\t\t *\n\t\t * After finding the largest index and angle we calculate how much we need to remove\n\t\t * from the shape radius to move the point inwards by that x.\n\t\t *\n\t\t * We average the left and right distances to get the maximum shape radius that can fit in the box\n\t\t * along with labels.\n\t\t *\n\t\t * Once we have that, we can find the centre point for the chart, by taking the x text protrusion\n\t\t * on each side, removing that from the size, halving it and adding the left x protrusion width.\n\t\t *\n\t\t * This will mean we have a shape fitted to the canvas, as large as it can be with the labels\n\t\t * and position it in the most space efficient manner\n\t\t *\n\t\t * https://dl.dropboxusercontent.com/u/34601363/yeahscience.gif\n\t\t */\n\n\t\tvar plFont = getPointLabelFontOptions(scale);\n\n\t\t// Get maximum radius of the polygon. Either half the height (minus the text width) or half the width.\n\t\t// Use this to calculate the offset + change. 
- Make sure L/R protrusion is at least 0 to stop issues with centre points\n\t\tvar largestPossibleRadius = Math.min(scale.height / 2, scale.width / 2);\n\t\tvar furthestLimits = {\n\t\t\tr: scale.width,\n\t\t\tl: 0,\n\t\t\tt: scale.height,\n\t\t\tb: 0\n\t\t};\n\t\tvar furthestAngles = {};\n\t\tvar i, textSize, pointPosition;\n\n\t\tscale.ctx.font = plFont.font;\n\t\tscale._pointLabelSizes = [];\n\n\t\tvar valueCount = getValueCount(scale);\n\t\tfor (i = 0; i < valueCount; i++) {\n\t\t\tpointPosition = scale.getPointPosition(i, largestPossibleRadius);\n\t\t\ttextSize = measureLabelSize(scale.ctx, plFont.size, scale.pointLabels[i] || '');\n\t\t\tscale._pointLabelSizes[i] = textSize;\n\n\t\t\t// Add quarter circle to make degree 0 mean top of circle\n\t\t\tvar angleRadians = scale.getIndexAngle(i);\n\t\t\tvar angle = helpers.toDegrees(angleRadians) % 360;\n\t\t\tvar hLimits = determineLimits(angle, pointPosition.x, textSize.w, 0, 180);\n\t\t\tvar vLimits = determineLimits(angle, pointPosition.y, textSize.h, 90, 270);\n\n\t\t\tif (hLimits.start < furthestLimits.l) {\n\t\t\t\tfurthestLimits.l = hLimits.start;\n\t\t\t\tfurthestAngles.l = angleRadians;\n\t\t\t}\n\n\t\t\tif (hLimits.end > furthestLimits.r) {\n\t\t\t\tfurthestLimits.r = hLimits.end;\n\t\t\t\tfurthestAngles.r = angleRadians;\n\t\t\t}\n\n\t\t\tif (vLimits.start < furthestLimits.t) {\n\t\t\t\tfurthestLimits.t = vLimits.start;\n\t\t\t\tfurthestAngles.t = angleRadians;\n\t\t\t}\n\n\t\t\tif (vLimits.end > furthestLimits.b) {\n\t\t\t\tfurthestLimits.b = vLimits.end;\n\t\t\t\tfurthestAngles.b = angleRadians;\n\t\t\t}\n\t\t}\n\n\t\tscale.setReductions(largestPossibleRadius, furthestLimits, furthestAngles);\n\t}\n\n\t/**\n\t * Helper function to fit a radial linear scale with no point labels\n\t */\n\tfunction fit(scale) {\n\t\tvar largestPossibleRadius = Math.min(scale.height / 2, scale.width / 2);\n\t\tscale.drawingArea = Math.round(largestPossibleRadius);\n\t\tscale.setCenterPoint(0, 0, 0, 0);\n\t}\n\n\tfunction 
getTextAlignForAngle(angle) {\n\t\tif (angle === 0 || angle === 180) {\n\t\t\treturn 'center';\n\t\t} else if (angle < 180) {\n\t\t\treturn 'left';\n\t\t}\n\n\t\treturn 'right';\n\t}\n\n\tfunction fillText(ctx, text, position, fontSize) {\n\t\tif (helpers.isArray(text)) {\n\t\t\tvar y = position.y;\n\t\t\tvar spacing = 1.5 * fontSize;\n\n\t\t\tfor (var i = 0; i < text.length; ++i) {\n\t\t\t\tctx.fillText(text[i], position.x, y);\n\t\t\t\ty += spacing;\n\t\t\t}\n\t\t} else {\n\t\t\tctx.fillText(text, position.x, position.y);\n\t\t}\n\t}\n\n\tfunction adjustPointPositionForLabelHeight(angle, textSize, position) {\n\t\tif (angle === 90 || angle === 270) {\n\t\t\tposition.y -= (textSize.h / 2);\n\t\t} else if (angle > 270 || angle < 90) {\n\t\t\tposition.y -= textSize.h;\n\t\t}\n\t}\n\n\tfunction drawPointLabels(scale) {\n\t\tvar ctx = scale.ctx;\n\t\tvar opts = scale.options;\n\t\tvar angleLineOpts = opts.angleLines;\n\t\tvar pointLabelOpts = opts.pointLabels;\n\n\t\tctx.lineWidth = angleLineOpts.lineWidth;\n\t\tctx.strokeStyle = angleLineOpts.color;\n\n\t\tvar outerDistance = scale.getDistanceFromCenterForValue(opts.ticks.reverse ? 
scale.min : scale.max);\n\n\t\t// Point Label Font\n\t\tvar plFont = getPointLabelFontOptions(scale);\n\n\t\tctx.textBaseline = 'top';\n\n\t\tfor (var i = getValueCount(scale) - 1; i >= 0; i--) {\n\t\t\tif (angleLineOpts.display) {\n\t\t\t\tvar outerPosition = scale.getPointPosition(i, outerDistance);\n\t\t\t\tctx.beginPath();\n\t\t\t\tctx.moveTo(scale.xCenter, scale.yCenter);\n\t\t\t\tctx.lineTo(outerPosition.x, outerPosition.y);\n\t\t\t\tctx.stroke();\n\t\t\t\tctx.closePath();\n\t\t\t}\n\n\t\t\tif (pointLabelOpts.display) {\n\t\t\t\t// Extra 3px out for some label spacing\n\t\t\t\tvar pointLabelPosition = scale.getPointPosition(i, outerDistance + 5);\n\n\t\t\t\t// Keep this in loop since we may support array properties here\n\t\t\t\tvar pointLabelFontColor = helpers.valueAtIndexOrDefault(pointLabelOpts.fontColor, i, globalDefaults.defaultFontColor);\n\t\t\t\tctx.font = plFont.font;\n\t\t\t\tctx.fillStyle = pointLabelFontColor;\n\n\t\t\t\tvar angleRadians = scale.getIndexAngle(i);\n\t\t\t\tvar angle = helpers.toDegrees(angleRadians);\n\t\t\t\tctx.textAlign = getTextAlignForAngle(angle);\n\t\t\t\tadjustPointPositionForLabelHeight(angle, scale._pointLabelSizes[i], pointLabelPosition);\n\t\t\t\tfillText(ctx, scale.pointLabels[i] || '', pointLabelPosition, plFont.size);\n\t\t\t}\n\t\t}\n\t}\n\n\tfunction drawRadiusLine(scale, gridLineOpts, radius, index) {\n\t\tvar ctx = scale.ctx;\n\t\tctx.strokeStyle = helpers.valueAtIndexOrDefault(gridLineOpts.color, index - 1);\n\t\tctx.lineWidth = helpers.valueAtIndexOrDefault(gridLineOpts.lineWidth, index - 1);\n\n\t\tif (scale.options.gridLines.circular) {\n\t\t\t// Draw circular arcs between the points\n\t\t\tctx.beginPath();\n\t\t\tctx.arc(scale.xCenter, scale.yCenter, radius, 0, Math.PI * 2);\n\t\t\tctx.closePath();\n\t\t\tctx.stroke();\n\t\t} else {\n\t\t\t// Draw straight lines connecting each index\n\t\t\tvar valueCount = getValueCount(scale);\n\n\t\t\tif (valueCount === 0) 
{\n\t\t\t\treturn;\n\t\t\t}\n\n\t\t\tctx.beginPath();\n\t\t\tvar pointPosition = scale.getPointPosition(0, radius);\n\t\t\tctx.moveTo(pointPosition.x, pointPosition.y);\n\n\t\t\tfor (var i = 1; i < valueCount; i++) {\n\t\t\t\tpointPosition = scale.getPointPosition(i, radius);\n\t\t\t\tctx.lineTo(pointPosition.x, pointPosition.y);\n\t\t\t}\n\n\t\t\tctx.closePath();\n\t\t\tctx.stroke();\n\t\t}\n\t}\n\n\tfunction numberOrZero(param) {\n\t\treturn helpers.isNumber(param) ? param : 0;\n\t}\n\n\tvar LinearRadialScale = Chart.LinearScaleBase.extend({\n\t\tsetDimensions: function() {\n\t\t\tvar me = this;\n\t\t\tvar opts = me.options;\n\t\t\tvar tickOpts = opts.ticks;\n\t\t\t// Set the unconstrained dimension before label rotation\n\t\t\tme.width = me.maxWidth;\n\t\t\tme.height = me.maxHeight;\n\t\t\tme.xCenter = Math.round(me.width / 2);\n\t\t\tme.yCenter = Math.round(me.height / 2);\n\n\t\t\tvar minSize = helpers.min([me.height, me.width]);\n\t\t\tvar tickFontSize = helpers.valueOrDefault(tickOpts.fontSize, globalDefaults.defaultFontSize);\n\t\t\tme.drawingArea = opts.display ? (minSize / 2) - (tickFontSize / 2 + tickOpts.backdropPaddingY) : (minSize / 2);\n\t\t},\n\t\tdetermineDataLimits: function() {\n\t\t\tvar me = this;\n\t\t\tvar chart = me.chart;\n\t\t\tvar min = Number.POSITIVE_INFINITY;\n\t\t\tvar max = Number.NEGATIVE_INFINITY;\n\n\t\t\thelpers.each(chart.data.datasets, function(dataset, datasetIndex) {\n\t\t\t\tif (chart.isDatasetVisible(datasetIndex)) {\n\t\t\t\t\tvar meta = chart.getDatasetMeta(datasetIndex);\n\n\t\t\t\t\thelpers.each(dataset.data, function(rawValue, index) {\n\t\t\t\t\t\tvar value = +me.getRightValue(rawValue);\n\t\t\t\t\t\tif (isNaN(value) || meta.data[index].hidden) {\n\t\t\t\t\t\t\treturn;\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tmin = Math.min(value, min);\n\t\t\t\t\t\tmax = Math.max(value, max);\n\t\t\t\t\t});\n\t\t\t\t}\n\t\t\t});\n\n\t\t\tme.min = (min === Number.POSITIVE_INFINITY ? 0 : min);\n\t\t\tme.max = (max === Number.NEGATIVE_INFINITY ? 
0 : max);\n\n\t\t\t// Common base implementation to handle ticks.min, ticks.max, ticks.beginAtZero\n\t\t\tme.handleTickRangeOptions();\n\t\t},\n\t\tgetTickLimit: function() {\n\t\t\tvar tickOpts = this.options.ticks;\n\t\t\tvar tickFontSize = helpers.valueOrDefault(tickOpts.fontSize, globalDefaults.defaultFontSize);\n\t\t\treturn Math.min(tickOpts.maxTicksLimit ? tickOpts.maxTicksLimit : 11, Math.ceil(this.drawingArea / (1.5 * tickFontSize)));\n\t\t},\n\t\tconvertTicksToLabels: function() {\n\t\t\tvar me = this;\n\n\t\t\tChart.LinearScaleBase.prototype.convertTicksToLabels.call(me);\n\n\t\t\t// Point labels\n\t\t\tme.pointLabels = me.chart.data.labels.map(me.options.pointLabels.callback, me);\n\t\t},\n\t\tgetLabelForIndex: function(index, datasetIndex) {\n\t\t\treturn +this.getRightValue(this.chart.data.datasets[datasetIndex].data[index]);\n\t\t},\n\t\tfit: function() {\n\t\t\tif (this.options.pointLabels.display) {\n\t\t\t\tfitWithPointLabels(this);\n\t\t\t} else {\n\t\t\t\tfit(this);\n\t\t\t}\n\t\t},\n\t\t/**\n\t\t * Set radius reductions and determine new radius and center point\n\t\t * @private\n\t\t */\n\t\tsetReductions: function(largestPossibleRadius, furthestLimits, furthestAngles) {\n\t\t\tvar me = this;\n\t\t\tvar radiusReductionLeft = furthestLimits.l / Math.sin(furthestAngles.l);\n\t\t\tvar radiusReductionRight = Math.max(furthestLimits.r - me.width, 0) / Math.sin(furthestAngles.r);\n\t\t\tvar radiusReductionTop = -furthestLimits.t / Math.cos(furthestAngles.t);\n\t\t\tvar radiusReductionBottom = -Math.max(furthestLimits.b - me.height, 0) / Math.cos(furthestAngles.b);\n\n\t\t\tradiusReductionLeft = numberOrZero(radiusReductionLeft);\n\t\t\tradiusReductionRight = numberOrZero(radiusReductionRight);\n\t\t\tradiusReductionTop = numberOrZero(radiusReductionTop);\n\t\t\tradiusReductionBottom = numberOrZero(radiusReductionBottom);\n\n\t\t\tme.drawingArea = Math.min(\n\t\t\t\tMath.round(largestPossibleRadius - (radiusReductionLeft + radiusReductionRight) / 
2),\n\t\t\t\tMath.round(largestPossibleRadius - (radiusReductionTop + radiusReductionBottom) / 2));\n\t\t\tme.setCenterPoint(radiusReductionLeft, radiusReductionRight, radiusReductionTop, radiusReductionBottom);\n\t\t},\n\t\tsetCenterPoint: function(leftMovement, rightMovement, topMovement, bottomMovement) {\n\t\t\tvar me = this;\n\t\t\tvar maxRight = me.width - rightMovement - me.drawingArea;\n\t\t\tvar maxLeft = leftMovement + me.drawingArea;\n\t\t\tvar maxTop = topMovement + me.drawingArea;\n\t\t\tvar maxBottom = me.height - bottomMovement - me.drawingArea;\n\n\t\t\tme.xCenter = Math.round(((maxLeft + maxRight) / 2) + me.left);\n\t\t\tme.yCenter = Math.round(((maxTop + maxBottom) / 2) + me.top);\n\t\t},\n\n\t\tgetIndexAngle: function(index) {\n\t\t\tvar angleMultiplier = (Math.PI * 2) / getValueCount(this);\n\t\t\tvar startAngle = this.chart.options && this.chart.options.startAngle ?\n\t\t\t\tthis.chart.options.startAngle :\n\t\t\t\t0;\n\n\t\t\tvar startAngleRadians = startAngle * Math.PI * 2 / 360;\n\n\t\t\t// Start from the top instead of right, so remove a quarter of the circle\n\t\t\treturn index * angleMultiplier + startAngleRadians;\n\t\t},\n\t\tgetDistanceFromCenterForValue: function(value) {\n\t\t\tvar me = this;\n\n\t\t\tif (value === null) {\n\t\t\t\treturn 0; // null always in center\n\t\t\t}\n\n\t\t\t// Take into account half font size + the yPadding of the top value\n\t\t\tvar scalingFactor = me.drawingArea / (me.max - me.min);\n\t\t\tif (me.options.ticks.reverse) {\n\t\t\t\treturn (me.max - value) * scalingFactor;\n\t\t\t}\n\t\t\treturn (value - me.min) * scalingFactor;\n\t\t},\n\t\tgetPointPosition: function(index, distanceFromCenter) {\n\t\t\tvar me = this;\n\t\t\tvar thisAngle = me.getIndexAngle(index) - (Math.PI / 2);\n\t\t\treturn {\n\t\t\t\tx: Math.round(Math.cos(thisAngle) * distanceFromCenter) + me.xCenter,\n\t\t\t\ty: Math.round(Math.sin(thisAngle) * distanceFromCenter) + me.yCenter\n\t\t\t};\n\t\t},\n\t\tgetPointPositionForValue: 
function(index, value) {\n\t\t\treturn this.getPointPosition(index, this.getDistanceFromCenterForValue(value));\n\t\t},\n\n\t\tgetBasePosition: function() {\n\t\t\tvar me = this;\n\t\t\tvar min = me.min;\n\t\t\tvar max = me.max;\n\n\t\t\treturn me.getPointPositionForValue(0,\n\t\t\t\tme.beginAtZero ? 0 :\n\t\t\t\tmin < 0 && max < 0 ? max :\n\t\t\t\tmin > 0 && max > 0 ? min :\n\t\t\t\t0);\n\t\t},\n\n\t\tdraw: function() {\n\t\t\tvar me = this;\n\t\t\tvar opts = me.options;\n\t\t\tvar gridLineOpts = opts.gridLines;\n\t\t\tvar tickOpts = opts.ticks;\n\t\t\tvar valueOrDefault = helpers.valueOrDefault;\n\n\t\t\tif (opts.display) {\n\t\t\t\tvar ctx = me.ctx;\n\t\t\t\tvar startAngle = this.getIndexAngle(0);\n\n\t\t\t\t// Tick Font\n\t\t\t\tvar tickFontSize = valueOrDefault(tickOpts.fontSize, globalDefaults.defaultFontSize);\n\t\t\t\tvar tickFontStyle = valueOrDefault(tickOpts.fontStyle, globalDefaults.defaultFontStyle);\n\t\t\t\tvar tickFontFamily = valueOrDefault(tickOpts.fontFamily, globalDefaults.defaultFontFamily);\n\t\t\t\tvar tickLabelFont = helpers.fontString(tickFontSize, tickFontStyle, tickFontFamily);\n\n\t\t\t\thelpers.each(me.ticks, function(label, index) {\n\t\t\t\t\t// Don't draw a centre value (if it is minimum)\n\t\t\t\t\tif (index > 0 || tickOpts.reverse) {\n\t\t\t\t\t\tvar yCenterOffset = me.getDistanceFromCenterForValue(me.ticksAsNumbers[index]);\n\n\t\t\t\t\t\t// Draw circular lines around the scale\n\t\t\t\t\t\tif (gridLineOpts.display && index !== 0) {\n\t\t\t\t\t\t\tdrawRadiusLine(me, gridLineOpts, yCenterOffset, index);\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tif (tickOpts.display) {\n\t\t\t\t\t\t\tvar tickFontColor = valueOrDefault(tickOpts.fontColor, globalDefaults.defaultFontColor);\n\t\t\t\t\t\t\tctx.font = tickLabelFont;\n\n\t\t\t\t\t\t\tctx.save();\n\t\t\t\t\t\t\tctx.translate(me.xCenter, me.yCenter);\n\t\t\t\t\t\t\tctx.rotate(startAngle);\n\n\t\t\t\t\t\t\tif (tickOpts.showLabelBackdrop) {\n\t\t\t\t\t\t\t\tvar labelWidth = 
ctx.measureText(label).width;\n\t\t\t\t\t\t\t\tctx.fillStyle = tickOpts.backdropColor;\n\t\t\t\t\t\t\t\tctx.fillRect(\n\t\t\t\t\t\t\t\t\t-labelWidth / 2 - tickOpts.backdropPaddingX,\n\t\t\t\t\t\t\t\t\t-yCenterOffset - tickFontSize / 2 - tickOpts.backdropPaddingY,\n\t\t\t\t\t\t\t\t\tlabelWidth + tickOpts.backdropPaddingX * 2,\n\t\t\t\t\t\t\t\t\ttickFontSize + tickOpts.backdropPaddingY * 2\n\t\t\t\t\t\t\t\t);\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tctx.textAlign = 'center';\n\t\t\t\t\t\t\tctx.textBaseline = 'middle';\n\t\t\t\t\t\t\tctx.fillStyle = tickFontColor;\n\t\t\t\t\t\t\tctx.fillText(label, 0, -yCenterOffset);\n\t\t\t\t\t\t\tctx.restore();\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t});\n\n\t\t\t\tif (opts.angleLines.display || opts.pointLabels.display) {\n\t\t\t\t\tdrawPointLabels(me);\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t});\n\tChart.scaleService.registerScaleType('radialLinear', LinearRadialScale, defaultConfig);\n\n};\n\n},{\"25\":25,\"34\":34,\"45\":45}],58:[function(require,module,exports){\n/* global window: false */\n'use strict';\n\nvar moment = require(6);\nmoment = typeof moment === 'function' ? 
moment : window.moment;\n\nvar defaults = require(25);\nvar helpers = require(45);\n\n// Integer constants are from the ES6 spec.\nvar MIN_INTEGER = Number.MIN_SAFE_INTEGER || -9007199254740991;\nvar MAX_INTEGER = Number.MAX_SAFE_INTEGER || 9007199254740991;\n\nvar INTERVALS = {\n\tmillisecond: {\n\t\tcommon: true,\n\t\tsize: 1,\n\t\tsteps: [1, 2, 5, 10, 20, 50, 100, 250, 500]\n\t},\n\tsecond: {\n\t\tcommon: true,\n\t\tsize: 1000,\n\t\tsteps: [1, 2, 5, 10, 30]\n\t},\n\tminute: {\n\t\tcommon: true,\n\t\tsize: 60000,\n\t\tsteps: [1, 2, 5, 10, 30]\n\t},\n\thour: {\n\t\tcommon: true,\n\t\tsize: 3600000,\n\t\tsteps: [1, 2, 3, 6, 12]\n\t},\n\tday: {\n\t\tcommon: true,\n\t\tsize: 86400000,\n\t\tsteps: [1, 2, 5]\n\t},\n\tweek: {\n\t\tcommon: false,\n\t\tsize: 604800000,\n\t\tsteps: [1, 2, 3, 4]\n\t},\n\tmonth: {\n\t\tcommon: true,\n\t\tsize: 2.628e9,\n\t\tsteps: [1, 2, 3]\n\t},\n\tquarter: {\n\t\tcommon: false,\n\t\tsize: 7.884e9,\n\t\tsteps: [1, 2, 3, 4]\n\t},\n\tyear: {\n\t\tcommon: true,\n\t\tsize: 3.154e10\n\t}\n};\n\nvar UNITS = Object.keys(INTERVALS);\n\nfunction sorter(a, b) {\n\treturn a - b;\n}\n\nfunction arrayUnique(items) {\n\tvar hash = {};\n\tvar out = [];\n\tvar i, ilen, item;\n\n\tfor (i = 0, ilen = items.length; i < ilen; ++i) {\n\t\titem = items[i];\n\t\tif (!hash[item]) {\n\t\t\thash[item] = true;\n\t\t\tout.push(item);\n\t\t}\n\t}\n\n\treturn out;\n}\n\n/**\n * Returns an array of {time, pos} objects used to interpolate a specific `time` or position\n * (`pos`) on the scale, by searching entries before and after the requested value. `pos` is\n * a decimal between 0 and 1: 0 being the start of the scale (left or top) and 1 the other\n * extremity (left + width or top + height). Note that it would be more optimized to directly\n * store pre-computed pixels, but the scale dimensions are not guaranteed at the time we need\n * to create the lookup table. 
The table ALWAYS contains at least two items: min and max.\n *\n * @param {Number[]} timestamps - timestamps sorted from lowest to highest.\n * @param {String} distribution - If 'linear', timestamps will be spread linearly along the min\n * and max range, so basically, the table will contains only two items: {min, 0} and {max, 1}.\n * If 'series', timestamps will be positioned at the same distance from each other. In this\n * case, only timestamps that break the time linearity are registered, meaning that in the\n * best case, all timestamps are linear, the table contains only min and max.\n */\nfunction buildLookupTable(timestamps, min, max, distribution) {\n\tif (distribution === 'linear' || !timestamps.length) {\n\t\treturn [\n\t\t\t{time: min, pos: 0},\n\t\t\t{time: max, pos: 1}\n\t\t];\n\t}\n\n\tvar table = [];\n\tvar items = [min];\n\tvar i, ilen, prev, curr, next;\n\n\tfor (i = 0, ilen = timestamps.length; i < ilen; ++i) {\n\t\tcurr = timestamps[i];\n\t\tif (curr > min && curr < max) {\n\t\t\titems.push(curr);\n\t\t}\n\t}\n\n\titems.push(max);\n\n\tfor (i = 0, ilen = items.length; i < ilen; ++i) {\n\t\tnext = items[i + 1];\n\t\tprev = items[i - 1];\n\t\tcurr = items[i];\n\n\t\t// only add points that breaks the scale linearity\n\t\tif (prev === undefined || next === undefined || Math.round((next + prev) / 2) !== curr) {\n\t\t\ttable.push({time: curr, pos: i / (ilen - 1)});\n\t\t}\n\t}\n\n\treturn table;\n}\n\n// @see adapted from http://www.anujgakhar.com/2014/03/01/binary-search-in-javascript/\nfunction lookup(table, key, value) {\n\tvar lo = 0;\n\tvar hi = table.length - 1;\n\tvar mid, i0, i1;\n\n\twhile (lo >= 0 && lo <= hi) {\n\t\tmid = (lo + hi) >> 1;\n\t\ti0 = table[mid - 1] || null;\n\t\ti1 = table[mid];\n\n\t\tif (!i0) {\n\t\t\t// given value is outside table (before first item)\n\t\t\treturn {lo: null, hi: i1};\n\t\t} else if (i1[key] < value) {\n\t\t\tlo = mid + 1;\n\t\t} else if (i0[key] > value) {\n\t\t\thi = mid - 1;\n\t\t} else {\n\t\t\treturn 
{lo: i0, hi: i1};\n\t\t}\n\t}\n\n\t// given value is outside table (after last item)\n\treturn {lo: i1, hi: null};\n}\n\n/**\n * Linearly interpolates the given source `value` using the table items `skey` values and\n * returns the associated `tkey` value. For example, interpolate(table, 'time', 42, 'pos')\n * returns the position for a timestamp equal to 42. If value is out of bounds, values at\n * index [0, 1] or [n - 1, n] are used for the interpolation.\n */\nfunction interpolate(table, skey, sval, tkey) {\n\tvar range = lookup(table, skey, sval);\n\n\t// Note: the lookup table ALWAYS contains at least 2 items (min and max)\n\tvar prev = !range.lo ? table[0] : !range.hi ? table[table.length - 2] : range.lo;\n\tvar next = !range.lo ? table[1] : !range.hi ? table[table.length - 1] : range.hi;\n\n\tvar span = next[skey] - prev[skey];\n\tvar ratio = span ? (sval - prev[skey]) / span : 0;\n\tvar offset = (next[tkey] - prev[tkey]) * ratio;\n\n\treturn prev[tkey] + offset;\n}\n\n/**\n * Convert the given value to a moment object using the given time options.\n * @see http://momentjs.com/docs/#/parsing/\n */\nfunction momentify(value, options) {\n\tvar parser = options.parser;\n\tvar format = options.parser || options.format;\n\n\tif (typeof parser === 'function') {\n\t\treturn parser(value);\n\t}\n\n\tif (typeof value === 'string' && typeof format === 'string') {\n\t\treturn moment(value, format);\n\t}\n\n\tif (!(value instanceof moment)) {\n\t\tvalue = moment(value);\n\t}\n\n\tif (value.isValid()) {\n\t\treturn value;\n\t}\n\n\t// Labels are in an incompatible moment format and no `parser` has been provided.\n\t// The user might still use the deprecated `format` option to convert his inputs.\n\tif (typeof format === 'function') {\n\t\treturn format(value);\n\t}\n\n\treturn value;\n}\n\nfunction parse(input, scale) {\n\tif (helpers.isNullOrUndef(input)) {\n\t\treturn null;\n\t}\n\n\tvar options = scale.options.time;\n\tvar value = momentify(scale.getRightValue(input), 
options);\n\tif (!value.isValid()) {\n\t\treturn null;\n\t}\n\n\tif (options.round) {\n\t\tvalue.startOf(options.round);\n\t}\n\n\treturn value.valueOf();\n}\n\n/**\n * Returns the number of unit to skip to be able to display up to `capacity` number of ticks\n * in `unit` for the given `min` / `max` range and respecting the interval steps constraints.\n */\nfunction determineStepSize(min, max, unit, capacity) {\n\tvar range = max - min;\n\tvar interval = INTERVALS[unit];\n\tvar milliseconds = interval.size;\n\tvar steps = interval.steps;\n\tvar i, ilen, factor;\n\n\tif (!steps) {\n\t\treturn Math.ceil(range / (capacity * milliseconds));\n\t}\n\n\tfor (i = 0, ilen = steps.length; i < ilen; ++i) {\n\t\tfactor = steps[i];\n\t\tif (Math.ceil(range / (milliseconds * factor)) <= capacity) {\n\t\t\tbreak;\n\t\t}\n\t}\n\n\treturn factor;\n}\n\n/**\n * Figures out what unit results in an appropriate number of auto-generated ticks\n */\nfunction determineUnitForAutoTicks(minUnit, min, max, capacity) {\n\tvar ilen = UNITS.length;\n\tvar i, interval, factor;\n\n\tfor (i = UNITS.indexOf(minUnit); i < ilen - 1; ++i) {\n\t\tinterval = INTERVALS[UNITS[i]];\n\t\tfactor = interval.steps ? interval.steps[interval.steps.length - 1] : MAX_INTEGER;\n\n\t\tif (interval.common && Math.ceil((max - min) / (factor * interval.size)) <= capacity) {\n\t\t\treturn UNITS[i];\n\t\t}\n\t}\n\n\treturn UNITS[ilen - 1];\n}\n\n/**\n * Figures out what unit to format a set of ticks with\n */\nfunction determineUnitForFormatting(ticks, minUnit, min, max) {\n\tvar duration = moment.duration(moment(max).diff(moment(min)));\n\tvar ilen = UNITS.length;\n\tvar i, unit;\n\n\tfor (i = ilen - 1; i >= UNITS.indexOf(minUnit); i--) {\n\t\tunit = UNITS[i];\n\t\tif (INTERVALS[unit].common && duration.as(unit) >= ticks.length) {\n\t\t\treturn unit;\n\t\t}\n\t}\n\n\treturn UNITS[minUnit ? 
UNITS.indexOf(minUnit) : 0];\n}\n\nfunction determineMajorUnit(unit) {\n\tfor (var i = UNITS.indexOf(unit) + 1, ilen = UNITS.length; i < ilen; ++i) {\n\t\tif (INTERVALS[UNITS[i]].common) {\n\t\t\treturn UNITS[i];\n\t\t}\n\t}\n}\n\n/**\n * Generates a maximum of `capacity` timestamps between min and max, rounded to the\n * `minor` unit, aligned on the `major` unit and using the given scale time `options`.\n * Important: this method can return ticks outside the min and max range, it's the\n * responsibility of the calling code to clamp values if needed.\n */\nfunction generate(min, max, capacity, options) {\n\tvar timeOpts = options.time;\n\tvar minor = timeOpts.unit || determineUnitForAutoTicks(timeOpts.minUnit, min, max, capacity);\n\tvar major = determineMajorUnit(minor);\n\tvar stepSize = helpers.valueOrDefault(timeOpts.stepSize, timeOpts.unitStepSize);\n\tvar weekday = minor === 'week' ? timeOpts.isoWeekday : false;\n\tvar majorTicksEnabled = options.ticks.major.enabled;\n\tvar interval = INTERVALS[minor];\n\tvar first = moment(min);\n\tvar last = moment(max);\n\tvar ticks = [];\n\tvar time;\n\n\tif (!stepSize) {\n\t\tstepSize = determineStepSize(min, max, minor, capacity);\n\t}\n\n\t// For 'week' unit, handle the first day of week option\n\tif (weekday) {\n\t\tfirst = first.isoWeekday(weekday);\n\t\tlast = last.isoWeekday(weekday);\n\t}\n\n\t// Align first/last ticks on unit\n\tfirst = first.startOf(weekday ? 'day' : minor);\n\tlast = last.startOf(weekday ? 
'day' : minor);\n\n\t// Make sure that the last tick include max\n\tif (last < max) {\n\t\tlast.add(1, minor);\n\t}\n\n\ttime = moment(first);\n\n\tif (majorTicksEnabled && major && !weekday && !timeOpts.round) {\n\t\t// Align the first tick on the previous `minor` unit aligned on the `major` unit:\n\t\t// we first aligned time on the previous `major` unit then add the number of full\n\t\t// stepSize there is between first and the previous major time.\n\t\ttime.startOf(major);\n\t\ttime.add(~~((first - time) / (interval.size * stepSize)) * stepSize, minor);\n\t}\n\n\tfor (; time < last; time.add(stepSize, minor)) {\n\t\tticks.push(+time);\n\t}\n\n\tticks.push(+time);\n\n\treturn ticks;\n}\n\n/**\n * Returns the right and left offsets from edges in the form of {left, right}.\n * Offsets are added when the `offset` option is true.\n */\nfunction computeOffsets(table, ticks, min, max, options) {\n\tvar left = 0;\n\tvar right = 0;\n\tvar upper, lower;\n\n\tif (options.offset && ticks.length) {\n\t\tif (!options.time.min) {\n\t\t\tupper = ticks.length > 1 ? ticks[1] : max;\n\t\t\tlower = ticks[0];\n\t\t\tleft = (\n\t\t\t\tinterpolate(table, 'time', upper, 'pos') -\n\t\t\t\tinterpolate(table, 'time', lower, 'pos')\n\t\t\t) / 2;\n\t\t}\n\t\tif (!options.time.max) {\n\t\t\tupper = ticks[ticks.length - 1];\n\t\t\tlower = ticks.length > 1 ? ticks[ticks.length - 2] : min;\n\t\t\tright = (\n\t\t\t\tinterpolate(table, 'time', upper, 'pos') -\n\t\t\t\tinterpolate(table, 'time', lower, 'pos')\n\t\t\t) / 2;\n\t\t}\n\t}\n\n\treturn {left: left, right: right};\n}\n\nfunction ticksFromTimestamps(values, majorUnit) {\n\tvar ticks = [];\n\tvar i, ilen, value, major;\n\n\tfor (i = 0, ilen = values.length; i < ilen; ++i) {\n\t\tvalue = values[i];\n\t\tmajor = majorUnit ? 
value === +moment(value).startOf(majorUnit) : false;\n\n\t\tticks.push({\n\t\t\tvalue: value,\n\t\t\tmajor: major\n\t\t});\n\t}\n\n\treturn ticks;\n}\n\nfunction determineLabelFormat(data, timeOpts) {\n\tvar i, momentDate, hasTime;\n\tvar ilen = data.length;\n\n\t// find the label with the most parts (milliseconds, minutes, etc.)\n\t// format all labels with the same level of detail as the most specific label\n\tfor (i = 0; i < ilen; i++) {\n\t\tmomentDate = momentify(data[i], timeOpts);\n\t\tif (momentDate.millisecond() !== 0) {\n\t\t\treturn 'MMM D, YYYY h:mm:ss.SSS a';\n\t\t}\n\t\tif (momentDate.second() !== 0 || momentDate.minute() !== 0 || momentDate.hour() !== 0) {\n\t\t\thasTime = true;\n\t\t}\n\t}\n\tif (hasTime) {\n\t\treturn 'MMM D, YYYY h:mm:ss a';\n\t}\n\treturn 'MMM D, YYYY';\n}\n\nmodule.exports = function(Chart) {\n\n\tvar defaultConfig = {\n\t\tposition: 'bottom',\n\n\t\t/**\n\t\t * Data distribution along the scale:\n\t\t * - 'linear': data are spread according to their time (distances can vary),\n\t\t * - 'series': data are spread at the same distance from each other.\n\t\t * @see https://github.com/chartjs/Chart.js/pull/4507\n\t\t * @since 2.7.0\n\t\t */\n\t\tdistribution: 'linear',\n\n\t\t/**\n\t\t * Scale boundary strategy (bypassed by min/max time options)\n\t\t * - `data`: make sure data are fully visible, ticks outside are removed\n\t\t * - `ticks`: make sure ticks are fully visible, data outside are truncated\n\t\t * @see https://github.com/chartjs/Chart.js/pull/4556\n\t\t * @since 2.7.0\n\t\t */\n\t\tbounds: 'data',\n\n\t\ttime: {\n\t\t\tparser: false, // false == a pattern string from http://momentjs.com/docs/#/parsing/string-format/ or a custom callback that converts its argument to a moment\n\t\t\tformat: false, // DEPRECATED false == date objects, moment object, callback or a pattern string from http://momentjs.com/docs/#/parsing/string-format/\n\t\t\tunit: false, // false == automatic or override with week, month, year, 
etc.\n\t\t\tround: false, // none, or override with week, month, year, etc.\n\t\t\tdisplayFormat: false, // DEPRECATED\n\t\t\tisoWeekday: false, // override week start day - see http://momentjs.com/docs/#/get-set/iso-weekday/\n\t\t\tminUnit: 'millisecond',\n\n\t\t\t// defaults to unit's corresponding unitFormat below or override using pattern string from http://momentjs.com/docs/#/displaying/format/\n\t\t\tdisplayFormats: {\n\t\t\t\tmillisecond: 'h:mm:ss.SSS a', // 11:20:01.123 AM,\n\t\t\t\tsecond: 'h:mm:ss a', // 11:20:01 AM\n\t\t\t\tminute: 'h:mm a', // 11:20 AM\n\t\t\t\thour: 'hA', // 5PM\n\t\t\t\tday: 'MMM D', // Sep 4\n\t\t\t\tweek: 'll', // Week 46, or maybe \"[W]WW - YYYY\" ?\n\t\t\t\tmonth: 'MMM YYYY', // Sept 2015\n\t\t\t\tquarter: '[Q]Q - YYYY', // Q3\n\t\t\t\tyear: 'YYYY' // 2015\n\t\t\t},\n\t\t},\n\t\tticks: {\n\t\t\tautoSkip: false,\n\n\t\t\t/**\n\t\t\t * Ticks generation input values:\n\t\t\t * - 'auto': generates \"optimal\" ticks based on scale size and time options.\n\t\t\t * - 'data': generates ticks from data (including labels from data {t|x|y} objects).\n\t\t\t * - 'labels': generates ticks from user given `data.labels` values ONLY.\n\t\t\t * @see https://github.com/chartjs/Chart.js/pull/4507\n\t\t\t * @since 2.7.0\n\t\t\t */\n\t\t\tsource: 'auto',\n\n\t\t\tmajor: {\n\t\t\t\tenabled: false\n\t\t\t}\n\t\t}\n\t};\n\n\tvar TimeScale = Chart.Scale.extend({\n\t\tinitialize: function() {\n\t\t\tif (!moment) {\n\t\t\t\tthrow new Error('Chart.js - Moment.js could not be found! You must include it before Chart.js to use the time scale. 
Download at https://momentjs.com');\n\t\t\t}\n\n\t\t\tthis.mergeTicksOptions();\n\n\t\t\tChart.Scale.prototype.initialize.call(this);\n\t\t},\n\n\t\tupdate: function() {\n\t\t\tvar me = this;\n\t\t\tvar options = me.options;\n\n\t\t\t// DEPRECATIONS: output a message only one time per update\n\t\t\tif (options.time && options.time.format) {\n\t\t\t\tconsole.warn('options.time.format is deprecated and replaced by options.time.parser.');\n\t\t\t}\n\n\t\t\treturn Chart.Scale.prototype.update.apply(me, arguments);\n\t\t},\n\n\t\t/**\n\t\t * Allows data to be referenced via 't' attribute\n\t\t */\n\t\tgetRightValue: function(rawValue) {\n\t\t\tif (rawValue && rawValue.t !== undefined) {\n\t\t\t\trawValue = rawValue.t;\n\t\t\t}\n\t\t\treturn Chart.Scale.prototype.getRightValue.call(this, rawValue);\n\t\t},\n\n\t\tdetermineDataLimits: function() {\n\t\t\tvar me = this;\n\t\t\tvar chart = me.chart;\n\t\t\tvar timeOpts = me.options.time;\n\t\t\tvar unit = timeOpts.unit || 'day';\n\t\t\tvar min = MAX_INTEGER;\n\t\t\tvar max = MIN_INTEGER;\n\t\t\tvar timestamps = [];\n\t\t\tvar datasets = [];\n\t\t\tvar labels = [];\n\t\t\tvar i, j, ilen, jlen, data, timestamp;\n\n\t\t\t// Convert labels to timestamps\n\t\t\tfor (i = 0, ilen = chart.data.labels.length; i < ilen; ++i) {\n\t\t\t\tlabels.push(parse(chart.data.labels[i], me));\n\t\t\t}\n\n\t\t\t// Convert data to timestamps\n\t\t\tfor (i = 0, ilen = (chart.data.datasets || []).length; i < ilen; ++i) {\n\t\t\t\tif (chart.isDatasetVisible(i)) {\n\t\t\t\t\tdata = chart.data.datasets[i].data;\n\n\t\t\t\t\t// Let's consider that all data have the same format.\n\t\t\t\t\tif (helpers.isObject(data[0])) {\n\t\t\t\t\t\tdatasets[i] = [];\n\n\t\t\t\t\t\tfor (j = 0, jlen = data.length; j < jlen; ++j) {\n\t\t\t\t\t\t\ttimestamp = parse(data[j], me);\n\t\t\t\t\t\t\ttimestamps.push(timestamp);\n\t\t\t\t\t\t\tdatasets[i][j] = timestamp;\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\ttimestamps.push.apply(timestamps, 
labels);\n\t\t\t\t\t\tdatasets[i] = labels.slice(0);\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tdatasets[i] = [];\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif (labels.length) {\n\t\t\t\t// Sort labels **after** data have been converted\n\t\t\t\tlabels = arrayUnique(labels).sort(sorter);\n\t\t\t\tmin = Math.min(min, labels[0]);\n\t\t\t\tmax = Math.max(max, labels[labels.length - 1]);\n\t\t\t}\n\n\t\t\tif (timestamps.length) {\n\t\t\t\ttimestamps = arrayUnique(timestamps).sort(sorter);\n\t\t\t\tmin = Math.min(min, timestamps[0]);\n\t\t\t\tmax = Math.max(max, timestamps[timestamps.length - 1]);\n\t\t\t}\n\n\t\t\tmin = parse(timeOpts.min, me) || min;\n\t\t\tmax = parse(timeOpts.max, me) || max;\n\n\t\t\t// In case there is no valid min/max, set limits based on unit time option\n\t\t\tmin = min === MAX_INTEGER ? +moment().startOf(unit) : min;\n\t\t\tmax = max === MIN_INTEGER ? +moment().endOf(unit) + 1 : max;\n\n\t\t\t// Make sure that max is strictly higher than min (required by the lookup table)\n\t\t\tme.min = Math.min(min, max);\n\t\t\tme.max = Math.max(min + 1, max);\n\n\t\t\t// PRIVATE\n\t\t\tme._horizontal = me.isHorizontal();\n\t\t\tme._table = [];\n\t\t\tme._timestamps = {\n\t\t\t\tdata: timestamps,\n\t\t\t\tdatasets: datasets,\n\t\t\t\tlabels: labels\n\t\t\t};\n\t\t},\n\n\t\tbuildTicks: function() {\n\t\t\tvar me = this;\n\t\t\tvar min = me.min;\n\t\t\tvar max = me.max;\n\t\t\tvar options = me.options;\n\t\t\tvar timeOpts = options.time;\n\t\t\tvar timestamps = [];\n\t\t\tvar ticks = [];\n\t\t\tvar i, ilen, timestamp;\n\n\t\t\tswitch (options.ticks.source) {\n\t\t\tcase 'data':\n\t\t\t\ttimestamps = me._timestamps.data;\n\t\t\t\tbreak;\n\t\t\tcase 'labels':\n\t\t\t\ttimestamps = me._timestamps.labels;\n\t\t\t\tbreak;\n\t\t\tcase 'auto':\n\t\t\tdefault:\n\t\t\t\ttimestamps = generate(min, max, me.getLabelCapacity(min), options);\n\t\t\t}\n\n\t\t\tif (options.bounds === 'ticks' && timestamps.length) {\n\t\t\t\tmin = timestamps[0];\n\t\t\t\tmax = timestamps[timestamps.length - 
1];\n\t\t\t}\n\n\t\t\t// Enforce limits with user min/max options\n\t\t\tmin = parse(timeOpts.min, me) || min;\n\t\t\tmax = parse(timeOpts.max, me) || max;\n\n\t\t\t// Remove ticks outside the min/max range\n\t\t\tfor (i = 0, ilen = timestamps.length; i < ilen; ++i) {\n\t\t\t\ttimestamp = timestamps[i];\n\t\t\t\tif (timestamp >= min && timestamp <= max) {\n\t\t\t\t\tticks.push(timestamp);\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tme.min = min;\n\t\t\tme.max = max;\n\n\t\t\t// PRIVATE\n\t\t\tme._unit = timeOpts.unit || determineUnitForFormatting(ticks, timeOpts.minUnit, me.min, me.max);\n\t\t\tme._majorUnit = determineMajorUnit(me._unit);\n\t\t\tme._table = buildLookupTable(me._timestamps.data, min, max, options.distribution);\n\t\t\tme._offsets = computeOffsets(me._table, ticks, min, max, options);\n\t\t\tme._labelFormat = determineLabelFormat(me._timestamps.data, timeOpts);\n\n\t\t\treturn ticksFromTimestamps(ticks, me._majorUnit);\n\t\t},\n\n\t\tgetLabelForIndex: function(index, datasetIndex) {\n\t\t\tvar me = this;\n\t\t\tvar data = me.chart.data;\n\t\t\tvar timeOpts = me.options.time;\n\t\t\tvar label = data.labels && index < data.labels.length ? 
data.labels[index] : '';\n\t\t\tvar value = data.datasets[datasetIndex].data[index];\n\n\t\t\tif (helpers.isObject(value)) {\n\t\t\t\tlabel = me.getRightValue(value);\n\t\t\t}\n\t\t\tif (timeOpts.tooltipFormat) {\n\t\t\t\treturn momentify(label, timeOpts).format(timeOpts.tooltipFormat);\n\t\t\t}\n\t\t\tif (typeof label === 'string') {\n\t\t\t\treturn label;\n\t\t\t}\n\n\t\t\treturn momentify(label, timeOpts).format(me._labelFormat);\n\t\t},\n\n\t\t/**\n\t\t * Function to format an individual tick mark\n\t\t * @private\n\t\t */\n\t\ttickFormatFunction: function(tick, index, ticks, formatOverride) {\n\t\t\tvar me = this;\n\t\t\tvar options = me.options;\n\t\t\tvar time = tick.valueOf();\n\t\t\tvar formats = options.time.displayFormats;\n\t\t\tvar minorFormat = formats[me._unit];\n\t\t\tvar majorUnit = me._majorUnit;\n\t\t\tvar majorFormat = formats[majorUnit];\n\t\t\tvar majorTime = tick.clone().startOf(majorUnit).valueOf();\n\t\t\tvar majorTickOpts = options.ticks.major;\n\t\t\tvar major = majorTickOpts.enabled && majorUnit && majorFormat && time === majorTime;\n\t\t\tvar label = tick.format(formatOverride ? formatOverride : major ? majorFormat : minorFormat);\n\t\t\tvar tickOpts = major ? majorTickOpts : options.ticks.minor;\n\t\t\tvar formatter = helpers.valueOrDefault(tickOpts.callback, tickOpts.userCallback);\n\n\t\t\treturn formatter ? formatter(label, index, ticks) : label;\n\t\t},\n\n\t\tconvertTicksToLabels: function(ticks) {\n\t\t\tvar labels = [];\n\t\t\tvar i, ilen;\n\n\t\t\tfor (i = 0, ilen = ticks.length; i < ilen; ++i) {\n\t\t\t\tlabels.push(this.tickFormatFunction(moment(ticks[i].value), i, ticks));\n\t\t\t}\n\n\t\t\treturn labels;\n\t\t},\n\n\t\t/**\n\t\t * @private\n\t\t */\n\t\tgetPixelForOffset: function(time) {\n\t\t\tvar me = this;\n\t\t\tvar size = me._horizontal ? me.width : me.height;\n\t\t\tvar start = me._horizontal ? 
me.left : me.top;\n\t\t\tvar pos = interpolate(me._table, 'time', time, 'pos');\n\n\t\t\treturn start + size * (me._offsets.left + pos) / (me._offsets.left + 1 + me._offsets.right);\n\t\t},\n\n\t\tgetPixelForValue: function(value, index, datasetIndex) {\n\t\t\tvar me = this;\n\t\t\tvar time = null;\n\n\t\t\tif (index !== undefined && datasetIndex !== undefined) {\n\t\t\t\ttime = me._timestamps.datasets[datasetIndex][index];\n\t\t\t}\n\n\t\t\tif (time === null) {\n\t\t\t\ttime = parse(value, me);\n\t\t\t}\n\n\t\t\tif (time !== null) {\n\t\t\t\treturn me.getPixelForOffset(time);\n\t\t\t}\n\t\t},\n\n\t\tgetPixelForTick: function(index) {\n\t\t\tvar ticks = this.getTicks();\n\t\t\treturn index >= 0 && index < ticks.length ?\n\t\t\t\tthis.getPixelForOffset(ticks[index].value) :\n\t\t\t\tnull;\n\t\t},\n\n\t\tgetValueForPixel: function(pixel) {\n\t\t\tvar me = this;\n\t\t\tvar size = me._horizontal ? me.width : me.height;\n\t\t\tvar start = me._horizontal ? me.left : me.top;\n\t\t\tvar pos = (size ? 
(pixel - start) / size : 0) * (me._offsets.left + 1 + me._offsets.left) - me._offsets.right;\n\t\t\tvar time = interpolate(me._table, 'pos', pos, 'time');\n\n\t\t\treturn moment(time);\n\t\t},\n\n\t\t/**\n\t\t * Crude approximation of what the label width might be\n\t\t * @private\n\t\t */\n\t\tgetLabelWidth: function(label) {\n\t\t\tvar me = this;\n\t\t\tvar ticksOpts = me.options.ticks;\n\t\t\tvar tickLabelWidth = me.ctx.measureText(label).width;\n\t\t\tvar angle = helpers.toRadians(ticksOpts.maxRotation);\n\t\t\tvar cosRotation = Math.cos(angle);\n\t\t\tvar sinRotation = Math.sin(angle);\n\t\t\tvar tickFontSize = helpers.valueOrDefault(ticksOpts.fontSize, defaults.global.defaultFontSize);\n\n\t\t\treturn (tickLabelWidth * cosRotation) + (tickFontSize * sinRotation);\n\t\t},\n\n\t\t/**\n\t\t * @private\n\t\t */\n\t\tgetLabelCapacity: function(exampleTime) {\n\t\t\tvar me = this;\n\n\t\t\tvar formatOverride = me.options.time.displayFormats.millisecond;\t// Pick the longest format for guestimation\n\n\t\t\tvar exampleLabel = me.tickFormatFunction(moment(exampleTime), 0, [], formatOverride);\n\t\t\tvar tickLabelWidth = me.getLabelWidth(exampleLabel);\n\t\t\tvar innerWidth = me.isHorizontal() ? me.width : me.height;\n\n\t\t\tvar capacity = Math.floor(innerWidth / tickLabelWidth);\n\t\t\treturn capacity > 0 ? capacity : 1;\n\t\t}\n\t});\n\n\tChart.scaleService.registerScaleType('time', TimeScale, defaultConfig);\n};\n\n},{\"25\":25,\"45\":45,\"6\":6}]},{},[7])(7)\n});"
  },
  {
    "path": "web_gui/gui_v3/js/bootstrap-slider.js",
    "content": "/*! =======================================================\n                      VERSION  9.7.1              \n========================================================= */\n\"use strict\";\n\nvar _typeof = typeof Symbol === \"function\" && typeof Symbol.iterator === \"symbol\" ? function (obj) { return typeof obj; } : function (obj) { return obj && typeof Symbol === \"function\" && obj.constructor === Symbol && obj !== Symbol.prototype ? \"symbol\" : typeof obj; };\n\n/*! =========================================================\n * bootstrap-slider.js\n *\n * Maintainers:\n *\t\tKyle Kemp\n *\t\t\t- Twitter: @seiyria\n *\t\t\t- Github:  seiyria\n *\t\tRohit Kalkur\n *\t\t\t- Twitter: @Rovolutionary\n *\t\t\t- Github:  rovolution\n *\n * =========================================================\n *\n * bootstrap-slider is released under the MIT License\n * Copyright (c) 2017 Kyle Kemp, Rohit Kalkur, and contributors\n *\n * Permission is hereby granted, free of charge, to any person\n * obtaining a copy of this software and associated documentation\n * files (the \"Software\"), to deal in the Software without\n * restriction, including without limitation the rights to use,\n * copy, modify, merge, publish, distribute, sublicense, and/or sell\n * copies of the Software, and to permit persons to whom the\n * Software is furnished to do so, subject to the following\n * conditions:\n *\n * The above copyright notice and this permission notice shall be\n * included in all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES\n * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\n * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR\n * OTHER DEALINGS IN THE SOFTWARE.\n *\n * ========================================================= */\n\n/**\n * Bridget makes jQuery widgets\n * v1.0.1\n * MIT license\n */\nvar windowIsDefined = (typeof window === \"undefined\" ? \"undefined\" : _typeof(window)) === \"object\";\n\n(function (factory) {\n\tif (typeof define === \"function\" && define.amd) {\n\t\tdefine([\"jquery\"], factory);\n\t} else if ((typeof module === \"undefined\" ? \"undefined\" : _typeof(module)) === \"object\" && module.exports) {\n\t\tvar jQuery;\n\t\ttry {\n\t\t\tjQuery = require(\"jquery\");\n\t\t} catch (err) {\n\t\t\tjQuery = null;\n\t\t}\n\t\tmodule.exports = factory(jQuery);\n\t} else if (window) {\n\t\twindow.Slider = factory(window.jQuery);\n\t}\n})(function ($) {\n\t// Constants\n\tvar NAMESPACE_MAIN = 'slider';\n\tvar NAMESPACE_ALTERNATE = 'bootstrapSlider';\n\n\t// Polyfill console methods\n\tif (windowIsDefined && !window.console) {\n\t\twindow.console = {};\n\t}\n\tif (windowIsDefined && !window.console.log) {\n\t\twindow.console.log = function () {};\n\t}\n\tif (windowIsDefined && !window.console.warn) {\n\t\twindow.console.warn = function () {};\n\t}\n\n\t// Reference to Slider constructor\n\tvar Slider;\n\n\t(function ($) {\n\n\t\t'use strict';\n\n\t\t// -------------------------- utils -------------------------- //\n\n\t\tvar slice = Array.prototype.slice;\n\n\t\tfunction noop() {}\n\n\t\t// -------------------------- definition -------------------------- //\n\n\t\tfunction defineBridget($) {\n\n\t\t\t// bail if no jQuery\n\t\t\tif (!$) {\n\t\t\t\treturn;\n\t\t\t}\n\n\t\t\t// -------------------------- addOptionMethod -------------------------- //\n\n\t\t\t/**\n    * adds option method -> $().plugin('option', {...})\n    * @param 
{Function} PluginClass - constructor class\n    */\n\t\t\tfunction addOptionMethod(PluginClass) {\n\t\t\t\t// don't overwrite original option method\n\t\t\t\tif (PluginClass.prototype.option) {\n\t\t\t\t\treturn;\n\t\t\t\t}\n\n\t\t\t\t// option setter\n\t\t\t\tPluginClass.prototype.option = function (opts) {\n\t\t\t\t\t// bail out if not an object\n\t\t\t\t\tif (!$.isPlainObject(opts)) {\n\t\t\t\t\t\treturn;\n\t\t\t\t\t}\n\t\t\t\t\tthis.options = $.extend(true, this.options, opts);\n\t\t\t\t};\n\t\t\t}\n\n\t\t\t// -------------------------- plugin bridge -------------------------- //\n\n\t\t\t// helper function for logging errors\n\t\t\t// $.error breaks jQuery chaining\n\t\t\tvar logError = typeof console === 'undefined' ? noop : function (message) {\n\t\t\t\tconsole.error(message);\n\t\t\t};\n\n\t\t\t/**\n    * jQuery plugin bridge, access methods like $elem.plugin('method')\n    * @param {String} namespace - plugin name\n    * @param {Function} PluginClass - constructor class\n    */\n\t\t\tfunction bridge(namespace, PluginClass) {\n\t\t\t\t// add to jQuery fn namespace\n\t\t\t\t$.fn[namespace] = function (options) {\n\t\t\t\t\tif (typeof options === 'string') {\n\t\t\t\t\t\t// call plugin method when first argument is a string\n\t\t\t\t\t\t// get arguments for method\n\t\t\t\t\t\tvar args = slice.call(arguments, 1);\n\n\t\t\t\t\t\tfor (var i = 0, len = this.length; i < len; i++) {\n\t\t\t\t\t\t\tvar elem = this[i];\n\t\t\t\t\t\t\tvar instance = $.data(elem, namespace);\n\t\t\t\t\t\t\tif (!instance) {\n\t\t\t\t\t\t\t\tlogError(\"cannot call methods on \" + namespace + \" prior to initialization; \" + \"attempted to call '\" + options + \"'\");\n\t\t\t\t\t\t\t\tcontinue;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tif (!$.isFunction(instance[options]) || options.charAt(0) === '_') {\n\t\t\t\t\t\t\t\tlogError(\"no such method '\" + options + \"' for \" + namespace + \" instance\");\n\t\t\t\t\t\t\t\tcontinue;\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t// trigger method with 
arguments\n\t\t\t\t\t\t\tvar returnValue = instance[options].apply(instance, args);\n\n\t\t\t\t\t\t\t// break look and return first value if provided\n\t\t\t\t\t\t\tif (returnValue !== undefined && returnValue !== instance) {\n\t\t\t\t\t\t\t\treturn returnValue;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\t// return this if no return value\n\t\t\t\t\t\treturn this;\n\t\t\t\t\t} else {\n\t\t\t\t\t\tvar objects = this.map(function () {\n\t\t\t\t\t\t\tvar instance = $.data(this, namespace);\n\t\t\t\t\t\t\tif (instance) {\n\t\t\t\t\t\t\t\t// apply options & init\n\t\t\t\t\t\t\t\tinstance.option(options);\n\t\t\t\t\t\t\t\tinstance._init();\n\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\t// initialize new instance\n\t\t\t\t\t\t\t\tinstance = new PluginClass(this, options);\n\t\t\t\t\t\t\t\t$.data(this, namespace, instance);\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\treturn $(this);\n\t\t\t\t\t\t});\n\n\t\t\t\t\t\tif (!objects || objects.length > 1) {\n\t\t\t\t\t\t\treturn objects;\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\treturn objects[0];\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t};\n\t\t\t}\n\n\t\t\t// -------------------------- bridget -------------------------- //\n\n\t\t\t/**\n    * converts a Prototypical class into a proper jQuery plugin\n    *   the class must have a ._init method\n    * @param {String} namespace - plugin name, used in $().pluginName\n    * @param {Function} PluginClass - constructor class\n    */\n\t\t\t$.bridget = function (namespace, PluginClass) {\n\t\t\t\taddOptionMethod(PluginClass);\n\t\t\t\tbridge(namespace, PluginClass);\n\t\t\t};\n\n\t\t\treturn $.bridget;\n\t\t}\n\n\t\t// get jquery from browser global\n\t\tdefineBridget($);\n\t})($);\n\n\t/*************************************************\n \t\t\tBOOTSTRAP-SLIDER SOURCE CODE\n \t**************************************************/\n\n\t(function ($) {\n\n\t\tvar ErrorMsgs = {\n\t\t\tformatInvalidInputErrorMsg: function formatInvalidInputErrorMsg(input) {\n\t\t\t\treturn \"Invalid input value '\" + input + \"' 
passed in\";\n\t\t\t},\n\t\t\tcallingContextNotSliderInstance: \"Calling context element does not have instance of Slider bound to it. Check your code to make sure the JQuery object returned from the call to the slider() initializer is calling the method\"\n\t\t};\n\n\t\tvar SliderScale = {\n\t\t\tlinear: {\n\t\t\t\ttoValue: function toValue(percentage) {\n\t\t\t\t\tvar rawValue = percentage / 100 * (this.options.max - this.options.min);\n\t\t\t\t\tvar shouldAdjustWithBase = true;\n\t\t\t\t\tif (this.options.ticks_positions.length > 0) {\n\t\t\t\t\t\tvar minv,\n\t\t\t\t\t\t    maxv,\n\t\t\t\t\t\t    minp,\n\t\t\t\t\t\t    maxp = 0;\n\t\t\t\t\t\tfor (var i = 1; i < this.options.ticks_positions.length; i++) {\n\t\t\t\t\t\t\tif (percentage <= this.options.ticks_positions[i]) {\n\t\t\t\t\t\t\t\tminv = this.options.ticks[i - 1];\n\t\t\t\t\t\t\t\tminp = this.options.ticks_positions[i - 1];\n\t\t\t\t\t\t\t\tmaxv = this.options.ticks[i];\n\t\t\t\t\t\t\t\tmaxp = this.options.ticks_positions[i];\n\n\t\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\tvar partialPercentage = (percentage - minp) / (maxp - minp);\n\t\t\t\t\t\trawValue = minv + partialPercentage * (maxv - minv);\n\t\t\t\t\t\tshouldAdjustWithBase = false;\n\t\t\t\t\t}\n\n\t\t\t\t\tvar adjustment = shouldAdjustWithBase ? 
this.options.min : 0;\n\t\t\t\t\tvar value = adjustment + Math.round(rawValue / this.options.step) * this.options.step;\n\t\t\t\t\tif (value < this.options.min) {\n\t\t\t\t\t\treturn this.options.min;\n\t\t\t\t\t} else if (value > this.options.max) {\n\t\t\t\t\t\treturn this.options.max;\n\t\t\t\t\t} else {\n\t\t\t\t\t\treturn value;\n\t\t\t\t\t}\n\t\t\t\t},\n\t\t\t\ttoPercentage: function toPercentage(value) {\n\t\t\t\t\tif (this.options.max === this.options.min) {\n\t\t\t\t\t\treturn 0;\n\t\t\t\t\t}\n\n\t\t\t\t\tif (this.options.ticks_positions.length > 0) {\n\t\t\t\t\t\tvar minv,\n\t\t\t\t\t\t    maxv,\n\t\t\t\t\t\t    minp,\n\t\t\t\t\t\t    maxp = 0;\n\t\t\t\t\t\tfor (var i = 0; i < this.options.ticks.length; i++) {\n\t\t\t\t\t\t\tif (value <= this.options.ticks[i]) {\n\t\t\t\t\t\t\t\tminv = i > 0 ? this.options.ticks[i - 1] : 0;\n\t\t\t\t\t\t\t\tminp = i > 0 ? this.options.ticks_positions[i - 1] : 0;\n\t\t\t\t\t\t\t\tmaxv = this.options.ticks[i];\n\t\t\t\t\t\t\t\tmaxp = this.options.ticks_positions[i];\n\n\t\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif (i > 0) {\n\t\t\t\t\t\t\tvar partialPercentage = (value - minv) / (maxv - minv);\n\t\t\t\t\t\t\treturn minp + partialPercentage * (maxp - minp);\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\treturn 100 * (value - this.options.min) / (this.options.max - this.options.min);\n\t\t\t\t}\n\t\t\t},\n\n\t\t\tlogarithmic: {\n\t\t\t\t/* Based on http://stackoverflow.com/questions/846221/logarithmic-slider */\n\t\t\t\ttoValue: function toValue(percentage) {\n\t\t\t\t\tvar min = this.options.min === 0 ? 0 : Math.log(this.options.min);\n\t\t\t\t\tvar max = Math.log(this.options.max);\n\t\t\t\t\tvar value = Math.exp(min + (max - min) * percentage / 100);\n\t\t\t\t\tvalue = this.options.min + Math.round((value - this.options.min) / this.options.step) * this.options.step;\n\t\t\t\t\t/* Rounding to the nearest step could exceed the min or\n      * max, so clip to those values. 
*/\n\t\t\t\t\tif (value < this.options.min) {\n\t\t\t\t\t\treturn this.options.min;\n\t\t\t\t\t} else if (value > this.options.max) {\n\t\t\t\t\t\treturn this.options.max;\n\t\t\t\t\t} else {\n\t\t\t\t\t\treturn value;\n\t\t\t\t\t}\n\t\t\t\t},\n\t\t\t\ttoPercentage: function toPercentage(value) {\n\t\t\t\t\tif (this.options.max === this.options.min) {\n\t\t\t\t\t\treturn 0;\n\t\t\t\t\t} else {\n\t\t\t\t\t\tvar max = Math.log(this.options.max);\n\t\t\t\t\t\tvar min = this.options.min === 0 ? 0 : Math.log(this.options.min);\n\t\t\t\t\t\tvar v = value === 0 ? 0 : Math.log(value);\n\t\t\t\t\t\treturn 100 * (v - min) / (max - min);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t};\n\n\t\t/*************************************************\n  \t\t\t\t\t\tCONSTRUCTOR\n  \t**************************************************/\n\t\tSlider = function Slider(element, options) {\n\t\t\tcreateNewSlider.call(this, element, options);\n\t\t\treturn this;\n\t\t};\n\n\t\tfunction createNewSlider(element, options) {\n\n\t\t\t/*\n   \tThe internal state object is used to store data about the current 'state' of slider.\n   \tThis includes values such as the `value`, `enabled`, etc...\n   */\n\t\t\tthis._state = {\n\t\t\t\tvalue: null,\n\t\t\t\tenabled: null,\n\t\t\t\toffset: null,\n\t\t\t\tsize: null,\n\t\t\t\tpercentage: null,\n\t\t\t\tinDrag: false,\n\t\t\t\tover: false\n\t\t\t};\n\n\t\t\t// The objects used to store the reference to the tick methods if ticks_tooltip is on\n\t\t\tthis.ticksCallbackMap = {};\n\t\t\tthis.handleCallbackMap = {};\n\n\t\t\tif (typeof element === \"string\") {\n\t\t\t\tthis.element = document.querySelector(element);\n\t\t\t} else if (element instanceof HTMLElement) {\n\t\t\t\tthis.element = element;\n\t\t\t}\n\n\t\t\t/*************************************************\n   \t\t\t\t\tProcess Options\n   \t**************************************************/\n\t\t\toptions = options ? 
options : {};\n\t\t\tvar optionTypes = Object.keys(this.defaultOptions);\n\n\t\t\tfor (var i = 0; i < optionTypes.length; i++) {\n\t\t\t\tvar optName = optionTypes[i];\n\n\t\t\t\t// First check if an option was passed in via the constructor\n\t\t\t\tvar val = options[optName];\n\t\t\t\t// If no data attrib, then check data atrributes\n\t\t\t\tval = typeof val !== 'undefined' ? val : getDataAttrib(this.element, optName);\n\t\t\t\t// Finally, if nothing was specified, use the defaults\n\t\t\t\tval = val !== null ? val : this.defaultOptions[optName];\n\n\t\t\t\t// Set all options on the instance of the Slider\n\t\t\t\tif (!this.options) {\n\t\t\t\t\tthis.options = {};\n\t\t\t\t}\n\t\t\t\tthis.options[optName] = val;\n\t\t\t}\n\n\t\t\t// Check options.rtl\n\t\t\tif (this.options.rtl === 'auto') {\n\t\t\t\tthis.options.rtl = window.getComputedStyle(this.element).direction === 'rtl';\n\t\t\t}\n\n\t\t\t/*\n   \tValidate `tooltip_position` against 'orientation`\n   \t- if `tooltip_position` is incompatible with orientation, swith it to a default compatible with specified `orientation`\n   \t\t-- default for \"vertical\" -> \"right\", \"left\" if rtl\n   \t\t-- default for \"horizontal\" -> \"top\"\n   */\n\t\t\tif (this.options.orientation === \"vertical\" && (this.options.tooltip_position === \"top\" || this.options.tooltip_position === \"bottom\")) {\n\t\t\t\tif (this.options.rtl) {\n\t\t\t\t\tthis.options.tooltip_position = \"left\";\n\t\t\t\t} else {\n\t\t\t\t\tthis.options.tooltip_position = \"right\";\n\t\t\t\t}\n\t\t\t} else if (this.options.orientation === \"horizontal\" && (this.options.tooltip_position === \"left\" || this.options.tooltip_position === \"right\")) {\n\n\t\t\t\tthis.options.tooltip_position = \"top\";\n\t\t\t}\n\n\t\t\tfunction getDataAttrib(element, optName) {\n\t\t\t\tvar dataName = \"data-slider-\" + optName.replace(/_/g, '-');\n\t\t\t\tvar dataValString = element.getAttribute(dataName);\n\n\t\t\t\ttry {\n\t\t\t\t\treturn 
JSON.parse(dataValString);\n\t\t\t\t} catch (err) {\n\t\t\t\t\treturn dataValString;\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t/*************************************************\n   \t\t\t\t\tCreate Markup\n   \t**************************************************/\n\n\t\t\tvar origWidth = this.element.style.width;\n\t\t\tvar updateSlider = false;\n\t\t\tvar parent = this.element.parentNode;\n\t\t\tvar sliderTrackSelection;\n\t\t\tvar sliderTrackLow, sliderTrackHigh;\n\t\t\tvar sliderMinHandle;\n\t\t\tvar sliderMaxHandle;\n\n\t\t\tif (this.sliderElem) {\n\t\t\t\tupdateSlider = true;\n\t\t\t} else {\n\t\t\t\t/* Create elements needed for slider */\n\t\t\t\tthis.sliderElem = document.createElement(\"div\");\n\t\t\t\tthis.sliderElem.className = \"slider\";\n\n\t\t\t\t/* Create slider track elements */\n\t\t\t\tvar sliderTrack = document.createElement(\"div\");\n\t\t\t\tsliderTrack.className = \"slider-track\";\n\n\t\t\t\tsliderTrackLow = document.createElement(\"div\");\n\t\t\t\tsliderTrackLow.className = \"slider-track-low\";\n\n\t\t\t\tsliderTrackSelection = document.createElement(\"div\");\n\t\t\t\tsliderTrackSelection.className = \"slider-selection\";\n\n\t\t\t\tsliderTrackHigh = document.createElement(\"div\");\n\t\t\t\tsliderTrackHigh.className = \"slider-track-high\";\n\n\t\t\t\tsliderMinHandle = document.createElement(\"div\");\n\t\t\t\tsliderMinHandle.className = \"slider-handle min-slider-handle\";\n\t\t\t\tsliderMinHandle.setAttribute('role', 'slider');\n\t\t\t\tsliderMinHandle.setAttribute('aria-valuemin', this.options.min);\n\t\t\t\tsliderMinHandle.setAttribute('aria-valuemax', this.options.max);\n\n\t\t\t\tsliderMaxHandle = document.createElement(\"div\");\n\t\t\t\tsliderMaxHandle.className = \"slider-handle max-slider-handle\";\n\t\t\t\tsliderMaxHandle.setAttribute('role', 'slider');\n\t\t\t\tsliderMaxHandle.setAttribute('aria-valuemin', this.options.min);\n\t\t\t\tsliderMaxHandle.setAttribute('aria-valuemax', 
this.options.max);\n\n\t\t\t\tsliderTrack.appendChild(sliderTrackLow);\n\t\t\t\tsliderTrack.appendChild(sliderTrackSelection);\n\t\t\t\tsliderTrack.appendChild(sliderTrackHigh);\n\n\t\t\t\t/* Create highlight range elements */\n\t\t\t\tthis.rangeHighlightElements = [];\n\t\t\t\tif (Array.isArray(this.options.rangeHighlights) && this.options.rangeHighlights.length > 0) {\n\t\t\t\t\tfor (var j = 0; j < this.options.rangeHighlights.length; j++) {\n\n\t\t\t\t\t\tvar rangeHighlightElement = document.createElement(\"div\");\n\t\t\t\t\t\trangeHighlightElement.className = \"slider-rangeHighlight slider-selection\";\n\n\t\t\t\t\t\tthis.rangeHighlightElements.push(rangeHighlightElement);\n\t\t\t\t\t\tsliderTrack.appendChild(rangeHighlightElement);\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t/* Add aria-labelledby to handle's */\n\t\t\t\tvar isLabelledbyArray = Array.isArray(this.options.labelledby);\n\t\t\t\tif (isLabelledbyArray && this.options.labelledby[0]) {\n\t\t\t\t\tsliderMinHandle.setAttribute('aria-labelledby', this.options.labelledby[0]);\n\t\t\t\t}\n\t\t\t\tif (isLabelledbyArray && this.options.labelledby[1]) {\n\t\t\t\t\tsliderMaxHandle.setAttribute('aria-labelledby', this.options.labelledby[1]);\n\t\t\t\t}\n\t\t\t\tif (!isLabelledbyArray && this.options.labelledby) {\n\t\t\t\t\tsliderMinHandle.setAttribute('aria-labelledby', this.options.labelledby);\n\t\t\t\t\tsliderMaxHandle.setAttribute('aria-labelledby', this.options.labelledby);\n\t\t\t\t}\n\n\t\t\t\t/* Create ticks */\n\t\t\t\tthis.ticks = [];\n\t\t\t\tif (Array.isArray(this.options.ticks) && this.options.ticks.length > 0) {\n\t\t\t\t\tthis.ticksContainer = document.createElement('div');\n\t\t\t\t\tthis.ticksContainer.className = 'slider-tick-container';\n\n\t\t\t\t\tfor (i = 0; i < this.options.ticks.length; i++) {\n\t\t\t\t\t\tvar tick = document.createElement('div');\n\t\t\t\t\t\ttick.className = 'slider-tick';\n\t\t\t\t\t\tif (this.options.ticks_tooltip) {\n\t\t\t\t\t\t\tvar tickListenerReference = 
this._addTickListener();\n\t\t\t\t\t\t\tvar enterCallback = tickListenerReference.addMouseEnter(this, tick, i);\n\t\t\t\t\t\t\tvar leaveCallback = tickListenerReference.addMouseLeave(this, tick);\n\n\t\t\t\t\t\t\tthis.ticksCallbackMap[i] = {\n\t\t\t\t\t\t\t\tmouseEnter: enterCallback,\n\t\t\t\t\t\t\t\tmouseLeave: leaveCallback\n\t\t\t\t\t\t\t};\n\t\t\t\t\t\t}\n\t\t\t\t\t\tthis.ticks.push(tick);\n\t\t\t\t\t\tthis.ticksContainer.appendChild(tick);\n\t\t\t\t\t}\n\n\t\t\t\t\tsliderTrackSelection.className += \" tick-slider-selection\";\n\t\t\t\t}\n\n\t\t\t\tthis.tickLabels = [];\n\t\t\t\tif (Array.isArray(this.options.ticks_labels) && this.options.ticks_labels.length > 0) {\n\t\t\t\t\tthis.tickLabelContainer = document.createElement('div');\n\t\t\t\t\tthis.tickLabelContainer.className = 'slider-tick-label-container';\n\n\t\t\t\t\tfor (i = 0; i < this.options.ticks_labels.length; i++) {\n\t\t\t\t\t\tvar label = document.createElement('div');\n\t\t\t\t\t\tvar noTickPositionsSpecified = this.options.ticks_positions.length === 0;\n\t\t\t\t\t\tvar tickLabelsIndex = this.options.reversed && noTickPositionsSpecified ? 
this.options.ticks_labels.length - (i + 1) : i;\n\t\t\t\t\t\tlabel.className = 'slider-tick-label';\n\t\t\t\t\t\tlabel.innerHTML = this.options.ticks_labels[tickLabelsIndex];\n\n\t\t\t\t\t\tthis.tickLabels.push(label);\n\t\t\t\t\t\tthis.tickLabelContainer.appendChild(label);\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tvar createAndAppendTooltipSubElements = function createAndAppendTooltipSubElements(tooltipElem) {\n\t\t\t\t\tvar arrow = document.createElement(\"div\");\n\t\t\t\t\tarrow.className = \"tooltip-arrow\";\n\n\t\t\t\t\tvar inner = document.createElement(\"div\");\n\t\t\t\t\tinner.className = \"tooltip-inner\";\n\n\t\t\t\t\ttooltipElem.appendChild(arrow);\n\t\t\t\t\ttooltipElem.appendChild(inner);\n\t\t\t\t};\n\n\t\t\t\t/* Create tooltip elements */\n\t\t\t\tvar sliderTooltip = document.createElement(\"div\");\n\t\t\t\tsliderTooltip.className = \"tooltip tooltip-main\";\n\t\t\t\tsliderTooltip.setAttribute('role', 'presentation');\n\t\t\t\tcreateAndAppendTooltipSubElements(sliderTooltip);\n\n\t\t\t\tvar sliderTooltipMin = document.createElement(\"div\");\n\t\t\t\tsliderTooltipMin.className = \"tooltip tooltip-min\";\n\t\t\t\tsliderTooltipMin.setAttribute('role', 'presentation');\n\t\t\t\tcreateAndAppendTooltipSubElements(sliderTooltipMin);\n\n\t\t\t\tvar sliderTooltipMax = document.createElement(\"div\");\n\t\t\t\tsliderTooltipMax.className = \"tooltip tooltip-max\";\n\t\t\t\tsliderTooltipMax.setAttribute('role', 'presentation');\n\t\t\t\tcreateAndAppendTooltipSubElements(sliderTooltipMax);\n\n\t\t\t\t/* Append components to sliderElem */\n\t\t\t\tthis.sliderElem.appendChild(sliderTrack);\n\t\t\t\tthis.sliderElem.appendChild(sliderTooltip);\n\t\t\t\tthis.sliderElem.appendChild(sliderTooltipMin);\n\t\t\t\tthis.sliderElem.appendChild(sliderTooltipMax);\n\n\t\t\t\tif (this.tickLabelContainer) {\n\t\t\t\t\tthis.sliderElem.appendChild(this.tickLabelContainer);\n\t\t\t\t}\n\t\t\t\tif (this.ticksContainer) 
{\n\t\t\t\t\tthis.sliderElem.appendChild(this.ticksContainer);\n\t\t\t\t}\n\n\t\t\t\tthis.sliderElem.appendChild(sliderMinHandle);\n\t\t\t\tthis.sliderElem.appendChild(sliderMaxHandle);\n\n\t\t\t\t/* Append slider element to parent container, right before the original <input> element */\n\t\t\t\tparent.insertBefore(this.sliderElem, this.element);\n\n\t\t\t\t/* Hide original <input> element */\n\t\t\t\tthis.element.style.display = \"none\";\n\t\t\t}\n\t\t\t/* If JQuery exists, cache JQ references */\n\t\t\tif ($) {\n\t\t\t\tthis.$element = $(this.element);\n\t\t\t\tthis.$sliderElem = $(this.sliderElem);\n\t\t\t}\n\n\t\t\t/*************************************************\n   \t\t\t\t\t\tSetup\n   \t**************************************************/\n\t\t\tthis.eventToCallbackMap = {};\n\t\t\tthis.sliderElem.id = this.options.id;\n\n\t\t\tthis.touchCapable = 'ontouchstart' in window || window.DocumentTouch && document instanceof window.DocumentTouch;\n\n\t\t\tthis.touchX = 0;\n\t\t\tthis.touchY = 0;\n\n\t\t\tthis.tooltip = this.sliderElem.querySelector('.tooltip-main');\n\t\t\tthis.tooltipInner = this.tooltip.querySelector('.tooltip-inner');\n\n\t\t\tthis.tooltip_min = this.sliderElem.querySelector('.tooltip-min');\n\t\t\tthis.tooltipInner_min = this.tooltip_min.querySelector('.tooltip-inner');\n\n\t\t\tthis.tooltip_max = this.sliderElem.querySelector('.tooltip-max');\n\t\t\tthis.tooltipInner_max = this.tooltip_max.querySelector('.tooltip-inner');\n\n\t\t\tif (SliderScale[this.options.scale]) {\n\t\t\t\tthis.options.scale = SliderScale[this.options.scale];\n\t\t\t}\n\n\t\t\tif (updateSlider === true) {\n\t\t\t\t// Reset classes\n\t\t\t\tthis._removeClass(this.sliderElem, 'slider-horizontal');\n\t\t\t\tthis._removeClass(this.sliderElem, 'slider-vertical');\n\t\t\t\tthis._removeClass(this.sliderElem, 'slider-rtl');\n\t\t\t\tthis._removeClass(this.tooltip, 'hide');\n\t\t\t\tthis._removeClass(this.tooltip_min, 'hide');\n\t\t\t\tthis._removeClass(this.tooltip_max, 
'hide');\n\n\t\t\t\t// Undo existing inline styles for track\n\t\t\t\t[\"left\", \"right\", \"top\", \"width\", \"height\"].forEach(function (prop) {\n\t\t\t\t\tthis._removeProperty(this.trackLow, prop);\n\t\t\t\t\tthis._removeProperty(this.trackSelection, prop);\n\t\t\t\t\tthis._removeProperty(this.trackHigh, prop);\n\t\t\t\t}, this);\n\n\t\t\t\t// Undo inline styles on handles\n\t\t\t\t[this.handle1, this.handle2].forEach(function (handle) {\n\t\t\t\t\tthis._removeProperty(handle, 'left');\n\t\t\t\t\tthis._removeProperty(handle, 'right');\n\t\t\t\t\tthis._removeProperty(handle, 'top');\n\t\t\t\t}, this);\n\n\t\t\t\t// Undo inline styles and classes on tooltips\n\t\t\t\t[this.tooltip, this.tooltip_min, this.tooltip_max].forEach(function (tooltip) {\n\t\t\t\t\tthis._removeProperty(tooltip, 'left');\n\t\t\t\t\tthis._removeProperty(tooltip, 'right');\n\t\t\t\t\tthis._removeProperty(tooltip, 'top');\n\t\t\t\t\tthis._removeProperty(tooltip, 'margin-left');\n\t\t\t\t\tthis._removeProperty(tooltip, 'margin-right');\n\t\t\t\t\tthis._removeProperty(tooltip, 'margin-top');\n\n\t\t\t\t\tthis._removeClass(tooltip, 'right');\n\t\t\t\t\tthis._removeClass(tooltip, 'left');\n\t\t\t\t\tthis._removeClass(tooltip, 'top');\n\t\t\t\t}, this);\n\t\t\t}\n\n\t\t\tif (this.options.orientation === 'vertical') {\n\t\t\t\tthis._addClass(this.sliderElem, 'slider-vertical');\n\t\t\t\tthis.stylePos = 'top';\n\t\t\t\tthis.mousePos = 'pageY';\n\t\t\t\tthis.sizePos = 'offsetHeight';\n\t\t\t} else {\n\t\t\t\tthis._addClass(this.sliderElem, 'slider-horizontal');\n\t\t\t\tthis.sliderElem.style.width = origWidth;\n\t\t\t\tthis.options.orientation = 'horizontal';\n\t\t\t\tif (this.options.rtl) {\n\t\t\t\t\tthis.stylePos = 'right';\n\t\t\t\t} else {\n\t\t\t\t\tthis.stylePos = 'left';\n\t\t\t\t}\n\t\t\t\tthis.mousePos = 'pageX';\n\t\t\t\tthis.sizePos = 'offsetWidth';\n\t\t\t}\n\t\t\t// specific rtl class\n\t\t\tif (this.options.rtl) {\n\t\t\t\tthis._addClass(this.sliderElem, 
'slider-rtl');\n\t\t\t}\n\t\t\tthis._setTooltipPosition();\n\t\t\t/* In case ticks are specified, overwrite the min and max bounds */\n\t\t\tif (Array.isArray(this.options.ticks) && this.options.ticks.length > 0) {\n\t\t\t\tthis.options.max = Math.max.apply(Math, this.options.ticks);\n\t\t\t\tthis.options.min = Math.min.apply(Math, this.options.ticks);\n\t\t\t}\n\n\t\t\tif (Array.isArray(this.options.value)) {\n\t\t\t\tthis.options.range = true;\n\t\t\t\tthis._state.value = this.options.value;\n\t\t\t} else if (this.options.range) {\n\t\t\t\t// User wants a range, but value is not an array\n\t\t\t\tthis._state.value = [this.options.value, this.options.max];\n\t\t\t} else {\n\t\t\t\tthis._state.value = this.options.value;\n\t\t\t}\n\n\t\t\tthis.trackLow = sliderTrackLow || this.trackLow;\n\t\t\tthis.trackSelection = sliderTrackSelection || this.trackSelection;\n\t\t\tthis.trackHigh = sliderTrackHigh || this.trackHigh;\n\n\t\t\tif (this.options.selection === 'none') {\n\t\t\t\tthis._addClass(this.trackLow, 'hide');\n\t\t\t\tthis._addClass(this.trackSelection, 'hide');\n\t\t\t\tthis._addClass(this.trackHigh, 'hide');\n\t\t\t} else if (this.options.selection === 'after' || this.options.selection === 'before') {\n\t\t\t\tthis._removeClass(this.trackLow, 'hide');\n\t\t\t\tthis._removeClass(this.trackSelection, 'hide');\n\t\t\t\tthis._removeClass(this.trackHigh, 'hide');\n\t\t\t}\n\n\t\t\tthis.handle1 = sliderMinHandle || this.handle1;\n\t\t\tthis.handle2 = sliderMaxHandle || this.handle2;\n\n\t\t\tif (updateSlider === true) {\n\t\t\t\t// Reset classes\n\t\t\t\tthis._removeClass(this.handle1, 'round triangle');\n\t\t\t\tthis._removeClass(this.handle2, 'round triangle hide');\n\n\t\t\t\tfor (i = 0; i < this.ticks.length; i++) {\n\t\t\t\t\tthis._removeClass(this.ticks[i], 'round triangle hide');\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tvar availableHandleModifiers = ['round', 'triangle', 'custom'];\n\t\t\tvar isValidHandleType = availableHandleModifiers.indexOf(this.options.handle) !== 
-1;\n\t\t\tif (isValidHandleType) {\n\t\t\t\tthis._addClass(this.handle1, this.options.handle);\n\t\t\t\tthis._addClass(this.handle2, this.options.handle);\n\n\t\t\t\tfor (i = 0; i < this.ticks.length; i++) {\n\t\t\t\t\tthis._addClass(this.ticks[i], this.options.handle);\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tthis._state.offset = this._offset(this.sliderElem);\n\t\t\tthis._state.size = this.sliderElem[this.sizePos];\n\t\t\tthis.setValue(this._state.value);\n\n\t\t\t/******************************************\n   \t\t\t\tBind Event Listeners\n   \t******************************************/\n\n\t\t\t// Bind keyboard handlers\n\t\t\tthis.handle1Keydown = this._keydown.bind(this, 0);\n\t\t\tthis.handle1.addEventListener(\"keydown\", this.handle1Keydown, false);\n\n\t\t\tthis.handle2Keydown = this._keydown.bind(this, 1);\n\t\t\tthis.handle2.addEventListener(\"keydown\", this.handle2Keydown, false);\n\n\t\t\tthis.mousedown = this._mousedown.bind(this);\n\t\t\tthis.touchstart = this._touchstart.bind(this);\n\t\t\tthis.touchmove = this._touchmove.bind(this);\n\n\t\t\tif (this.touchCapable) {\n\t\t\t\t// Test for passive event support\n\t\t\t\tvar supportsPassive = false;\n\t\t\t\ttry {\n\t\t\t\t\tvar opts = Object.defineProperty({}, 'passive', {\n\t\t\t\t\t\tget: function get() {\n\t\t\t\t\t\t\tsupportsPassive = true;\n\t\t\t\t\t\t}\n\t\t\t\t\t});\n\t\t\t\t\twindow.addEventListener(\"test\", null, opts);\n\t\t\t\t} catch (e) {}\n\t\t\t\t// Use our detect's results. passive applied if supported, capture will be false either way.\n\t\t\t\tvar eventOptions = supportsPassive ? 
{ passive: true } : false;\n\t\t\t\t// Bind touch handlers\n\t\t\t\tthis.sliderElem.addEventListener(\"touchstart\", this.touchstart, eventOptions);\n\t\t\t\tthis.sliderElem.addEventListener(\"touchmove\", this.touchmove, eventOptions);\n\t\t\t}\n\t\t\tthis.sliderElem.addEventListener(\"mousedown\", this.mousedown, false);\n\n\t\t\t// Bind window handlers\n\t\t\tthis.resize = this._resize.bind(this);\n\t\t\twindow.addEventListener(\"resize\", this.resize, false);\n\n\t\t\t// Bind tooltip-related handlers\n\t\t\tif (this.options.tooltip === 'hide') {\n\t\t\t\tthis._addClass(this.tooltip, 'hide');\n\t\t\t\tthis._addClass(this.tooltip_min, 'hide');\n\t\t\t\tthis._addClass(this.tooltip_max, 'hide');\n\t\t\t} else if (this.options.tooltip === 'always') {\n\t\t\t\tthis._showTooltip();\n\t\t\t\tthis._alwaysShowTooltip = true;\n\t\t\t} else {\n\t\t\t\tthis.showTooltip = this._showTooltip.bind(this);\n\t\t\t\tthis.hideTooltip = this._hideTooltip.bind(this);\n\n\t\t\t\tif (this.options.ticks_tooltip) {\n\t\t\t\t\tvar callbackHandle = this._addTickListener();\n\t\t\t\t\t//create handle1 listeners and store references in map\n\t\t\t\t\tvar mouseEnter = callbackHandle.addMouseEnter(this, this.handle1);\n\t\t\t\t\tvar mouseLeave = callbackHandle.addMouseLeave(this, this.handle1);\n\t\t\t\t\tthis.handleCallbackMap.handle1 = {\n\t\t\t\t\t\tmouseEnter: mouseEnter,\n\t\t\t\t\t\tmouseLeave: mouseLeave\n\t\t\t\t\t};\n\t\t\t\t\t//create handle2 listeners and store references in map\n\t\t\t\t\tmouseEnter = callbackHandle.addMouseEnter(this, this.handle2);\n\t\t\t\t\tmouseLeave = callbackHandle.addMouseLeave(this, this.handle2);\n\t\t\t\t\tthis.handleCallbackMap.handle2 = {\n\t\t\t\t\t\tmouseEnter: mouseEnter,\n\t\t\t\t\t\tmouseLeave: mouseLeave\n\t\t\t\t\t};\n\t\t\t\t} else {\n\t\t\t\t\tthis.sliderElem.addEventListener(\"mouseenter\", this.showTooltip, false);\n\t\t\t\t\tthis.sliderElem.addEventListener(\"mouseleave\", this.hideTooltip, 
false);\n\t\t\t\t}\n\n\t\t\t\tthis.handle1.addEventListener(\"focus\", this.showTooltip, false);\n\t\t\t\tthis.handle1.addEventListener(\"blur\", this.hideTooltip, false);\n\n\t\t\t\tthis.handle2.addEventListener(\"focus\", this.showTooltip, false);\n\t\t\t\tthis.handle2.addEventListener(\"blur\", this.hideTooltip, false);\n\t\t\t}\n\n\t\t\tif (this.options.enabled) {\n\t\t\t\tthis.enable();\n\t\t\t} else {\n\t\t\t\tthis.disable();\n\t\t\t}\n\t\t}\n\n\t\t/*************************************************\n  \t\t\t\tINSTANCE PROPERTIES/METHODS\n  \t- Any methods bound to the prototype are considered\n  part of the plugin's `public` interface\n  \t**************************************************/\n\t\tSlider.prototype = {\n\t\t\t_init: function _init() {}, // NOTE: Must exist to support bridget\n\n\t\t\tconstructor: Slider,\n\n\t\t\tdefaultOptions: {\n\t\t\t\tid: \"\",\n\t\t\t\tmin: 0,\n\t\t\t\tmax: 10,\n\t\t\t\tstep: 1,\n\t\t\t\tprecision: 0,\n\t\t\t\torientation: 'horizontal',\n\t\t\t\tvalue: 5,\n\t\t\t\trange: false,\n\t\t\t\tselection: 'before',\n\t\t\t\ttooltip: 'show',\n\t\t\t\ttooltip_split: false,\n\t\t\t\thandle: 'round',\n\t\t\t\treversed: false,\n\t\t\t\trtl: 'auto',\n\t\t\t\tenabled: true,\n\t\t\t\tformatter: function formatter(val) {\n\t\t\t\t\tif (Array.isArray(val)) {\n\t\t\t\t\t\treturn val[0] + \" : \" + val[1];\n\t\t\t\t\t} else {\n\t\t\t\t\t\treturn val;\n\t\t\t\t\t}\n\t\t\t\t},\n\t\t\t\tnatural_arrow_keys: false,\n\t\t\t\tticks: [],\n\t\t\t\tticks_positions: [],\n\t\t\t\tticks_labels: [],\n\t\t\t\tticks_snap_bounds: 0,\n\t\t\t\tticks_tooltip: false,\n\t\t\t\tscale: 'linear',\n\t\t\t\tfocus: false,\n\t\t\t\ttooltip_position: null,\n\t\t\t\tlabelledby: null,\n\t\t\t\trangeHighlights: []\n\t\t\t},\n\n\t\t\tgetElement: function getElement() {\n\t\t\t\treturn this.sliderElem;\n\t\t\t},\n\n\t\t\tgetValue: function getValue() {\n\t\t\t\tif (this.options.range) {\n\t\t\t\t\treturn this._state.value;\n\t\t\t\t} else {\n\t\t\t\t\treturn 
this._state.value[0];\n\t\t\t\t}\n\t\t\t},\n\n\t\t\tsetValue: function setValue(val, triggerSlideEvent, triggerChangeEvent) {\n\t\t\t\tif (!val) {\n\t\t\t\t\tval = 0;\n\t\t\t\t}\n\t\t\t\tvar oldValue = this.getValue();\n\t\t\t\tthis._state.value = this._validateInputValue(val);\n\t\t\t\tvar applyPrecision = this._applyPrecision.bind(this);\n\n\t\t\t\tif (this.options.range) {\n\t\t\t\t\tthis._state.value[0] = applyPrecision(this._state.value[0]);\n\t\t\t\t\tthis._state.value[1] = applyPrecision(this._state.value[1]);\n\n\t\t\t\t\tthis._state.value[0] = Math.max(this.options.min, Math.min(this.options.max, this._state.value[0]));\n\t\t\t\t\tthis._state.value[1] = Math.max(this.options.min, Math.min(this.options.max, this._state.value[1]));\n\t\t\t\t} else {\n\t\t\t\t\tthis._state.value = applyPrecision(this._state.value);\n\t\t\t\t\tthis._state.value = [Math.max(this.options.min, Math.min(this.options.max, this._state.value))];\n\t\t\t\t\tthis._addClass(this.handle2, 'hide');\n\t\t\t\t\tif (this.options.selection === 'after') {\n\t\t\t\t\t\tthis._state.value[1] = this.options.max;\n\t\t\t\t\t} else {\n\t\t\t\t\t\tthis._state.value[1] = this.options.min;\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif (this.options.max > this.options.min) {\n\t\t\t\t\tthis._state.percentage = [this._toPercentage(this._state.value[0]), this._toPercentage(this._state.value[1]), this.options.step * 100 / (this.options.max - this.options.min)];\n\t\t\t\t} else {\n\t\t\t\t\tthis._state.percentage = [0, 0, 100];\n\t\t\t\t}\n\n\t\t\t\tthis._layout();\n\t\t\t\tvar newValue = this.options.range ? 
this._state.value : this._state.value[0];\n\n\t\t\t\tthis._setDataVal(newValue);\n\t\t\t\tif (triggerSlideEvent === true) {\n\t\t\t\t\tthis._trigger('slide', newValue);\n\t\t\t\t}\n\t\t\t\tif (oldValue !== newValue && triggerChangeEvent === true) {\n\t\t\t\t\tthis._trigger('change', {\n\t\t\t\t\t\toldValue: oldValue,\n\t\t\t\t\t\tnewValue: newValue\n\t\t\t\t\t});\n\t\t\t\t}\n\n\t\t\t\treturn this;\n\t\t\t},\n\n\t\t\tdestroy: function destroy() {\n\t\t\t\t// Remove event handlers on slider elements\n\t\t\t\tthis._removeSliderEventHandlers();\n\n\t\t\t\t// Remove the slider from the DOM\n\t\t\t\tthis.sliderElem.parentNode.removeChild(this.sliderElem);\n\t\t\t\t/* Show original <input> element */\n\t\t\t\tthis.element.style.display = \"\";\n\n\t\t\t\t// Clear out custom event bindings\n\t\t\t\tthis._cleanUpEventCallbacksMap();\n\n\t\t\t\t// Remove data values\n\t\t\t\tthis.element.removeAttribute(\"data\");\n\n\t\t\t\t// Remove JQuery handlers/data\n\t\t\t\tif ($) {\n\t\t\t\t\tthis._unbindJQueryEventHandlers();\n\t\t\t\t\tthis.$element.removeData('slider');\n\t\t\t\t}\n\t\t\t},\n\n\t\t\tdisable: function disable() {\n\t\t\t\tthis._state.enabled = false;\n\t\t\t\tthis.handle1.removeAttribute(\"tabindex\");\n\t\t\t\tthis.handle2.removeAttribute(\"tabindex\");\n\t\t\t\tthis._addClass(this.sliderElem, 'slider-disabled');\n\t\t\t\tthis._trigger('slideDisabled');\n\n\t\t\t\treturn this;\n\t\t\t},\n\n\t\t\tenable: function enable() {\n\t\t\t\tthis._state.enabled = true;\n\t\t\t\tthis.handle1.setAttribute(\"tabindex\", 0);\n\t\t\t\tthis.handle2.setAttribute(\"tabindex\", 0);\n\t\t\t\tthis._removeClass(this.sliderElem, 'slider-disabled');\n\t\t\t\tthis._trigger('slideEnabled');\n\n\t\t\t\treturn this;\n\t\t\t},\n\n\t\t\ttoggle: function toggle() {\n\t\t\t\tif (this._state.enabled) {\n\t\t\t\t\tthis.disable();\n\t\t\t\t} else {\n\t\t\t\t\tthis.enable();\n\t\t\t\t}\n\t\t\t\treturn this;\n\t\t\t},\n\n\t\t\tisEnabled: function isEnabled() {\n\t\t\t\treturn 
this._state.enabled;\n\t\t\t},\n\n\t\t\ton: function on(evt, callback) {\n\t\t\t\tthis._bindNonQueryEventHandler(evt, callback);\n\t\t\t\treturn this;\n\t\t\t},\n\n\t\t\toff: function off(evt, callback) {\n\t\t\t\tif ($) {\n\t\t\t\t\tthis.$element.off(evt, callback);\n\t\t\t\t\tthis.$sliderElem.off(evt, callback);\n\t\t\t\t} else {\n\t\t\t\t\tthis._unbindNonQueryEventHandler(evt, callback);\n\t\t\t\t}\n\t\t\t},\n\n\t\t\tgetAttribute: function getAttribute(attribute) {\n\t\t\t\tif (attribute) {\n\t\t\t\t\treturn this.options[attribute];\n\t\t\t\t} else {\n\t\t\t\t\treturn this.options;\n\t\t\t\t}\n\t\t\t},\n\n\t\t\tsetAttribute: function setAttribute(attribute, value) {\n\t\t\t\tthis.options[attribute] = value;\n\t\t\t\treturn this;\n\t\t\t},\n\n\t\t\trefresh: function refresh() {\n\t\t\t\tthis._removeSliderEventHandlers();\n\t\t\t\tcreateNewSlider.call(this, this.element, this.options);\n\t\t\t\tif ($) {\n\t\t\t\t\t// Bind new instance of slider to the element\n\t\t\t\t\t$.data(this.element, 'slider', this);\n\t\t\t\t}\n\t\t\t\treturn this;\n\t\t\t},\n\n\t\t\trelayout: function relayout() {\n\t\t\t\tthis._resize();\n\t\t\t\tthis._layout();\n\t\t\t\treturn this;\n\t\t\t},\n\n\t\t\t/******************************+\n   \t\t\t\tHELPERS\n   \t- Any method that is not part of the public interface.\n   - Place it underneath this comment block and write its signature like so:\n   \t\t_fnName : function() {...}\n   \t********************************/\n\t\t\t_removeSliderEventHandlers: function _removeSliderEventHandlers() {\n\t\t\t\t// Remove keydown event listeners\n\t\t\t\tthis.handle1.removeEventListener(\"keydown\", this.handle1Keydown, false);\n\t\t\t\tthis.handle2.removeEventListener(\"keydown\", this.handle2Keydown, false);\n\n\t\t\t\t//remove the listeners from the ticks and handles if they had their own listeners\n\t\t\t\tif (this.options.ticks_tooltip) {\n\t\t\t\t\tvar ticks = this.ticksContainer.getElementsByClassName('slider-tick');\n\t\t\t\t\tfor (var i = 0; i 
< ticks.length; i++) {\n\t\t\t\t\t\tticks[i].removeEventListener('mouseenter', this.ticksCallbackMap[i].mouseEnter, false);\n\t\t\t\t\t\tticks[i].removeEventListener('mouseleave', this.ticksCallbackMap[i].mouseLeave, false);\n\t\t\t\t\t}\n\t\t\t\t\tthis.handle1.removeEventListener('mouseenter', this.handleCallbackMap.handle1.mouseEnter, false);\n\t\t\t\t\tthis.handle2.removeEventListener('mouseenter', this.handleCallbackMap.handle2.mouseEnter, false);\n\t\t\t\t\tthis.handle1.removeEventListener('mouseleave', this.handleCallbackMap.handle1.mouseLeave, false);\n\t\t\t\t\tthis.handle2.removeEventListener('mouseleave', this.handleCallbackMap.handle2.mouseLeave, false);\n\t\t\t\t}\n\n\t\t\t\tthis.handleCallbackMap = null;\n\t\t\t\tthis.ticksCallbackMap = null;\n\n\t\t\t\tif (this.showTooltip) {\n\t\t\t\t\tthis.handle1.removeEventListener(\"focus\", this.showTooltip, false);\n\t\t\t\t\tthis.handle2.removeEventListener(\"focus\", this.showTooltip, false);\n\t\t\t\t}\n\t\t\t\tif (this.hideTooltip) {\n\t\t\t\t\tthis.handle1.removeEventListener(\"blur\", this.hideTooltip, false);\n\t\t\t\t\tthis.handle2.removeEventListener(\"blur\", this.hideTooltip, false);\n\t\t\t\t}\n\n\t\t\t\t// Remove event listeners from sliderElem\n\t\t\t\tif (this.showTooltip) {\n\t\t\t\t\tthis.sliderElem.removeEventListener(\"mouseenter\", this.showTooltip, false);\n\t\t\t\t}\n\t\t\t\tif (this.hideTooltip) {\n\t\t\t\t\tthis.sliderElem.removeEventListener(\"mouseleave\", this.hideTooltip, false);\n\t\t\t\t}\n\t\t\t\tthis.sliderElem.removeEventListener(\"touchstart\", this.touchstart, false);\n\t\t\t\tthis.sliderElem.removeEventListener(\"touchmove\", this.touchmove, false);\n\t\t\t\tthis.sliderElem.removeEventListener(\"mousedown\", this.mousedown, false);\n\n\t\t\t\t// Remove window event listener\n\t\t\t\twindow.removeEventListener(\"resize\", this.resize, false);\n\t\t\t},\n\t\t\t_bindNonQueryEventHandler: function _bindNonQueryEventHandler(evt, callback) {\n\t\t\t\tif 
(this.eventToCallbackMap[evt] === undefined) {\n\t\t\t\t\tthis.eventToCallbackMap[evt] = [];\n\t\t\t\t}\n\t\t\t\tthis.eventToCallbackMap[evt].push(callback);\n\t\t\t},\n\t\t\t_unbindNonQueryEventHandler: function _unbindNonQueryEventHandler(evt, callback) {\n\t\t\t\tvar callbacks = this.eventToCallbackMap[evt];\n\t\t\t\tif (callbacks !== undefined) {\n\t\t\t\t\tfor (var i = 0; i < callbacks.length; i++) {\n\t\t\t\t\t\tif (callbacks[i] === callback) {\n\t\t\t\t\t\t\tcallbacks.splice(i, 1);\n\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t},\n\t\t\t_cleanUpEventCallbacksMap: function _cleanUpEventCallbacksMap() {\n\t\t\t\tvar eventNames = Object.keys(this.eventToCallbackMap);\n\t\t\t\tfor (var i = 0; i < eventNames.length; i++) {\n\t\t\t\t\tvar eventName = eventNames[i];\n\t\t\t\t\tdelete this.eventToCallbackMap[eventName];\n\t\t\t\t}\n\t\t\t},\n\t\t\t_showTooltip: function _showTooltip() {\n\t\t\t\tif (this.options.tooltip_split === false) {\n\t\t\t\t\tthis._addClass(this.tooltip, 'in');\n\t\t\t\t\tthis.tooltip_min.style.display = 'none';\n\t\t\t\t\tthis.tooltip_max.style.display = 'none';\n\t\t\t\t} else {\n\t\t\t\t\tthis._addClass(this.tooltip_min, 'in');\n\t\t\t\t\tthis._addClass(this.tooltip_max, 'in');\n\t\t\t\t\tthis.tooltip.style.display = 'none';\n\t\t\t\t}\n\t\t\t\tthis._state.over = true;\n\t\t\t},\n\t\t\t_hideTooltip: function _hideTooltip() {\n\t\t\t\tif (this._state.inDrag === false && this.alwaysShowTooltip !== true) {\n\t\t\t\t\tthis._removeClass(this.tooltip, 'in');\n\t\t\t\t\tthis._removeClass(this.tooltip_min, 'in');\n\t\t\t\t\tthis._removeClass(this.tooltip_max, 'in');\n\t\t\t\t}\n\t\t\t\tthis._state.over = false;\n\t\t\t},\n\t\t\t_setToolTipOnMouseOver: function _setToolTipOnMouseOver(tempState) {\n\t\t\t\tvar formattedTooltipVal = this.options.formatter(!tempState ? this._state.value[0] : tempState.value[0]);\n\t\t\t\tvar positionPercentages = !tempState ? 
getPositionPercentages(this._state, this.options.reversed) : getPositionPercentages(tempState, this.options.reversed);\n\t\t\t\tthis._setText(this.tooltipInner, formattedTooltipVal);\n\n\t\t\t\tthis.tooltip.style[this.stylePos] = positionPercentages[0] + \"%\";\n\t\t\t\tif (this.options.orientation === 'vertical') {\n\t\t\t\t\tthis._css(this.tooltip, \"margin-\" + this.stylePos, -this.tooltip.offsetHeight / 2 + \"px\");\n\t\t\t\t} else {\n\t\t\t\t\tthis._css(this.tooltip, \"margin-\" + this.stylePos, -this.tooltip.offsetWidth / 2 + \"px\");\n\t\t\t\t}\n\n\t\t\t\tfunction getPositionPercentages(state, reversed) {\n\t\t\t\t\tif (reversed) {\n\t\t\t\t\t\treturn [100 - state.percentage[0], this.options.range ? 100 - state.percentage[1] : state.percentage[1]];\n\t\t\t\t\t}\n\t\t\t\t\treturn [state.percentage[0], state.percentage[1]];\n\t\t\t\t}\n\t\t\t},\n\t\t\t_addTickListener: function _addTickListener() {\n\t\t\t\treturn {\n\t\t\t\t\taddMouseEnter: function addMouseEnter(reference, tick, index) {\n\t\t\t\t\t\tvar enter = function enter() {\n\t\t\t\t\t\t\tvar tempState = reference._state;\n\t\t\t\t\t\t\tvar idString = index >= 0 ? 
index : this.attributes['aria-valuenow'].value;\n\t\t\t\t\t\t\tvar hoverIndex = parseInt(idString, 10);\n\t\t\t\t\t\t\ttempState.value[0] = hoverIndex;\n\t\t\t\t\t\t\ttempState.percentage[0] = reference.options.ticks_positions[hoverIndex];\n\t\t\t\t\t\t\treference._setToolTipOnMouseOver(tempState);\n\t\t\t\t\t\t\treference._showTooltip();\n\t\t\t\t\t\t};\n\t\t\t\t\t\ttick.addEventListener(\"mouseenter\", enter, false);\n\t\t\t\t\t\treturn enter;\n\t\t\t\t\t},\n\t\t\t\t\taddMouseLeave: function addMouseLeave(reference, tick) {\n\t\t\t\t\t\tvar leave = function leave() {\n\t\t\t\t\t\t\treference._hideTooltip();\n\t\t\t\t\t\t};\n\t\t\t\t\t\ttick.addEventListener(\"mouseleave\", leave, false);\n\t\t\t\t\t\treturn leave;\n\t\t\t\t\t}\n\t\t\t\t};\n\t\t\t},\n\t\t\t_layout: function _layout() {\n\t\t\t\tvar positionPercentages;\n\n\t\t\t\tif (this.options.reversed) {\n\t\t\t\t\tpositionPercentages = [100 - this._state.percentage[0], this.options.range ? 100 - this._state.percentage[1] : this._state.percentage[1]];\n\t\t\t\t} else {\n\t\t\t\t\tpositionPercentages = [this._state.percentage[0], this._state.percentage[1]];\n\t\t\t\t}\n\n\t\t\t\tthis.handle1.style[this.stylePos] = positionPercentages[0] + \"%\";\n\t\t\t\tthis.handle1.setAttribute('aria-valuenow', this._state.value[0]);\n\t\t\t\tif (isNaN(this.options.formatter(this._state.value[0]))) {\n\t\t\t\t\tthis.handle1.setAttribute('aria-valuetext', this.options.formatter(this._state.value[0]));\n\t\t\t\t}\n\n\t\t\t\tthis.handle2.style[this.stylePos] = positionPercentages[1] + \"%\";\n\t\t\t\tthis.handle2.setAttribute('aria-valuenow', this._state.value[1]);\n\t\t\t\tif (isNaN(this.options.formatter(this._state.value[1]))) {\n\t\t\t\t\tthis.handle2.setAttribute('aria-valuetext', this.options.formatter(this._state.value[1]));\n\t\t\t\t}\n\n\t\t\t\t/* Position highlight range elements */\n\t\t\t\tif (this.rangeHighlightElements.length > 0 && Array.isArray(this.options.rangeHighlights) && this.options.rangeHighlights.length 
> 0) {\n\t\t\t\t\tfor (var _i = 0; _i < this.options.rangeHighlights.length; _i++) {\n\t\t\t\t\t\tvar startPercent = this._toPercentage(this.options.rangeHighlights[_i].start);\n\t\t\t\t\t\tvar endPercent = this._toPercentage(this.options.rangeHighlights[_i].end);\n\n\t\t\t\t\t\tif (this.options.reversed) {\n\t\t\t\t\t\t\tvar sp = 100 - endPercent;\n\t\t\t\t\t\t\tendPercent = 100 - startPercent;\n\t\t\t\t\t\t\tstartPercent = sp;\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tvar currentRange = this._createHighlightRange(startPercent, endPercent);\n\n\t\t\t\t\t\tif (currentRange) {\n\t\t\t\t\t\t\tif (this.options.orientation === 'vertical') {\n\t\t\t\t\t\t\t\tthis.rangeHighlightElements[_i].style.top = currentRange.start + \"%\";\n\t\t\t\t\t\t\t\tthis.rangeHighlightElements[_i].style.height = currentRange.size + \"%\";\n\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\tif (this.options.rtl) {\n\t\t\t\t\t\t\t\t\tthis.rangeHighlightElements[_i].style.right = currentRange.start + \"%\";\n\t\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\t\tthis.rangeHighlightElements[_i].style.left = currentRange.start + \"%\";\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\tthis.rangeHighlightElements[_i].style.width = currentRange.size + \"%\";\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tthis.rangeHighlightElements[_i].style.display = \"none\";\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t/* Position ticks and labels */\n\t\t\t\tif (Array.isArray(this.options.ticks) && this.options.ticks.length > 0) {\n\n\t\t\t\t\tvar styleSize = this.options.orientation === 'vertical' ? 
'height' : 'width';\n\t\t\t\t\tvar styleMargin;\n\t\t\t\t\tif (this.options.orientation === 'vertical') {\n\t\t\t\t\t\tstyleMargin = 'marginTop';\n\t\t\t\t\t} else {\n\t\t\t\t\t\tif (this.options.rtl) {\n\t\t\t\t\t\t\tstyleMargin = 'marginRight';\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tstyleMargin = 'marginLeft';\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tvar labelSize = this._state.size / (this.options.ticks.length - 1);\n\n\t\t\t\t\tif (this.tickLabelContainer) {\n\t\t\t\t\t\tvar extraMargin = 0;\n\t\t\t\t\t\tif (this.options.ticks_positions.length === 0) {\n\t\t\t\t\t\t\tif (this.options.orientation !== 'vertical') {\n\t\t\t\t\t\t\t\tthis.tickLabelContainer.style[styleMargin] = -labelSize / 2 + \"px\";\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\textraMargin = this.tickLabelContainer.offsetHeight;\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t/* Chidren are position absolute, calculate height by finding the max offsetHeight of a child */\n\t\t\t\t\t\t\tfor (i = 0; i < this.tickLabelContainer.childNodes.length; i++) {\n\t\t\t\t\t\t\t\tif (this.tickLabelContainer.childNodes[i].offsetHeight > extraMargin) {\n\t\t\t\t\t\t\t\t\textraMargin = this.tickLabelContainer.childNodes[i].offsetHeight;\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif (this.options.orientation === 'horizontal') {\n\t\t\t\t\t\t\tthis.sliderElem.style.marginBottom = extraMargin + \"px\";\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tfor (var i = 0; i < this.options.ticks.length; i++) {\n\n\t\t\t\t\t\tvar percentage = this.options.ticks_positions[i] || this._toPercentage(this.options.ticks[i]);\n\n\t\t\t\t\t\tif (this.options.reversed) {\n\t\t\t\t\t\t\tpercentage = 100 - percentage;\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tthis.ticks[i].style[this.stylePos] = percentage + \"%\";\n\n\t\t\t\t\t\t/* Set class labels to denote whether ticks are in the selection */\n\t\t\t\t\t\tthis._removeClass(this.ticks[i], 'in-selection');\n\t\t\t\t\t\tif (!this.options.range) {\n\t\t\t\t\t\t\tif (this.options.selection === 'after' && 
percentage >= positionPercentages[0]) {\n\t\t\t\t\t\t\t\tthis._addClass(this.ticks[i], 'in-selection');\n\t\t\t\t\t\t\t} else if (this.options.selection === 'before' && percentage <= positionPercentages[0]) {\n\t\t\t\t\t\t\t\tthis._addClass(this.ticks[i], 'in-selection');\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t} else if (percentage >= positionPercentages[0] && percentage <= positionPercentages[1]) {\n\t\t\t\t\t\t\tthis._addClass(this.ticks[i], 'in-selection');\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tif (this.tickLabels[i]) {\n\t\t\t\t\t\t\tthis.tickLabels[i].style[styleSize] = labelSize + \"px\";\n\n\t\t\t\t\t\t\tif (this.options.orientation !== 'vertical' && this.options.ticks_positions[i] !== undefined) {\n\t\t\t\t\t\t\t\tthis.tickLabels[i].style.position = 'absolute';\n\t\t\t\t\t\t\t\tthis.tickLabels[i].style[this.stylePos] = percentage + \"%\";\n\t\t\t\t\t\t\t\tthis.tickLabels[i].style[styleMargin] = -labelSize / 2 + 'px';\n\t\t\t\t\t\t\t} else if (this.options.orientation === 'vertical') {\n\t\t\t\t\t\t\t\tif (this.options.rtl) {\n\t\t\t\t\t\t\t\t\tthis.tickLabels[i].style['marginRight'] = this.sliderElem.offsetWidth + \"px\";\n\t\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\t\tthis.tickLabels[i].style['marginLeft'] = this.sliderElem.offsetWidth + \"px\";\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\tthis.tickLabelContainer.style[styleMargin] = this.sliderElem.offsetWidth / 2 * -1 + 'px';\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tvar formattedTooltipVal;\n\n\t\t\t\tif (this.options.range) {\n\t\t\t\t\tformattedTooltipVal = this.options.formatter(this._state.value);\n\t\t\t\t\tthis._setText(this.tooltipInner, formattedTooltipVal);\n\t\t\t\t\tthis.tooltip.style[this.stylePos] = (positionPercentages[1] + positionPercentages[0]) / 2 + \"%\";\n\n\t\t\t\t\tif (this.options.orientation === 'vertical') {\n\t\t\t\t\t\tthis._css(this.tooltip, \"margin-\" + this.stylePos, -this.tooltip.offsetHeight / 2 + \"px\");\n\t\t\t\t\t} else {\n\t\t\t\t\t\tthis._css(this.tooltip, \"margin-\" 
+ this.stylePos, -this.tooltip.offsetWidth / 2 + \"px\");\n\t\t\t\t\t}\n\n\t\t\t\t\tvar innerTooltipMinText = this.options.formatter(this._state.value[0]);\n\t\t\t\t\tthis._setText(this.tooltipInner_min, innerTooltipMinText);\n\n\t\t\t\t\tvar innerTooltipMaxText = this.options.formatter(this._state.value[1]);\n\t\t\t\t\tthis._setText(this.tooltipInner_max, innerTooltipMaxText);\n\n\t\t\t\t\tthis.tooltip_min.style[this.stylePos] = positionPercentages[0] + \"%\";\n\n\t\t\t\t\tif (this.options.orientation === 'vertical') {\n\t\t\t\t\t\tthis._css(this.tooltip_min, \"margin-\" + this.stylePos, -this.tooltip_min.offsetHeight / 2 + \"px\");\n\t\t\t\t\t} else {\n\t\t\t\t\t\tthis._css(this.tooltip_min, \"margin-\" + this.stylePos, -this.tooltip_min.offsetWidth / 2 + \"px\");\n\t\t\t\t\t}\n\n\t\t\t\t\tthis.tooltip_max.style[this.stylePos] = positionPercentages[1] + \"%\";\n\n\t\t\t\t\tif (this.options.orientation === 'vertical') {\n\t\t\t\t\t\tthis._css(this.tooltip_max, \"margin-\" + this.stylePos, -this.tooltip_max.offsetHeight / 2 + \"px\");\n\t\t\t\t\t} else {\n\t\t\t\t\t\tthis._css(this.tooltip_max, \"margin-\" + this.stylePos, -this.tooltip_max.offsetWidth / 2 + \"px\");\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tformattedTooltipVal = this.options.formatter(this._state.value[0]);\n\t\t\t\t\tthis._setText(this.tooltipInner, formattedTooltipVal);\n\n\t\t\t\t\tthis.tooltip.style[this.stylePos] = positionPercentages[0] + \"%\";\n\t\t\t\t\tif (this.options.orientation === 'vertical') {\n\t\t\t\t\t\tthis._css(this.tooltip, \"margin-\" + this.stylePos, -this.tooltip.offsetHeight / 2 + \"px\");\n\t\t\t\t\t} else {\n\t\t\t\t\t\tthis._css(this.tooltip, \"margin-\" + this.stylePos, -this.tooltip.offsetWidth / 2 + \"px\");\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif (this.options.orientation === 'vertical') {\n\t\t\t\t\tthis.trackLow.style.top = '0';\n\t\t\t\t\tthis.trackLow.style.height = Math.min(positionPercentages[0], positionPercentages[1]) + 
'%';\n\n\t\t\t\t\tthis.trackSelection.style.top = Math.min(positionPercentages[0], positionPercentages[1]) + '%';\n\t\t\t\t\tthis.trackSelection.style.height = Math.abs(positionPercentages[0] - positionPercentages[1]) + '%';\n\n\t\t\t\t\tthis.trackHigh.style.bottom = '0';\n\t\t\t\t\tthis.trackHigh.style.height = 100 - Math.min(positionPercentages[0], positionPercentages[1]) - Math.abs(positionPercentages[0] - positionPercentages[1]) + '%';\n\t\t\t\t} else {\n\t\t\t\t\tif (this.stylePos === 'right') {\n\t\t\t\t\t\tthis.trackLow.style.right = '0';\n\t\t\t\t\t} else {\n\t\t\t\t\t\tthis.trackLow.style.left = '0';\n\t\t\t\t\t}\n\t\t\t\t\tthis.trackLow.style.width = Math.min(positionPercentages[0], positionPercentages[1]) + '%';\n\n\t\t\t\t\tif (this.stylePos === 'right') {\n\t\t\t\t\t\tthis.trackSelection.style.right = Math.min(positionPercentages[0], positionPercentages[1]) + '%';\n\t\t\t\t\t} else {\n\t\t\t\t\t\tthis.trackSelection.style.left = Math.min(positionPercentages[0], positionPercentages[1]) + '%';\n\t\t\t\t\t}\n\t\t\t\t\tthis.trackSelection.style.width = Math.abs(positionPercentages[0] - positionPercentages[1]) + '%';\n\n\t\t\t\t\tif (this.stylePos === 'right') {\n\t\t\t\t\t\tthis.trackHigh.style.left = '0';\n\t\t\t\t\t} else {\n\t\t\t\t\t\tthis.trackHigh.style.right = '0';\n\t\t\t\t\t}\n\t\t\t\t\tthis.trackHigh.style.width = 100 - Math.min(positionPercentages[0], positionPercentages[1]) - Math.abs(positionPercentages[0] - positionPercentages[1]) + '%';\n\n\t\t\t\t\tvar offset_min = this.tooltip_min.getBoundingClientRect();\n\t\t\t\t\tvar offset_max = this.tooltip_max.getBoundingClientRect();\n\n\t\t\t\t\tif (this.options.tooltip_position === 'bottom') {\n\t\t\t\t\t\tif (offset_min.right > offset_max.left) {\n\t\t\t\t\t\t\tthis._removeClass(this.tooltip_max, 'bottom');\n\t\t\t\t\t\t\tthis._addClass(this.tooltip_max, 'top');\n\t\t\t\t\t\t\tthis.tooltip_max.style.top = '';\n\t\t\t\t\t\t\tthis.tooltip_max.style.bottom = 22 + 'px';\n\t\t\t\t\t\t} else 
{\n\t\t\t\t\t\t\tthis._removeClass(this.tooltip_max, 'top');\n\t\t\t\t\t\t\tthis._addClass(this.tooltip_max, 'bottom');\n\t\t\t\t\t\t\tthis.tooltip_max.style.top = this.tooltip_min.style.top;\n\t\t\t\t\t\t\tthis.tooltip_max.style.bottom = '';\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\tif (offset_min.right > offset_max.left) {\n\t\t\t\t\t\t\tthis._removeClass(this.tooltip_max, 'top');\n\t\t\t\t\t\t\tthis._addClass(this.tooltip_max, 'bottom');\n\t\t\t\t\t\t\tthis.tooltip_max.style.top = 18 + 'px';\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tthis._removeClass(this.tooltip_max, 'bottom');\n\t\t\t\t\t\t\tthis._addClass(this.tooltip_max, 'top');\n\t\t\t\t\t\t\tthis.tooltip_max.style.top = this.tooltip_min.style.top;\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t},\n\t\t\t_createHighlightRange: function _createHighlightRange(start, end) {\n\t\t\t\tif (this._isHighlightRange(start, end)) {\n\t\t\t\t\tif (start > end) {\n\t\t\t\t\t\treturn { 'start': end, 'size': start - end };\n\t\t\t\t\t}\n\t\t\t\t\treturn { 'start': start, 'size': end - start };\n\t\t\t\t}\n\t\t\t\treturn null;\n\t\t\t},\n\t\t\t_isHighlightRange: function _isHighlightRange(start, end) {\n\t\t\t\tif (0 <= start && start <= 100 && 0 <= end && end <= 100) {\n\t\t\t\t\treturn true;\n\t\t\t\t} else {\n\t\t\t\t\treturn false;\n\t\t\t\t}\n\t\t\t},\n\t\t\t_resize: function _resize(ev) {\n\t\t\t\t/*jshint unused:false*/\n\t\t\t\tthis._state.offset = this._offset(this.sliderElem);\n\t\t\t\tthis._state.size = this.sliderElem[this.sizePos];\n\t\t\t\tthis._layout();\n\t\t\t},\n\t\t\t_removeProperty: function _removeProperty(element, prop) {\n\t\t\t\tif (element.style.removeProperty) {\n\t\t\t\t\telement.style.removeProperty(prop);\n\t\t\t\t} else {\n\t\t\t\t\telement.style.removeAttribute(prop);\n\t\t\t\t}\n\t\t\t},\n\t\t\t_mousedown: function _mousedown(ev) {\n\t\t\t\tif (!this._state.enabled) {\n\t\t\t\t\treturn false;\n\t\t\t\t}\n\n\t\t\t\tthis._state.offset = this._offset(this.sliderElem);\n\t\t\t\tthis._state.size 
= this.sliderElem[this.sizePos];\n\n\t\t\t\tvar percentage = this._getPercentage(ev);\n\n\t\t\t\tif (this.options.range) {\n\t\t\t\t\tvar diff1 = Math.abs(this._state.percentage[0] - percentage);\n\t\t\t\t\tvar diff2 = Math.abs(this._state.percentage[1] - percentage);\n\t\t\t\t\tthis._state.dragged = diff1 < diff2 ? 0 : 1;\n\t\t\t\t\tthis._adjustPercentageForRangeSliders(percentage);\n\t\t\t\t} else {\n\t\t\t\t\tthis._state.dragged = 0;\n\t\t\t\t}\n\n\t\t\t\tthis._state.percentage[this._state.dragged] = percentage;\n\t\t\t\tthis._layout();\n\n\t\t\t\tif (this.touchCapable) {\n\t\t\t\t\tdocument.removeEventListener(\"touchmove\", this.mousemove, false);\n\t\t\t\t\tdocument.removeEventListener(\"touchend\", this.mouseup, false);\n\t\t\t\t}\n\n\t\t\t\tif (this.mousemove) {\n\t\t\t\t\tdocument.removeEventListener(\"mousemove\", this.mousemove, false);\n\t\t\t\t}\n\t\t\t\tif (this.mouseup) {\n\t\t\t\t\tdocument.removeEventListener(\"mouseup\", this.mouseup, false);\n\t\t\t\t}\n\n\t\t\t\tthis.mousemove = this._mousemove.bind(this);\n\t\t\t\tthis.mouseup = this._mouseup.bind(this);\n\n\t\t\t\tif (this.touchCapable) {\n\t\t\t\t\t// Touch: Bind touch events:\n\t\t\t\t\tdocument.addEventListener(\"touchmove\", this.mousemove, false);\n\t\t\t\t\tdocument.addEventListener(\"touchend\", this.mouseup, false);\n\t\t\t\t}\n\t\t\t\t// Bind mouse events:\n\t\t\t\tdocument.addEventListener(\"mousemove\", this.mousemove, false);\n\t\t\t\tdocument.addEventListener(\"mouseup\", this.mouseup, false);\n\n\t\t\t\tthis._state.inDrag = true;\n\t\t\t\tvar newValue = this._calculateValue();\n\n\t\t\t\tthis._trigger('slideStart', newValue);\n\n\t\t\t\tthis._setDataVal(newValue);\n\t\t\t\tthis.setValue(newValue, false, true);\n\n\t\t\t\tev.returnValue = false;\n\n\t\t\t\tif (this.options.focus) {\n\t\t\t\t\tthis._triggerFocusOnHandle(this._state.dragged);\n\t\t\t\t}\n\n\t\t\t\treturn true;\n\t\t\t},\n\t\t\t_touchstart: function _touchstart(ev) {\n\t\t\t\tif (ev.changedTouches === undefined) 
{\n\t\t\t\t\tthis._mousedown(ev);\n\t\t\t\t\treturn;\n\t\t\t\t}\n\n\t\t\t\tvar touch = ev.changedTouches[0];\n\t\t\t\tthis.touchX = touch.pageX;\n\t\t\t\tthis.touchY = touch.pageY;\n\t\t\t},\n\t\t\t_triggerFocusOnHandle: function _triggerFocusOnHandle(handleIdx) {\n\t\t\t\tif (handleIdx === 0) {\n\t\t\t\t\tthis.handle1.focus();\n\t\t\t\t}\n\t\t\t\tif (handleIdx === 1) {\n\t\t\t\t\tthis.handle2.focus();\n\t\t\t\t}\n\t\t\t},\n\t\t\t_keydown: function _keydown(handleIdx, ev) {\n\t\t\t\tif (!this._state.enabled) {\n\t\t\t\t\treturn false;\n\t\t\t\t}\n\n\t\t\t\tvar dir;\n\t\t\t\tswitch (ev.keyCode) {\n\t\t\t\t\tcase 37: // left\n\t\t\t\t\tcase 40:\n\t\t\t\t\t\t// down\n\t\t\t\t\t\tdir = -1;\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tcase 39: // right\n\t\t\t\t\tcase 38:\n\t\t\t\t\t\t// up\n\t\t\t\t\t\tdir = 1;\n\t\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t\tif (!dir) {\n\t\t\t\t\treturn;\n\t\t\t\t}\n\n\t\t\t\t// use natural arrow keys instead of from min to max\n\t\t\t\tif (this.options.natural_arrow_keys) {\n\t\t\t\t\tvar ifVerticalAndNotReversed = this.options.orientation === 'vertical' && !this.options.reversed;\n\t\t\t\t\tvar ifHorizontalAndReversed = this.options.orientation === 'horizontal' && this.options.reversed; // @todo control with rtl\n\n\t\t\t\t\tif (ifVerticalAndNotReversed || ifHorizontalAndReversed) {\n\t\t\t\t\t\tdir = -dir;\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tvar val = this._state.value[handleIdx] + dir * this.options.step;\n\t\t\t\tif (this.options.range) {\n\t\t\t\t\tval = [!handleIdx ? val : this._state.value[0], handleIdx ? 
val : this._state.value[1]];\n\t\t\t\t}\n\n\t\t\t\tthis._trigger('slideStart', val);\n\t\t\t\tthis._setDataVal(val);\n\t\t\t\tthis.setValue(val, true, true);\n\n\t\t\t\tthis._setDataVal(val);\n\t\t\t\tthis._trigger('slideStop', val);\n\t\t\t\tthis._layout();\n\n\t\t\t\tthis._pauseEvent(ev);\n\n\t\t\t\treturn false;\n\t\t\t},\n\t\t\t_pauseEvent: function _pauseEvent(ev) {\n\t\t\t\tif (ev.stopPropagation) {\n\t\t\t\t\tev.stopPropagation();\n\t\t\t\t}\n\t\t\t\tif (ev.preventDefault) {\n\t\t\t\t\tev.preventDefault();\n\t\t\t\t}\n\t\t\t\tev.cancelBubble = true;\n\t\t\t\tev.returnValue = false;\n\t\t\t},\n\t\t\t_mousemove: function _mousemove(ev) {\n\t\t\t\tif (!this._state.enabled) {\n\t\t\t\t\treturn false;\n\t\t\t\t}\n\n\t\t\t\tvar percentage = this._getPercentage(ev);\n\t\t\t\tthis._adjustPercentageForRangeSliders(percentage);\n\t\t\t\tthis._state.percentage[this._state.dragged] = percentage;\n\t\t\t\tthis._layout();\n\n\t\t\t\tvar val = this._calculateValue(true);\n\t\t\t\tthis.setValue(val, true, true);\n\n\t\t\t\treturn false;\n\t\t\t},\n\t\t\t_touchmove: function _touchmove(ev) {\n\t\t\t\tif (ev.changedTouches === undefined) {\n\t\t\t\t\treturn;\n\t\t\t\t}\n\n\t\t\t\tvar touch = ev.changedTouches[0];\n\n\t\t\t\tvar xDiff = touch.pageX - this.touchX;\n\t\t\t\tvar yDiff = touch.pageY - this.touchY;\n\n\t\t\t\tif (!this._state.inDrag) {\n\t\t\t\t\t// Vertical Slider\n\t\t\t\t\tif (this.options.orientation === 'vertical' && xDiff <= 5 && xDiff >= -5 && (yDiff >= 15 || yDiff <= -15)) {\n\t\t\t\t\t\tthis._mousedown(ev);\n\t\t\t\t\t}\n\t\t\t\t\t// Horizontal slider.\n\t\t\t\t\telse if (yDiff <= 5 && yDiff >= -5 && (xDiff >= 15 || xDiff <= -15)) {\n\t\t\t\t\t\t\tthis._mousedown(ev);\n\t\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t},\n\t\t\t_adjustPercentageForRangeSliders: function _adjustPercentageForRangeSliders(percentage) {\n\t\t\t\tif (this.options.range) {\n\t\t\t\t\tvar precision = this._getNumDigitsAfterDecimalPlace(percentage);\n\t\t\t\t\tprecision = precision ? 
precision - 1 : 0;\n\t\t\t\t\tvar percentageWithAdjustedPrecision = this._applyToFixedAndParseFloat(percentage, precision);\n\t\t\t\t\tif (this._state.dragged === 0 && this._applyToFixedAndParseFloat(this._state.percentage[1], precision) < percentageWithAdjustedPrecision) {\n\t\t\t\t\t\tthis._state.percentage[0] = this._state.percentage[1];\n\t\t\t\t\t\tthis._state.dragged = 1;\n\t\t\t\t\t} else if (this._state.dragged === 1 && this._applyToFixedAndParseFloat(this._state.percentage[0], precision) > percentageWithAdjustedPrecision) {\n\t\t\t\t\t\tthis._state.percentage[1] = this._state.percentage[0];\n\t\t\t\t\t\tthis._state.dragged = 0;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t},\n\t\t\t_mouseup: function _mouseup() {\n\t\t\t\tif (!this._state.enabled) {\n\t\t\t\t\treturn false;\n\t\t\t\t}\n\t\t\t\tif (this.touchCapable) {\n\t\t\t\t\t// Touch: Unbind touch event handlers:\n\t\t\t\t\tdocument.removeEventListener(\"touchmove\", this.mousemove, false);\n\t\t\t\t\tdocument.removeEventListener(\"touchend\", this.mouseup, false);\n\t\t\t\t}\n\t\t\t\t// Unbind mouse event handlers:\n\t\t\t\tdocument.removeEventListener(\"mousemove\", this.mousemove, false);\n\t\t\t\tdocument.removeEventListener(\"mouseup\", this.mouseup, false);\n\n\t\t\t\tthis._state.inDrag = false;\n\t\t\t\tif (this._state.over === false) {\n\t\t\t\t\tthis._hideTooltip();\n\t\t\t\t}\n\t\t\t\tvar val = this._calculateValue(true);\n\n\t\t\t\tthis._layout();\n\t\t\t\tthis._setDataVal(val);\n\t\t\t\tthis._trigger('slideStop', val);\n\n\t\t\t\treturn false;\n\t\t\t},\n\t\t\t_calculateValue: function _calculateValue(snapToClosestTick) {\n\t\t\t\tvar val;\n\t\t\t\tif (this.options.range) {\n\t\t\t\t\tval = [this.options.min, this.options.max];\n\t\t\t\t\tif (this._state.percentage[0] !== 0) {\n\t\t\t\t\t\tval[0] = this._toValue(this._state.percentage[0]);\n\t\t\t\t\t\tval[0] = this._applyPrecision(val[0]);\n\t\t\t\t\t}\n\t\t\t\t\tif (this._state.percentage[1] !== 100) {\n\t\t\t\t\t\tval[1] = 
this._toValue(this._state.percentage[1]);\n\t\t\t\t\t\tval[1] = this._applyPrecision(val[1]);\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tval = this._toValue(this._state.percentage[0]);\n\t\t\t\t\tval = parseFloat(val);\n\t\t\t\t\tval = this._applyPrecision(val);\n\t\t\t\t}\n\n\t\t\t\tif (snapToClosestTick) {\n\t\t\t\t\tvar min = [val, Infinity];\n\t\t\t\t\tfor (var i = 0; i < this.options.ticks.length; i++) {\n\t\t\t\t\t\tvar diff = Math.abs(this.options.ticks[i] - val);\n\t\t\t\t\t\tif (diff <= min[1]) {\n\t\t\t\t\t\t\tmin = [this.options.ticks[i], diff];\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tif (min[1] <= this.options.ticks_snap_bounds) {\n\t\t\t\t\t\treturn min[0];\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\treturn val;\n\t\t\t},\n\t\t\t_applyPrecision: function _applyPrecision(val) {\n\t\t\t\tvar precision = this.options.precision || this._getNumDigitsAfterDecimalPlace(this.options.step);\n\t\t\t\treturn this._applyToFixedAndParseFloat(val, precision);\n\t\t\t},\n\t\t\t_getNumDigitsAfterDecimalPlace: function _getNumDigitsAfterDecimalPlace(num) {\n\t\t\t\tvar match = ('' + num).match(/(?:\\.(\\d+))?(?:[eE]([+-]?\\d+))?$/);\n\t\t\t\tif (!match) {\n\t\t\t\t\treturn 0;\n\t\t\t\t}\n\t\t\t\treturn Math.max(0, (match[1] ? match[1].length : 0) - (match[2] ? 
+match[2] : 0));\n\t\t\t},\n\t\t\t_applyToFixedAndParseFloat: function _applyToFixedAndParseFloat(num, toFixedInput) {\n\t\t\t\tvar truncatedNum = num.toFixed(toFixedInput);\n\t\t\t\treturn parseFloat(truncatedNum);\n\t\t\t},\n\t\t\t/*\n   \tCredits to Mike Samuel for the following method!\n   \tSource: http://stackoverflow.com/questions/10454518/javascript-how-to-retrieve-the-number-of-decimals-of-a-string-number\n   */\n\t\t\t_getPercentage: function _getPercentage(ev) {\n\t\t\t\tif (this.touchCapable && (ev.type === 'touchstart' || ev.type === 'touchmove')) {\n\t\t\t\t\tev = ev.touches[0];\n\t\t\t\t}\n\n\t\t\t\tvar eventPosition = ev[this.mousePos];\n\t\t\t\tvar sliderOffset = this._state.offset[this.stylePos];\n\t\t\t\tvar distanceToSlide = eventPosition - sliderOffset;\n\t\t\t\tif (this.stylePos === 'right') {\n\t\t\t\t\tdistanceToSlide = -distanceToSlide;\n\t\t\t\t}\n\t\t\t\t// Calculate what percent of the length the slider handle has slid\n\t\t\t\tvar percentage = distanceToSlide / this._state.size * 100;\n\t\t\t\tpercentage = Math.round(percentage / this._state.percentage[2]) * this._state.percentage[2];\n\t\t\t\tif (this.options.reversed) {\n\t\t\t\t\tpercentage = 100 - percentage;\n\t\t\t\t}\n\n\t\t\t\t// Make sure the percent is within the bounds of the slider.\n\t\t\t\t// 0% corresponds to the 'min' value of the slide\n\t\t\t\t// 100% corresponds to the 'max' value of the slide\n\t\t\t\treturn Math.max(0, Math.min(100, percentage));\n\t\t\t},\n\t\t\t_validateInputValue: function _validateInputValue(val) {\n\t\t\t\tif (!isNaN(+val)) {\n\t\t\t\t\treturn +val;\n\t\t\t\t} else if (Array.isArray(val)) {\n\t\t\t\t\tthis._validateArray(val);\n\t\t\t\t\treturn val;\n\t\t\t\t} else {\n\t\t\t\t\tthrow new Error(ErrorMsgs.formatInvalidInputErrorMsg(val));\n\t\t\t\t}\n\t\t\t},\n\t\t\t_validateArray: function _validateArray(val) {\n\t\t\t\tfor (var i = 0; i < val.length; i++) {\n\t\t\t\t\tvar input = val[i];\n\t\t\t\t\tif (typeof input !== 'number') 
{\n\t\t\t\t\t\tthrow new Error(ErrorMsgs.formatInvalidInputErrorMsg(input));\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t},\n\t\t\t_setDataVal: function _setDataVal(val) {\n\t\t\t\tthis.element.setAttribute('data-value', val);\n\t\t\t\tthis.element.setAttribute('value', val);\n\t\t\t\tthis.element.value = val;\n\t\t\t},\n\t\t\t_trigger: function _trigger(evt, val) {\n\t\t\t\tval = val || val === 0 ? val : undefined;\n\n\t\t\t\tvar callbackFnArray = this.eventToCallbackMap[evt];\n\t\t\t\tif (callbackFnArray && callbackFnArray.length) {\n\t\t\t\t\tfor (var i = 0; i < callbackFnArray.length; i++) {\n\t\t\t\t\t\tvar callbackFn = callbackFnArray[i];\n\t\t\t\t\t\tcallbackFn(val);\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t/* If JQuery exists, trigger JQuery events */\n\t\t\t\tif ($) {\n\t\t\t\t\tthis._triggerJQueryEvent(evt, val);\n\t\t\t\t}\n\t\t\t},\n\t\t\t_triggerJQueryEvent: function _triggerJQueryEvent(evt, val) {\n\t\t\t\tvar eventData = {\n\t\t\t\t\ttype: evt,\n\t\t\t\t\tvalue: val\n\t\t\t\t};\n\t\t\t\tthis.$element.trigger(eventData);\n\t\t\t\tthis.$sliderElem.trigger(eventData);\n\t\t\t},\n\t\t\t_unbindJQueryEventHandlers: function _unbindJQueryEventHandlers() {\n\t\t\t\tthis.$element.off();\n\t\t\t\tthis.$sliderElem.off();\n\t\t\t},\n\t\t\t_setText: function _setText(element, text) {\n\t\t\t\tif (typeof element.textContent !== \"undefined\") {\n\t\t\t\t\telement.textContent = text;\n\t\t\t\t} else if (typeof element.innerText !== \"undefined\") {\n\t\t\t\t\telement.innerText = text;\n\t\t\t\t}\n\t\t\t},\n\t\t\t_removeClass: function _removeClass(element, classString) {\n\t\t\t\tvar classes = classString.split(\" \");\n\t\t\t\tvar newClasses = element.className;\n\n\t\t\t\tfor (var i = 0; i < classes.length; i++) {\n\t\t\t\t\tvar classTag = classes[i];\n\t\t\t\t\tvar regex = new RegExp(\"(?:\\\\s|^)\" + classTag + \"(?:\\\\s|$)\");\n\t\t\t\t\tnewClasses = newClasses.replace(regex, \" \");\n\t\t\t\t}\n\n\t\t\t\telement.className = newClasses.trim();\n\t\t\t},\n\t\t\t_addClass: 
function _addClass(element, classString) {\n\t\t\t\tvar classes = classString.split(\" \");\n\t\t\t\tvar newClasses = element.className;\n\n\t\t\t\tfor (var i = 0; i < classes.length; i++) {\n\t\t\t\t\tvar classTag = classes[i];\n\t\t\t\t\tvar regex = new RegExp(\"(?:\\\\s|^)\" + classTag + \"(?:\\\\s|$)\");\n\t\t\t\t\tvar ifClassExists = regex.test(newClasses);\n\n\t\t\t\t\tif (!ifClassExists) {\n\t\t\t\t\t\tnewClasses += \" \" + classTag;\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\telement.className = newClasses.trim();\n\t\t\t},\n\t\t\t_offsetLeft: function _offsetLeft(obj) {\n\t\t\t\treturn obj.getBoundingClientRect().left;\n\t\t\t},\n\t\t\t_offsetRight: function _offsetRight(obj) {\n\t\t\t\treturn obj.getBoundingClientRect().right;\n\t\t\t},\n\t\t\t_offsetTop: function _offsetTop(obj) {\n\t\t\t\tvar offsetTop = obj.offsetTop;\n\t\t\t\twhile ((obj = obj.offsetParent) && !isNaN(obj.offsetTop)) {\n\t\t\t\t\toffsetTop += obj.offsetTop;\n\t\t\t\t\tif (obj.tagName !== 'BODY') {\n\t\t\t\t\t\toffsetTop -= obj.scrollTop;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\treturn offsetTop;\n\t\t\t},\n\t\t\t_offset: function _offset(obj) {\n\t\t\t\treturn {\n\t\t\t\t\tleft: this._offsetLeft(obj),\n\t\t\t\t\tright: this._offsetRight(obj),\n\t\t\t\t\ttop: this._offsetTop(obj)\n\t\t\t\t};\n\t\t\t},\n\t\t\t_css: function _css(elementRef, styleName, value) {\n\t\t\t\tif ($) {\n\t\t\t\t\t$.style(elementRef, styleName, value);\n\t\t\t\t} else {\n\t\t\t\t\tvar style = styleName.replace(/^-ms-/, \"ms-\").replace(/-([\\da-z])/gi, function (all, letter) {\n\t\t\t\t\t\treturn letter.toUpperCase();\n\t\t\t\t\t});\n\t\t\t\t\telementRef.style[style] = value;\n\t\t\t\t}\n\t\t\t},\n\t\t\t_toValue: function _toValue(percentage) {\n\t\t\t\treturn this.options.scale.toValue.apply(this, [percentage]);\n\t\t\t},\n\t\t\t_toPercentage: function _toPercentage(value) {\n\t\t\t\treturn this.options.scale.toPercentage.apply(this, [value]);\n\t\t\t},\n\t\t\t_setTooltipPosition: function _setTooltipPosition() {\n\t\t\t\tvar 
tooltips = [this.tooltip, this.tooltip_min, this.tooltip_max];\n\t\t\t\tif (this.options.orientation === 'vertical') {\n\t\t\t\t\tvar tooltipPos;\n\t\t\t\t\tif (this.options.tooltip_position) {\n\t\t\t\t\t\ttooltipPos = this.options.tooltip_position;\n\t\t\t\t\t} else {\n\t\t\t\t\t\tif (this.options.rtl) {\n\t\t\t\t\t\t\ttooltipPos = 'left';\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\ttooltipPos = 'right';\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tvar oppositeSide = tooltipPos === 'left' ? 'right' : 'left';\n\t\t\t\t\ttooltips.forEach(function (tooltip) {\n\t\t\t\t\t\tthis._addClass(tooltip, tooltipPos);\n\t\t\t\t\t\ttooltip.style[oppositeSide] = '100%';\n\t\t\t\t\t}.bind(this));\n\t\t\t\t} else if (this.options.tooltip_position === 'bottom') {\n\t\t\t\t\ttooltips.forEach(function (tooltip) {\n\t\t\t\t\t\tthis._addClass(tooltip, 'bottom');\n\t\t\t\t\t\ttooltip.style.top = 22 + 'px';\n\t\t\t\t\t}.bind(this));\n\t\t\t\t} else {\n\t\t\t\t\ttooltips.forEach(function (tooltip) {\n\t\t\t\t\t\tthis._addClass(tooltip, 'top');\n\t\t\t\t\t\ttooltip.style.top = -this.tooltip.outerHeight - 14 + 'px';\n\t\t\t\t\t}.bind(this));\n\t\t\t\t}\n\t\t\t}\n\t\t};\n\n\t\t/*********************************\n  \t\tAttach to global namespace\n  \t*********************************/\n\t\tif ($ && $.fn) {\n\t\t\t(function () {\n\t\t\t\tvar autoRegisterNamespace = void 0;\n\n\t\t\t\tif (!$.fn.slider) {\n\t\t\t\t\t$.bridget(NAMESPACE_MAIN, Slider);\n\t\t\t\t\tautoRegisterNamespace = NAMESPACE_MAIN;\n\t\t\t\t} else {\n\t\t\t\t\tif (windowIsDefined) {\n\t\t\t\t\t\twindow.console.warn(\"bootstrap-slider.js - WARNING: $.fn.slider namespace is already bound. 
Use the $.fn.bootstrapSlider namespace instead.\");\n\t\t\t\t\t}\n\t\t\t\t\tautoRegisterNamespace = NAMESPACE_ALTERNATE;\n\t\t\t\t}\n\t\t\t\t$.bridget(NAMESPACE_ALTERNATE, Slider);\n\n\t\t\t\t// Auto-Register data-provide=\"slider\" Elements\n\t\t\t\t$(function () {\n\t\t\t\t\t$(\"input[data-provide=slider]\")[autoRegisterNamespace]();\n\t\t\t\t});\n\t\t\t})();\n\t\t}\n\t})($);\n\n\treturn Slider;\n});\n"
  },
  {
    "path": "web_gui/gui_v3/js/bootstrap.js",
    "content": "/*!\n * Bootstrap v3.3.6 (http://getbootstrap.com)\n * Copyright 2011-2015 Twitter, Inc.\n * Licensed under the MIT license\n */\n\nif (typeof jQuery === 'undefined') {\n  throw new Error('Bootstrap\\'s JavaScript requires jQuery')\n}\n\n+function ($) {\n  'use strict';\n  var version = $.fn.jquery.split(' ')[0].split('.')\n  if ((version[0] < 2 && version[1] < 9) || (version[0] == 1 && version[1] == 9 && version[2] < 1) || (version[0] > 2)) {\n    throw new Error('Bootstrap\\'s JavaScript requires jQuery version 1.9.1 or higher, but lower than version 3')\n  }\n}(jQuery);\n\n/* ========================================================================\n * Bootstrap: transition.js v3.3.6\n * http://getbootstrap.com/javascript/#transitions\n * ========================================================================\n * Copyright 2011-2015 Twitter, Inc.\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)\n * ======================================================================== */\n\n\n+function ($) {\n  'use strict';\n\n  // CSS TRANSITION SUPPORT (Shoutout: http://www.modernizr.com/)\n  // ============================================================\n\n  function transitionEnd() {\n    var el = document.createElement('bootstrap')\n\n    var transEndEventNames = {\n      WebkitTransition : 'webkitTransitionEnd',\n      MozTransition    : 'transitionend',\n      OTransition      : 'oTransitionEnd otransitionend',\n      transition       : 'transitionend'\n    }\n\n    for (var name in transEndEventNames) {\n      if (el.style[name] !== undefined) {\n        return { end: transEndEventNames[name] }\n      }\n    }\n\n    return false // explicit for ie8 (  ._.)\n  }\n\n  // http://blog.alexmaccaw.com/css-transitions\n  $.fn.emulateTransitionEnd = function (duration) {\n    var called = false\n    var $el = this\n    $(this).one('bsTransitionEnd', function () { called = true })\n    var callback = function () { if (!called) 
$($el).trigger($.support.transition.end) }\n    setTimeout(callback, duration)\n    return this\n  }\n\n  $(function () {\n    $.support.transition = transitionEnd()\n\n    if (!$.support.transition) return\n\n    $.event.special.bsTransitionEnd = {\n      bindType: $.support.transition.end,\n      delegateType: $.support.transition.end,\n      handle: function (e) {\n        if ($(e.target).is(this)) return e.handleObj.handler.apply(this, arguments)\n      }\n    }\n  })\n\n}(jQuery);\n\n/* ========================================================================\n * Bootstrap: alert.js v3.3.6\n * http://getbootstrap.com/javascript/#alerts\n * ========================================================================\n * Copyright 2011-2015 Twitter, Inc.\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)\n * ======================================================================== */\n\n\n+function ($) {\n  'use strict';\n\n  // ALERT CLASS DEFINITION\n  // ======================\n\n  var dismiss = '[data-dismiss=\"alert\"]'\n  var Alert   = function (el) {\n    $(el).on('click', dismiss, this.close)\n  }\n\n  Alert.VERSION = '3.3.6'\n\n  Alert.TRANSITION_DURATION = 150\n\n  Alert.prototype.close = function (e) {\n    var $this    = $(this)\n    var selector = $this.attr('data-target')\n\n    if (!selector) {\n      selector = $this.attr('href')\n      selector = selector && selector.replace(/.*(?=#[^\\s]*$)/, '') // strip for ie7\n    }\n\n    var $parent = $(selector)\n\n    if (e) e.preventDefault()\n\n    if (!$parent.length) {\n      $parent = $this.closest('.alert')\n    }\n\n    $parent.trigger(e = $.Event('close.bs.alert'))\n\n    if (e.isDefaultPrevented()) return\n\n    $parent.removeClass('in')\n\n    function removeElement() {\n      // detach from parent, fire event then clean up data\n      $parent.detach().trigger('closed.bs.alert').remove()\n    }\n\n    $.support.transition && $parent.hasClass('fade') ?\n      $parent\n     
   .one('bsTransitionEnd', removeElement)\n        .emulateTransitionEnd(Alert.TRANSITION_DURATION) :\n      removeElement()\n  }\n\n\n  // ALERT PLUGIN DEFINITION\n  // =======================\n\n  function Plugin(option) {\n    return this.each(function () {\n      var $this = $(this)\n      var data  = $this.data('bs.alert')\n\n      if (!data) $this.data('bs.alert', (data = new Alert(this)))\n      if (typeof option == 'string') data[option].call($this)\n    })\n  }\n\n  var old = $.fn.alert\n\n  $.fn.alert             = Plugin\n  $.fn.alert.Constructor = Alert\n\n\n  // ALERT NO CONFLICT\n  // =================\n\n  $.fn.alert.noConflict = function () {\n    $.fn.alert = old\n    return this\n  }\n\n\n  // ALERT DATA-API\n  // ==============\n\n  $(document).on('click.bs.alert.data-api', dismiss, Alert.prototype.close)\n\n}(jQuery);\n\n/* ========================================================================\n * Bootstrap: button.js v3.3.6\n * http://getbootstrap.com/javascript/#buttons\n * ========================================================================\n * Copyright 2011-2015 Twitter, Inc.\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)\n * ======================================================================== */\n\n\n+function ($) {\n  'use strict';\n\n  // BUTTON PUBLIC CLASS DEFINITION\n  // ==============================\n\n  var Button = function (element, options) {\n    this.$element  = $(element)\n    this.options   = $.extend({}, Button.DEFAULTS, options)\n    this.isLoading = false\n  }\n\n  Button.VERSION  = '3.3.6'\n\n  Button.DEFAULTS = {\n    loadingText: 'loading...'\n  }\n\n  Button.prototype.setState = function (state) {\n    var d    = 'disabled'\n    var $el  = this.$element\n    var val  = $el.is('input') ? 
'val' : 'html'\n    var data = $el.data()\n\n    state += 'Text'\n\n    if (data.resetText == null) $el.data('resetText', $el[val]())\n\n    // push to event loop to allow forms to submit\n    setTimeout($.proxy(function () {\n      $el[val](data[state] == null ? this.options[state] : data[state])\n\n      if (state == 'loadingText') {\n        this.isLoading = true\n        $el.addClass(d).attr(d, d)\n      } else if (this.isLoading) {\n        this.isLoading = false\n        $el.removeClass(d).removeAttr(d)\n      }\n    }, this), 0)\n  }\n\n  Button.prototype.toggle = function () {\n    var changed = true\n    var $parent = this.$element.closest('[data-toggle=\"buttons\"]')\n\n    if ($parent.length) {\n      var $input = this.$element.find('input')\n      if ($input.prop('type') == 'radio') {\n        if ($input.prop('checked')) changed = false\n        $parent.find('.active').removeClass('active')\n        this.$element.addClass('active')\n      } else if ($input.prop('type') == 'checkbox') {\n        if (($input.prop('checked')) !== this.$element.hasClass('active')) changed = false\n        this.$element.toggleClass('active')\n      }\n      $input.prop('checked', this.$element.hasClass('active'))\n      if (changed) $input.trigger('change')\n    } else {\n      this.$element.attr('aria-pressed', !this.$element.hasClass('active'))\n      this.$element.toggleClass('active')\n    }\n  }\n\n\n  // BUTTON PLUGIN DEFINITION\n  // ========================\n\n  function Plugin(option) {\n    return this.each(function () {\n      var $this   = $(this)\n      var data    = $this.data('bs.button')\n      var options = typeof option == 'object' && option\n\n      if (!data) $this.data('bs.button', (data = new Button(this, options)))\n\n      if (option == 'toggle') data.toggle()\n      else if (option) data.setState(option)\n    })\n  }\n\n  var old = $.fn.button\n\n  $.fn.button             = Plugin\n  $.fn.button.Constructor = Button\n\n\n  // BUTTON NO CONFLICT\n  // 
==================\n\n  $.fn.button.noConflict = function () {\n    $.fn.button = old\n    return this\n  }\n\n\n  // BUTTON DATA-API\n  // ===============\n\n  $(document)\n    .on('click.bs.button.data-api', '[data-toggle^=\"button\"]', function (e) {\n      var $btn = $(e.target)\n      if (!$btn.hasClass('btn')) $btn = $btn.closest('.btn')\n      Plugin.call($btn, 'toggle')\n      if (!($(e.target).is('input[type=\"radio\"]') || $(e.target).is('input[type=\"checkbox\"]'))) e.preventDefault()\n    })\n    .on('focus.bs.button.data-api blur.bs.button.data-api', '[data-toggle^=\"button\"]', function (e) {\n      $(e.target).closest('.btn').toggleClass('focus', /^focus(in)?$/.test(e.type))\n    })\n\n}(jQuery);\n\n/* ========================================================================\n * Bootstrap: carousel.js v3.3.6\n * http://getbootstrap.com/javascript/#carousel\n * ========================================================================\n * Copyright 2011-2015 Twitter, Inc.\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)\n * ======================================================================== */\n\n\n+function ($) {\n  'use strict';\n\n  // CAROUSEL CLASS DEFINITION\n  // =========================\n\n  var Carousel = function (element, options) {\n    this.$element    = $(element)\n    this.$indicators = this.$element.find('.carousel-indicators')\n    this.options     = options\n    this.paused      = null\n    this.sliding     = null\n    this.interval    = null\n    this.$active     = null\n    this.$items      = null\n\n    this.options.keyboard && this.$element.on('keydown.bs.carousel', $.proxy(this.keydown, this))\n\n    this.options.pause == 'hover' && !('ontouchstart' in document.documentElement) && this.$element\n      .on('mouseenter.bs.carousel', $.proxy(this.pause, this))\n      .on('mouseleave.bs.carousel', $.proxy(this.cycle, this))\n  }\n\n  Carousel.VERSION  = '3.3.6'\n\n  Carousel.TRANSITION_DURATION = 
600\n\n  Carousel.DEFAULTS = {\n    interval: 5000,\n    pause: 'hover',\n    wrap: true,\n    keyboard: true\n  }\n\n  Carousel.prototype.keydown = function (e) {\n    if (/input|textarea/i.test(e.target.tagName)) return\n    switch (e.which) {\n      case 37: this.prev(); break\n      case 39: this.next(); break\n      default: return\n    }\n\n    e.preventDefault()\n  }\n\n  Carousel.prototype.cycle = function (e) {\n    e || (this.paused = false)\n\n    this.interval && clearInterval(this.interval)\n\n    this.options.interval\n      && !this.paused\n      && (this.interval = setInterval($.proxy(this.next, this), this.options.interval))\n\n    return this\n  }\n\n  Carousel.prototype.getItemIndex = function (item) {\n    this.$items = item.parent().children('.item')\n    return this.$items.index(item || this.$active)\n  }\n\n  Carousel.prototype.getItemForDirection = function (direction, active) {\n    var activeIndex = this.getItemIndex(active)\n    var willWrap = (direction == 'prev' && activeIndex === 0)\n                || (direction == 'next' && activeIndex == (this.$items.length - 1))\n    if (willWrap && !this.options.wrap) return active\n    var delta = direction == 'prev' ? -1 : 1\n    var itemIndex = (activeIndex + delta) % this.$items.length\n    return this.$items.eq(itemIndex)\n  }\n\n  Carousel.prototype.to = function (pos) {\n    var that        = this\n    var activeIndex = this.getItemIndex(this.$active = this.$element.find('.item.active'))\n\n    if (pos > (this.$items.length - 1) || pos < 0) return\n\n    if (this.sliding)       return this.$element.one('slid.bs.carousel', function () { that.to(pos) }) // yes, \"slid\"\n    if (activeIndex == pos) return this.pause().cycle()\n\n    return this.slide(pos > activeIndex ? 
'next' : 'prev', this.$items.eq(pos))\n  }\n\n  Carousel.prototype.pause = function (e) {\n    e || (this.paused = true)\n\n    if (this.$element.find('.next, .prev').length && $.support.transition) {\n      this.$element.trigger($.support.transition.end)\n      this.cycle(true)\n    }\n\n    this.interval = clearInterval(this.interval)\n\n    return this\n  }\n\n  Carousel.prototype.next = function () {\n    if (this.sliding) return\n    return this.slide('next')\n  }\n\n  Carousel.prototype.prev = function () {\n    if (this.sliding) return\n    return this.slide('prev')\n  }\n\n  Carousel.prototype.slide = function (type, next) {\n    var $active   = this.$element.find('.item.active')\n    var $next     = next || this.getItemForDirection(type, $active)\n    var isCycling = this.interval\n    var direction = type == 'next' ? 'left' : 'right'\n    var that      = this\n\n    if ($next.hasClass('active')) return (this.sliding = false)\n\n    var relatedTarget = $next[0]\n    var slideEvent = $.Event('slide.bs.carousel', {\n      relatedTarget: relatedTarget,\n      direction: direction\n    })\n    this.$element.trigger(slideEvent)\n    if (slideEvent.isDefaultPrevented()) return\n\n    this.sliding = true\n\n    isCycling && this.pause()\n\n    if (this.$indicators.length) {\n      this.$indicators.find('.active').removeClass('active')\n      var $nextIndicator = $(this.$indicators.children()[this.getItemIndex($next)])\n      $nextIndicator && $nextIndicator.addClass('active')\n    }\n\n    var slidEvent = $.Event('slid.bs.carousel', { relatedTarget: relatedTarget, direction: direction }) // yes, \"slid\"\n    if ($.support.transition && this.$element.hasClass('slide')) {\n      $next.addClass(type)\n      $next[0].offsetWidth // force reflow\n      $active.addClass(direction)\n      $next.addClass(direction)\n      $active\n        .one('bsTransitionEnd', function () {\n          $next.removeClass([type, direction].join(' ')).addClass('active')\n          
$active.removeClass(['active', direction].join(' '))\n          that.sliding = false\n          setTimeout(function () {\n            that.$element.trigger(slidEvent)\n          }, 0)\n        })\n        .emulateTransitionEnd(Carousel.TRANSITION_DURATION)\n    } else {\n      $active.removeClass('active')\n      $next.addClass('active')\n      this.sliding = false\n      this.$element.trigger(slidEvent)\n    }\n\n    isCycling && this.cycle()\n\n    return this\n  }\n\n\n  // CAROUSEL PLUGIN DEFINITION\n  // ==========================\n\n  function Plugin(option) {\n    return this.each(function () {\n      var $this   = $(this)\n      var data    = $this.data('bs.carousel')\n      var options = $.extend({}, Carousel.DEFAULTS, $this.data(), typeof option == 'object' && option)\n      var action  = typeof option == 'string' ? option : options.slide\n\n      if (!data) $this.data('bs.carousel', (data = new Carousel(this, options)))\n      if (typeof option == 'number') data.to(option)\n      else if (action) data[action]()\n      else if (options.interval) data.pause().cycle()\n    })\n  }\n\n  var old = $.fn.carousel\n\n  $.fn.carousel             = Plugin\n  $.fn.carousel.Constructor = Carousel\n\n\n  // CAROUSEL NO CONFLICT\n  // ====================\n\n  $.fn.carousel.noConflict = function () {\n    $.fn.carousel = old\n    return this\n  }\n\n\n  // CAROUSEL DATA-API\n  // =================\n\n  var clickHandler = function (e) {\n    var href\n    var $this   = $(this)\n    var $target = $($this.attr('data-target') || (href = $this.attr('href')) && href.replace(/.*(?=#[^\\s]+$)/, '')) // strip for ie7\n    if (!$target.hasClass('carousel')) return\n    var options = $.extend({}, $target.data(), $this.data())\n    var slideIndex = $this.attr('data-slide-to')\n    if (slideIndex) options.interval = false\n\n    Plugin.call($target, options)\n\n    if (slideIndex) {\n      $target.data('bs.carousel').to(slideIndex)\n    }\n\n    e.preventDefault()\n  }\n\n  
$(document)\n    .on('click.bs.carousel.data-api', '[data-slide]', clickHandler)\n    .on('click.bs.carousel.data-api', '[data-slide-to]', clickHandler)\n\n  $(window).on('load', function () {\n    $('[data-ride=\"carousel\"]').each(function () {\n      var $carousel = $(this)\n      Plugin.call($carousel, $carousel.data())\n    })\n  })\n\n}(jQuery);\n\n/* ========================================================================\n * Bootstrap: collapse.js v3.3.6\n * http://getbootstrap.com/javascript/#collapse\n * ========================================================================\n * Copyright 2011-2015 Twitter, Inc.\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)\n * ======================================================================== */\n\n\n+function ($) {\n  'use strict';\n\n  // COLLAPSE PUBLIC CLASS DEFINITION\n  // ================================\n\n  var Collapse = function (element, options) {\n    this.$element      = $(element)\n    this.options       = $.extend({}, Collapse.DEFAULTS, options)\n    this.$trigger      = $('[data-toggle=\"collapse\"][href=\"#' + element.id + '\"],' +\n                           '[data-toggle=\"collapse\"][data-target=\"#' + element.id + '\"]')\n    this.transitioning = null\n\n    if (this.options.parent) {\n      this.$parent = this.getParent()\n    } else {\n      this.addAriaAndCollapsedClass(this.$element, this.$trigger)\n    }\n\n    if (this.options.toggle) this.toggle()\n  }\n\n  Collapse.VERSION  = '3.3.6'\n\n  Collapse.TRANSITION_DURATION = 350\n\n  Collapse.DEFAULTS = {\n    toggle: true\n  }\n\n  Collapse.prototype.dimension = function () {\n    var hasWidth = this.$element.hasClass('width')\n    return hasWidth ? 
'width' : 'height'\n  }\n\n  Collapse.prototype.show = function () {\n    if (this.transitioning || this.$element.hasClass('in')) return\n\n    var activesData\n    var actives = this.$parent && this.$parent.children('.panel').children('.in, .collapsing')\n\n    if (actives && actives.length) {\n      activesData = actives.data('bs.collapse')\n      if (activesData && activesData.transitioning) return\n    }\n\n    var startEvent = $.Event('show.bs.collapse')\n    this.$element.trigger(startEvent)\n    if (startEvent.isDefaultPrevented()) return\n\n    if (actives && actives.length) {\n      Plugin.call(actives, 'hide')\n      activesData || actives.data('bs.collapse', null)\n    }\n\n    var dimension = this.dimension()\n\n    this.$element\n      .removeClass('collapse')\n      .addClass('collapsing')[dimension](0)\n      .attr('aria-expanded', true)\n\n    this.$trigger\n      .removeClass('collapsed')\n      .attr('aria-expanded', true)\n\n    this.transitioning = 1\n\n    var complete = function () {\n      this.$element\n        .removeClass('collapsing')\n        .addClass('collapse in')[dimension]('')\n      this.transitioning = 0\n      this.$element\n        .trigger('shown.bs.collapse')\n    }\n\n    if (!$.support.transition) return complete.call(this)\n\n    var scrollSize = $.camelCase(['scroll', dimension].join('-'))\n\n    this.$element\n      .one('bsTransitionEnd', $.proxy(complete, this))\n      .emulateTransitionEnd(Collapse.TRANSITION_DURATION)[dimension](this.$element[0][scrollSize])\n  }\n\n  Collapse.prototype.hide = function () {\n    if (this.transitioning || !this.$element.hasClass('in')) return\n\n    var startEvent = $.Event('hide.bs.collapse')\n    this.$element.trigger(startEvent)\n    if (startEvent.isDefaultPrevented()) return\n\n    var dimension = this.dimension()\n\n    this.$element[dimension](this.$element[dimension]())[0].offsetHeight\n\n    this.$element\n      .addClass('collapsing')\n      .removeClass('collapse in')\n      
.attr('aria-expanded', false)\n\n    this.$trigger\n      .addClass('collapsed')\n      .attr('aria-expanded', false)\n\n    this.transitioning = 1\n\n    var complete = function () {\n      this.transitioning = 0\n      this.$element\n        .removeClass('collapsing')\n        .addClass('collapse')\n        .trigger('hidden.bs.collapse')\n    }\n\n    if (!$.support.transition) return complete.call(this)\n\n    this.$element\n      [dimension](0)\n      .one('bsTransitionEnd', $.proxy(complete, this))\n      .emulateTransitionEnd(Collapse.TRANSITION_DURATION)\n  }\n\n  Collapse.prototype.toggle = function () {\n    this[this.$element.hasClass('in') ? 'hide' : 'show']()\n  }\n\n  Collapse.prototype.getParent = function () {\n    return $(this.options.parent)\n      .find('[data-toggle=\"collapse\"][data-parent=\"' + this.options.parent + '\"]')\n      .each($.proxy(function (i, element) {\n        var $element = $(element)\n        this.addAriaAndCollapsedClass(getTargetFromTrigger($element), $element)\n      }, this))\n      .end()\n  }\n\n  Collapse.prototype.addAriaAndCollapsedClass = function ($element, $trigger) {\n    var isOpen = $element.hasClass('in')\n\n    $element.attr('aria-expanded', isOpen)\n    $trigger\n      .toggleClass('collapsed', !isOpen)\n      .attr('aria-expanded', isOpen)\n  }\n\n  function getTargetFromTrigger($trigger) {\n    var href\n    var target = $trigger.attr('data-target')\n      || (href = $trigger.attr('href')) && href.replace(/.*(?=#[^\\s]+$)/, '') // strip for ie7\n\n    return $(target)\n  }\n\n\n  // COLLAPSE PLUGIN DEFINITION\n  // ==========================\n\n  function Plugin(option) {\n    return this.each(function () {\n      var $this   = $(this)\n      var data    = $this.data('bs.collapse')\n      var options = $.extend({}, Collapse.DEFAULTS, $this.data(), typeof option == 'object' && option)\n\n      if (!data && options.toggle && /show|hide/.test(option)) options.toggle = false\n      if (!data) 
$this.data('bs.collapse', (data = new Collapse(this, options)))\n      if (typeof option == 'string') data[option]()\n    })\n  }\n\n  var old = $.fn.collapse\n\n  $.fn.collapse             = Plugin\n  $.fn.collapse.Constructor = Collapse\n\n\n  // COLLAPSE NO CONFLICT\n  // ====================\n\n  $.fn.collapse.noConflict = function () {\n    $.fn.collapse = old\n    return this\n  }\n\n\n  // COLLAPSE DATA-API\n  // =================\n\n  $(document).on('click.bs.collapse.data-api', '[data-toggle=\"collapse\"]', function (e) {\n    var $this   = $(this)\n\n    if (!$this.attr('data-target')) e.preventDefault()\n\n    var $target = getTargetFromTrigger($this)\n    var data    = $target.data('bs.collapse')\n    var option  = data ? 'toggle' : $this.data()\n\n    Plugin.call($target, option)\n  })\n\n}(jQuery);\n\n/* ========================================================================\n * Bootstrap: dropdown.js v3.3.6\n * http://getbootstrap.com/javascript/#dropdowns\n * ========================================================================\n * Copyright 2011-2015 Twitter, Inc.\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)\n * ======================================================================== */\n\n\n+function ($) {\n  'use strict';\n\n  // DROPDOWN CLASS DEFINITION\n  // =========================\n\n  var backdrop = '.dropdown-backdrop'\n  var toggle   = '[data-toggle=\"dropdown\"]'\n  var Dropdown = function (element) {\n    $(element).on('click.bs.dropdown', this.toggle)\n  }\n\n  Dropdown.VERSION = '3.3.6'\n\n  function getParent($this) {\n    var selector = $this.attr('data-target')\n\n    if (!selector) {\n      selector = $this.attr('href')\n      selector = selector && /#[A-Za-z]/.test(selector) && selector.replace(/.*(?=#[^\\s]*$)/, '') // strip for ie7\n    }\n\n    var $parent = selector && $(selector)\n\n    return $parent && $parent.length ? 
$parent : $this.parent()\n  }\n\n  function clearMenus(e) {\n    if (e && e.which === 3) return\n    $(backdrop).remove()\n    $(toggle).each(function () {\n      var $this         = $(this)\n      var $parent       = getParent($this)\n      var relatedTarget = { relatedTarget: this }\n\n      if (!$parent.hasClass('open')) return\n\n      if (e && e.type == 'click' && /input|textarea/i.test(e.target.tagName) && $.contains($parent[0], e.target)) return\n\n      $parent.trigger(e = $.Event('hide.bs.dropdown', relatedTarget))\n\n      if (e.isDefaultPrevented()) return\n\n      $this.attr('aria-expanded', 'false')\n      $parent.removeClass('open').trigger($.Event('hidden.bs.dropdown', relatedTarget))\n    })\n  }\n\n  Dropdown.prototype.toggle = function (e) {\n    var $this = $(this)\n\n    if ($this.is('.disabled, :disabled')) return\n\n    var $parent  = getParent($this)\n    var isActive = $parent.hasClass('open')\n\n    clearMenus()\n\n    if (!isActive) {\n      if ('ontouchstart' in document.documentElement && !$parent.closest('.navbar-nav').length) {\n        // if mobile we use a backdrop because click events don't delegate\n        $(document.createElement('div'))\n          .addClass('dropdown-backdrop')\n          .insertAfter($(this))\n          .on('click', clearMenus)\n      }\n\n      var relatedTarget = { relatedTarget: this }\n      $parent.trigger(e = $.Event('show.bs.dropdown', relatedTarget))\n\n      if (e.isDefaultPrevented()) return\n\n      $this\n        .trigger('focus')\n        .attr('aria-expanded', 'true')\n\n      $parent\n        .toggleClass('open')\n        .trigger($.Event('shown.bs.dropdown', relatedTarget))\n    }\n\n    return false\n  }\n\n  Dropdown.prototype.keydown = function (e) {\n    if (!/(38|40|27|32)/.test(e.which) || /input|textarea/i.test(e.target.tagName)) return\n\n    var $this = $(this)\n\n    e.preventDefault()\n    e.stopPropagation()\n\n    if ($this.is('.disabled, :disabled')) return\n\n    var $parent  = 
getParent($this)\n    var isActive = $parent.hasClass('open')\n\n    if (!isActive && e.which != 27 || isActive && e.which == 27) {\n      if (e.which == 27) $parent.find(toggle).trigger('focus')\n      return $this.trigger('click')\n    }\n\n    var desc = ' li:not(.disabled):visible a'\n    var $items = $parent.find('.dropdown-menu' + desc)\n\n    if (!$items.length) return\n\n    var index = $items.index(e.target)\n\n    if (e.which == 38 && index > 0)                 index--         // up\n    if (e.which == 40 && index < $items.length - 1) index++         // down\n    if (!~index)                                    index = 0\n\n    $items.eq(index).trigger('focus')\n  }\n\n\n  // DROPDOWN PLUGIN DEFINITION\n  // ==========================\n\n  function Plugin(option) {\n    return this.each(function () {\n      var $this = $(this)\n      var data  = $this.data('bs.dropdown')\n\n      if (!data) $this.data('bs.dropdown', (data = new Dropdown(this)))\n      if (typeof option == 'string') data[option].call($this)\n    })\n  }\n\n  var old = $.fn.dropdown\n\n  $.fn.dropdown             = Plugin\n  $.fn.dropdown.Constructor = Dropdown\n\n\n  // DROPDOWN NO CONFLICT\n  // ====================\n\n  $.fn.dropdown.noConflict = function () {\n    $.fn.dropdown = old\n    return this\n  }\n\n\n  // APPLY TO STANDARD DROPDOWN ELEMENTS\n  // ===================================\n\n  $(document)\n    .on('click.bs.dropdown.data-api', clearMenus)\n    .on('click.bs.dropdown.data-api', '.dropdown form', function (e) { e.stopPropagation() })\n    .on('click.bs.dropdown.data-api', toggle, Dropdown.prototype.toggle)\n    .on('keydown.bs.dropdown.data-api', toggle, Dropdown.prototype.keydown)\n    .on('keydown.bs.dropdown.data-api', '.dropdown-menu', Dropdown.prototype.keydown)\n\n}(jQuery);\n\n/* ========================================================================\n * Bootstrap: modal.js v3.3.6\n * http://getbootstrap.com/javascript/#modals\n * 
========================================================================\n * Copyright 2011-2015 Twitter, Inc.\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)\n * ======================================================================== */\n\n\n+function ($) {\n  'use strict';\n\n  // MODAL CLASS DEFINITION\n  // ======================\n\n  var Modal = function (element, options) {\n    this.options             = options\n    this.$body               = $(document.body)\n    this.$element            = $(element)\n    this.$dialog             = this.$element.find('.modal-dialog')\n    this.$backdrop           = null\n    this.isShown             = null\n    this.originalBodyPad     = null\n    this.scrollbarWidth      = 0\n    this.ignoreBackdropClick = false\n\n    if (this.options.remote) {\n      this.$element\n        .find('.modal-content')\n        .load(this.options.remote, $.proxy(function () {\n          this.$element.trigger('loaded.bs.modal')\n        }, this))\n    }\n  }\n\n  Modal.VERSION  = '3.3.6'\n\n  Modal.TRANSITION_DURATION = 300\n  Modal.BACKDROP_TRANSITION_DURATION = 150\n\n  Modal.DEFAULTS = {\n    backdrop: true,\n    keyboard: true,\n    show: true\n  }\n\n  Modal.prototype.toggle = function (_relatedTarget) {\n    return this.isShown ? 
this.hide() : this.show(_relatedTarget)\n  }\n\n  Modal.prototype.show = function (_relatedTarget) {\n    var that = this\n    var e    = $.Event('show.bs.modal', { relatedTarget: _relatedTarget })\n\n    this.$element.trigger(e)\n\n    if (this.isShown || e.isDefaultPrevented()) return\n\n    this.isShown = true\n\n    this.checkScrollbar()\n    this.setScrollbar()\n    this.$body.addClass('modal-open')\n\n    this.escape()\n    this.resize()\n\n    this.$element.on('click.dismiss.bs.modal', '[data-dismiss=\"modal\"]', $.proxy(this.hide, this))\n\n    this.$dialog.on('mousedown.dismiss.bs.modal', function () {\n      that.$element.one('mouseup.dismiss.bs.modal', function (e) {\n        if ($(e.target).is(that.$element)) that.ignoreBackdropClick = true\n      })\n    })\n\n    this.backdrop(function () {\n      var transition = $.support.transition && that.$element.hasClass('fade')\n\n      if (!that.$element.parent().length) {\n        that.$element.appendTo(that.$body) // don't move modals dom position\n      }\n\n      that.$element\n        .show()\n        .scrollTop(0)\n\n      that.adjustDialog()\n\n      if (transition) {\n        that.$element[0].offsetWidth // force reflow\n      }\n\n      that.$element.addClass('in')\n\n      that.enforceFocus()\n\n      var e = $.Event('shown.bs.modal', { relatedTarget: _relatedTarget })\n\n      transition ?\n        that.$dialog // wait for modal to slide in\n          .one('bsTransitionEnd', function () {\n            that.$element.trigger('focus').trigger(e)\n          })\n          .emulateTransitionEnd(Modal.TRANSITION_DURATION) :\n        that.$element.trigger('focus').trigger(e)\n    })\n  }\n\n  Modal.prototype.hide = function (e) {\n    if (e) e.preventDefault()\n\n    e = $.Event('hide.bs.modal')\n\n    this.$element.trigger(e)\n\n    if (!this.isShown || e.isDefaultPrevented()) return\n\n    this.isShown = false\n\n    this.escape()\n    this.resize()\n\n    $(document).off('focusin.bs.modal')\n\n    
this.$element\n      .removeClass('in')\n      .off('click.dismiss.bs.modal')\n      .off('mouseup.dismiss.bs.modal')\n\n    this.$dialog.off('mousedown.dismiss.bs.modal')\n\n    $.support.transition && this.$element.hasClass('fade') ?\n      this.$element\n        .one('bsTransitionEnd', $.proxy(this.hideModal, this))\n        .emulateTransitionEnd(Modal.TRANSITION_DURATION) :\n      this.hideModal()\n  }\n\n  Modal.prototype.enforceFocus = function () {\n    $(document)\n      .off('focusin.bs.modal') // guard against infinite focus loop\n      .on('focusin.bs.modal', $.proxy(function (e) {\n        if (this.$element[0] !== e.target && !this.$element.has(e.target).length) {\n          this.$element.trigger('focus')\n        }\n      }, this))\n  }\n\n  Modal.prototype.escape = function () {\n    if (this.isShown && this.options.keyboard) {\n      this.$element.on('keydown.dismiss.bs.modal', $.proxy(function (e) {\n        e.which == 27 && this.hide()\n      }, this))\n    } else if (!this.isShown) {\n      this.$element.off('keydown.dismiss.bs.modal')\n    }\n  }\n\n  Modal.prototype.resize = function () {\n    if (this.isShown) {\n      $(window).on('resize.bs.modal', $.proxy(this.handleUpdate, this))\n    } else {\n      $(window).off('resize.bs.modal')\n    }\n  }\n\n  Modal.prototype.hideModal = function () {\n    var that = this\n    this.$element.hide()\n    this.backdrop(function () {\n      that.$body.removeClass('modal-open')\n      that.resetAdjustments()\n      that.resetScrollbar()\n      that.$element.trigger('hidden.bs.modal')\n    })\n  }\n\n  Modal.prototype.removeBackdrop = function () {\n    this.$backdrop && this.$backdrop.remove()\n    this.$backdrop = null\n  }\n\n  Modal.prototype.backdrop = function (callback) {\n    var that = this\n    var animate = this.$element.hasClass('fade') ? 
'fade' : ''\n\n    if (this.isShown && this.options.backdrop) {\n      var doAnimate = $.support.transition && animate\n\n      this.$backdrop = $(document.createElement('div'))\n        .addClass('modal-backdrop ' + animate)\n        .appendTo(this.$body)\n\n      this.$element.on('click.dismiss.bs.modal', $.proxy(function (e) {\n        if (this.ignoreBackdropClick) {\n          this.ignoreBackdropClick = false\n          return\n        }\n        if (e.target !== e.currentTarget) return\n        this.options.backdrop == 'static'\n          ? this.$element[0].focus()\n          : this.hide()\n      }, this))\n\n      if (doAnimate) this.$backdrop[0].offsetWidth // force reflow\n\n      this.$backdrop.addClass('in')\n\n      if (!callback) return\n\n      doAnimate ?\n        this.$backdrop\n          .one('bsTransitionEnd', callback)\n          .emulateTransitionEnd(Modal.BACKDROP_TRANSITION_DURATION) :\n        callback()\n\n    } else if (!this.isShown && this.$backdrop) {\n      this.$backdrop.removeClass('in')\n\n      var callbackRemove = function () {\n        that.removeBackdrop()\n        callback && callback()\n      }\n      $.support.transition && this.$element.hasClass('fade') ?\n        this.$backdrop\n          .one('bsTransitionEnd', callbackRemove)\n          .emulateTransitionEnd(Modal.BACKDROP_TRANSITION_DURATION) :\n        callbackRemove()\n\n    } else if (callback) {\n      callback()\n    }\n  }\n\n  // these following methods are used to handle overflowing modals\n\n  Modal.prototype.handleUpdate = function () {\n    this.adjustDialog()\n  }\n\n  Modal.prototype.adjustDialog = function () {\n    var modalIsOverflowing = this.$element[0].scrollHeight > document.documentElement.clientHeight\n\n    this.$element.css({\n      paddingLeft:  !this.bodyIsOverflowing && modalIsOverflowing ? this.scrollbarWidth : '',\n      paddingRight: this.bodyIsOverflowing && !modalIsOverflowing ? 
this.scrollbarWidth : ''\n    })\n  }\n\n  Modal.prototype.resetAdjustments = function () {\n    this.$element.css({\n      paddingLeft: '',\n      paddingRight: ''\n    })\n  }\n\n  Modal.prototype.checkScrollbar = function () {\n    var fullWindowWidth = window.innerWidth\n    if (!fullWindowWidth) { // workaround for missing window.innerWidth in IE8\n      var documentElementRect = document.documentElement.getBoundingClientRect()\n      fullWindowWidth = documentElementRect.right - Math.abs(documentElementRect.left)\n    }\n    this.bodyIsOverflowing = document.body.clientWidth < fullWindowWidth\n    this.scrollbarWidth = this.measureScrollbar()\n  }\n\n  Modal.prototype.setScrollbar = function () {\n    var bodyPad = parseInt((this.$body.css('padding-right') || 0), 10)\n    this.originalBodyPad = document.body.style.paddingRight || ''\n    if (this.bodyIsOverflowing) this.$body.css('padding-right', bodyPad + this.scrollbarWidth)\n  }\n\n  Modal.prototype.resetScrollbar = function () {\n    this.$body.css('padding-right', this.originalBodyPad)\n  }\n\n  Modal.prototype.measureScrollbar = function () { // thx walsh\n    var scrollDiv = document.createElement('div')\n    scrollDiv.className = 'modal-scrollbar-measure'\n    this.$body.append(scrollDiv)\n    var scrollbarWidth = scrollDiv.offsetWidth - scrollDiv.clientWidth\n    this.$body[0].removeChild(scrollDiv)\n    return scrollbarWidth\n  }\n\n\n  // MODAL PLUGIN DEFINITION\n  // =======================\n\n  function Plugin(option, _relatedTarget) {\n    return this.each(function () {\n      var $this   = $(this)\n      var data    = $this.data('bs.modal')\n      var options = $.extend({}, Modal.DEFAULTS, $this.data(), typeof option == 'object' && option)\n\n      if (!data) $this.data('bs.modal', (data = new Modal(this, options)))\n      if (typeof option == 'string') data[option](_relatedTarget)\n      else if (options.show) data.show(_relatedTarget)\n    })\n  }\n\n  var old = $.fn.modal\n\n  $.fn.modal     
        = Plugin\n  $.fn.modal.Constructor = Modal\n\n\n  // MODAL NO CONFLICT\n  // =================\n\n  $.fn.modal.noConflict = function () {\n    $.fn.modal = old\n    return this\n  }\n\n\n  // MODAL DATA-API\n  // ==============\n\n  $(document).on('click.bs.modal.data-api', '[data-toggle=\"modal\"]', function (e) {\n    var $this   = $(this)\n    var href    = $this.attr('href')\n    var $target = $($this.attr('data-target') || (href && href.replace(/.*(?=#[^\\s]+$)/, ''))) // strip for ie7\n    var option  = $target.data('bs.modal') ? 'toggle' : $.extend({ remote: !/#/.test(href) && href }, $target.data(), $this.data())\n\n    if ($this.is('a')) e.preventDefault()\n\n    $target.one('show.bs.modal', function (showEvent) {\n      if (showEvent.isDefaultPrevented()) return // only register focus restorer if modal will actually get shown\n      $target.one('hidden.bs.modal', function () {\n        $this.is(':visible') && $this.trigger('focus')\n      })\n    })\n    Plugin.call($target, option, this)\n  })\n\n}(jQuery);\n\n/* ========================================================================\n * Bootstrap: tooltip.js v3.3.6\n * http://getbootstrap.com/javascript/#tooltip\n * Inspired by the original jQuery.tipsy by Jason Frame\n * ========================================================================\n * Copyright 2011-2015 Twitter, Inc.\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)\n * ======================================================================== */\n\n\n+function ($) {\n  'use strict';\n\n  // TOOLTIP PUBLIC CLASS DEFINITION\n  // ===============================\n\n  var Tooltip = function (element, options) {\n    this.type       = null\n    this.options    = null\n    this.enabled    = null\n    this.timeout    = null\n    this.hoverState = null\n    this.$element   = null\n    this.inState    = null\n\n    this.init('tooltip', element, options)\n  }\n\n  Tooltip.VERSION  = '3.3.6'\n\n  
Tooltip.TRANSITION_DURATION = 150\n\n  Tooltip.DEFAULTS = {\n    animation: true,\n    placement: 'top',\n    selector: false,\n    template: '<div class=\"tooltip\" role=\"tooltip\"><div class=\"tooltip-arrow\"></div><div class=\"tooltip-inner\"></div></div>',\n    trigger: 'hover focus',\n    title: '',\n    delay: 0,\n    html: false,\n    container: false,\n    viewport: {\n      selector: 'body',\n      padding: 0\n    }\n  }\n\n  Tooltip.prototype.init = function (type, element, options) {\n    this.enabled   = true\n    this.type      = type\n    this.$element  = $(element)\n    this.options   = this.getOptions(options)\n    this.$viewport = this.options.viewport && $($.isFunction(this.options.viewport) ? this.options.viewport.call(this, this.$element) : (this.options.viewport.selector || this.options.viewport))\n    this.inState   = { click: false, hover: false, focus: false }\n\n    if (this.$element[0] instanceof document.constructor && !this.options.selector) {\n      throw new Error('`selector` option must be specified when initializing ' + this.type + ' on the window.document object!')\n    }\n\n    var triggers = this.options.trigger.split(' ')\n\n    for (var i = triggers.length; i--;) {\n      var trigger = triggers[i]\n\n      if (trigger == 'click') {\n        this.$element.on('click.' + this.type, this.options.selector, $.proxy(this.toggle, this))\n      } else if (trigger != 'manual') {\n        var eventIn  = trigger == 'hover' ? 'mouseenter' : 'focusin'\n        var eventOut = trigger == 'hover' ? 'mouseleave' : 'focusout'\n\n        this.$element.on(eventIn  + '.' + this.type, this.options.selector, $.proxy(this.enter, this))\n        this.$element.on(eventOut + '.' 
+ this.type, this.options.selector, $.proxy(this.leave, this))\n      }\n    }\n\n    this.options.selector ?\n      (this._options = $.extend({}, this.options, { trigger: 'manual', selector: '' })) :\n      this.fixTitle()\n  }\n\n  Tooltip.prototype.getDefaults = function () {\n    return Tooltip.DEFAULTS\n  }\n\n  Tooltip.prototype.getOptions = function (options) {\n    options = $.extend({}, this.getDefaults(), this.$element.data(), options)\n\n    if (options.delay && typeof options.delay == 'number') {\n      options.delay = {\n        show: options.delay,\n        hide: options.delay\n      }\n    }\n\n    return options\n  }\n\n  Tooltip.prototype.getDelegateOptions = function () {\n    var options  = {}\n    var defaults = this.getDefaults()\n\n    this._options && $.each(this._options, function (key, value) {\n      if (defaults[key] != value) options[key] = value\n    })\n\n    return options\n  }\n\n  Tooltip.prototype.enter = function (obj) {\n    var self = obj instanceof this.constructor ?\n      obj : $(obj.currentTarget).data('bs.' + this.type)\n\n    if (!self) {\n      self = new this.constructor(obj.currentTarget, this.getDelegateOptions())\n      $(obj.currentTarget).data('bs.' + this.type, self)\n    }\n\n    if (obj instanceof $.Event) {\n      self.inState[obj.type == 'focusin' ? 
'focus' : 'hover'] = true\n    }\n\n    if (self.tip().hasClass('in') || self.hoverState == 'in') {\n      self.hoverState = 'in'\n      return\n    }\n\n    clearTimeout(self.timeout)\n\n    self.hoverState = 'in'\n\n    if (!self.options.delay || !self.options.delay.show) return self.show()\n\n    self.timeout = setTimeout(function () {\n      if (self.hoverState == 'in') self.show()\n    }, self.options.delay.show)\n  }\n\n  Tooltip.prototype.isInStateTrue = function () {\n    for (var key in this.inState) {\n      if (this.inState[key]) return true\n    }\n\n    return false\n  }\n\n  Tooltip.prototype.leave = function (obj) {\n    var self = obj instanceof this.constructor ?\n      obj : $(obj.currentTarget).data('bs.' + this.type)\n\n    if (!self) {\n      self = new this.constructor(obj.currentTarget, this.getDelegateOptions())\n      $(obj.currentTarget).data('bs.' + this.type, self)\n    }\n\n    if (obj instanceof $.Event) {\n      self.inState[obj.type == 'focusout' ? 'focus' : 'hover'] = false\n    }\n\n    if (self.isInStateTrue()) return\n\n    clearTimeout(self.timeout)\n\n    self.hoverState = 'out'\n\n    if (!self.options.delay || !self.options.delay.hide) return self.hide()\n\n    self.timeout = setTimeout(function () {\n      if (self.hoverState == 'out') self.hide()\n    }, self.options.delay.hide)\n  }\n\n  Tooltip.prototype.show = function () {\n    var e = $.Event('show.bs.' 
+ this.type)\n\n    if (this.hasContent() && this.enabled) {\n      this.$element.trigger(e)\n\n      var inDom = $.contains(this.$element[0].ownerDocument.documentElement, this.$element[0])\n      if (e.isDefaultPrevented() || !inDom) return\n      var that = this\n\n      var $tip = this.tip()\n\n      var tipId = this.getUID(this.type)\n\n      this.setContent()\n      $tip.attr('id', tipId)\n      this.$element.attr('aria-describedby', tipId)\n\n      if (this.options.animation) $tip.addClass('fade')\n\n      var placement = typeof this.options.placement == 'function' ?\n        this.options.placement.call(this, $tip[0], this.$element[0]) :\n        this.options.placement\n\n      var autoToken = /\\s?auto?\\s?/i\n      var autoPlace = autoToken.test(placement)\n      if (autoPlace) placement = placement.replace(autoToken, '') || 'top'\n\n      $tip\n        .detach()\n        .css({ top: 0, left: 0, display: 'block' })\n        .addClass(placement)\n        .data('bs.' + this.type, this)\n\n      this.options.container ? $tip.appendTo(this.options.container) : $tip.insertAfter(this.$element)\n      this.$element.trigger('inserted.bs.' + this.type)\n\n      var pos          = this.getPosition()\n      var actualWidth  = $tip[0].offsetWidth\n      var actualHeight = $tip[0].offsetHeight\n\n      if (autoPlace) {\n        var orgPlacement = placement\n        var viewportDim = this.getPosition(this.$viewport)\n\n        placement = placement == 'bottom' && pos.bottom + actualHeight > viewportDim.bottom ? 'top'    :\n                    placement == 'top'    && pos.top    - actualHeight < viewportDim.top    ? 'bottom' :\n                    placement == 'right'  && pos.right  + actualWidth  > viewportDim.width  ? 'left'   :\n                    placement == 'left'   && pos.left   - actualWidth  < viewportDim.left   ? 
'right'  :\n                    placement\n\n        $tip\n          .removeClass(orgPlacement)\n          .addClass(placement)\n      }\n\n      var calculatedOffset = this.getCalculatedOffset(placement, pos, actualWidth, actualHeight)\n\n      this.applyPlacement(calculatedOffset, placement)\n\n      var complete = function () {\n        var prevHoverState = that.hoverState\n        that.$element.trigger('shown.bs.' + that.type)\n        that.hoverState = null\n\n        if (prevHoverState == 'out') that.leave(that)\n      }\n\n      $.support.transition && this.$tip.hasClass('fade') ?\n        $tip\n          .one('bsTransitionEnd', complete)\n          .emulateTransitionEnd(Tooltip.TRANSITION_DURATION) :\n        complete()\n    }\n  }\n\n  Tooltip.prototype.applyPlacement = function (offset, placement) {\n    var $tip   = this.tip()\n    var width  = $tip[0].offsetWidth\n    var height = $tip[0].offsetHeight\n\n    // manually read margins because getBoundingClientRect includes difference\n    var marginTop = parseInt($tip.css('margin-top'), 10)\n    var marginLeft = parseInt($tip.css('margin-left'), 10)\n\n    // we must check for NaN for ie 8/9\n    if (isNaN(marginTop))  marginTop  = 0\n    if (isNaN(marginLeft)) marginLeft = 0\n\n    offset.top  += marginTop\n    offset.left += marginLeft\n\n    // $.fn.offset doesn't round pixel values\n    // so we use setOffset directly with our own function B-0\n    $.offset.setOffset($tip[0], $.extend({\n      using: function (props) {\n        $tip.css({\n          top: Math.round(props.top),\n          left: Math.round(props.left)\n        })\n      }\n    }, offset), 0)\n\n    $tip.addClass('in')\n\n    // check to see if placing tip in new offset caused the tip to resize itself\n    var actualWidth  = $tip[0].offsetWidth\n    var actualHeight = $tip[0].offsetHeight\n\n    if (placement == 'top' && actualHeight != height) {\n      offset.top = offset.top + height - actualHeight\n    }\n\n    var delta = 
this.getViewportAdjustedDelta(placement, offset, actualWidth, actualHeight)\n\n    if (delta.left) offset.left += delta.left\n    else offset.top += delta.top\n\n    var isVertical          = /top|bottom/.test(placement)\n    var arrowDelta          = isVertical ? delta.left * 2 - width + actualWidth : delta.top * 2 - height + actualHeight\n    var arrowOffsetPosition = isVertical ? 'offsetWidth' : 'offsetHeight'\n\n    $tip.offset(offset)\n    this.replaceArrow(arrowDelta, $tip[0][arrowOffsetPosition], isVertical)\n  }\n\n  Tooltip.prototype.replaceArrow = function (delta, dimension, isVertical) {\n    this.arrow()\n      .css(isVertical ? 'left' : 'top', 50 * (1 - delta / dimension) + '%')\n      .css(isVertical ? 'top' : 'left', '')\n  }\n\n  Tooltip.prototype.setContent = function () {\n    var $tip  = this.tip()\n    var title = this.getTitle()\n\n    $tip.find('.tooltip-inner')[this.options.html ? 'html' : 'text'](title)\n    $tip.removeClass('fade in top bottom left right')\n  }\n\n  Tooltip.prototype.hide = function (callback) {\n    var that = this\n    var $tip = $(this.$tip)\n    var e    = $.Event('hide.bs.' + this.type)\n\n    function complete() {\n      if (that.hoverState != 'in') $tip.detach()\n      that.$element\n        .removeAttr('aria-describedby')\n        .trigger('hidden.bs.' 
+ that.type)\n      callback && callback()\n    }\n\n    this.$element.trigger(e)\n\n    if (e.isDefaultPrevented()) return\n\n    $tip.removeClass('in')\n\n    $.support.transition && $tip.hasClass('fade') ?\n      $tip\n        .one('bsTransitionEnd', complete)\n        .emulateTransitionEnd(Tooltip.TRANSITION_DURATION) :\n      complete()\n\n    this.hoverState = null\n\n    return this\n  }\n\n  Tooltip.prototype.fixTitle = function () {\n    var $e = this.$element\n    if ($e.attr('title') || typeof $e.attr('data-original-title') != 'string') {\n      $e.attr('data-original-title', $e.attr('title') || '').attr('title', '')\n    }\n  }\n\n  Tooltip.prototype.hasContent = function () {\n    return this.getTitle()\n  }\n\n  Tooltip.prototype.getPosition = function ($element) {\n    $element   = $element || this.$element\n\n    var el     = $element[0]\n    var isBody = el.tagName == 'BODY'\n\n    var elRect    = el.getBoundingClientRect()\n    if (elRect.width == null) {\n      // width and height are missing in IE8, so compute them manually; see https://github.com/twbs/bootstrap/issues/14093\n      elRect = $.extend({}, elRect, { width: elRect.right - elRect.left, height: elRect.bottom - elRect.top })\n    }\n    var elOffset  = isBody ? { top: 0, left: 0 } : $element.offset()\n    var scroll    = { scroll: isBody ? document.documentElement.scrollTop || document.body.scrollTop : $element.scrollTop() }\n    var outerDims = isBody ? { width: $(window).width(), height: $(window).height() } : null\n\n    return $.extend({}, elRect, scroll, outerDims, elOffset)\n  }\n\n  Tooltip.prototype.getCalculatedOffset = function (placement, pos, actualWidth, actualHeight) {\n    return placement == 'bottom' ? { top: pos.top + pos.height,   left: pos.left + pos.width / 2 - actualWidth / 2 } :\n           placement == 'top'    ? { top: pos.top - actualHeight, left: pos.left + pos.width / 2 - actualWidth / 2 } :\n           placement == 'left'   ? 
{ top: pos.top + pos.height / 2 - actualHeight / 2, left: pos.left - actualWidth } :\n        /* placement == 'right' */ { top: pos.top + pos.height / 2 - actualHeight / 2, left: pos.left + pos.width }\n\n  }\n\n  Tooltip.prototype.getViewportAdjustedDelta = function (placement, pos, actualWidth, actualHeight) {\n    var delta = { top: 0, left: 0 }\n    if (!this.$viewport) return delta\n\n    var viewportPadding = this.options.viewport && this.options.viewport.padding || 0\n    var viewportDimensions = this.getPosition(this.$viewport)\n\n    if (/right|left/.test(placement)) {\n      var topEdgeOffset    = pos.top - viewportPadding - viewportDimensions.scroll\n      var bottomEdgeOffset = pos.top + viewportPadding - viewportDimensions.scroll + actualHeight\n      if (topEdgeOffset < viewportDimensions.top) { // top overflow\n        delta.top = viewportDimensions.top - topEdgeOffset\n      } else if (bottomEdgeOffset > viewportDimensions.top + viewportDimensions.height) { // bottom overflow\n        delta.top = viewportDimensions.top + viewportDimensions.height - bottomEdgeOffset\n      }\n    } else {\n      var leftEdgeOffset  = pos.left - viewportPadding\n      var rightEdgeOffset = pos.left + viewportPadding + actualWidth\n      if (leftEdgeOffset < viewportDimensions.left) { // left overflow\n        delta.left = viewportDimensions.left - leftEdgeOffset\n      } else if (rightEdgeOffset > viewportDimensions.right) { // right overflow\n        delta.left = viewportDimensions.left + viewportDimensions.width - rightEdgeOffset\n      }\n    }\n\n    return delta\n  }\n\n  Tooltip.prototype.getTitle = function () {\n    var title\n    var $e = this.$element\n    var o  = this.options\n\n    title = $e.attr('data-original-title')\n      || (typeof o.title == 'function' ? 
o.title.call($e[0]) :  o.title)\n\n    return title\n  }\n\n  Tooltip.prototype.getUID = function (prefix) {\n    do prefix += ~~(Math.random() * 1000000)\n    while (document.getElementById(prefix))\n    return prefix\n  }\n\n  Tooltip.prototype.tip = function () {\n    if (!this.$tip) {\n      this.$tip = $(this.options.template)\n      if (this.$tip.length != 1) {\n        throw new Error(this.type + ' `template` option must consist of exactly 1 top-level element!')\n      }\n    }\n    return this.$tip\n  }\n\n  Tooltip.prototype.arrow = function () {\n    return (this.$arrow = this.$arrow || this.tip().find('.tooltip-arrow'))\n  }\n\n  Tooltip.prototype.enable = function () {\n    this.enabled = true\n  }\n\n  Tooltip.prototype.disable = function () {\n    this.enabled = false\n  }\n\n  Tooltip.prototype.toggleEnabled = function () {\n    this.enabled = !this.enabled\n  }\n\n  Tooltip.prototype.toggle = function (e) {\n    var self = this\n    if (e) {\n      self = $(e.currentTarget).data('bs.' + this.type)\n      if (!self) {\n        self = new this.constructor(e.currentTarget, this.getDelegateOptions())\n        $(e.currentTarget).data('bs.' + this.type, self)\n      }\n    }\n\n    if (e) {\n      self.inState.click = !self.inState.click\n      if (self.isInStateTrue()) self.enter(self)\n      else self.leave(self)\n    } else {\n      self.tip().hasClass('in') ? self.leave(self) : self.enter(self)\n    }\n  }\n\n  Tooltip.prototype.destroy = function () {\n    var that = this\n    clearTimeout(this.timeout)\n    this.hide(function () {\n      that.$element.off('.' + that.type).removeData('bs.' 
+ that.type)\n      if (that.$tip) {\n        that.$tip.detach()\n      }\n      that.$tip = null\n      that.$arrow = null\n      that.$viewport = null\n    })\n  }\n\n\n  // TOOLTIP PLUGIN DEFINITION\n  // =========================\n\n  function Plugin(option) {\n    return this.each(function () {\n      var $this   = $(this)\n      var data    = $this.data('bs.tooltip')\n      var options = typeof option == 'object' && option\n\n      if (!data && /destroy|hide/.test(option)) return\n      if (!data) $this.data('bs.tooltip', (data = new Tooltip(this, options)))\n      if (typeof option == 'string') data[option]()\n    })\n  }\n\n  var old = $.fn.tooltip\n\n  $.fn.tooltip             = Plugin\n  $.fn.tooltip.Constructor = Tooltip\n\n\n  // TOOLTIP NO CONFLICT\n  // ===================\n\n  $.fn.tooltip.noConflict = function () {\n    $.fn.tooltip = old\n    return this\n  }\n\n}(jQuery);\n\n/* ========================================================================\n * Bootstrap: popover.js v3.3.6\n * http://getbootstrap.com/javascript/#popovers\n * ========================================================================\n * Copyright 2011-2015 Twitter, Inc.\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)\n * ======================================================================== */\n\n\n+function ($) {\n  'use strict';\n\n  // POPOVER PUBLIC CLASS DEFINITION\n  // ===============================\n\n  var Popover = function (element, options) {\n    this.init('popover', element, options)\n  }\n\n  if (!$.fn.tooltip) throw new Error('Popover requires tooltip.js')\n\n  Popover.VERSION  = '3.3.6'\n\n  Popover.DEFAULTS = $.extend({}, $.fn.tooltip.Constructor.DEFAULTS, {\n    placement: 'right',\n    trigger: 'click',\n    content: '',\n    template: '<div class=\"popover\" role=\"tooltip\"><div class=\"arrow\"></div><h3 class=\"popover-title\"></h3><div class=\"popover-content\"></div></div>'\n  })\n\n\n  // NOTE: POPOVER EXTENDS 
tooltip.js\n  // ================================\n\n  Popover.prototype = $.extend({}, $.fn.tooltip.Constructor.prototype)\n\n  Popover.prototype.constructor = Popover\n\n  Popover.prototype.getDefaults = function () {\n    return Popover.DEFAULTS\n  }\n\n  Popover.prototype.setContent = function () {\n    var $tip    = this.tip()\n    var title   = this.getTitle()\n    var content = this.getContent()\n\n    $tip.find('.popover-title')[this.options.html ? 'html' : 'text'](title)\n    $tip.find('.popover-content').children().detach().end()[ // we use append for html objects to maintain js events\n      this.options.html ? (typeof content == 'string' ? 'html' : 'append') : 'text'\n    ](content)\n\n    $tip.removeClass('fade top bottom left right in')\n\n    // IE8 doesn't accept hiding via the `:empty` pseudo selector, we have to do\n    // this manually by checking the contents.\n    if (!$tip.find('.popover-title').html()) $tip.find('.popover-title').hide()\n  }\n\n  Popover.prototype.hasContent = function () {\n    return this.getTitle() || this.getContent()\n  }\n\n  Popover.prototype.getContent = function () {\n    var $e = this.$element\n    var o  = this.options\n\n    return $e.attr('data-content')\n      || (typeof o.content == 'function' ?\n            o.content.call($e[0]) :\n            o.content)\n  }\n\n  Popover.prototype.arrow = function () {\n    return (this.$arrow = this.$arrow || this.tip().find('.arrow'))\n  }\n\n\n  // POPOVER PLUGIN DEFINITION\n  // =========================\n\n  function Plugin(option) {\n    return this.each(function () {\n      var $this   = $(this)\n      var data    = $this.data('bs.popover')\n      var options = typeof option == 'object' && option\n\n      if (!data && /destroy|hide/.test(option)) return\n      if (!data) $this.data('bs.popover', (data = new Popover(this, options)))\n      if (typeof option == 'string') data[option]()\n    })\n  }\n\n  var old = $.fn.popover\n\n  $.fn.popover             = Plugin\n  
$.fn.popover.Constructor = Popover\n\n\n  // POPOVER NO CONFLICT\n  // ===================\n\n  $.fn.popover.noConflict = function () {\n    $.fn.popover = old\n    return this\n  }\n\n}(jQuery);\n\n/* ========================================================================\n * Bootstrap: scrollspy.js v3.3.6\n * http://getbootstrap.com/javascript/#scrollspy\n * ========================================================================\n * Copyright 2011-2015 Twitter, Inc.\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)\n * ======================================================================== */\n\n\n+function ($) {\n  'use strict';\n\n  // SCROLLSPY CLASS DEFINITION\n  // ==========================\n\n  function ScrollSpy(element, options) {\n    this.$body          = $(document.body)\n    this.$scrollElement = $(element).is(document.body) ? $(window) : $(element)\n    this.options        = $.extend({}, ScrollSpy.DEFAULTS, options)\n    this.selector       = (this.options.target || '') + ' .nav li > a'\n    this.offsets        = []\n    this.targets        = []\n    this.activeTarget   = null\n    this.scrollHeight   = 0\n\n    this.$scrollElement.on('scroll.bs.scrollspy', $.proxy(this.process, this))\n    this.refresh()\n    this.process()\n  }\n\n  ScrollSpy.VERSION  = '3.3.6'\n\n  ScrollSpy.DEFAULTS = {\n    offset: 10\n  }\n\n  ScrollSpy.prototype.getScrollHeight = function () {\n    return this.$scrollElement[0].scrollHeight || Math.max(this.$body[0].scrollHeight, document.documentElement.scrollHeight)\n  }\n\n  ScrollSpy.prototype.refresh = function () {\n    var that          = this\n    var offsetMethod  = 'offset'\n    var offsetBase    = 0\n\n    this.offsets      = []\n    this.targets      = []\n    this.scrollHeight = this.getScrollHeight()\n\n    if (!$.isWindow(this.$scrollElement[0])) {\n      offsetMethod = 'position'\n      offsetBase   = this.$scrollElement.scrollTop()\n    }\n\n    this.$body\n      
.find(this.selector)\n      .map(function () {\n        var $el   = $(this)\n        var href  = $el.data('target') || $el.attr('href')\n        var $href = /^#./.test(href) && $(href)\n\n        return ($href\n          && $href.length\n          && $href.is(':visible')\n          && [[$href[offsetMethod]().top + offsetBase, href]]) || null\n      })\n      .sort(function (a, b) { return a[0] - b[0] })\n      .each(function () {\n        that.offsets.push(this[0])\n        that.targets.push(this[1])\n      })\n  }\n\n  ScrollSpy.prototype.process = function () {\n    var scrollTop    = this.$scrollElement.scrollTop() + this.options.offset\n    var scrollHeight = this.getScrollHeight()\n    var maxScroll    = this.options.offset + scrollHeight - this.$scrollElement.height()\n    var offsets      = this.offsets\n    var targets      = this.targets\n    var activeTarget = this.activeTarget\n    var i\n\n    if (this.scrollHeight != scrollHeight) {\n      this.refresh()\n    }\n\n    if (scrollTop >= maxScroll) {\n      return activeTarget != (i = targets[targets.length - 1]) && this.activate(i)\n    }\n\n    if (activeTarget && scrollTop < offsets[0]) {\n      this.activeTarget = null\n      return this.clear()\n    }\n\n    for (i = offsets.length; i--;) {\n      activeTarget != targets[i]\n        && scrollTop >= offsets[i]\n        && (offsets[i + 1] === undefined || scrollTop < offsets[i + 1])\n        && this.activate(targets[i])\n    }\n  }\n\n  ScrollSpy.prototype.activate = function (target) {\n    this.activeTarget = target\n\n    this.clear()\n\n    var selector = this.selector +\n      '[data-target=\"' + target + '\"],' +\n      this.selector + '[href=\"' + target + '\"]'\n\n    var active = $(selector)\n      .parents('li')\n      .addClass('active')\n\n    if (active.parent('.dropdown-menu').length) {\n      active = active\n        .closest('li.dropdown')\n        .addClass('active')\n    }\n\n    active.trigger('activate.bs.scrollspy')\n  }\n\n  
ScrollSpy.prototype.clear = function () {\n    $(this.selector)\n      .parentsUntil(this.options.target, '.active')\n      .removeClass('active')\n  }\n\n\n  // SCROLLSPY PLUGIN DEFINITION\n  // ===========================\n\n  function Plugin(option) {\n    return this.each(function () {\n      var $this   = $(this)\n      var data    = $this.data('bs.scrollspy')\n      var options = typeof option == 'object' && option\n\n      if (!data) $this.data('bs.scrollspy', (data = new ScrollSpy(this, options)))\n      if (typeof option == 'string') data[option]()\n    })\n  }\n\n  var old = $.fn.scrollspy\n\n  $.fn.scrollspy             = Plugin\n  $.fn.scrollspy.Constructor = ScrollSpy\n\n\n  // SCROLLSPY NO CONFLICT\n  // =====================\n\n  $.fn.scrollspy.noConflict = function () {\n    $.fn.scrollspy = old\n    return this\n  }\n\n\n  // SCROLLSPY DATA-API\n  // ==================\n\n  $(window).on('load.bs.scrollspy.data-api', function () {\n    $('[data-spy=\"scroll\"]').each(function () {\n      var $spy = $(this)\n      Plugin.call($spy, $spy.data())\n    })\n  })\n\n}(jQuery);\n\n/* ========================================================================\n * Bootstrap: tab.js v3.3.6\n * http://getbootstrap.com/javascript/#tabs\n * ========================================================================\n * Copyright 2011-2015 Twitter, Inc.\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)\n * ======================================================================== */\n\n\n+function ($) {\n  'use strict';\n\n  // TAB CLASS DEFINITION\n  // ====================\n\n  var Tab = function (element) {\n    // jscs:disable requireDollarBeforejQueryAssignment\n    this.element = $(element)\n    // jscs:enable requireDollarBeforejQueryAssignment\n  }\n\n  Tab.VERSION = '3.3.6'\n\n  Tab.TRANSITION_DURATION = 150\n\n  Tab.prototype.show = function () {\n    var $this    = this.element\n    var $ul      = 
$this.closest('ul:not(.dropdown-menu)')\n    var selector = $this.data('target')\n\n    if (!selector) {\n      selector = $this.attr('href')\n      selector = selector && selector.replace(/.*(?=#[^\\s]*$)/, '') // strip for ie7\n    }\n\n    if ($this.parent('li').hasClass('active')) return\n\n    var $previous = $ul.find('.active:last a')\n    var hideEvent = $.Event('hide.bs.tab', {\n      relatedTarget: $this[0]\n    })\n    var showEvent = $.Event('show.bs.tab', {\n      relatedTarget: $previous[0]\n    })\n\n    $previous.trigger(hideEvent)\n    $this.trigger(showEvent)\n\n    if (showEvent.isDefaultPrevented() || hideEvent.isDefaultPrevented()) return\n\n    var $target = $(selector)\n\n    this.activate($this.closest('li'), $ul)\n    this.activate($target, $target.parent(), function () {\n      $previous.trigger({\n        type: 'hidden.bs.tab',\n        relatedTarget: $this[0]\n      })\n      $this.trigger({\n        type: 'shown.bs.tab',\n        relatedTarget: $previous[0]\n      })\n    })\n  }\n\n  Tab.prototype.activate = function (element, container, callback) {\n    var $active    = container.find('> .active')\n    var transition = callback\n      && $.support.transition\n      && ($active.length && $active.hasClass('fade') || !!container.find('> .fade').length)\n\n    function next() {\n      $active\n        .removeClass('active')\n        .find('> .dropdown-menu > .active')\n          .removeClass('active')\n        .end()\n        .find('[data-toggle=\"tab\"]')\n          .attr('aria-expanded', false)\n\n      element\n        .addClass('active')\n        .find('[data-toggle=\"tab\"]')\n          .attr('aria-expanded', true)\n\n      if (transition) {\n        element[0].offsetWidth // reflow for transition\n        element.addClass('in')\n      } else {\n        element.removeClass('fade')\n      }\n\n      if (element.parent('.dropdown-menu').length) {\n        element\n          .closest('li.dropdown')\n            .addClass('active')\n      
    .end()\n          .find('[data-toggle=\"tab\"]')\n            .attr('aria-expanded', true)\n      }\n\n      callback && callback()\n    }\n\n    $active.length && transition ?\n      $active\n        .one('bsTransitionEnd', next)\n        .emulateTransitionEnd(Tab.TRANSITION_DURATION) :\n      next()\n\n    $active.removeClass('in')\n  }\n\n\n  // TAB PLUGIN DEFINITION\n  // =====================\n\n  function Plugin(option) {\n    return this.each(function () {\n      var $this = $(this)\n      var data  = $this.data('bs.tab')\n\n      if (!data) $this.data('bs.tab', (data = new Tab(this)))\n      if (typeof option == 'string') data[option]()\n    })\n  }\n\n  var old = $.fn.tab\n\n  $.fn.tab             = Plugin\n  $.fn.tab.Constructor = Tab\n\n\n  // TAB NO CONFLICT\n  // ===============\n\n  $.fn.tab.noConflict = function () {\n    $.fn.tab = old\n    return this\n  }\n\n\n  // TAB DATA-API\n  // ============\n\n  var clickHandler = function (e) {\n    e.preventDefault()\n    Plugin.call($(this), 'show')\n  }\n\n  $(document)\n    .on('click.bs.tab.data-api', '[data-toggle=\"tab\"]', clickHandler)\n    .on('click.bs.tab.data-api', '[data-toggle=\"pill\"]', clickHandler)\n\n}(jQuery);\n\n/* ========================================================================\n * Bootstrap: affix.js v3.3.6\n * http://getbootstrap.com/javascript/#affix\n * ========================================================================\n * Copyright 2011-2015 Twitter, Inc.\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)\n * ======================================================================== */\n\n\n+function ($) {\n  'use strict';\n\n  // AFFIX CLASS DEFINITION\n  // ======================\n\n  var Affix = function (element, options) {\n    this.options = $.extend({}, Affix.DEFAULTS, options)\n\n    this.$target = $(this.options.target)\n      .on('scroll.bs.affix.data-api', $.proxy(this.checkPosition, this))\n      
.on('click.bs.affix.data-api',  $.proxy(this.checkPositionWithEventLoop, this))\n\n    this.$element     = $(element)\n    this.affixed      = null\n    this.unpin        = null\n    this.pinnedOffset = null\n\n    this.checkPosition()\n  }\n\n  Affix.VERSION  = '3.3.6'\n\n  Affix.RESET    = 'affix affix-top affix-bottom'\n\n  Affix.DEFAULTS = {\n    offset: 0,\n    target: window\n  }\n\n  Affix.prototype.getState = function (scrollHeight, height, offsetTop, offsetBottom) {\n    var scrollTop    = this.$target.scrollTop()\n    var position     = this.$element.offset()\n    var targetHeight = this.$target.height()\n\n    if (offsetTop != null && this.affixed == 'top') return scrollTop < offsetTop ? 'top' : false\n\n    if (this.affixed == 'bottom') {\n      if (offsetTop != null) return (scrollTop + this.unpin <= position.top) ? false : 'bottom'\n      return (scrollTop + targetHeight <= scrollHeight - offsetBottom) ? false : 'bottom'\n    }\n\n    var initializing   = this.affixed == null\n    var colliderTop    = initializing ? scrollTop : position.top\n    var colliderHeight = initializing ? 
targetHeight : height\n\n    if (offsetTop != null && scrollTop <= offsetTop) return 'top'\n    if (offsetBottom != null && (colliderTop + colliderHeight >= scrollHeight - offsetBottom)) return 'bottom'\n\n    return false\n  }\n\n  Affix.prototype.getPinnedOffset = function () {\n    if (this.pinnedOffset) return this.pinnedOffset\n    this.$element.removeClass(Affix.RESET).addClass('affix')\n    var scrollTop = this.$target.scrollTop()\n    var position  = this.$element.offset()\n    return (this.pinnedOffset = position.top - scrollTop)\n  }\n\n  Affix.prototype.checkPositionWithEventLoop = function () {\n    setTimeout($.proxy(this.checkPosition, this), 1)\n  }\n\n  Affix.prototype.checkPosition = function () {\n    if (!this.$element.is(':visible')) return\n\n    var height       = this.$element.height()\n    var offset       = this.options.offset\n    var offsetTop    = offset.top\n    var offsetBottom = offset.bottom\n    var scrollHeight = Math.max($(document).height(), $(document.body).height())\n\n    if (typeof offset != 'object')         offsetBottom = offsetTop = offset\n    if (typeof offsetTop == 'function')    offsetTop    = offset.top(this.$element)\n    if (typeof offsetBottom == 'function') offsetBottom = offset.bottom(this.$element)\n\n    var affix = this.getState(scrollHeight, height, offsetTop, offsetBottom)\n\n    if (this.affixed != affix) {\n      if (this.unpin != null) this.$element.css('top', '')\n\n      var affixType = 'affix' + (affix ? '-' + affix : '')\n      var e         = $.Event(affixType + '.bs.affix')\n\n      this.$element.trigger(e)\n\n      if (e.isDefaultPrevented()) return\n\n      this.affixed = affix\n      this.unpin = affix == 'bottom' ? 
this.getPinnedOffset() : null\n\n      this.$element\n        .removeClass(Affix.RESET)\n        .addClass(affixType)\n        .trigger(affixType.replace('affix', 'affixed') + '.bs.affix')\n    }\n\n    if (affix == 'bottom') {\n      this.$element.offset({\n        top: scrollHeight - height - offsetBottom\n      })\n    }\n  }\n\n\n  // AFFIX PLUGIN DEFINITION\n  // =======================\n\n  function Plugin(option) {\n    return this.each(function () {\n      var $this   = $(this)\n      var data    = $this.data('bs.affix')\n      var options = typeof option == 'object' && option\n\n      if (!data) $this.data('bs.affix', (data = new Affix(this, options)))\n      if (typeof option == 'string') data[option]()\n    })\n  }\n\n  var old = $.fn.affix\n\n  $.fn.affix             = Plugin\n  $.fn.affix.Constructor = Affix\n\n\n  // AFFIX NO CONFLICT\n  // =================\n\n  $.fn.affix.noConflict = function () {\n    $.fn.affix = old\n    return this\n  }\n\n\n  // AFFIX DATA-API\n  // ==============\n\n  $(window).on('load', function () {\n    $('[data-spy=\"affix\"]').each(function () {\n      var $spy = $(this)\n      var data = $spy.data()\n\n      data.offset = data.offset || {}\n\n      if (data.offsetBottom != null) data.offset.bottom = data.offsetBottom\n      if (data.offsetTop    != null) data.offset.top    = data.offsetTop\n\n      Plugin.call($spy, data)\n    })\n  })\n\n}(jQuery);\n"
  },
  {
    "path": "web_gui/gui_v3/js/dataTables.bootstrap.js",
    "content": "/*! DataTables Bootstrap 3 integration\n * ©2011-2015 SpryMedia Ltd - datatables.net/license\n */\n\n/**\n * DataTables integration for Bootstrap 3. This requires Bootstrap 3 and\n * DataTables 1.10 or newer.\n *\n * This file sets the defaults and adds options to DataTables to style its\n * controls using Bootstrap. See http://datatables.net/manual/styling/bootstrap\n * for further information.\n */\n(function( factory ){\n\tif ( typeof define === 'function' && define.amd ) {\n\t\t// AMD\n\t\tdefine( ['jquery', 'datatables.net'], function ( $ ) {\n\t\t\treturn factory( $, window, document );\n\t\t} );\n\t}\n\telse if ( typeof exports === 'object' ) {\n\t\t// CommonJS\n\t\tmodule.exports = function (root, $) {\n\t\t\tif ( ! root ) {\n\t\t\t\troot = window;\n\t\t\t}\n\n\t\t\tif ( ! $ || ! $.fn.dataTable ) {\n\t\t\t\t// Require DataTables, which attaches to jQuery, including\n\t\t\t\t// jQuery if needed and have a $ property so we can access the\n\t\t\t\t// jQuery object that is used\n\t\t\t\t$ = require('datatables.net')(root, $).$;\n\t\t\t}\n\n\t\t\treturn factory( $, root, root.document );\n\t\t};\n\t}\n\telse {\n\t\t// Browser\n\t\tfactory( jQuery, window, document );\n\t}\n}(function( $, window, document, undefined ) {\n'use strict';\nvar DataTable = $.fn.dataTable;\n\n\n/* Set the defaults for DataTables initialisation */\n$.extend( true, DataTable.defaults, {\n\tdom:\n\t\t\"<'row'<'col-sm-6'l><'col-sm-6'f>>\" +\n\t\t\"<'row'<'col-sm-12'tr>>\" +\n\t\t\"<'row'<'col-sm-5'i><'col-sm-7'p>>\",\n\trenderer: 'bootstrap'\n} );\n\n\n/* Default class modification */\n$.extend( DataTable.ext.classes, {\n\tsWrapper:      \"dataTables_wrapper form-inline dt-bootstrap\",\n\tsFilterInput:  \"form-control input-sm\",\n\tsLengthSelect: \"form-control input-sm\",\n\tsProcessing:   \"dataTables_processing panel panel-default\"\n} );\n\n\n/* Bootstrap paging button renderer */\nDataTable.ext.renderer.pageButton.bootstrap = function ( settings, host, idx, buttons, 
page, pages ) {\n\tvar api     = new DataTable.Api( settings );\n\tvar classes = settings.oClasses;\n\tvar lang    = settings.oLanguage.oPaginate;\n\tvar aria = settings.oLanguage.oAria.paginate || {};\n\tvar btnDisplay, btnClass, counter=0;\n\n\tvar attach = function( container, buttons ) {\n\t\tvar i, ien, node, button;\n\t\tvar clickHandler = function ( e ) {\n\t\t\te.preventDefault();\n\t\t\tif ( !$(e.currentTarget).hasClass('disabled') && api.page() != e.data.action ) {\n\t\t\t\tapi.page( e.data.action ).draw( 'page' );\n\t\t\t}\n\t\t};\n\n\t\tfor ( i=0, ien=buttons.length ; i<ien ; i++ ) {\n\t\t\tbutton = buttons[i];\n\n\t\t\tif ( $.isArray( button ) ) {\n\t\t\t\tattach( container, button );\n\t\t\t}\n\t\t\telse {\n\t\t\t\tbtnDisplay = '';\n\t\t\t\tbtnClass = '';\n\n\t\t\t\tswitch ( button ) {\n\t\t\t\t\tcase 'ellipsis':\n\t\t\t\t\t\tbtnDisplay = '&#x2026;';\n\t\t\t\t\t\tbtnClass = 'disabled';\n\t\t\t\t\t\tbreak;\n\n\t\t\t\t\tcase 'first':\n\t\t\t\t\t\tbtnDisplay = lang.sFirst;\n\t\t\t\t\t\tbtnClass = button + (page > 0 ?\n\t\t\t\t\t\t\t'' : ' disabled');\n\t\t\t\t\t\tbreak;\n\n\t\t\t\t\tcase 'previous':\n\t\t\t\t\t\tbtnDisplay = lang.sPrevious;\n\t\t\t\t\t\tbtnClass = button + (page > 0 ?\n\t\t\t\t\t\t\t'' : ' disabled');\n\t\t\t\t\t\tbreak;\n\n\t\t\t\t\tcase 'next':\n\t\t\t\t\t\tbtnDisplay = lang.sNext;\n\t\t\t\t\t\tbtnClass = button + (page < pages-1 ?\n\t\t\t\t\t\t\t'' : ' disabled');\n\t\t\t\t\t\tbreak;\n\n\t\t\t\t\tcase 'last':\n\t\t\t\t\t\tbtnDisplay = lang.sLast;\n\t\t\t\t\t\tbtnClass = button + (page < pages-1 ?\n\t\t\t\t\t\t\t'' : ' disabled');\n\t\t\t\t\t\tbreak;\n\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tbtnDisplay = button + 1;\n\t\t\t\t\t\tbtnClass = page === button ?\n\t\t\t\t\t\t\t'active' : '';\n\t\t\t\t\t\tbreak;\n\t\t\t\t}\n\n\t\t\t\tif ( btnDisplay ) {\n\t\t\t\t\tnode = $('<li>', {\n\t\t\t\t\t\t\t'class': classes.sPageButton+' '+btnClass,\n\t\t\t\t\t\t\t'id': idx === 0 && typeof button === 'string' ?\n\t\t\t\t\t\t\t\tsettings.sTableId +'_'+ button 
:\n\t\t\t\t\t\t\t\tnull\n\t\t\t\t\t\t} )\n\t\t\t\t\t\t.append( $('<a>', {\n\t\t\t\t\t\t\t\t'href': '#',\n\t\t\t\t\t\t\t\t'aria-controls': settings.sTableId,\n\t\t\t\t\t\t\t\t'aria-label': aria[ button ],\n\t\t\t\t\t\t\t\t'data-dt-idx': counter,\n\t\t\t\t\t\t\t\t'tabindex': settings.iTabIndex\n\t\t\t\t\t\t\t} )\n\t\t\t\t\t\t\t.html( btnDisplay )\n\t\t\t\t\t\t)\n\t\t\t\t\t\t.appendTo( container );\n\n\t\t\t\t\tsettings.oApi._fnBindAction(\n\t\t\t\t\t\tnode, {action: button}, clickHandler\n\t\t\t\t\t);\n\n\t\t\t\t\tcounter++;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t};\n\n\t// IE9 throws an 'unknown error' if document.activeElement is used\n\t// inside an iframe or frame. \n\tvar activeEl;\n\n\ttry {\n\t\t// Because this approach is destroying and recreating the paging\n\t\t// elements, focus is lost on the select button which is bad for\n\t\t// accessibility. So we want to restore focus once the draw has\n\t\t// completed\n\t\tactiveEl = $(host).find(document.activeElement).data('dt-idx');\n\t}\n\tcatch (e) {}\n\n\tattach(\n\t\t$(host).empty().html('<ul class=\"pagination\"/>').children('ul'),\n\t\tbuttons\n\t);\n\n\tif ( activeEl ) {\n\t\t$(host).find( '[data-dt-idx='+activeEl+']' ).focus();\n\t}\n};\n\n\nreturn DataTable;\n}));"
  },
  {
    "path": "web_gui/gui_v3/js/dataTables.bootstrap4.js",
    "content": "/*! DataTables Bootstrap 3 integration\n * ©2011-2015 SpryMedia Ltd - datatables.net/license\n */\n\n/**\n * DataTables integration for Bootstrap 3. This requires Bootstrap 3 and\n * DataTables 1.10 or newer.\n *\n * This file sets the defaults and adds options to DataTables to style its\n * controls using Bootstrap. See http://datatables.net/manual/styling/bootstrap\n * for further information.\n */\n(function( factory ){\n\tif ( typeof define === 'function' && define.amd ) {\n\t\t// AMD\n\t\tdefine( ['jquery', 'datatables.net'], function ( $ ) {\n\t\t\treturn factory( $, window, document );\n\t\t} );\n\t}\n\telse if ( typeof exports === 'object' ) {\n\t\t// CommonJS\n\t\tmodule.exports = function (root, $) {\n\t\t\tif ( ! root ) {\n\t\t\t\troot = window;\n\t\t\t}\n\n\t\t\tif ( ! $ || ! $.fn.dataTable ) {\n\t\t\t\t// Require DataTables, which attaches to jQuery, including\n\t\t\t\t// jQuery if needed and have a $ property so we can access the\n\t\t\t\t// jQuery object that is used\n\t\t\t\t$ = require('datatables.net')(root, $).$;\n\t\t\t}\n\n\t\t\treturn factory( $, root, root.document );\n\t\t};\n\t}\n\telse {\n\t\t// Browser\n\t\tfactory( jQuery, window, document );\n\t}\n}(function( $, window, document, undefined ) {\n'use strict';\nvar DataTable = $.fn.dataTable;\n\n\n/* Set the defaults for DataTables initialisation */\n$.extend( true, DataTable.defaults, {\n\tdom:\n\t\t\"<'row'<'col-md-6'l><'col-md-6'f>>\" +\n\t\t\"<'row'<'col-md-12'tr>>\" +\n\t\t\"<'row'<'col-md-5'i><'col-md-7'p>>\",\n\trenderer: 'bootstrap'\n} );\n\n\n/* Default class modification */\n$.extend( DataTable.ext.classes, {\n\tsWrapper:      \"dataTables_wrapper form-inline dt-bootstrap4\",\n\tsFilterInput:  \"form-control input-sm\",\n\tsLengthSelect: \"form-control input-sm\",\n\tsProcessing:   \"dataTables_processing panel panel-default\",\n\tsPageButton:   \"paginate_button page-item\"\n} );\n\n\n/* Bootstrap paging button renderer 
*/\nDataTable.ext.renderer.pageButton.bootstrap = function ( settings, host, idx, buttons, page, pages ) {\n\tvar api     = new DataTable.Api( settings );\n\tvar classes = settings.oClasses;\n\tvar lang    = settings.oLanguage.oPaginate;\n\tvar aria = settings.oLanguage.oAria.paginate || {};\n\tvar btnDisplay, btnClass, counter=0;\n\n\tvar attach = function( container, buttons ) {\n\t\tvar i, ien, node, button;\n\t\tvar clickHandler = function ( e ) {\n\t\t\te.preventDefault();\n\t\t\tif ( !$(e.currentTarget).hasClass('disabled') && api.page() != e.data.action ) {\n\t\t\t\tapi.page( e.data.action ).draw( 'page' );\n\t\t\t}\n\t\t};\n\n\t\tfor ( i=0, ien=buttons.length ; i<ien ; i++ ) {\n\t\t\tbutton = buttons[i];\n\n\t\t\tif ( $.isArray( button ) ) {\n\t\t\t\tattach( container, button );\n\t\t\t}\n\t\t\telse {\n\t\t\t\tbtnDisplay = '';\n\t\t\t\tbtnClass = '';\n\n\t\t\t\tswitch ( button ) {\n\t\t\t\t\tcase 'ellipsis':\n\t\t\t\t\t\tbtnDisplay = '&#x2026;';\n\t\t\t\t\t\tbtnClass = 'disabled';\n\t\t\t\t\t\tbreak;\n\n\t\t\t\t\tcase 'first':\n\t\t\t\t\t\tbtnDisplay = lang.sFirst;\n\t\t\t\t\t\tbtnClass = button + (page > 0 ?\n\t\t\t\t\t\t\t'' : ' disabled');\n\t\t\t\t\t\tbreak;\n\n\t\t\t\t\tcase 'previous':\n\t\t\t\t\t\tbtnDisplay = lang.sPrevious;\n\t\t\t\t\t\tbtnClass = button + (page > 0 ?\n\t\t\t\t\t\t\t'' : ' disabled');\n\t\t\t\t\t\tbreak;\n\n\t\t\t\t\tcase 'next':\n\t\t\t\t\t\tbtnDisplay = lang.sNext;\n\t\t\t\t\t\tbtnClass = button + (page < pages-1 ?\n\t\t\t\t\t\t\t'' : ' disabled');\n\t\t\t\t\t\tbreak;\n\n\t\t\t\t\tcase 'last':\n\t\t\t\t\t\tbtnDisplay = lang.sLast;\n\t\t\t\t\t\tbtnClass = button + (page < pages-1 ?\n\t\t\t\t\t\t\t'' : ' disabled');\n\t\t\t\t\t\tbreak;\n\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tbtnDisplay = button + 1;\n\t\t\t\t\t\tbtnClass = page === button ?\n\t\t\t\t\t\t\t'active' : '';\n\t\t\t\t\t\tbreak;\n\t\t\t\t}\n\n\t\t\t\tif ( btnDisplay ) {\n\t\t\t\t\tnode = $('<li>', {\n\t\t\t\t\t\t\t'class': classes.sPageButton+' 
'+btnClass,\n\t\t\t\t\t\t\t'id': idx === 0 && typeof button === 'string' ?\n\t\t\t\t\t\t\t\tsettings.sTableId +'_'+ button :\n\t\t\t\t\t\t\t\tnull\n\t\t\t\t\t\t} )\n\t\t\t\t\t\t.append( $('<a>', {\n\t\t\t\t\t\t\t\t'href': '#',\n\t\t\t\t\t\t\t\t'aria-controls': settings.sTableId,\n\t\t\t\t\t\t\t\t'aria-label': aria[ button ],\n\t\t\t\t\t\t\t\t'data-dt-idx': counter,\n\t\t\t\t\t\t\t\t'tabindex': settings.iTabIndex,\n\t\t\t\t\t\t\t\t'class': 'page-link'\n\t\t\t\t\t\t\t} )\n\t\t\t\t\t\t\t.html( btnDisplay )\n\t\t\t\t\t\t)\n\t\t\t\t\t\t.appendTo( container );\n\n\t\t\t\t\tsettings.oApi._fnBindAction(\n\t\t\t\t\t\tnode, {action: button}, clickHandler\n\t\t\t\t\t);\n\n\t\t\t\t\tcounter++;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t};\n\n\t// IE9 throws an 'unknown error' if document.activeElement is used\n\t// inside an iframe or frame. \n\tvar activeEl;\n\n\ttry {\n\t\t// Because this approach is destroying and recreating the paging\n\t\t// elements, focus is lost on the select button which is bad for\n\t\t// accessibility. So we want to restore focus once the draw has\n\t\t// completed\n\t\tactiveEl = $(host).find(document.activeElement).data('dt-idx');\n\t}\n\tcatch (e) {}\n\n\tattach(\n\t\t$(host).empty().html('<ul class=\"pagination\"/>').children('ul'),\n\t\tbuttons\n\t);\n\n\tif ( activeEl ) {\n\t\t$(host).find( '[data-dt-idx='+activeEl+']' ).focus();\n\t}\n};\n\n\nreturn DataTable;\n}));"
  },
  {
    "path": "web_gui/gui_v3/js/dataTables.foundation.js",
    "content": "/*! DataTables Foundation integration\n * ©2011-2015 SpryMedia Ltd - datatables.net/license\n */\n\n/**\n * DataTables integration for Foundation. This requires Foundation 5 and\n * DataTables 1.10 or newer.\n *\n * This file sets the defaults and adds options to DataTables to style its\n * controls using Foundation. See http://datatables.net/manual/styling/foundation\n * for further information.\n */\n(function( factory ){\n\tif ( typeof define === 'function' && define.amd ) {\n\t\t// AMD\n\t\tdefine( ['jquery', 'datatables.net'], function ( $ ) {\n\t\t\treturn factory( $, window, document );\n\t\t} );\n\t}\n\telse if ( typeof exports === 'object' ) {\n\t\t// CommonJS\n\t\tmodule.exports = function (root, $) {\n\t\t\tif ( ! root ) {\n\t\t\t\troot = window;\n\t\t\t}\n\n\t\t\tif ( ! $ || ! $.fn.dataTable ) {\n\t\t\t\t$ = require('datatables.net')(root, $).$;\n\t\t\t}\n\n\t\t\treturn factory( $, root, root.document );\n\t\t};\n\t}\n\telse {\n\t\t// Browser\n\t\tfactory( jQuery, window, document );\n\t}\n}(function( $, window, document, undefined ) {\n'use strict';\nvar DataTable = $.fn.dataTable;\n\n// Detect Foundation 5 / 6 as they have different element and class requirements\nvar meta = $('<meta class=\"foundation-mq\"/>').appendTo('head');\nDataTable.ext.foundationVersion = meta.css('font-family').match(/small|medium|large/) ? 
6 : 5;\nmeta.remove();\n\n\n$.extend( DataTable.ext.classes, {\n\tsWrapper:    \"dataTables_wrapper dt-foundation\",\n\tsProcessing: \"dataTables_processing panel\"\n} );\n\n\n/* Set the defaults for DataTables initialisation */\n$.extend( true, DataTable.defaults, {\n\tdom:\n\t\t\"<'row'<'small-6 columns'l><'small-6 columns'f>r>\"+\n\t\t\"t\"+\n\t\t\"<'row'<'small-6 columns'i><'small-6 columns'p>>\",\n\trenderer: 'foundation'\n} );\n\n\n/* Page button renderer */\nDataTable.ext.renderer.pageButton.foundation = function ( settings, host, idx, buttons, page, pages ) {\n\tvar api = new DataTable.Api( settings );\n\tvar classes = settings.oClasses;\n\tvar lang = settings.oLanguage.oPaginate;\n\tvar aria = settings.oLanguage.oAria.paginate || {};\n\tvar btnDisplay, btnClass;\n\tvar tag;\n\tvar v5 = DataTable.ext.foundationVersion === 5;\n\n\tvar attach = function( container, buttons ) {\n\t\tvar i, ien, node, button;\n\t\tvar clickHandler = function ( e ) {\n\t\t\te.preventDefault();\n\t\t\tif ( !$(e.currentTarget).hasClass('unavailable') && api.page() != e.data.action ) {\n\t\t\t\tapi.page( e.data.action ).draw( 'page' );\n\t\t\t}\n\t\t};\n\n\t\tfor ( i=0, ien=buttons.length ; i<ien ; i++ ) {\n\t\t\tbutton = buttons[i];\n\n\t\t\tif ( $.isArray( button ) ) {\n\t\t\t\tattach( container, button );\n\t\t\t}\n\t\t\telse {\n\t\t\t\tbtnDisplay = '';\n\t\t\t\tbtnClass = '';\n\t\t\t\ttag = null;\n\n\t\t\t\tswitch ( button ) {\n\t\t\t\t\tcase 'ellipsis':\n\t\t\t\t\t\tbtnDisplay = '&#x2026;';\n\t\t\t\t\t\tbtnClass = 'unavailable disabled';\n\t\t\t\t\t\ttag = null;\n\t\t\t\t\t\tbreak;\n\n\t\t\t\t\tcase 'first':\n\t\t\t\t\t\tbtnDisplay = lang.sFirst;\n\t\t\t\t\t\tbtnClass = button + (page > 0 ?\n\t\t\t\t\t\t\t'' : ' unavailable disabled');\n\t\t\t\t\t\ttag = page > 0 ? 
'a' : null;\n\t\t\t\t\t\tbreak;\n\n\t\t\t\t\tcase 'previous':\n\t\t\t\t\t\tbtnDisplay = lang.sPrevious;\n\t\t\t\t\t\tbtnClass = button + (page > 0 ?\n\t\t\t\t\t\t\t'' : ' unavailable disabled');\n\t\t\t\t\t\ttag = page > 0 ? 'a' : null;\n\t\t\t\t\t\tbreak;\n\n\t\t\t\t\tcase 'next':\n\t\t\t\t\t\tbtnDisplay = lang.sNext;\n\t\t\t\t\t\tbtnClass = button + (page < pages-1 ?\n\t\t\t\t\t\t\t'' : ' unavailable disabled');\n\t\t\t\t\t\ttag = page < pages-1 ? 'a' : null;\n\t\t\t\t\t\tbreak;\n\n\t\t\t\t\tcase 'last':\n\t\t\t\t\t\tbtnDisplay = lang.sLast;\n\t\t\t\t\t\tbtnClass = button + (page < pages-1 ?\n\t\t\t\t\t\t\t'' : ' unavailable disabled');\n\t\t\t\t\t\ttag = page < pages-1 ? 'a' : null;\n\t\t\t\t\t\tbreak;\n\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tbtnDisplay = button + 1;\n\t\t\t\t\t\tbtnClass = page === button ?\n\t\t\t\t\t\t\t'current' : '';\n\t\t\t\t\t\ttag = page === button ?\n\t\t\t\t\t\t\tnull : 'a';\n\t\t\t\t\t\tbreak;\n\t\t\t\t}\n\n\t\t\t\tif ( v5 ) {\n\t\t\t\t\ttag = 'a';\n\t\t\t\t}\n\n\t\t\t\tif ( btnDisplay ) {\n\t\t\t\t\tnode = $('<li>', {\n\t\t\t\t\t\t\t'class': classes.sPageButton+' '+btnClass,\n\t\t\t\t\t\t\t'aria-controls': settings.sTableId,\n\t\t\t\t\t\t\t'aria-label': aria[ button ],\n\t\t\t\t\t\t\t'tabindex': settings.iTabIndex,\n\t\t\t\t\t\t\t'id': idx === 0 && typeof button === 'string' ?\n\t\t\t\t\t\t\t\tsettings.sTableId +'_'+ button :\n\t\t\t\t\t\t\t\tnull\n\t\t\t\t\t\t} )\n\t\t\t\t\t\t.append( tag ?\n\t\t\t\t\t\t\t$('<'+tag+'/>', {'href': '#'} ).html( btnDisplay ) :\n\t\t\t\t\t\t\tbtnDisplay\n\t\t\t\t\t\t)\n\t\t\t\t\t\t.appendTo( container );\n\n\t\t\t\t\tsettings.oApi._fnBindAction(\n\t\t\t\t\t\tnode, {action: button}, clickHandler\n\t\t\t\t\t);\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t};\n\n\tattach(\n\t\t$(host).empty().html('<ul class=\"pagination\"/>').children('ul'),\n\t\tbuttons\n\t);\n};\n\n\nreturn DataTable;\n}));\n"
  },
  {
    "path": "web_gui/gui_v3/js/dataTables.jqueryui.js",
    "content": "/*! DataTables jQuery UI integration\n * ©2011-2014 SpryMedia Ltd - datatables.net/license\n */\n\n/**\n * DataTables integration for jQuery UI. This requires jQuery UI and\n * DataTables 1.10 or newer.\n *\n * This file sets the defaults and adds options to DataTables to style its\n * controls using jQuery UI. See http://datatables.net/manual/styling/jqueryui\n * for further information.\n */\n(function( factory ){\n\tif ( typeof define === 'function' && define.amd ) {\n\t\t// AMD\n\t\tdefine( ['jquery', 'datatables.net'], function ( $ ) {\n\t\t\treturn factory( $, window, document );\n\t\t} );\n\t}\n\telse if ( typeof exports === 'object' ) {\n\t\t// CommonJS\n\t\tmodule.exports = function (root, $) {\n\t\t\tif ( ! root ) {\n\t\t\t\troot = window;\n\t\t\t}\n\n\t\t\tif ( ! $ || ! $.fn.dataTable ) {\n\t\t\t\t$ = require('datatables.net')(root, $).$;\n\t\t\t}\n\n\t\t\treturn factory( $, root, root.document );\n\t\t};\n\t}\n\telse {\n\t\t// Browser\n\t\tfactory( jQuery, window, document );\n\t}\n}(function( $, window, document, undefined ) {\n'use strict';\nvar DataTable = $.fn.dataTable;\n\n\nvar sort_prefix = 'css_right ui-icon ui-icon-';\nvar toolbar_prefix = 'fg-toolbar ui-toolbar ui-widget-header ui-helper-clearfix ui-corner-';\n\n/* Set the defaults for DataTables initialisation */\n$.extend( true, DataTable.defaults, {\n\tdom:\n\t\t'<\"'+toolbar_prefix+'tl ui-corner-tr\"lfr>'+\n\t\t't'+\n\t\t'<\"'+toolbar_prefix+'bl ui-corner-br\"ip>',\n\trenderer: 'jqueryui'\n} );\n\n\n$.extend( DataTable.ext.classes, {\n\t\"sWrapper\":            \"dataTables_wrapper dt-jqueryui\",\n\n\t/* Full numbers paging buttons */\n\t\"sPageButton\":         \"fg-button ui-button ui-state-default\",\n\t\"sPageButtonActive\":   \"ui-state-disabled\",\n\t\"sPageButtonDisabled\": \"ui-state-disabled\",\n\n\t/* Features */\n\t\"sPaging\": \"dataTables_paginate fg-buttonset ui-buttonset fg-buttonset-multi \"+\n\t\t\"ui-buttonset-multi paging_\", /* Note that the type is 
postfixed */\n\n\t/* Sorting */\n\t\"sSortAsc\":            \"ui-state-default sorting_asc\",\n\t\"sSortDesc\":           \"ui-state-default sorting_desc\",\n\t\"sSortable\":           \"ui-state-default sorting\",\n\t\"sSortableAsc\":        \"ui-state-default sorting_asc_disabled\",\n\t\"sSortableDesc\":       \"ui-state-default sorting_desc_disabled\",\n\t\"sSortableNone\":       \"ui-state-default sorting_disabled\",\n\t\"sSortIcon\":           \"DataTables_sort_icon\",\n\n\t/* Scrolling */\n\t\"sScrollHead\": \"dataTables_scrollHead \"+\"ui-state-default\",\n\t\"sScrollFoot\": \"dataTables_scrollFoot \"+\"ui-state-default\",\n\n\t/* Misc */\n\t\"sHeaderTH\":  \"ui-state-default\",\n\t\"sFooterTH\":  \"ui-state-default\"\n} );\n\n\nDataTable.ext.renderer.header.jqueryui = function ( settings, cell, column, classes ) {\n\t// Calculate what the unsorted class should be\n\tvar noSortAppliedClass = sort_prefix+'carat-2-n-s';\n\tvar asc = $.inArray('asc', column.asSorting) !== -1;\n\tvar desc = $.inArray('desc', column.asSorting) !== -1;\n\n\tif ( !column.bSortable || (!asc && !desc) ) {\n\t\tnoSortAppliedClass = '';\n\t}\n\telse if ( asc && !desc ) {\n\t\tnoSortAppliedClass = sort_prefix+'carat-1-n';\n\t}\n\telse if ( !asc && desc ) {\n\t\tnoSortAppliedClass = sort_prefix+'carat-1-s';\n\t}\n\n\t// Setup the DOM structure\n\t$('<div/>')\n\t\t.addClass( 'DataTables_sort_wrapper' )\n\t\t.append( cell.contents() )\n\t\t.append( $('<span/>')\n\t\t\t.addClass( classes.sSortIcon+' '+noSortAppliedClass )\n\t\t)\n\t\t.appendTo( cell );\n\n\t// Attach a sort listener to update on sort\n\t$(settings.nTable).on( 'order.dt', function ( e, ctx, sorting, columns ) {\n\t\tif ( settings !== ctx ) {\n\t\t\treturn;\n\t\t}\n\n\t\tvar colIdx = column.idx;\n\n\t\tcell\n\t\t\t.removeClass( classes.sSortAsc +\" \"+classes.sSortDesc )\n\t\t\t.addClass( columns[ colIdx ] == 'asc' ?\n\t\t\t\tclasses.sSortAsc : columns[ colIdx ] == 'desc' ?\n\t\t\t\t\tclasses.sSortDesc 
:\n\t\t\t\t\tcolumn.sSortingClass\n\t\t\t);\n\n\t\tcell\n\t\t\t.find( 'span.'+classes.sSortIcon )\n\t\t\t.removeClass(\n\t\t\t\tsort_prefix+'triangle-1-n' +\" \"+\n\t\t\t\tsort_prefix+'triangle-1-s' +\" \"+\n\t\t\t\tsort_prefix+'carat-2-n-s' +\" \"+\n\t\t\t\tsort_prefix+'carat-1-n' +\" \"+\n\t\t\t\tsort_prefix+'carat-1-s'\n\t\t\t)\n\t\t\t.addClass( columns[ colIdx ] == 'asc' ?\n\t\t\t\tsort_prefix+'triangle-1-n' : columns[ colIdx ] == 'desc' ?\n\t\t\t\t\tsort_prefix+'triangle-1-s' :\n\t\t\t\t\tnoSortAppliedClass\n\t\t\t);\n\t} );\n};\n\n\n/*\n * TableTools jQuery UI compatibility\n * Required TableTools 2.1+\n */\nif ( DataTable.TableTools ) {\n\t$.extend( true, DataTable.TableTools.classes, {\n\t\t\"container\": \"DTTT_container ui-buttonset ui-buttonset-multi\",\n\t\t\"buttons\": {\n\t\t\t\"normal\": \"DTTT_button ui-button ui-state-default\"\n\t\t},\n\t\t\"collection\": {\n\t\t\t\"container\": \"DTTT_collection ui-buttonset ui-buttonset-multi\"\n\t\t}\n\t} );\n}\n\n\nreturn DataTable;\n}));\n"
  },
  {
    "path": "web_gui/gui_v3/js/dataTables.material.js",
    "content": "/*! DataTables Bootstrap 3 integration\n * ©2011-2015 SpryMedia Ltd - datatables.net/license\n */\n\n/**\n * DataTables integration for Bootstrap 3. This requires Bootstrap 3 and\n * DataTables 1.10 or newer.\n *\n * This file sets the defaults and adds options to DataTables to style its\n * controls using Bootstrap. See http://datatables.net/manual/styling/bootstrap\n * for further information.\n */\n(function( factory ){\n\tif ( typeof define === 'function' && define.amd ) {\n\t\t// AMD\n\t\tdefine( ['jquery', 'datatables.net'], function ( $ ) {\n\t\t\treturn factory( $, window, document );\n\t\t} );\n\t}\n\telse if ( typeof exports === 'object' ) {\n\t\t// CommonJS\n\t\tmodule.exports = function (root, $) {\n\t\t\tif ( ! root ) {\n\t\t\t\troot = window;\n\t\t\t}\n\n\t\t\tif ( ! $ || ! $.fn.dataTable ) {\n\t\t\t\t// Require DataTables, which attaches to jQuery, including\n\t\t\t\t// jQuery if needed and have a $ property so we can access the\n\t\t\t\t// jQuery object that is used\n\t\t\t\t$ = require('datatables.net')(root, $).$;\n\t\t\t}\n\n\t\t\treturn factory( $, root, root.document );\n\t\t};\n\t}\n\telse {\n\t\t// Browser\n\t\tfactory( jQuery, window, document );\n\t}\n}(function( $, window, document, undefined ) {\n'use strict';\nvar DataTable = $.fn.dataTable;\n\n\n/* Set the defaults for DataTables initialisation */\n$.extend( true, DataTable.defaults, {\n\tdom:\n\t\t\"<'mdl-grid'\"+\n\t\t\t\"<'mdl-cell mdl-cell--6-col'l>\"+\n\t\t\t\"<'mdl-cell mdl-cell--6-col'f>\"+\n\t\t\">\"+\n\t\t\"<'mdl-grid dt-table'\"+\n\t\t\t\"<'mdl-cell mdl-cell--12-col'tr>\"+\n\t\t\">\"+\n\t\t\"<'mdl-grid'\"+\n\t\t\t\"<'mdl-cell mdl-cell--4-col'i>\"+\n\t\t\t\"<'mdl-cell mdl-cell--8-col'p>\"+\n\t\t\">\",\n\trenderer: 'material'\n} );\n\n\n/* Default class modification */\n$.extend( DataTable.ext.classes, {\n\tsWrapper:      \"dataTables_wrapper form-inline dt-material\",\n\tsFilterInput:  \"form-control input-sm\",\n\tsLengthSelect: \"form-control 
input-sm\",\n\tsProcessing:   \"dataTables_processing panel panel-default\"\n} );\n\n\n/* Bootstrap paging button renderer */\nDataTable.ext.renderer.pageButton.material = function ( settings, host, idx, buttons, page, pages ) {\n\tvar api     = new DataTable.Api( settings );\n\tvar classes = settings.oClasses;\n\tvar lang    = settings.oLanguage.oPaginate;\n\tvar aria = settings.oLanguage.oAria.paginate || {};\n\tvar btnDisplay, btnClass, counter=0;\n\n\tvar attach = function( container, buttons ) {\n\t\tvar i, ien, node, button, disabled, active;\n\t\tvar clickHandler = function ( e ) {\n\t\t\te.preventDefault();\n\t\t\tif ( !$(e.currentTarget).hasClass('disabled') && api.page() != e.data.action ) {\n\t\t\t\tapi.page( e.data.action ).draw( 'page' );\n\t\t\t}\n\t\t};\n\n\t\tfor ( i=0, ien=buttons.length ; i<ien ; i++ ) {\n\t\t\tbutton = buttons[i];\n\n\t\t\tif ( $.isArray( button ) ) {\n\t\t\t\tattach( container, button );\n\t\t\t}\n\t\t\telse {\n\t\t\t\tbtnDisplay = '';\n\t\t\t\tactive = false;\n\n\t\t\t\tswitch ( button ) {\n\t\t\t\t\tcase 'ellipsis':\n\t\t\t\t\t\tbtnDisplay = '&#x2026;';\n\t\t\t\t\t\tbtnClass = 'disabled';\n\t\t\t\t\t\tbreak;\n\n\t\t\t\t\tcase 'first':\n\t\t\t\t\t\tbtnDisplay = lang.sFirst;\n\t\t\t\t\t\tbtnClass = button + (page > 0 ?\n\t\t\t\t\t\t\t'' : ' disabled');\n\t\t\t\t\t\tbreak;\n\n\t\t\t\t\tcase 'previous':\n\t\t\t\t\t\tbtnDisplay = lang.sPrevious;\n\t\t\t\t\t\tbtnClass = button + (page > 0 ?\n\t\t\t\t\t\t\t'' : ' disabled');\n\t\t\t\t\t\tbreak;\n\n\t\t\t\t\tcase 'next':\n\t\t\t\t\t\tbtnDisplay = lang.sNext;\n\t\t\t\t\t\tbtnClass = button + (page < pages-1 ?\n\t\t\t\t\t\t\t'' : ' disabled');\n\t\t\t\t\t\tbreak;\n\n\t\t\t\t\tcase 'last':\n\t\t\t\t\t\tbtnDisplay = lang.sLast;\n\t\t\t\t\t\tbtnClass = button + (page < pages-1 ?\n\t\t\t\t\t\t\t'' : ' disabled');\n\t\t\t\t\t\tbreak;\n\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tbtnDisplay = button + 1;\n\t\t\t\t\t\tbtnClass = '';\n\t\t\t\t\t\tactive = page === 
button;\n\t\t\t\t\t\tbreak;\n\t\t\t\t}\n\n\t\t\t\tif ( active ) {\n\t\t\t\t\tbtnClass += ' mdl-button--raised mdl-button--colored';\n\t\t\t\t}\n\n\t\t\t\tif ( btnDisplay ) {\n\t\t\t\t\tnode = $('<button>', {\n\t\t\t\t\t\t\t'class': 'mdl-button '+btnClass,\n\t\t\t\t\t\t\t'id': idx === 0 && typeof button === 'string' ?\n\t\t\t\t\t\t\t\tsettings.sTableId +'_'+ button :\n\t\t\t\t\t\t\t\tnull,\n\t\t\t\t\t\t\t'aria-controls': settings.sTableId,\n\t\t\t\t\t\t\t'aria-label': aria[ button ],\n\t\t\t\t\t\t\t'data-dt-idx': counter,\n\t\t\t\t\t\t\t'tabindex': settings.iTabIndex,\n\t\t\t\t\t\t\t'disabled': btnClass.indexOf('disabled') !== -1\n\t\t\t\t\t\t} )\n\t\t\t\t\t\t.html( btnDisplay )\n\t\t\t\t\t\t.appendTo( container );\n\n\t\t\t\t\tsettings.oApi._fnBindAction(\n\t\t\t\t\t\tnode, {action: button}, clickHandler\n\t\t\t\t\t);\n\n\t\t\t\t\tcounter++;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t};\n\n\t// IE9 throws an 'unknown error' if document.activeElement is used\n\t// inside an iframe or frame. \n\tvar activeEl;\n\n\ttry {\n\t\t// Because this approach is destroying and recreating the paging\n\t\t// elements, focus is lost on the select button which is bad for\n\t\t// accessibility. So we want to restore focus once the draw has\n\t\t// completed\n\t\tactiveEl = $(host).find(document.activeElement).data('dt-idx');\n\t}\n\tcatch (e) {}\n\n\tattach(\n\t\t$(host).empty().html('<div class=\"pagination\"/>').children(),\n\t\tbuttons\n\t);\n\n\tif ( activeEl ) {\n\t\t$(host).find( '[data-dt-idx='+activeEl+']' ).focus();\n\t}\n};\n\n\nreturn DataTable;\n}));"
  },
  {
    "path": "web_gui/gui_v3/js/dataTables.semanticui.js",
    "content": "/*! DataTables Bootstrap 3 integration\n * ©2011-2015 SpryMedia Ltd - datatables.net/license\n */\n\n/**\n * DataTables integration for Bootstrap 3. This requires Bootstrap 3 and\n * DataTables 1.10 or newer.\n *\n * This file sets the defaults and adds options to DataTables to style its\n * controls using Bootstrap. See http://datatables.net/manual/styling/bootstrap\n * for further information.\n */\n(function( factory ){\n\tif ( typeof define === 'function' && define.amd ) {\n\t\t// AMD\n\t\tdefine( ['jquery', 'datatables.net'], function ( $ ) {\n\t\t\treturn factory( $, window, document );\n\t\t} );\n\t}\n\telse if ( typeof exports === 'object' ) {\n\t\t// CommonJS\n\t\tmodule.exports = function (root, $) {\n\t\t\tif ( ! root ) {\n\t\t\t\troot = window;\n\t\t\t}\n\n\t\t\tif ( ! $ || ! $.fn.dataTable ) {\n\t\t\t\t// Require DataTables, which attaches to jQuery, including\n\t\t\t\t// jQuery if needed and have a $ property so we can access the\n\t\t\t\t// jQuery object that is used\n\t\t\t\t$ = require('datatables.net')(root, $).$;\n\t\t\t}\n\n\t\t\treturn factory( $, root, root.document );\n\t\t};\n\t}\n\telse {\n\t\t// Browser\n\t\tfactory( jQuery, window, document );\n\t}\n}(function( $, window, document, undefined ) {\n'use strict';\nvar DataTable = $.fn.dataTable;\n\n\n/* Set the defaults for DataTables initialisation */\n$.extend( true, DataTable.defaults, {\n\tdom:\n\t\t\"<'ui grid'\"+\n\t\t\t\"<'row'\"+\n\t\t\t\t\"<'eight wide column'l>\"+\n\t\t\t\t\"<'right aligned eight wide column'f>\"+\n\t\t\t\">\"+\n\t\t\t\"<'row dt-table'\"+\n\t\t\t\t\"<'sixteen wide column'tr>\"+\n\t\t\t\">\"+\n\t\t\t\"<'row'\"+\n\t\t\t\t\"<'seven wide column'i>\"+\n\t\t\t\t\"<'right aligned nine wide column'p>\"+\n\t\t\t\">\"+\n\t\t\">\",\n\trenderer: 'semanticUI'\n} );\n\n\n/* Default class modification */\n$.extend( DataTable.ext.classes, {\n\tsWrapper:      \"dataTables_wrapper dt-semanticUI\",\n\tsFilter:       \"dataTables_filter ui input\",\n\tsProcessing:   
\"dataTables_processing ui segment\",\n\tsPageButton:   \"paginate_button item\"\n} );\n\n\n/* Bootstrap paging button renderer */\nDataTable.ext.renderer.pageButton.semanticUI = function ( settings, host, idx, buttons, page, pages ) {\n\tvar api     = new DataTable.Api( settings );\n\tvar classes = settings.oClasses;\n\tvar lang    = settings.oLanguage.oPaginate;\n\tvar aria = settings.oLanguage.oAria.paginate || {};\n\tvar btnDisplay, btnClass, counter=0;\n\n\tvar attach = function( container, buttons ) {\n\t\tvar i, ien, node, button;\n\t\tvar clickHandler = function ( e ) {\n\t\t\te.preventDefault();\n\t\t\tif ( !$(e.currentTarget).hasClass('disabled') && api.page() != e.data.action ) {\n\t\t\t\tapi.page( e.data.action ).draw( 'page' );\n\t\t\t}\n\t\t};\n\n\t\tfor ( i=0, ien=buttons.length ; i<ien ; i++ ) {\n\t\t\tbutton = buttons[i];\n\n\t\t\tif ( $.isArray( button ) ) {\n\t\t\t\tattach( container, button );\n\t\t\t}\n\t\t\telse {\n\t\t\t\tbtnDisplay = '';\n\t\t\t\tbtnClass = '';\n\n\t\t\t\tswitch ( button ) {\n\t\t\t\t\tcase 'ellipsis':\n\t\t\t\t\t\tbtnDisplay = '&#x2026;';\n\t\t\t\t\t\tbtnClass = 'disabled';\n\t\t\t\t\t\tbreak;\n\n\t\t\t\t\tcase 'first':\n\t\t\t\t\t\tbtnDisplay = lang.sFirst;\n\t\t\t\t\t\tbtnClass = button + (page > 0 ?\n\t\t\t\t\t\t\t'' : ' disabled');\n\t\t\t\t\t\tbreak;\n\n\t\t\t\t\tcase 'previous':\n\t\t\t\t\t\tbtnDisplay = lang.sPrevious;\n\t\t\t\t\t\tbtnClass = button + (page > 0 ?\n\t\t\t\t\t\t\t'' : ' disabled');\n\t\t\t\t\t\tbreak;\n\n\t\t\t\t\tcase 'next':\n\t\t\t\t\t\tbtnDisplay = lang.sNext;\n\t\t\t\t\t\tbtnClass = button + (page < pages-1 ?\n\t\t\t\t\t\t\t'' : ' disabled');\n\t\t\t\t\t\tbreak;\n\n\t\t\t\t\tcase 'last':\n\t\t\t\t\t\tbtnDisplay = lang.sLast;\n\t\t\t\t\t\tbtnClass = button + (page < pages-1 ?\n\t\t\t\t\t\t\t'' : ' disabled');\n\t\t\t\t\t\tbreak;\n\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tbtnDisplay = button + 1;\n\t\t\t\t\t\tbtnClass = page === button ?\n\t\t\t\t\t\t\t'active' : 
'';\n\t\t\t\t\t\tbreak;\n\t\t\t\t}\n\n\t\t\t\tvar tag = btnClass.indexOf( 'disabled' ) === -1 ?\n\t\t\t\t\t'a' :\n\t\t\t\t\t'div';\n\n\t\t\t\tif ( btnDisplay ) {\n\t\t\t\t\tnode = $('<'+tag+'>', {\n\t\t\t\t\t\t\t'class': classes.sPageButton+' '+btnClass,\n\t\t\t\t\t\t\t'id': idx === 0 && typeof button === 'string' ?\n\t\t\t\t\t\t\t\tsettings.sTableId +'_'+ button :\n\t\t\t\t\t\t\t\tnull,\n\t\t\t\t\t\t\t'href': '#',\n\t\t\t\t\t\t\t'aria-controls': settings.sTableId,\n\t\t\t\t\t\t\t'aria-label': aria[ button ],\n\t\t\t\t\t\t\t'data-dt-idx': counter,\n\t\t\t\t\t\t\t'tabindex': settings.iTabIndex\n\t\t\t\t\t\t} )\n\t\t\t\t\t\t.html( btnDisplay )\n\t\t\t\t\t\t.appendTo( container );\n\n\t\t\t\t\tsettings.oApi._fnBindAction(\n\t\t\t\t\t\tnode, {action: button}, clickHandler\n\t\t\t\t\t);\n\n\t\t\t\t\tcounter++;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t};\n\n\t// IE9 throws an 'unknown error' if document.activeElement is used\n\t// inside an iframe or frame. \n\tvar activeEl;\n\n\ttry {\n\t\t// Because this approach is destroying and recreating the paging\n\t\t// elements, focus is lost on the select button which is bad for\n\t\t// accessibility. So we want to restore focus once the draw has\n\t\t// completed\n\t\tactiveEl = $(host).find(document.activeElement).data('dt-idx');\n\t}\n\tcatch (e) {}\n\n\tattach(\n\t\t$(host).empty().html('<div class=\"ui pagination menu\"/>').children(),\n\t\tbuttons\n\t);\n\n\tif ( activeEl ) {\n\t\t$(host).find( '[data-dt-idx='+activeEl+']' ).focus();\n\t}\n};\n\n\n// Javascript enhancements on table initialisation\n$(document).on( 'init.dt', function (e, ctx) {\n\tif ( e.namespace !== 'dt' ) {\n\t\treturn;\n\t}\n\n\t// Length menu drop down\n\tif ( $.fn.dropdown ) {\n\t\tvar api = new $.fn.dataTable.Api( ctx );\n\n\t\t$( 'div.dataTables_length select', api.table().container() ).dropdown();\n\t}\n} );\n\n\nreturn DataTable;\n}));"
  },
  {
    "path": "web_gui/gui_v3/js/dataTables.uikit.js",
    "content": "/*! DataTables UIkit 3 integration\n */\n\n/**\n * This is a tech preview of UIKit integration with DataTables.\n */\n(function( factory ){\n\tif ( typeof define === 'function' && define.amd ) {\n\t\t// AMD\n\t\tdefine( ['jquery', 'datatables.net'], function ( $ ) {\n\t\t\treturn factory( $, window, document );\n\t\t} );\n\t}\n\telse if ( typeof exports === 'object' ) {\n\t\t// CommonJS\n\t\tmodule.exports = function (root, $) {\n\t\t\tif ( ! root ) {\n\t\t\t\troot = window;\n\t\t\t}\n\n\t\t\tif ( ! $ || ! $.fn.dataTable ) {\n\t\t\t\t// Require DataTables, which attaches to jQuery, including\n\t\t\t\t// jQuery if needed and have a $ property so we can access the\n\t\t\t\t// jQuery object that is used\n\t\t\t\t$ = require('datatables.net')(root, $).$;\n\t\t\t}\n\n\t\t\treturn factory( $, root, root.document );\n\t\t};\n\t}\n\telse {\n\t\t// Browser\n\t\tfactory( jQuery, window, document );\n\t}\n}(function( $, window, document, undefined ) {\n'use strict';\nvar DataTable = $.fn.dataTable;\n\n\n/* Set the defaults for DataTables initialisation */\n$.extend( true, DataTable.defaults, {\n\tdom:\n\t\t\"<'row uk-grid'<'uk-width-1-2'l><'uk-width-1-2'f>>\" +\n\t\t\"<'row uk-grid dt-merge-grid'<'uk-width-1-1'tr>>\" +\n\t\t\"<'row uk-grid dt-merge-grid'<'uk-width-2-5'i><'uk-width-3-5'p>>\",\n\trenderer: 'uikit'\n} );\n\n\n/* Default class modification */\n$.extend( DataTable.ext.classes, {\n\tsWrapper:      \"dataTables_wrapper uk-form dt-uikit\",\n\tsFilterInput:  \"uk-form-small\",\n\tsLengthSelect: \"uk-form-small\",\n\tsProcessing:   \"dataTables_processing uk-panel\"\n} );\n\n\n/* UIkit paging button renderer */\nDataTable.ext.renderer.pageButton.uikit = function ( settings, host, idx, buttons, page, pages ) {\n\tvar api     = new DataTable.Api( settings );\n\tvar classes = settings.oClasses;\n\tvar lang    = settings.oLanguage.oPaginate;\n\tvar aria = settings.oLanguage.oAria.paginate || {};\n\tvar btnDisplay, btnClass, counter=0;\n\n\tvar attach = 
function( container, buttons ) {\n\t\tvar i, ien, node, button;\n\t\tvar clickHandler = function ( e ) {\n\t\t\te.preventDefault();\n\t\t\tif ( !$(e.currentTarget).hasClass('disabled') && api.page() != e.data.action ) {\n\t\t\t\tapi.page( e.data.action ).draw( 'page' );\n\t\t\t}\n\t\t};\n\n\t\tfor ( i=0, ien=buttons.length ; i<ien ; i++ ) {\n\t\t\tbutton = buttons[i];\n\n\t\t\tif ( $.isArray( button ) ) {\n\t\t\t\tattach( container, button );\n\t\t\t}\n\t\t\telse {\n\t\t\t\tbtnDisplay = '';\n\t\t\t\tbtnClass = '';\n\n\t\t\t\tswitch ( button ) {\n\t\t\t\t\tcase 'ellipsis':\n\t\t\t\t\t\tbtnDisplay = '<i class=\"uk-icon-ellipsis-h\"></i>';\n\t\t\t\t\t\tbtnClass = 'uk-disabled disabled';\n\t\t\t\t\t\tbreak;\n\n\t\t\t\t\tcase 'first':\n\t\t\t\t\t\tbtnDisplay = '<i class=\"uk-icon-angle-double-left\"></i> ' + lang.sFirst;\n\t\t\t\t\t\tbtnClass = (page > 0 ?\n\t\t\t\t\t\t\t'' : ' uk-disabled disabled');\n\t\t\t\t\t\tbreak;\n\n\t\t\t\t\tcase 'previous':\n\t\t\t\t\t\tbtnDisplay = '<i class=\"uk-icon-angle-left\"></i> ' + lang.sPrevious;\n\t\t\t\t\t\tbtnClass = (page > 0 ?\n\t\t\t\t\t\t\t'' : 'uk-disabled disabled');\n\t\t\t\t\t\tbreak;\n\n\t\t\t\t\tcase 'next':\n\t\t\t\t\t\tbtnDisplay = lang.sNext + ' <i class=\"uk-icon-angle-right\"></i>';\n\t\t\t\t\t\tbtnClass = (page < pages-1 ?\n\t\t\t\t\t\t\t'' : 'uk-disabled disabled');\n\t\t\t\t\t\tbreak;\n\n\t\t\t\t\tcase 'last':\n\t\t\t\t\t\tbtnDisplay = lang.sLast + ' <i class=\"uk-icon-angle-double-right\"></i>';\n\t\t\t\t\t\tbtnClass = (page < pages-1 ?\n\t\t\t\t\t\t\t'' : ' uk-disabled disabled');\n\t\t\t\t\t\tbreak;\n\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tbtnDisplay = button + 1;\n\t\t\t\t\t\tbtnClass = page === button ?\n\t\t\t\t\t\t\t'uk-active' : '';\n\t\t\t\t\t\tbreak;\n\t\t\t\t}\n\n\t\t\t\tif ( btnDisplay ) {\n\t\t\t\t\tnode = $('<li>', {\n\t\t\t\t\t\t\t'class': classes.sPageButton+' '+btnClass,\n\t\t\t\t\t\t\t'id': idx === 0 && typeof button === 'string' ?\n\t\t\t\t\t\t\t\tsettings.sTableId +'_'+ button 
:\n\t\t\t\t\t\t\t\tnull\n\t\t\t\t\t\t} )\n\t\t\t\t\t\t.append( $(( -1 != btnClass.indexOf('disabled') || -1 != btnClass.indexOf('active') ) ? '<span>' : '<a>', {\n\t\t\t\t\t\t\t\t'href': '#',\n\t\t\t\t\t\t\t\t'aria-controls': settings.sTableId,\n\t\t\t\t\t\t\t\t'aria-label': aria[ button ],\n\t\t\t\t\t\t\t\t'data-dt-idx': counter,\n\t\t\t\t\t\t\t\t'tabindex': settings.iTabIndex\n\t\t\t\t\t\t\t} )\n\t\t\t\t\t\t\t.html( btnDisplay )\n\t\t\t\t\t\t)\n\t\t\t\t\t\t.appendTo( container );\n\n\t\t\t\t\tsettings.oApi._fnBindAction(\n\t\t\t\t\t\tnode, {action: button}, clickHandler\n\t\t\t\t\t);\n\n\t\t\t\t\tcounter++;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t};\n\n\t// IE9 throws an 'unknown error' if document.activeElement is used\n\t// inside an iframe or frame. \n\tvar activeEl;\n\n\ttry {\n\t\t// Because this approach is destroying and recreating the paging\n\t\t// elements, focus is lost on the select button which is bad for\n\t\t// accessibility. So we want to restore focus once the draw has\n\t\t// completed\n\t\tactiveEl = $(host).find(document.activeElement).data('dt-idx');\n\t}\n\tcatch (e) {}\n\n\tattach(\n\t\t$(host).empty().html('<ul class=\"uk-pagination uk-pagination-right\"/>').children('ul'),\n\t\tbuttons\n\t);\n\n\tif ( activeEl ) {\n\t\t$(host).find( '[data-dt-idx='+activeEl+']' ).focus();\n\t}\n};\n\n\nreturn DataTable;\n}));"
  },
  {
    "path": "web_gui/gui_v3/js/filesize.dataTables.js",
    "content": "jQuery.fn.dataTable.ext.type.order['file-size-pre'] = function ( data ) {\n    var matches = data.match( /^(\\d+(?:\\.\\d+)?)\\s*([a-z]+)/i );\n    var multipliers = {\n        b:  1,\n        kb: 1000,\n        kib: 1024,\n        mb: 1000000,\n        mib: 1048576,\n        gb: 1000000000,\n        gib: 1073741824,\n        tb: 1000000000000,\n        tib: 1099511627776,\n        pb: 1000000000000000,\n        pib: 1125899906842624\n    };\n\n    if (matches) {\n        var multiplier = multipliers[matches[2].toLowerCase()];\n        return parseFloat( matches[1] ) * multiplier;\n    } else {\n        return -1;\n    };\n};\n"
  },
  {
    "path": "web_gui/gui_v3/js/jquery.dataTables.js",
    "content": "/*! DataTables 1.10.12\n * ©2008-2015 SpryMedia Ltd - datatables.net/license\n */\n\n/**\n * @summary     DataTables\n * @description Paginate, search and order HTML tables\n * @version     1.10.12\n * @file        jquery.dataTables.js\n * @author      SpryMedia Ltd (www.sprymedia.co.uk)\n * @contact     www.sprymedia.co.uk/contact\n * @copyright   Copyright 2008-2015 SpryMedia Ltd.\n *\n * This source file is free software, available under the following license:\n *   MIT license - http://datatables.net/license\n *\n * This source file is distributed in the hope that it will be useful, but\n * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY\n * or FITNESS FOR A PARTICULAR PURPOSE. See the license files for details.\n *\n * For details please refer to: http://www.datatables.net\n */\n\n/*jslint evil: true, undef: true, browser: true */\n/*globals $,require,jQuery,define,_selector_run,_selector_opts,_selector_first,_selector_row_indexes,_ext,_Api,_api_register,_api_registerPlural,_re_new_lines,_re_html,_re_formatted_numeric,_re_escape_regex,_empty,_intVal,_numToDecimal,_isNumber,_isHtml,_htmlNumeric,_pluck,_pluck_order,_range,_stripHtml,_unique,_fnBuildAjax,_fnAjaxUpdate,_fnAjaxParameters,_fnAjaxUpdateDraw,_fnAjaxDataSrc,_fnAddColumn,_fnColumnOptions,_fnAdjustColumnSizing,_fnVisibleToColumnIndex,_fnColumnIndexToVisible,_fnVisbleColumns,_fnGetColumns,_fnColumnTypes,_fnApplyColumnDefs,_fnHungarianMap,_fnCamelToHungarian,_fnLanguageCompat,_fnBrowserDetect,_fnAddData,_fnAddTr,_fnNodeToDataIndex,_fnNodeToColumnIndex,_fnGetCellData,_fnSetCellData,_fnSplitObjNotation,_fnGetObjectDataFn,_fnSetObjectDataFn,_fnGetDataMaster,_fnClearTable,_fnDeleteIndex,_fnInvalidate,_fnGetRowElements,_fnCreateTr,_fnBuildHead,_fnDrawHead,_fnDraw,_fnReDraw,_fnAddOptionsHtml,_fnDetectHeader,_fnGetUniqueThs,_fnFeatureHtmlFilter,_fnFilterComplete,_fnFilterCustom,_fnFilterColumn,_fnFilter,_fnFilterCreateSearch,_fnEscapeRegex,_fnFilterData,_fnFeatureHtmlInfo
,_fnUpdateInfo,_fnInfoMacros,_fnInitialise,_fnInitComplete,_fnLengthChange,_fnFeatureHtmlLength,_fnFeatureHtmlPaginate,_fnPageChange,_fnFeatureHtmlProcessing,_fnProcessingDisplay,_fnFeatureHtmlTable,_fnScrollDraw,_fnApplyToChildren,_fnCalculateColumnWidths,_fnThrottle,_fnConvertToWidth,_fnGetWidestNode,_fnGetMaxLenString,_fnStringToCss,_fnSortFlatten,_fnSort,_fnSortAria,_fnSortListener,_fnSortAttachListener,_fnSortingClasses,_fnSortData,_fnSaveState,_fnLoadState,_fnSettingsFromNode,_fnLog,_fnMap,_fnBindAction,_fnCallbackReg,_fnCallbackFire,_fnLengthOverflow,_fnRenderer,_fnDataSource,_fnRowAttributes*/\n\n(function( factory ) {\n\t\"use strict\";\n\n\tif ( typeof define === 'function' && define.amd ) {\n\t\t// AMD\n\t\tdefine( ['jquery'], function ( $ ) {\n\t\t\treturn factory( $, window, document );\n\t\t} );\n\t}\n\telse if ( typeof exports === 'object' ) {\n\t\t// CommonJS\n\t\tmodule.exports = function (root, $) {\n\t\t\tif ( ! root ) {\n\t\t\t\t// CommonJS environments without a window global must pass a\n\t\t\t\t// root. This will give an error otherwise\n\t\t\t\troot = window;\n\t\t\t}\n\n\t\t\tif ( ! $ ) {\n\t\t\t\t$ = typeof window !== 'undefined' ? // jQuery's factory checks for a global window\n\t\t\t\t\trequire('jquery') :\n\t\t\t\t\trequire('jquery')( root );\n\t\t\t}\n\n\t\t\treturn factory( $, root, root.document );\n\t\t};\n\t}\n\telse {\n\t\t// Browser\n\t\tfactory( jQuery, window, document );\n\t}\n}\n(function( $, window, document, undefined ) {\n\t\"use strict\";\n\n\t/**\n\t * DataTables is a plug-in for the jQuery Javascript library. It is a highly\n\t * flexible tool, based upon the foundations of progressive enhancement,\n\t * which will add advanced interaction controls to any HTML table. 
For a\n\t * full list of features please refer to\n\t * [DataTables.net](href=\"http://datatables.net).\n\t *\n\t * Note that the `DataTable` object is not a global variable but is aliased\n\t * to `jQuery.fn.DataTable` and `jQuery.fn.dataTable` through which it may\n\t * be  accessed.\n\t *\n\t *  @class\n\t *  @param {object} [init={}] Configuration object for DataTables. Options\n\t *    are defined by {@link DataTable.defaults}\n\t *  @requires jQuery 1.7+\n\t *\n\t *  @example\n\t *    // Basic initialisation\n\t *    $(document).ready( function {\n\t *      $('#example').dataTable();\n\t *    } );\n\t *\n\t *  @example\n\t *    // Initialisation with configuration options - in this case, disable\n\t *    // pagination and sorting.\n\t *    $(document).ready( function {\n\t *      $('#example').dataTable( {\n\t *        \"paginate\": false,\n\t *        \"sort\": false\n\t *      } );\n\t *    } );\n\t */\n\tvar DataTable = function ( options )\n\t{\n\t\t/**\n\t\t * Perform a jQuery selector action on the table's TR elements (from the tbody) and\n\t\t * return the resulting jQuery object.\n\t\t *  @param {string|node|jQuery} sSelector jQuery selector or node collection to act on\n\t\t *  @param {object} [oOpts] Optional parameters for modifying the rows to be included\n\t\t *  @param {string} [oOpts.filter=none] Select TR elements that meet the current filter\n\t\t *    criterion (\"applied\") or all TR elements (i.e. no filter).\n\t\t *  @param {string} [oOpts.order=current] Order of the TR elements in the processed array.\n\t\t *    Can be either 'current', whereby the current sorting of the table is used, or\n\t\t *    'original' whereby the original order the data was read into the table is used.\n\t\t *  @param {string} [oOpts.page=all] Limit the selection to the currently displayed page\n\t\t *    (\"current\") or not (\"all\"). 
If 'current' is given, then order is assumed to be\n\t\t *    'current' and filter is 'applied', regardless of what they might be given as.\n\t\t *  @returns {object} jQuery object, filtered by the given selector.\n\t\t *  @dtopt API\n\t\t *  @deprecated Since v1.10\n\t\t *\n\t\t *  @example\n\t\t *    $(document).ready(function() {\n\t\t *      var oTable = $('#example').dataTable();\n\t\t *\n\t\t *      // Highlight every second row\n\t\t *      oTable.$('tr:odd').css('backgroundColor', 'blue');\n\t\t *    } );\n\t\t *\n\t\t *  @example\n\t\t *    $(document).ready(function() {\n\t\t *      var oTable = $('#example').dataTable();\n\t\t *\n\t\t *      // Filter to rows with 'Webkit' in them, add a background colour and then\n\t\t *      // remove the filter, thus highlighting the 'Webkit' rows only.\n\t\t *      oTable.fnFilter('Webkit');\n\t\t *      oTable.$('tr', {\"search\": \"applied\"}).css('backgroundColor', 'blue');\n\t\t *      oTable.fnFilter('');\n\t\t *    } );\n\t\t */\n\t\tthis.$ = function ( sSelector, oOpts )\n\t\t{\n\t\t\treturn this.api(true).$( sSelector, oOpts );\n\t\t};\n\t\t\n\t\t\n\t\t/**\n\t\t * Almost identical to $ in operation, but in this case returns the data for the matched\n\t\t * rows - as such, the jQuery selector used should match TR row nodes or TD/TH cell nodes\n\t\t * rather than any descendants, so the data can be obtained for the row/cell. 
If matching\n\t\t * rows are found, the data returned is the original data array/object that was used to\n\t\t * create the row (or a generated array if from a DOM source).\n\t\t *\n\t\t * This method is often useful in-combination with $ where both functions are given the\n\t\t * same parameters and the array indexes will match identically.\n\t\t *  @param {string|node|jQuery} sSelector jQuery selector or node collection to act on\n\t\t *  @param {object} [oOpts] Optional parameters for modifying the rows to be included\n\t\t *  @param {string} [oOpts.filter=none] Select elements that meet the current filter\n\t\t *    criterion (\"applied\") or all elements (i.e. no filter).\n\t\t *  @param {string} [oOpts.order=current] Order of the data in the processed array.\n\t\t *    Can be either 'current', whereby the current sorting of the table is used, or\n\t\t *    'original' whereby the original order the data was read into the table is used.\n\t\t *  @param {string} [oOpts.page=all] Limit the selection to the currently displayed page\n\t\t *    (\"current\") or not (\"all\"). If 'current' is given, then order is assumed to be\n\t\t *    'current' and filter is 'applied', regardless of what they might be given as.\n\t\t *  @returns {array} Data for the matched elements. 
If any elements, as a result of the\n\t\t *    selector, were not TR, TD or TH elements in the DataTable, they will have a null\n\t\t *    entry in the array.\n\t\t *  @dtopt API\n\t\t *  @deprecated Since v1.10\n\t\t *\n\t\t *  @example\n\t\t *    $(document).ready(function() {\n\t\t *      var oTable = $('#example').dataTable();\n\t\t *\n\t\t *      // Get the data from the first row in the table\n\t\t *      var data = oTable._('tr:first');\n\t\t *\n\t\t *      // Do something useful with the data\n\t\t *      alert( \"First cell is: \"+data[0] );\n\t\t *    } );\n\t\t *\n\t\t *  @example\n\t\t *    $(document).ready(function() {\n\t\t *      var oTable = $('#example').dataTable();\n\t\t *\n\t\t *      // Filter to 'Webkit' and get all data for\n\t\t *      oTable.fnFilter('Webkit');\n\t\t *      var data = oTable._('tr', {\"search\": \"applied\"});\n\t\t *\n\t\t *      // Do something with the data\n\t\t *      alert( data.length+\" rows matched the search\" );\n\t\t *    } );\n\t\t */\n\t\tthis._ = function ( sSelector, oOpts )\n\t\t{\n\t\t\treturn this.api(true).rows( sSelector, oOpts ).data();\n\t\t};\n\t\t\n\t\t\n\t\t/**\n\t\t * Create a DataTables Api instance, with the currently selected tables for\n\t\t * the Api's context.\n\t\t * @param {boolean} [traditional=false] Set the API instance's context to be\n\t\t *   only the table referred to by the `DataTable.ext.iApiIndex` option, as was\n\t\t *   used in the API presented by DataTables 1.9- (i.e. the traditional mode),\n\t\t *   or if all tables captured in the jQuery object should be used.\n\t\t * @return {DataTables.Api}\n\t\t */\n\t\tthis.api = function ( traditional )\n\t\t{\n\t\t\treturn traditional ?\n\t\t\t\tnew _Api(\n\t\t\t\t\t_fnSettingsFromNode( this[ _ext.iApiIndex ] )\n\t\t\t\t) :\n\t\t\t\tnew _Api( this );\n\t\t};\n\t\t\n\t\t\n\t\t/**\n\t\t * Add a single new row or multiple rows of data to the table. 
Please note\n\t\t * that this is suitable for client-side processing only - if you are using\n\t\t * server-side processing (i.e. \"bServerSide\": true), then to add data, you\n\t\t * must add it to the data source, i.e. the server-side, through an Ajax call.\n\t\t *  @param {array|object} data The data to be added to the table. This can be:\n\t\t *    <ul>\n\t\t *      <li>1D array of data - add a single row with the data provided</li>\n\t\t *      <li>2D array of arrays - add multiple rows in a single call</li>\n\t\t *      <li>object - data object when using <i>mData</i></li>\n\t\t *      <li>array of objects - multiple data objects when using <i>mData</i></li>\n\t\t *    </ul>\n\t\t *  @param {bool} [redraw=true] redraw the table or not\n\t\t *  @returns {array} An array of integers, representing the list of indexes in\n\t\t *    <i>aoData</i> ({@link DataTable.models.oSettings}) that have been added to\n\t\t *    the table.\n\t\t *  @dtopt API\n\t\t *  @deprecated Since v1.10\n\t\t *\n\t\t *  @example\n\t\t *    // Global var for counter\n\t\t *    var giCount = 2;\n\t\t *\n\t\t *    $(document).ready(function() {\n\t\t *      $('#example').dataTable();\n\t\t *    } );\n\t\t *\n\t\t *    function fnClickAddRow() {\n\t\t *      $('#example').dataTable().fnAddData( [\n\t\t *        giCount+\".1\",\n\t\t *        giCount+\".2\",\n\t\t *        giCount+\".3\",\n\t\t *        giCount+\".4\" ]\n\t\t *      );\n\t\t *\n\t\t *      giCount++;\n\t\t *    }\n\t\t */\n\t\tthis.fnAddData = function( data, redraw )\n\t\t{\n\t\t\tvar api = this.api( true );\n\t\t\n\t\t\t/* Check if we want to add multiple rows or not */\n\t\t\tvar rows = $.isArray(data) && ( $.isArray(data[0]) || $.isPlainObject(data[0]) ) ?\n\t\t\t\tapi.rows.add( data ) :\n\t\t\t\tapi.row.add( data );\n\t\t\n\t\t\tif ( redraw === undefined || redraw ) {\n\t\t\t\tapi.draw();\n\t\t\t}\n\t\t\n\t\t\treturn rows.flatten().toArray();\n\t\t};\n\t\t\n\t\t\n\t\t/**\n\t\t * This function will make DataTables 
recalculate the column sizes, based on the data\n\t\t * contained in the table and the sizes applied to the columns (in the DOM, CSS or\n\t\t * through the sWidth parameter). This can be useful when the width of the table's\n\t\t * parent element changes (for example a window resize).\n\t\t *  @param {boolean} [bRedraw=true] Redraw the table or not, you will typically want to\n\t\t *  @dtopt API\n\t\t *  @deprecated Since v1.10\n\t\t *\n\t\t *  @example\n\t\t *    $(document).ready(function() {\n\t\t *      var oTable = $('#example').dataTable( {\n\t\t *        \"sScrollY\": \"200px\",\n\t\t *        \"bPaginate\": false\n\t\t *      } );\n\t\t *\n\t\t *      $(window).bind('resize', function () {\n\t\t *        oTable.fnAdjustColumnSizing();\n\t\t *      } );\n\t\t *    } );\n\t\t */\n\t\tthis.fnAdjustColumnSizing = function ( bRedraw )\n\t\t{\n\t\t\tvar api = this.api( true ).columns.adjust();\n\t\t\tvar settings = api.settings()[0];\n\t\t\tvar scroll = settings.oScroll;\n\t\t\n\t\t\tif ( bRedraw === undefined || bRedraw ) {\n\t\t\t\tapi.draw( false );\n\t\t\t}\n\t\t\telse if ( scroll.sX !== \"\" || scroll.sY !== \"\" ) {\n\t\t\t\t/* If not redrawing, but scrolling, we want to apply the new column sizes anyway */\n\t\t\t\t_fnScrollDraw( settings );\n\t\t\t}\n\t\t};\n\t\t\n\t\t\n\t\t/**\n\t\t * Quickly and simply clear a table\n\t\t *  @param {bool} [bRedraw=true] redraw the table or not\n\t\t *  @dtopt API\n\t\t *  @deprecated Since v1.10\n\t\t *\n\t\t *  @example\n\t\t *    $(document).ready(function() {\n\t\t *      var oTable = $('#example').dataTable();\n\t\t *\n\t\t *      // Immediately 'nuke' the current rows (perhaps waiting for an Ajax callback...)\n\t\t *      oTable.fnClearTable();\n\t\t *    } );\n\t\t */\n\t\tthis.fnClearTable = function( bRedraw )\n\t\t{\n\t\t\tvar api = this.api( true ).clear();\n\t\t\n\t\t\tif ( bRedraw === undefined || bRedraw ) {\n\t\t\t\tapi.draw();\n\t\t\t}\n\t\t};\n\t\t\n\t\t\n\t\t/**\n\t\t * The exact opposite of 'opening' a 
row, this function will close any rows which\n\t\t * are currently 'open'.\n\t\t *  @param {node} nTr the table row to 'close'\n\t\t *  @returns {int} 0 on success, or 1 if failed (can't find the row)\n\t\t *  @dtopt API\n\t\t *  @deprecated Since v1.10\n\t\t *\n\t\t *  @example\n\t\t *    $(document).ready(function() {\n\t\t *      var oTable;\n\t\t *\n\t\t *      // 'open' an information row when a row is clicked on\n\t\t *      $('#example tbody tr').click( function () {\n\t\t *        if ( oTable.fnIsOpen(this) ) {\n\t\t *          oTable.fnClose( this );\n\t\t *        } else {\n\t\t *          oTable.fnOpen( this, \"Temporary row opened\", \"info_row\" );\n\t\t *        }\n\t\t *      } );\n\t\t *\n\t\t *      oTable = $('#example').dataTable();\n\t\t *    } );\n\t\t */\n\t\tthis.fnClose = function( nTr )\n\t\t{\n\t\t\tthis.api( true ).row( nTr ).child.hide();\n\t\t};\n\t\t\n\t\t\n\t\t/**\n\t\t * Remove a row for the table\n\t\t *  @param {mixed} target The index of the row from aoData to be deleted, or\n\t\t *    the TR element you want to delete\n\t\t *  @param {function|null} [callBack] Callback function\n\t\t *  @param {bool} [redraw=true] Redraw the table or not\n\t\t *  @returns {array} The row that was deleted\n\t\t *  @dtopt API\n\t\t *  @deprecated Since v1.10\n\t\t *\n\t\t *  @example\n\t\t *    $(document).ready(function() {\n\t\t *      var oTable = $('#example').dataTable();\n\t\t *\n\t\t *      // Immediately remove the first row\n\t\t *      oTable.fnDeleteRow( 0 );\n\t\t *    } );\n\t\t */\n\t\tthis.fnDeleteRow = function( target, callback, redraw )\n\t\t{\n\t\t\tvar api = this.api( true );\n\t\t\tvar rows = api.rows( target );\n\t\t\tvar settings = rows.settings()[0];\n\t\t\tvar data = settings.aoData[ rows[0][0] ];\n\t\t\n\t\t\trows.remove();\n\t\t\n\t\t\tif ( callback ) {\n\t\t\t\tcallback.call( this, settings, data );\n\t\t\t}\n\t\t\n\t\t\tif ( redraw === undefined || redraw ) {\n\t\t\t\tapi.draw();\n\t\t\t}\n\t\t\n\t\t\treturn 
data;\n\t\t};\n\t\t\n\t\t\n\t\t/**\n\t\t * Restore the table to it's original state in the DOM by removing all of DataTables\n\t\t * enhancements, alterations to the DOM structure of the table and event listeners.\n\t\t *  @param {boolean} [remove=false] Completely remove the table from the DOM\n\t\t *  @dtopt API\n\t\t *  @deprecated Since v1.10\n\t\t *\n\t\t *  @example\n\t\t *    $(document).ready(function() {\n\t\t *      // This example is fairly pointless in reality, but shows how fnDestroy can be used\n\t\t *      var oTable = $('#example').dataTable();\n\t\t *      oTable.fnDestroy();\n\t\t *    } );\n\t\t */\n\t\tthis.fnDestroy = function ( remove )\n\t\t{\n\t\t\tthis.api( true ).destroy( remove );\n\t\t};\n\t\t\n\t\t\n\t\t/**\n\t\t * Redraw the table\n\t\t *  @param {bool} [complete=true] Re-filter and resort (if enabled) the table before the draw.\n\t\t *  @dtopt API\n\t\t *  @deprecated Since v1.10\n\t\t *\n\t\t *  @example\n\t\t *    $(document).ready(function() {\n\t\t *      var oTable = $('#example').dataTable();\n\t\t *\n\t\t *      // Re-draw the table - you wouldn't want to do it here, but it's an example :-)\n\t\t *      oTable.fnDraw();\n\t\t *    } );\n\t\t */\n\t\tthis.fnDraw = function( complete )\n\t\t{\n\t\t\t// Note that this isn't an exact match to the old call to _fnDraw - it takes\n\t\t\t// into account the new data, but can hold position.\n\t\t\tthis.api( true ).draw( complete );\n\t\t};\n\t\t\n\t\t\n\t\t/**\n\t\t * Filter the input based on data\n\t\t *  @param {string} sInput String to filter the table on\n\t\t *  @param {int|null} [iColumn] Column to limit filtering to\n\t\t *  @param {bool} [bRegex=false] Treat as regular expression or not\n\t\t *  @param {bool} [bSmart=true] Perform smart filtering or not\n\t\t *  @param {bool} [bShowGlobal=true] Show the input global filter in it's input box(es)\n\t\t *  @param {bool} [bCaseInsensitive=true] Do case-insensitive matching (true) or not (false)\n\t\t *  @dtopt API\n\t\t *  
@deprecated Since v1.10\n\t\t *\n\t\t *  @example\n\t\t *    $(document).ready(function() {\n\t\t *      var oTable = $('#example').dataTable();\n\t\t *\n\t\t *      // Sometime later - filter...\n\t\t *      oTable.fnFilter( 'test string' );\n\t\t *    } );\n\t\t */\n\t\tthis.fnFilter = function( sInput, iColumn, bRegex, bSmart, bShowGlobal, bCaseInsensitive )\n\t\t{\n\t\t\tvar api = this.api( true );\n\t\t\n\t\t\tif ( iColumn === null || iColumn === undefined ) {\n\t\t\t\tapi.search( sInput, bRegex, bSmart, bCaseInsensitive );\n\t\t\t}\n\t\t\telse {\n\t\t\t\tapi.column( iColumn ).search( sInput, bRegex, bSmart, bCaseInsensitive );\n\t\t\t}\n\t\t\n\t\t\tapi.draw();\n\t\t};\n\t\t\n\t\t\n\t\t/**\n\t\t * Get the data for the whole table, an individual row or an individual cell based on the\n\t\t * provided parameters.\n\t\t *  @param {int|node} [src] A TR row node, TD/TH cell node or an integer. If given as\n\t\t *    a TR node then the data source for the whole row will be returned. If given as a\n\t\t *    TD/TH cell node then iCol will be automatically calculated and the data for the\n\t\t *    cell returned. If given as an integer, then this is treated as the aoData internal\n\t\t *    data index for the row (see fnGetPosition) and the data for that row used.\n\t\t *  @param {int} [col] Optional column index that you want the data of.\n\t\t *  @returns {array|object|string} If mRow is undefined, then the data for all rows is\n\t\t *    returned. If mRow is defined, just data for that row, and is iCol is\n\t\t *    defined, only data for the designated cell is returned.\n\t\t *  @dtopt API\n\t\t *  @deprecated Since v1.10\n\t\t *\n\t\t *  @example\n\t\t *    // Row data\n\t\t *    $(document).ready(function() {\n\t\t *      oTable = $('#example').dataTable();\n\t\t *\n\t\t *      oTable.$('tr').click( function () {\n\t\t *        var data = oTable.fnGetData( this );\n\t\t *        // ... 
do something with the array / object of data for the row\n\t\t *      } );\n\t\t *    } );\n\t\t *\n\t\t *  @example\n\t\t *    // Individual cell data\n\t\t *    $(document).ready(function() {\n\t\t *      oTable = $('#example').dataTable();\n\t\t *\n\t\t *      oTable.$('td').click( function () {\n\t\t *        var sData = oTable.fnGetData( this );\n\t\t *        alert( 'The cell clicked on had the value of '+sData );\n\t\t *      } );\n\t\t *    } );\n\t\t */\n\t\tthis.fnGetData = function( src, col )\n\t\t{\n\t\t\tvar api = this.api( true );\n\t\t\n\t\t\tif ( src !== undefined ) {\n\t\t\t\tvar type = src.nodeName ? src.nodeName.toLowerCase() : '';\n\t\t\n\t\t\t\treturn col !== undefined || type == 'td' || type == 'th' ?\n\t\t\t\t\tapi.cell( src, col ).data() :\n\t\t\t\t\tapi.row( src ).data() || null;\n\t\t\t}\n\t\t\n\t\t\treturn api.data().toArray();\n\t\t};\n\t\t\n\t\t\n\t\t/**\n\t\t * Get an array of the TR nodes that are used in the table's body. Note that you will\n\t\t * typically want to use the '$' API method in preference to this as it is more\n\t\t * flexible.\n\t\t *  @param {int} [iRow] Optional row index for the TR element you want\n\t\t *  @returns {array|node} If iRow is undefined, returns an array of all TR elements\n\t\t *    in the table's body, or iRow is defined, just the TR element requested.\n\t\t *  @dtopt API\n\t\t *  @deprecated Since v1.10\n\t\t *\n\t\t *  @example\n\t\t *    $(document).ready(function() {\n\t\t *      var oTable = $('#example').dataTable();\n\t\t *\n\t\t *      // Get the nodes from the table\n\t\t *      var nNodes = oTable.fnGetNodes( );\n\t\t *    } );\n\t\t */\n\t\tthis.fnGetNodes = function( iRow )\n\t\t{\n\t\t\tvar api = this.api( true );\n\t\t\n\t\t\treturn iRow !== undefined ?\n\t\t\t\tapi.row( iRow ).node() :\n\t\t\t\tapi.rows().nodes().flatten().toArray();\n\t\t};\n\t\t\n\t\t\n\t\t/**\n\t\t * Get the array indexes of a particular cell from it's DOM element\n\t\t * and column index including hidden 
columns\n\t\t *  @param {node} node this can either be a TR, TD or TH in the table's body\n\t\t *  @returns {int} If nNode is given as a TR, then a single index is returned, or\n\t\t *    if given as a cell, an array of [row index, column index (visible),\n\t\t *    column index (all)] is given.\n\t\t *  @dtopt API\n\t\t *  @deprecated Since v1.10\n\t\t *\n\t\t *  @example\n\t\t *    $(document).ready(function() {\n\t\t *      $('#example tbody td').click( function () {\n\t\t *        // Get the position of the current data from the node\n\t\t *        var aPos = oTable.fnGetPosition( this );\n\t\t *\n\t\t *        // Get the data array for this row\n\t\t *        var aData = oTable.fnGetData( aPos[0] );\n\t\t *\n\t\t *        // Update the data array and return the value\n\t\t *        aData[ aPos[1] ] = 'clicked';\n\t\t *        this.innerHTML = 'clicked';\n\t\t *      } );\n\t\t *\n\t\t *      // Init DataTables\n\t\t *      oTable = $('#example').dataTable();\n\t\t *    } );\n\t\t */\n\t\tthis.fnGetPosition = function( node )\n\t\t{\n\t\t\tvar api = this.api( true );\n\t\t\tvar nodeName = node.nodeName.toUpperCase();\n\t\t\n\t\t\tif ( nodeName == 'TR' ) {\n\t\t\t\treturn api.row( node ).index();\n\t\t\t}\n\t\t\telse if ( nodeName == 'TD' || nodeName == 'TH' ) {\n\t\t\t\tvar cell = api.cell( node ).index();\n\t\t\n\t\t\t\treturn [\n\t\t\t\t\tcell.row,\n\t\t\t\t\tcell.columnVisible,\n\t\t\t\t\tcell.column\n\t\t\t\t];\n\t\t\t}\n\t\t\treturn null;\n\t\t};\n\t\t\n\t\t\n\t\t/**\n\t\t * Check to see if a row is 'open' or not.\n\t\t *  @param {node} nTr the table row to check\n\t\t *  @returns {boolean} true if the row is currently open, false otherwise\n\t\t *  @dtopt API\n\t\t *  @deprecated Since v1.10\n\t\t *\n\t\t *  @example\n\t\t *    $(document).ready(function() {\n\t\t *      var oTable;\n\t\t *\n\t\t *      // 'open' an information row when a row is clicked on\n\t\t *      $('#example tbody tr').click( function () {\n\t\t *        if ( oTable.fnIsOpen(this) ) 
{\n\t\t *          oTable.fnClose( this );\n\t\t *        } else {\n\t\t *          oTable.fnOpen( this, \"Temporary row opened\", \"info_row\" );\n\t\t *        }\n\t\t *      } );\n\t\t *\n\t\t *      oTable = $('#example').dataTable();\n\t\t *    } );\n\t\t */\n\t\tthis.fnIsOpen = function( nTr )\n\t\t{\n\t\t\treturn this.api( true ).row( nTr ).child.isShown();\n\t\t};\n\t\t\n\t\t\n\t\t/**\n\t\t * This function will place a new row directly after a row which is currently\n\t\t * on display on the page, with the HTML contents that is passed into the\n\t\t * function. This can be used, for example, to ask for confirmation that a\n\t\t * particular record should be deleted.\n\t\t *  @param {node} nTr The table row to 'open'\n\t\t *  @param {string|node|jQuery} mHtml The HTML to put into the row\n\t\t *  @param {string} sClass Class to give the new TD cell\n\t\t *  @returns {node} The row opened. Note that if the table row passed in as the\n\t\t *    first parameter, is not found in the table, this method will silently\n\t\t *    return.\n\t\t *  @dtopt API\n\t\t *  @deprecated Since v1.10\n\t\t *\n\t\t *  @example\n\t\t *    $(document).ready(function() {\n\t\t *      var oTable;\n\t\t *\n\t\t *      // 'open' an information row when a row is clicked on\n\t\t *      $('#example tbody tr').click( function () {\n\t\t *        if ( oTable.fnIsOpen(this) ) {\n\t\t *          oTable.fnClose( this );\n\t\t *        } else {\n\t\t *          oTable.fnOpen( this, \"Temporary row opened\", \"info_row\" );\n\t\t *        }\n\t\t *      } );\n\t\t *\n\t\t *      oTable = $('#example').dataTable();\n\t\t *    } );\n\t\t */\n\t\tthis.fnOpen = function( nTr, mHtml, sClass )\n\t\t{\n\t\t\treturn this.api( true )\n\t\t\t\t.row( nTr )\n\t\t\t\t.child( mHtml, sClass )\n\t\t\t\t.show()\n\t\t\t\t.child()[0];\n\t\t};\n\t\t\n\t\t\n\t\t/**\n\t\t * Change the pagination - provides the internal logic for pagination in a simple API\n\t\t * function. 
With this function you can have a DataTables table go to the next,\n\t\t * previous, first or last pages.\n\t\t *  @param {string|int} mAction Paging action to take: \"first\", \"previous\", \"next\" or \"last\"\n\t\t *    or page number to jump to (integer), note that page 0 is the first page.\n\t\t *  @param {bool} [bRedraw=true] Redraw the table or not\n\t\t *  @dtopt API\n\t\t *  @deprecated Since v1.10\n\t\t *\n\t\t *  @example\n\t\t *    $(document).ready(function() {\n\t\t *      var oTable = $('#example').dataTable();\n\t\t *      oTable.fnPageChange( 'next' );\n\t\t *    } );\n\t\t */\n\t\tthis.fnPageChange = function ( mAction, bRedraw )\n\t\t{\n\t\t\tvar api = this.api( true ).page( mAction );\n\t\t\n\t\t\tif ( bRedraw === undefined || bRedraw ) {\n\t\t\t\tapi.draw(false);\n\t\t\t}\n\t\t};\n\t\t\n\t\t\n\t\t/**\n\t\t * Show a particular column\n\t\t *  @param {int} iCol The column whose display should be changed\n\t\t *  @param {bool} bShow Show (true) or hide (false) the column\n\t\t *  @param {bool} [bRedraw=true] Redraw the table or not\n\t\t *  @dtopt API\n\t\t *  @deprecated Since v1.10\n\t\t *\n\t\t *  @example\n\t\t *    $(document).ready(function() {\n\t\t *      var oTable = $('#example').dataTable();\n\t\t *\n\t\t *      // Hide the second column after initialisation\n\t\t *      oTable.fnSetColumnVis( 1, false );\n\t\t *    } );\n\t\t */\n\t\tthis.fnSetColumnVis = function ( iCol, bShow, bRedraw )\n\t\t{\n\t\t\tvar api = this.api( true ).column( iCol ).visible( bShow );\n\t\t\n\t\t\tif ( bRedraw === undefined || bRedraw ) {\n\t\t\t\tapi.columns.adjust().draw();\n\t\t\t}\n\t\t};\n\t\t\n\t\t\n\t\t/**\n\t\t * Get the settings for a particular table for external manipulation\n\t\t *  @returns {object} DataTables settings object. 
See\n\t\t *    {@link DataTable.models.oSettings}\n\t\t *  @dtopt API\n\t\t *  @deprecated Since v1.10\n\t\t *\n\t\t *  @example\n\t\t *    $(document).ready(function() {\n\t\t *      var oTable = $('#example').dataTable();\n\t\t *      var oSettings = oTable.fnSettings();\n\t\t *\n\t\t *      // Show an example parameter from the settings\n\t\t *      alert( oSettings._iDisplayStart );\n\t\t *    } );\n\t\t */\n\t\tthis.fnSettings = function()\n\t\t{\n\t\t\treturn _fnSettingsFromNode( this[_ext.iApiIndex] );\n\t\t};\n\t\t\n\t\t\n\t\t/**\n\t\t * Sort the table by a particular column\n\t\t *  @param {int} iCol the data index to sort on. Note that this will not match the\n\t\t *    'display index' if you have hidden data entries\n\t\t *  @dtopt API\n\t\t *  @deprecated Since v1.10\n\t\t *\n\t\t *  @example\n\t\t *    $(document).ready(function() {\n\t\t *      var oTable = $('#example').dataTable();\n\t\t *\n\t\t *      // Sort immediately with columns 0 and 1\n\t\t *      oTable.fnSort( [ [0,'asc'], [1,'asc'] ] );\n\t\t *    } );\n\t\t */\n\t\tthis.fnSort = function( aaSort )\n\t\t{\n\t\t\tthis.api( true ).order( aaSort ).draw();\n\t\t};\n\t\t\n\t\t\n\t\t/**\n\t\t * Attach a sort listener to an element for a given column\n\t\t *  @param {node} nNode the element to attach the sort listener to\n\t\t *  @param {int} iColumn the column that a click on this node will sort on\n\t\t *  @param {function} [fnCallback] callback function when sort is run\n\t\t *  @dtopt API\n\t\t *  @deprecated Since v1.10\n\t\t *\n\t\t *  @example\n\t\t *    $(document).ready(function() {\n\t\t *      var oTable = $('#example').dataTable();\n\t\t *\n\t\t *      // Sort on column 1, when 'sorter' is clicked on\n\t\t *      oTable.fnSortListener( document.getElementById('sorter'), 1 );\n\t\t *    } );\n\t\t */\n\t\tthis.fnSortListener = function( nNode, iColumn, fnCallback )\n\t\t{\n\t\t\tthis.api( true ).order.listener( nNode, iColumn, fnCallback );\n\t\t};\n\t\t\n\t\t\n\t\t/**\n\t\t * Update 
a table cell or row - this method will accept either a single value to\n\t\t * update the cell with, an array of values with one element for each column or\n\t\t * an object in the same format as the original data source. The function is\n\t\t * self-referencing in order to make the multi column updates easier.\n\t\t *  @param {object|array|string} mData Data to update the cell/row with\n\t\t *  @param {node|int} mRow TR element you want to update or the aoData index\n\t\t *  @param {int} [iColumn] The column to update, give as null or undefined to\n\t\t *    update a whole row.\n\t\t *  @param {bool} [bRedraw=true] Redraw the table or not\n\t\t *  @param {bool} [bAction=true] Perform pre-draw actions or not\n\t\t *  @returns {int} 0 on success, 1 on error\n\t\t *  @dtopt API\n\t\t *  @deprecated Since v1.10\n\t\t *\n\t\t *  @example\n\t\t *    $(document).ready(function() {\n\t\t *      var oTable = $('#example').dataTable();\n\t\t *      oTable.fnUpdate( 'Example update', 0, 0 ); // Single cell\n\t\t *      oTable.fnUpdate( ['a', 'b', 'c', 'd', 'e'], $('tbody tr')[0] ); // Row\n\t\t *    } );\n\t\t */\n\t\tthis.fnUpdate = function( mData, mRow, iColumn, bRedraw, bAction )\n\t\t{\n\t\t\tvar api = this.api( true );\n\t\t\n\t\t\tif ( iColumn === undefined || iColumn === null ) {\n\t\t\t\tapi.row( mRow ).data( mData );\n\t\t\t}\n\t\t\telse {\n\t\t\t\tapi.cell( mRow, iColumn ).data( mData );\n\t\t\t}\n\t\t\n\t\t\tif ( bAction === undefined || bAction ) {\n\t\t\t\tapi.columns.adjust();\n\t\t\t}\n\t\t\n\t\t\tif ( bRedraw === undefined || bRedraw ) {\n\t\t\t\tapi.draw();\n\t\t\t}\n\t\t\treturn 0;\n\t\t};\n\t\t\n\t\t\n\t\t/**\n\t\t * Provide a common method for plug-ins to check the version of DataTables being used, in order\n\t\t * to ensure compatibility.\n\t\t *  @param {string} sVersion Version string to check for, in the format \"X.Y.Z\". 
Note that the\n\t\t *    formats \"X\" and \"X.Y\" are also acceptable.\n\t\t *  @returns {boolean} true if this version of DataTables is greater or equal to the required\n\t\t *    version, or false if this version of DataTales is not suitable\n\t\t *  @method\n\t\t *  @dtopt API\n\t\t *  @deprecated Since v1.10\n\t\t *\n\t\t *  @example\n\t\t *    $(document).ready(function() {\n\t\t *      var oTable = $('#example').dataTable();\n\t\t *      alert( oTable.fnVersionCheck( '1.9.0' ) );\n\t\t *    } );\n\t\t */\n\t\tthis.fnVersionCheck = _ext.fnVersionCheck;\n\t\t\n\n\t\tvar _that = this;\n\t\tvar emptyInit = options === undefined;\n\t\tvar len = this.length;\n\n\t\tif ( emptyInit ) {\n\t\t\toptions = {};\n\t\t}\n\n\t\tthis.oApi = this.internal = _ext.internal;\n\n\t\t// Extend with old style plug-in API methods\n\t\tfor ( var fn in DataTable.ext.internal ) {\n\t\t\tif ( fn ) {\n\t\t\t\tthis[fn] = _fnExternApiFunc(fn);\n\t\t\t}\n\t\t}\n\n\t\tthis.each(function() {\n\t\t\t// For each initialisation we want to give it a clean initialisation\n\t\t\t// object that can be bashed around\n\t\t\tvar o = {};\n\t\t\tvar oInit = len > 1 ? 
// optimisation for single table case\n\t\t\t\t_fnExtend( o, options, true ) :\n\t\t\t\toptions;\n\n\t\t\t/*global oInit,_that,emptyInit*/\n\t\t\tvar i=0, iLen, j, jLen, k, kLen;\n\t\t\tvar sId = this.getAttribute( 'id' );\n\t\t\tvar bInitHandedOff = false;\n\t\t\tvar defaults = DataTable.defaults;\n\t\t\tvar $this = $(this);\n\t\t\t\n\t\t\t\n\t\t\t/* Sanity check */\n\t\t\tif ( this.nodeName.toLowerCase() != 'table' )\n\t\t\t{\n\t\t\t\t_fnLog( null, 0, 'Non-table node initialisation ('+this.nodeName+')', 2 );\n\t\t\t\treturn;\n\t\t\t}\n\t\t\t\n\t\t\t/* Backwards compatibility for the defaults */\n\t\t\t_fnCompatOpts( defaults );\n\t\t\t_fnCompatCols( defaults.column );\n\t\t\t\n\t\t\t/* Convert the camel-case defaults to Hungarian */\n\t\t\t_fnCamelToHungarian( defaults, defaults, true );\n\t\t\t_fnCamelToHungarian( defaults.column, defaults.column, true );\n\t\t\t\n\t\t\t/* Setting up the initialisation object */\n\t\t\t_fnCamelToHungarian( defaults, $.extend( oInit, $this.data() ) );\n\t\t\t\n\t\t\t\n\t\t\t\n\t\t\t/* Check to see if we are re-initialising a table */\n\t\t\tvar allSettings = DataTable.settings;\n\t\t\tfor ( i=0, iLen=allSettings.length ; i<iLen ; i++ )\n\t\t\t{\n\t\t\t\tvar s = allSettings[i];\n\t\t\t\n\t\t\t\t/* Base check on table node */\n\t\t\t\tif ( s.nTable == this || s.nTHead.parentNode == this || (s.nTFoot && s.nTFoot.parentNode == this) )\n\t\t\t\t{\n\t\t\t\t\tvar bRetrieve = oInit.bRetrieve !== undefined ? oInit.bRetrieve : defaults.bRetrieve;\n\t\t\t\t\tvar bDestroy = oInit.bDestroy !== undefined ? 
oInit.bDestroy : defaults.bDestroy;\n\t\t\t\n\t\t\t\t\tif ( emptyInit || bRetrieve )\n\t\t\t\t\t{\n\t\t\t\t\t\treturn s.oInstance;\n\t\t\t\t\t}\n\t\t\t\t\telse if ( bDestroy )\n\t\t\t\t\t{\n\t\t\t\t\t\ts.oInstance.fnDestroy();\n\t\t\t\t\t\tbreak;\n\t\t\t\t\t}\n\t\t\t\t\telse\n\t\t\t\t\t{\n\t\t\t\t\t\t_fnLog( s, 0, 'Cannot reinitialise DataTable', 3 );\n\t\t\t\t\t\treturn;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\n\t\t\t\t/* If the element we are initialising has the same ID as a table which was previously\n\t\t\t\t * initialised, but the table nodes don't match (from before) then we destroy the old\n\t\t\t\t * instance by simply deleting it. This is under the assumption that the table has been\n\t\t\t\t * destroyed by other methods. Anyone using non-id selectors will need to do this manually\n\t\t\t\t */\n\t\t\t\tif ( s.sTableId == this.id )\n\t\t\t\t{\n\t\t\t\t\tallSettings.splice( i, 1 );\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t}\n\t\t\t\n\t\t\t/* Ensure the table has an ID - required for accessibility */\n\t\t\tif ( sId === null || sId === \"\" )\n\t\t\t{\n\t\t\t\tsId = \"DataTables_Table_\"+(DataTable.ext._unique++);\n\t\t\t\tthis.id = sId;\n\t\t\t}\n\t\t\t\n\t\t\t/* Create the settings object for this table and set some of the default parameters */\n\t\t\tvar oSettings = $.extend( true, {}, DataTable.models.oSettings, {\n\t\t\t\t\"sDestroyWidth\": $this[0].style.width,\n\t\t\t\t\"sInstance\":     sId,\n\t\t\t\t\"sTableId\":      sId\n\t\t\t} );\n\t\t\toSettings.nTable = this;\n\t\t\toSettings.oApi   = _that.internal;\n\t\t\toSettings.oInit  = oInit;\n\t\t\t\n\t\t\tallSettings.push( oSettings );\n\t\t\t\n\t\t\t// Need to add the instance after the instance after the settings object has been added\n\t\t\t// to the settings array, so we can self reference the table instance if more than one\n\t\t\toSettings.oInstance = (_that.length===1) ? 
_that : $this.dataTable();\n\t\t\t\n\t\t\t// Backwards compatibility, before we apply all the defaults\n\t\t\t_fnCompatOpts( oInit );\n\t\t\t\n\t\t\tif ( oInit.oLanguage )\n\t\t\t{\n\t\t\t\t_fnLanguageCompat( oInit.oLanguage );\n\t\t\t}\n\t\t\t\n\t\t\t// If the length menu is given, but the init display length is not, use the length menu\n\t\t\tif ( oInit.aLengthMenu && ! oInit.iDisplayLength )\n\t\t\t{\n\t\t\t\toInit.iDisplayLength = $.isArray( oInit.aLengthMenu[0] ) ?\n\t\t\t\t\toInit.aLengthMenu[0][0] : oInit.aLengthMenu[0];\n\t\t\t}\n\t\t\t\n\t\t\t// Apply the defaults and init options to make a single init object will all\n\t\t\t// options defined from defaults and instance options.\n\t\t\toInit = _fnExtend( $.extend( true, {}, defaults ), oInit );\n\t\t\t\n\t\t\t\n\t\t\t// Map the initialisation options onto the settings object\n\t\t\t_fnMap( oSettings.oFeatures, oInit, [\n\t\t\t\t\"bPaginate\",\n\t\t\t\t\"bLengthChange\",\n\t\t\t\t\"bFilter\",\n\t\t\t\t\"bSort\",\n\t\t\t\t\"bSortMulti\",\n\t\t\t\t\"bInfo\",\n\t\t\t\t\"bProcessing\",\n\t\t\t\t\"bAutoWidth\",\n\t\t\t\t\"bSortClasses\",\n\t\t\t\t\"bServerSide\",\n\t\t\t\t\"bDeferRender\"\n\t\t\t] );\n\t\t\t_fnMap( oSettings, oInit, [\n\t\t\t\t\"asStripeClasses\",\n\t\t\t\t\"ajax\",\n\t\t\t\t\"fnServerData\",\n\t\t\t\t\"fnFormatNumber\",\n\t\t\t\t\"sServerMethod\",\n\t\t\t\t\"aaSorting\",\n\t\t\t\t\"aaSortingFixed\",\n\t\t\t\t\"aLengthMenu\",\n\t\t\t\t\"sPaginationType\",\n\t\t\t\t\"sAjaxSource\",\n\t\t\t\t\"sAjaxDataProp\",\n\t\t\t\t\"iStateDuration\",\n\t\t\t\t\"sDom\",\n\t\t\t\t\"bSortCellsTop\",\n\t\t\t\t\"iTabIndex\",\n\t\t\t\t\"fnStateLoadCallback\",\n\t\t\t\t\"fnStateSaveCallback\",\n\t\t\t\t\"renderer\",\n\t\t\t\t\"searchDelay\",\n\t\t\t\t\"rowId\",\n\t\t\t\t[ \"iCookieDuration\", \"iStateDuration\" ], // backwards compat\n\t\t\t\t[ \"oSearch\", \"oPreviousSearch\" ],\n\t\t\t\t[ \"aoSearchCols\", \"aoPreSearchCols\" ],\n\t\t\t\t[ \"iDisplayLength\", \"_iDisplayLength\" ],\n\t\t\t\t[ \"bJQueryUI\", 
\"bJUI\" ]\n\t\t\t] );\n\t\t\t_fnMap( oSettings.oScroll, oInit, [\n\t\t\t\t[ \"sScrollX\", \"sX\" ],\n\t\t\t\t[ \"sScrollXInner\", \"sXInner\" ],\n\t\t\t\t[ \"sScrollY\", \"sY\" ],\n\t\t\t\t[ \"bScrollCollapse\", \"bCollapse\" ]\n\t\t\t] );\n\t\t\t_fnMap( oSettings.oLanguage, oInit, \"fnInfoCallback\" );\n\t\t\t\n\t\t\t/* Callback functions which are array driven */\n\t\t\t_fnCallbackReg( oSettings, 'aoDrawCallback',       oInit.fnDrawCallback,      'user' );\n\t\t\t_fnCallbackReg( oSettings, 'aoServerParams',       oInit.fnServerParams,      'user' );\n\t\t\t_fnCallbackReg( oSettings, 'aoStateSaveParams',    oInit.fnStateSaveParams,   'user' );\n\t\t\t_fnCallbackReg( oSettings, 'aoStateLoadParams',    oInit.fnStateLoadParams,   'user' );\n\t\t\t_fnCallbackReg( oSettings, 'aoStateLoaded',        oInit.fnStateLoaded,       'user' );\n\t\t\t_fnCallbackReg( oSettings, 'aoRowCallback',        oInit.fnRowCallback,       'user' );\n\t\t\t_fnCallbackReg( oSettings, 'aoRowCreatedCallback', oInit.fnCreatedRow,        'user' );\n\t\t\t_fnCallbackReg( oSettings, 'aoHeaderCallback',     oInit.fnHeaderCallback,    'user' );\n\t\t\t_fnCallbackReg( oSettings, 'aoFooterCallback',     oInit.fnFooterCallback,    'user' );\n\t\t\t_fnCallbackReg( oSettings, 'aoInitComplete',       oInit.fnInitComplete,      'user' );\n\t\t\t_fnCallbackReg( oSettings, 'aoPreDrawCallback',    oInit.fnPreDrawCallback,   'user' );\n\t\t\t\n\t\t\toSettings.rowIdFn = _fnGetObjectDataFn( oInit.rowId );\n\t\t\t\n\t\t\t/* Browser support detection */\n\t\t\t_fnBrowserDetect( oSettings );\n\t\t\t\n\t\t\tvar oClasses = oSettings.oClasses;\n\t\t\t\n\t\t\t// @todo Remove in 1.11\n\t\t\tif ( oInit.bJQueryUI )\n\t\t\t{\n\t\t\t\t/* Use the JUI classes object for display. 
You could clone the oStdClasses object if\n\t\t\t\t * you want to have multiple tables with multiple independent classes\n\t\t\t\t */\n\t\t\t\t$.extend( oClasses, DataTable.ext.oJUIClasses, oInit.oClasses );\n\t\t\t\n\t\t\t\tif ( oInit.sDom === defaults.sDom && defaults.sDom === \"lfrtip\" )\n\t\t\t\t{\n\t\t\t\t\t/* Set the DOM to use a layout suitable for jQuery UI's theming */\n\t\t\t\t\toSettings.sDom = '<\"H\"lfr>t<\"F\"ip>';\n\t\t\t\t}\n\t\t\t\n\t\t\t\tif ( ! oSettings.renderer ) {\n\t\t\t\t\toSettings.renderer = 'jqueryui';\n\t\t\t\t}\n\t\t\t\telse if ( $.isPlainObject( oSettings.renderer ) && ! oSettings.renderer.header ) {\n\t\t\t\t\toSettings.renderer.header = 'jqueryui';\n\t\t\t\t}\n\t\t\t}\n\t\t\telse\n\t\t\t{\n\t\t\t\t$.extend( oClasses, DataTable.ext.classes, oInit.oClasses );\n\t\t\t}\n\t\t\t$this.addClass( oClasses.sTable );\n\t\t\t\n\t\t\t\n\t\t\tif ( oSettings.iInitDisplayStart === undefined )\n\t\t\t{\n\t\t\t\t/* Display start point, taking into account the save saving */\n\t\t\t\toSettings.iInitDisplayStart = oInit.iDisplayStart;\n\t\t\t\toSettings._iDisplayStart = oInit.iDisplayStart;\n\t\t\t}\n\t\t\t\n\t\t\tif ( oInit.iDeferLoading !== null )\n\t\t\t{\n\t\t\t\toSettings.bDeferLoading = true;\n\t\t\t\tvar tmp = $.isArray( oInit.iDeferLoading );\n\t\t\t\toSettings._iRecordsDisplay = tmp ? oInit.iDeferLoading[0] : oInit.iDeferLoading;\n\t\t\t\toSettings._iRecordsTotal = tmp ? 
oInit.iDeferLoading[1] : oInit.iDeferLoading;\n\t\t\t}\n\t\t\t\n\t\t\t/* Language definitions */\n\t\t\tvar oLanguage = oSettings.oLanguage;\n\t\t\t$.extend( true, oLanguage, oInit.oLanguage );\n\t\t\t\n\t\t\tif ( oLanguage.sUrl !== \"\" )\n\t\t\t{\n\t\t\t\t/* Get the language definitions from a file - because this Ajax call makes the language\n\t\t\t\t * get async to the remainder of this function we use bInitHandedOff to indicate that\n\t\t\t\t * _fnInitialise will be fired by the returned Ajax handler, rather than the constructor\n\t\t\t\t */\n\t\t\t\t$.ajax( {\n\t\t\t\t\tdataType: 'json',\n\t\t\t\t\turl: oLanguage.sUrl,\n\t\t\t\t\tsuccess: function ( json ) {\n\t\t\t\t\t\t_fnLanguageCompat( json );\n\t\t\t\t\t\t_fnCamelToHungarian( defaults.oLanguage, json );\n\t\t\t\t\t\t$.extend( true, oLanguage, json );\n\t\t\t\t\t\t_fnInitialise( oSettings );\n\t\t\t\t\t},\n\t\t\t\t\terror: function () {\n\t\t\t\t\t\t// Error occurred loading language file, continue on as best we can\n\t\t\t\t\t\t_fnInitialise( oSettings );\n\t\t\t\t\t}\n\t\t\t\t} );\n\t\t\t\tbInitHandedOff = true;\n\t\t\t}\n\t\t\t\n\t\t\t/*\n\t\t\t * Stripes\n\t\t\t */\n\t\t\tif ( oInit.asStripeClasses === null )\n\t\t\t{\n\t\t\t\toSettings.asStripeClasses =[\n\t\t\t\t\toClasses.sStripeOdd,\n\t\t\t\t\toClasses.sStripeEven\n\t\t\t\t];\n\t\t\t}\n\t\t\t\n\t\t\t/* Remove row stripe classes if they are already on the table row */\n\t\t\tvar stripeClasses = oSettings.asStripeClasses;\n\t\t\tvar rowOne = $this.children('tbody').find('tr').eq(0);\n\t\t\tif ( $.inArray( true, $.map( stripeClasses, function(el, i) {\n\t\t\t\treturn rowOne.hasClass(el);\n\t\t\t} ) ) !== -1 ) {\n\t\t\t\t$('tbody tr', this).removeClass( stripeClasses.join(' ') );\n\t\t\t\toSettings.asDestroyStripes = stripeClasses.slice();\n\t\t\t}\n\t\t\t\n\t\t\t/*\n\t\t\t * Columns\n\t\t\t * See if we should load columns automatically or use defined ones\n\t\t\t */\n\t\t\tvar anThs = [];\n\t\t\tvar aoColumnsInit;\n\t\t\tvar nThead = 
this.getElementsByTagName('thead');\n\t\t\tif ( nThead.length !== 0 )\n\t\t\t{\n\t\t\t\t_fnDetectHeader( oSettings.aoHeader, nThead[0] );\n\t\t\t\tanThs = _fnGetUniqueThs( oSettings );\n\t\t\t}\n\t\t\t\n\t\t\t/* If not given a column array, generate one with nulls */\n\t\t\tif ( oInit.aoColumns === null )\n\t\t\t{\n\t\t\t\taoColumnsInit = [];\n\t\t\t\tfor ( i=0, iLen=anThs.length ; i<iLen ; i++ )\n\t\t\t\t{\n\t\t\t\t\taoColumnsInit.push( null );\n\t\t\t\t}\n\t\t\t}\n\t\t\telse\n\t\t\t{\n\t\t\t\taoColumnsInit = oInit.aoColumns;\n\t\t\t}\n\t\t\t\n\t\t\t/* Add the columns */\n\t\t\tfor ( i=0, iLen=aoColumnsInit.length ; i<iLen ; i++ )\n\t\t\t{\n\t\t\t\t_fnAddColumn( oSettings, anThs ? anThs[i] : null );\n\t\t\t}\n\t\t\t\n\t\t\t/* Apply the column definitions */\n\t\t\t_fnApplyColumnDefs( oSettings, oInit.aoColumnDefs, aoColumnsInit, function (iCol, oDef) {\n\t\t\t\t_fnColumnOptions( oSettings, iCol, oDef );\n\t\t\t} );\n\t\t\t\n\t\t\t/* HTML5 attribute detection - build an mData object automatically if the\n\t\t\t * attributes are found\n\t\t\t */\n\t\t\tif ( rowOne.length ) {\n\t\t\t\tvar a = function ( cell, name ) {\n\t\t\t\t\treturn cell.getAttribute( 'data-'+name ) !== null ? name : null;\n\t\t\t\t};\n\t\t\t\n\t\t\t\t$( rowOne[0] ).children('th, td').each( function (i, cell) {\n\t\t\t\t\tvar col = oSettings.aoColumns[i];\n\t\t\t\n\t\t\t\t\tif ( col.mData === i ) {\n\t\t\t\t\t\tvar sort = a( cell, 'sort' ) || a( cell, 'order' );\n\t\t\t\t\t\tvar filter = a( cell, 'filter' ) || a( cell, 'search' );\n\t\t\t\n\t\t\t\t\t\tif ( sort !== null || filter !== null ) {\n\t\t\t\t\t\t\tcol.mData = {\n\t\t\t\t\t\t\t\t_:      i+'.display',\n\t\t\t\t\t\t\t\tsort:   sort !== null   ? i+'.@data-'+sort   : undefined,\n\t\t\t\t\t\t\t\ttype:   sort !== null   ? i+'.@data-'+sort   : undefined,\n\t\t\t\t\t\t\t\tfilter: filter !== null ? 
i+'.@data-'+filter : undefined\n\t\t\t\t\t\t\t};\n\t\t\t\n\t\t\t\t\t\t\t_fnColumnOptions( oSettings, i );\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t} );\n\t\t\t}\n\t\t\t\n\t\t\tvar features = oSettings.oFeatures;\n\t\t\t\n\t\t\t/* Must be done after everything which can be overridden by the state saving! */\n\t\t\tif ( oInit.bStateSave )\n\t\t\t{\n\t\t\t\tfeatures.bStateSave = true;\n\t\t\t\t_fnLoadState( oSettings, oInit );\n\t\t\t\t_fnCallbackReg( oSettings, 'aoDrawCallback', _fnSaveState, 'state_save' );\n\t\t\t}\n\t\t\t\n\t\t\t\n\t\t\t/*\n\t\t\t * Sorting\n\t\t\t * @todo For modularisation (1.11) this needs to do into a sort start up handler\n\t\t\t */\n\t\t\t\n\t\t\t// If aaSorting is not defined, then we use the first indicator in asSorting\n\t\t\t// in case that has been altered, so the default sort reflects that option\n\t\t\tif ( oInit.aaSorting === undefined )\n\t\t\t{\n\t\t\t\tvar sorting = oSettings.aaSorting;\n\t\t\t\tfor ( i=0, iLen=sorting.length ; i<iLen ; i++ )\n\t\t\t\t{\n\t\t\t\t\tsorting[i][1] = oSettings.aoColumns[ i ].asSorting[0];\n\t\t\t\t}\n\t\t\t}\n\t\t\t\n\t\t\t/* Do a first pass on the sorting classes (allows any size changes to be taken into\n\t\t\t * account, and also will apply sorting disabled classes if disabled\n\t\t\t */\n\t\t\t_fnSortingClasses( oSettings );\n\t\t\t\n\t\t\tif ( features.bSort )\n\t\t\t{\n\t\t\t\t_fnCallbackReg( oSettings, 'aoDrawCallback', function () {\n\t\t\t\t\tif ( oSettings.bSorted ) {\n\t\t\t\t\t\tvar aSort = _fnSortFlatten( oSettings );\n\t\t\t\t\t\tvar sortedColumns = {};\n\t\t\t\n\t\t\t\t\t\t$.each( aSort, function (i, val) {\n\t\t\t\t\t\t\tsortedColumns[ val.src ] = val.dir;\n\t\t\t\t\t\t} );\n\t\t\t\n\t\t\t\t\t\t_fnCallbackFire( oSettings, null, 'order', [oSettings, aSort, sortedColumns] );\n\t\t\t\t\t\t_fnSortAria( oSettings );\n\t\t\t\t\t}\n\t\t\t\t} );\n\t\t\t}\n\t\t\t\n\t\t\t_fnCallbackReg( oSettings, 'aoDrawCallback', function () {\n\t\t\t\tif ( oSettings.bSorted || _fnDataSource( oSettings ) === 'ssp' 
|| features.bDeferRender ) {\n\t\t\t\t\t_fnSortingClasses( oSettings );\n\t\t\t\t}\n\t\t\t}, 'sc' );\n\t\t\t\n\t\t\t\n\t\t\t/*\n\t\t\t * Final init\n\t\t\t * Cache the header, body and footer as required, creating them if needed\n\t\t\t */\n\t\t\t\n\t\t\t// Work around for Webkit bug 83867 - store the caption-side before removing from doc\n\t\t\tvar captions = $this.children('caption').each( function () {\n\t\t\t\tthis._captionSide = $this.css('caption-side');\n\t\t\t} );\n\t\t\t\n\t\t\tvar thead = $this.children('thead');\n\t\t\tif ( thead.length === 0 )\n\t\t\t{\n\t\t\t\tthead = $('<thead/>').appendTo(this);\n\t\t\t}\n\t\t\toSettings.nTHead = thead[0];\n\t\t\t\n\t\t\tvar tbody = $this.children('tbody');\n\t\t\tif ( tbody.length === 0 )\n\t\t\t{\n\t\t\t\ttbody = $('<tbody/>').appendTo(this);\n\t\t\t}\n\t\t\toSettings.nTBody = tbody[0];\n\t\t\t\n\t\t\tvar tfoot = $this.children('tfoot');\n\t\t\tif ( tfoot.length === 0 && captions.length > 0 && (oSettings.oScroll.sX !== \"\" || oSettings.oScroll.sY !== \"\") )\n\t\t\t{\n\t\t\t\t// If we are a scrolling table, and no footer has been given, then we need to create\n\t\t\t\t// a tfoot element for the caption element to be appended to\n\t\t\t\ttfoot = $('<tfoot/>').appendTo(this);\n\t\t\t}\n\t\t\t\n\t\t\tif ( tfoot.length === 0 || tfoot.children().length === 0 ) {\n\t\t\t\t$this.addClass( oClasses.sNoFooter );\n\t\t\t}\n\t\t\telse if ( tfoot.length > 0 ) {\n\t\t\t\toSettings.nTFoot = tfoot[0];\n\t\t\t\t_fnDetectHeader( oSettings.aoFooter, oSettings.nTFoot );\n\t\t\t}\n\t\t\t\n\t\t\t/* Check if there is data passing into the constructor */\n\t\t\tif ( oInit.aaData )\n\t\t\t{\n\t\t\t\tfor ( i=0 ; i<oInit.aaData.length ; i++ )\n\t\t\t\t{\n\t\t\t\t\t_fnAddData( oSettings, oInit.aaData[ i ] );\n\t\t\t\t}\n\t\t\t}\n\t\t\telse if ( oSettings.bDeferLoading || _fnDataSource( oSettings ) == 'dom' )\n\t\t\t{\n\t\t\t\t/* Grab the data from the page - only do this when deferred loading or no Ajax\n\t\t\t\t * source since there is no 
point in reading the DOM data if we are then going\n\t\t\t\t * to replace it with Ajax data\n\t\t\t\t */\n\t\t\t\t_fnAddTr( oSettings, $(oSettings.nTBody).children('tr') );\n\t\t\t}\n\t\t\t\n\t\t\t/* Copy the data index array */\n\t\t\toSettings.aiDisplay = oSettings.aiDisplayMaster.slice();\n\t\t\t\n\t\t\t/* Initialisation complete - table can be drawn */\n\t\t\toSettings.bInitialised = true;\n\t\t\t\n\t\t\t/* Check if we need to initialise the table (it might not have been handed off to the\n\t\t\t * language processor)\n\t\t\t */\n\t\t\tif ( bInitHandedOff === false )\n\t\t\t{\n\t\t\t\t_fnInitialise( oSettings );\n\t\t\t}\n\t\t} );\n\t\t_that = null;\n\t\treturn this;\n\t};\n\n\t\n\t/*\n\t * It is useful to have variables which are scoped locally so only the\n\t * DataTables functions can access them and they don't leak into global space.\n\t * At the same time these functions are often useful over multiple files in the\n\t * core and API, so we list, or at least document, all variables which are used\n\t * by DataTables as private variables here. 
This also ensures that there is no\n\t * clashing of variable names and that they can easily referenced for reuse.\n\t */\n\t\n\t\n\t// Defined else where\n\t//  _selector_run\n\t//  _selector_opts\n\t//  _selector_first\n\t//  _selector_row_indexes\n\t\n\tvar _ext; // DataTable.ext\n\tvar _Api; // DataTable.Api\n\tvar _api_register; // DataTable.Api.register\n\tvar _api_registerPlural; // DataTable.Api.registerPlural\n\t\n\tvar _re_dic = {};\n\tvar _re_new_lines = /[\\r\\n]/g;\n\tvar _re_html = /<.*?>/g;\n\tvar _re_date_start = /^[\\w\\+\\-]/;\n\tvar _re_date_end = /[\\w\\+\\-]$/;\n\t\n\t// Escape regular expression special characters\n\tvar _re_escape_regex = new RegExp( '(\\\\' + [ '/', '.', '*', '+', '?', '|', '(', ')', '[', ']', '{', '}', '\\\\', '$', '^', '-' ].join('|\\\\') + ')', 'g' );\n\t\n\t// http://en.wikipedia.org/wiki/Foreign_exchange_market\n\t// - \\u20BD - Russian ruble.\n\t// - \\u20a9 - South Korean Won\n\t// - \\u20BA - Turkish Lira\n\t// - \\u20B9 - Indian Rupee\n\t// - R - Brazil (R$) and South Africa\n\t// - fr - Swiss Franc\n\t// - kr - Swedish krona, Norwegian krone and Danish krone\n\t// - \\u2009 is thin space and \\u202F is narrow no-break space, both used in many\n\t//   standards as thousands separators.\n\tvar _re_formatted_numeric = /[',$£€¥%\\u2009\\u202F\\u20BD\\u20a9\\u20BArfk]/gi;\n\t\n\t\n\tvar _empty = function ( d ) {\n\t\treturn !d || d === true || d === '-' ? true : false;\n\t};\n\t\n\t\n\tvar _intVal = function ( s ) {\n\t\tvar integer = parseInt( s, 10 );\n\t\treturn !isNaN(integer) && isFinite(s) ? integer : null;\n\t};\n\t\n\t// Convert from a formatted number with characters other than `.` as the\n\t// decimal place, to a Javascript number\n\tvar _numToDecimal = function ( num, decimalPoint ) {\n\t\t// Cache created regular expressions for speed as this function is called often\n\t\tif ( ! 
_re_dic[ decimalPoint ] ) {\n\t\t\t_re_dic[ decimalPoint ] = new RegExp( _fnEscapeRegex( decimalPoint ), 'g' );\n\t\t}\n\t\treturn typeof num === 'string' && decimalPoint !== '.' ?\n\t\t\tnum.replace( /\\./g, '' ).replace( _re_dic[ decimalPoint ], '.' ) :\n\t\t\tnum;\n\t};\n\t\n\t\n\tvar _isNumber = function ( d, decimalPoint, formatted ) {\n\t\tvar strType = typeof d === 'string';\n\t\n\t\t// If empty return immediately so there must be a number if it is a\n\t\t// formatted string (this stops the string \"k\", or \"kr\", etc being detected\n\t\t// as a formatted number for currency\n\t\tif ( _empty( d ) ) {\n\t\t\treturn true;\n\t\t}\n\t\n\t\tif ( decimalPoint && strType ) {\n\t\t\td = _numToDecimal( d, decimalPoint );\n\t\t}\n\t\n\t\tif ( formatted && strType ) {\n\t\t\td = d.replace( _re_formatted_numeric, '' );\n\t\t}\n\t\n\t\treturn !isNaN( parseFloat(d) ) && isFinite( d );\n\t};\n\t\n\t\n\t// A string without HTML in it can be considered to be HTML still\n\tvar _isHtml = function ( d ) {\n\t\treturn _empty( d ) || typeof d === 'string';\n\t};\n\t\n\t\n\tvar _htmlNumeric = function ( d, decimalPoint, formatted ) {\n\t\tif ( _empty( d ) ) {\n\t\t\treturn true;\n\t\t}\n\t\n\t\tvar html = _isHtml( d );\n\t\treturn ! 
html ?\n\t\t\tnull :\n\t\t\t_isNumber( _stripHtml( d ), decimalPoint, formatted ) ?\n\t\t\t\ttrue :\n\t\t\t\tnull;\n\t};\n\t\n\t\n\tvar _pluck = function ( a, prop, prop2 ) {\n\t\tvar out = [];\n\t\tvar i=0, ien=a.length;\n\t\n\t\t// Could have the test in the loop for slightly smaller code, but speed\n\t\t// is essential here\n\t\tif ( prop2 !== undefined ) {\n\t\t\tfor ( ; i<ien ; i++ ) {\n\t\t\t\tif ( a[i] && a[i][ prop ] ) {\n\t\t\t\t\tout.push( a[i][ prop ][ prop2 ] );\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\telse {\n\t\t\tfor ( ; i<ien ; i++ ) {\n\t\t\t\tif ( a[i] ) {\n\t\t\t\t\tout.push( a[i][ prop ] );\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\n\t\treturn out;\n\t};\n\t\n\t\n\t// Basically the same as _pluck, but rather than looping over `a` we use `order`\n\t// as the indexes to pick from `a`\n\tvar _pluck_order = function ( a, order, prop, prop2 )\n\t{\n\t\tvar out = [];\n\t\tvar i=0, ien=order.length;\n\t\n\t\t// Could have the test in the loop for slightly smaller code, but speed\n\t\t// is essential here\n\t\tif ( prop2 !== undefined ) {\n\t\t\tfor ( ; i<ien ; i++ ) {\n\t\t\t\tif ( a[ order[i] ][ prop ] ) {\n\t\t\t\t\tout.push( a[ order[i] ][ prop ][ prop2 ] );\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\telse {\n\t\t\tfor ( ; i<ien ; i++ ) {\n\t\t\t\tout.push( a[ order[i] ][ prop ] );\n\t\t\t}\n\t\t}\n\t\n\t\treturn out;\n\t};\n\t\n\t\n\tvar _range = function ( len, start )\n\t{\n\t\tvar out = [];\n\t\tvar end;\n\t\n\t\tif ( start === undefined ) {\n\t\t\tstart = 0;\n\t\t\tend = len;\n\t\t}\n\t\telse {\n\t\t\tend = start;\n\t\t\tstart = len;\n\t\t}\n\t\n\t\tfor ( var i=start ; i<end ; i++ ) {\n\t\t\tout.push( i );\n\t\t}\n\t\n\t\treturn out;\n\t};\n\t\n\t\n\tvar _removeEmpty = function ( a )\n\t{\n\t\tvar out = [];\n\t\n\t\tfor ( var i=0, ien=a.length ; i<ien ; i++ ) {\n\t\t\tif ( a[i] ) { // careful - will remove all falsy values!\n\t\t\t\tout.push( a[i] );\n\t\t\t}\n\t\t}\n\t\n\t\treturn out;\n\t};\n\t\n\t\n\tvar _stripHtml = function ( d ) {\n\t\treturn d.replace( _re_html, '' 
);\n\t};\n\t\n\t\n\t/**\n\t * Find the unique elements in a source array.\n\t *\n\t * @param  {array} src Source array\n\t * @return {array} Array of unique items\n\t * @ignore\n\t */\n\tvar _unique = function ( src )\n\t{\n\t\t// A faster unique method is to use object keys to identify used values,\n\t\t// but this doesn't work with arrays or objects, which we must also\n\t\t// consider. See jsperf.com/compare-array-unique-versions/4 for more\n\t\t// information.\n\t\tvar\n\t\t\tout = [],\n\t\t\tval,\n\t\t\ti, ien=src.length,\n\t\t\tj, k=0;\n\t\n\t\tagain: for ( i=0 ; i<ien ; i++ ) {\n\t\t\tval = src[i];\n\t\n\t\t\tfor ( j=0 ; j<k ; j++ ) {\n\t\t\t\tif ( out[j] === val ) {\n\t\t\t\t\tcontinue again;\n\t\t\t\t}\n\t\t\t}\n\t\n\t\t\tout.push( val );\n\t\t\tk++;\n\t\t}\n\t\n\t\treturn out;\n\t};\n\t\n\t\n\t/**\n\t * DataTables utility methods\n\t * \n\t * This namespace provides helper methods that DataTables uses internally to\n\t * create a DataTable, but which are not exclusively used only for DataTables.\n\t * These methods can be used by extension authors to save the duplication of\n\t * code.\n\t *\n\t *  @namespace\n\t */\n\tDataTable.util = {\n\t\t/**\n\t\t * Throttle the calls to a function. Arguments and context are maintained\n\t\t * for the throttled function.\n\t\t *\n\t\t * @param {function} fn Function to be called\n\t\t * @param {integer} freq Call frequency in mS\n\t\t * @return {function} Wrapped function\n\t\t */\n\t\tthrottle: function ( fn, freq ) {\n\t\t\tvar\n\t\t\t\tfrequency = freq !== undefined ? 
freq : 200,\n\t\t\t\tlast,\n\t\t\t\ttimer;\n\t\n\t\t\treturn function () {\n\t\t\t\tvar\n\t\t\t\t\tthat = this,\n\t\t\t\t\tnow  = +new Date(),\n\t\t\t\t\targs = arguments;\n\t\n\t\t\t\tif ( last && now < last + frequency ) {\n\t\t\t\t\tclearTimeout( timer );\n\t\n\t\t\t\t\ttimer = setTimeout( function () {\n\t\t\t\t\t\tlast = undefined;\n\t\t\t\t\t\tfn.apply( that, args );\n\t\t\t\t\t}, frequency );\n\t\t\t\t}\n\t\t\t\telse {\n\t\t\t\t\tlast = now;\n\t\t\t\t\tfn.apply( that, args );\n\t\t\t\t}\n\t\t\t};\n\t\t},\n\t\n\t\n\t\t/**\n\t\t * Escape a string such that it can be used in a regular expression\n\t\t *\n\t\t *  @param {string} val string to escape\n\t\t *  @returns {string} escaped string\n\t\t */\n\t\tescapeRegex: function ( val ) {\n\t\t\treturn val.replace( _re_escape_regex, '\\\\$1' );\n\t\t}\n\t};\n\t\n\t\n\t\n\t/**\n\t * Create a mapping object that allows camel case parameters to be looked up\n\t * for their Hungarian counterparts. The mapping is stored in a private\n\t * parameter called `_hungarianMap` which can be accessed on the source object.\n\t *  @param {object} o\n\t *  @memberof DataTable#oApi\n\t */\n\tfunction _fnHungarianMap ( o )\n\t{\n\t\tvar\n\t\t\thungarian = 'a aa ai ao as b fn i m o s ',\n\t\t\tmatch,\n\t\t\tnewKey,\n\t\t\tmap = {};\n\t\n\t\t$.each( o, function (key, val) {\n\t\t\tmatch = key.match(/^([^A-Z]+?)([A-Z])/);\n\t\n\t\t\tif ( match && hungarian.indexOf(match[1]+' ') !== -1 )\n\t\t\t{\n\t\t\t\tnewKey = key.replace( match[0], match[2].toLowerCase() );\n\t\t\t\tmap[ newKey ] = key;\n\t\n\t\t\t\tif ( match[1] === 'o' )\n\t\t\t\t{\n\t\t\t\t\t_fnHungarianMap( o[key] );\n\t\t\t\t}\n\t\t\t}\n\t\t} );\n\t\n\t\to._hungarianMap = map;\n\t}\n\t\n\t\n\t/**\n\t * Convert from camel case parameters to Hungarian, based on a Hungarian map\n\t * created by _fnHungarianMap.\n\t *  @param {object} src The model object which holds all parameters that can be\n\t *    mapped.\n\t *  @param {object} user The object to convert from camel case to 
Hungarian.\n\t *  @param {boolean} force When set to `true`, properties which already have a\n\t *    Hungarian value in the `user` object will be overwritten. Otherwise they\n\t *    won't be.\n\t *  @memberof DataTable#oApi\n\t */\n\tfunction _fnCamelToHungarian ( src, user, force )\n\t{\n\t\tif ( ! src._hungarianMap ) {\n\t\t\t_fnHungarianMap( src );\n\t\t}\n\t\n\t\tvar hungarianKey;\n\t\n\t\t$.each( user, function (key, val) {\n\t\t\thungarianKey = src._hungarianMap[ key ];\n\t\n\t\t\tif ( hungarianKey !== undefined && (force || user[hungarianKey] === undefined) )\n\t\t\t{\n\t\t\t\t// For objects, we need to buzz down into the object to copy parameters\n\t\t\t\tif ( hungarianKey.charAt(0) === 'o' )\n\t\t\t\t{\n\t\t\t\t\t// Copy the camelCase options over to the hungarian\n\t\t\t\t\tif ( ! user[ hungarianKey ] ) {\n\t\t\t\t\t\tuser[ hungarianKey ] = {};\n\t\t\t\t\t}\n\t\t\t\t\t$.extend( true, user[hungarianKey], user[key] );\n\t\n\t\t\t\t\t_fnCamelToHungarian( src[hungarianKey], user[hungarianKey], force );\n\t\t\t\t}\n\t\t\t\telse {\n\t\t\t\t\tuser[hungarianKey] = user[ key ];\n\t\t\t\t}\n\t\t\t}\n\t\t} );\n\t}\n\t\n\t\n\t/**\n\t * Language compatibility - when certain options are given, and others aren't, we\n\t * need to duplicate the values over, in order to provide backwards compatibility\n\t * with older language files.\n\t *  @param {object} oSettings dataTables settings object\n\t *  @memberof DataTable#oApi\n\t */\n\tfunction _fnLanguageCompat( lang )\n\t{\n\t\tvar defaults = DataTable.defaults.oLanguage;\n\t\tvar zeroRecords = lang.sZeroRecords;\n\t\n\t\t/* Backwards compatibility - if there is no sEmptyTable given, then use the same as\n\t\t * sZeroRecords - assuming that is given.\n\t\t */\n\t\tif ( ! lang.sEmptyTable && zeroRecords &&\n\t\t\tdefaults.sEmptyTable === \"No data available in table\" )\n\t\t{\n\t\t\t_fnMap( lang, lang, 'sZeroRecords', 'sEmptyTable' );\n\t\t}\n\t\n\t\t/* Likewise with loading records */\n\t\tif ( ! 
lang.sLoadingRecords && zeroRecords &&\n\t\t\tdefaults.sLoadingRecords === \"Loading...\" )\n\t\t{\n\t\t\t_fnMap( lang, lang, 'sZeroRecords', 'sLoadingRecords' );\n\t\t}\n\t\n\t\t// Old parameter name of the thousands separator mapped onto the new\n\t\tif ( lang.sInfoThousands ) {\n\t\t\tlang.sThousands = lang.sInfoThousands;\n\t\t}\n\t\n\t\tvar decimal = lang.sDecimal;\n\t\tif ( decimal ) {\n\t\t\t_addNumericSort( decimal );\n\t\t}\n\t}\n\t\n\t\n\t/**\n\t * Map one parameter onto another\n\t *  @param {object} o Object to map\n\t *  @param {*} knew The new parameter name\n\t *  @param {*} old The old parameter name\n\t */\n\tvar _fnCompatMap = function ( o, knew, old ) {\n\t\tif ( o[ knew ] !== undefined ) {\n\t\t\to[ old ] = o[ knew ];\n\t\t}\n\t};\n\t\n\t\n\t/**\n\t * Provide backwards compatibility for the main DT options. Note that the new\n\t * options are mapped onto the old parameters, so this is an external interface\n\t * change only.\n\t *  @param {object} init Object to map\n\t */\n\tfunction _fnCompatOpts ( init )\n\t{\n\t\t_fnCompatMap( init, 'ordering',      'bSort' );\n\t\t_fnCompatMap( init, 'orderMulti',    'bSortMulti' );\n\t\t_fnCompatMap( init, 'orderClasses',  'bSortClasses' );\n\t\t_fnCompatMap( init, 'orderCellsTop', 'bSortCellsTop' );\n\t\t_fnCompatMap( init, 'order',         'aaSorting' );\n\t\t_fnCompatMap( init, 'orderFixed',    'aaSortingFixed' );\n\t\t_fnCompatMap( init, 'paging',        'bPaginate' );\n\t\t_fnCompatMap( init, 'pagingType',    'sPaginationType' );\n\t\t_fnCompatMap( init, 'pageLength',    'iDisplayLength' );\n\t\t_fnCompatMap( init, 'searching',     'bFilter' );\n\t\n\t\t// Boolean initialisation of x-scrolling\n\t\tif ( typeof init.sScrollX === 'boolean' ) {\n\t\t\tinit.sScrollX = init.sScrollX ? '100%' : '';\n\t\t}\n\t\tif ( typeof init.scrollX === 'boolean' ) {\n\t\t\tinit.scrollX = init.scrollX ? 
'100%' : '';\n\t\t}\n\t\n\t\t// Column search objects are in an array, so it needs to be converted\n\t\t// element by element\n\t\tvar searchCols = init.aoSearchCols;\n\t\n\t\tif ( searchCols ) {\n\t\t\tfor ( var i=0, ien=searchCols.length ; i<ien ; i++ ) {\n\t\t\t\tif ( searchCols[i] ) {\n\t\t\t\t\t_fnCamelToHungarian( DataTable.models.oSearch, searchCols[i] );\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\t\n\t\n\t/**\n\t * Provide backwards compatibility for column options. Note that the new options\n\t * are mapped onto the old parameters, so this is an external interface change\n\t * only.\n\t *  @param {object} init Object to map\n\t */\n\tfunction _fnCompatCols ( init )\n\t{\n\t\t_fnCompatMap( init, 'orderable',     'bSortable' );\n\t\t_fnCompatMap( init, 'orderData',     'aDataSort' );\n\t\t_fnCompatMap( init, 'orderSequence', 'asSorting' );\n\t\t_fnCompatMap( init, 'orderDataType', 'sortDataType' );\n\t\n\t\t// orderData can be given as an integer\n\t\tvar dataSort = init.aDataSort;\n\t\tif ( dataSort && ! $.isArray( dataSort ) ) {\n\t\t\tinit.aDataSort = [ dataSort ];\n\t\t}\n\t}\n\t\n\t\n\t/**\n\t * Browser feature detection for capabilities, quirks\n\t *  @param {object} settings dataTables settings object\n\t *  @memberof DataTable#oApi\n\t */\n\tfunction _fnBrowserDetect( settings )\n\t{\n\t\t// We don't need to do this every time DataTables is constructed, the values\n\t\t// calculated are specific to the browser and OS configuration which we\n\t\t// don't expect to change between initialisations\n\t\tif ( ! 
DataTable.__browser ) {\n\t\t\tvar browser = {};\n\t\t\tDataTable.__browser = browser;\n\t\n\t\t\t// Scrolling feature / quirks detection\n\t\t\tvar n = $('<div/>')\n\t\t\t\t.css( {\n\t\t\t\t\tposition: 'fixed',\n\t\t\t\t\ttop: 0,\n\t\t\t\t\tleft: 0,\n\t\t\t\t\theight: 1,\n\t\t\t\t\twidth: 1,\n\t\t\t\t\toverflow: 'hidden'\n\t\t\t\t} )\n\t\t\t\t.append(\n\t\t\t\t\t$('<div/>')\n\t\t\t\t\t\t.css( {\n\t\t\t\t\t\t\tposition: 'absolute',\n\t\t\t\t\t\t\ttop: 1,\n\t\t\t\t\t\t\tleft: 1,\n\t\t\t\t\t\t\twidth: 100,\n\t\t\t\t\t\t\toverflow: 'scroll'\n\t\t\t\t\t\t} )\n\t\t\t\t\t\t.append(\n\t\t\t\t\t\t\t$('<div/>')\n\t\t\t\t\t\t\t\t.css( {\n\t\t\t\t\t\t\t\t\twidth: '100%',\n\t\t\t\t\t\t\t\t\theight: 10\n\t\t\t\t\t\t\t\t} )\n\t\t\t\t\t\t)\n\t\t\t\t)\n\t\t\t\t.appendTo( 'body' );\n\t\n\t\t\tvar outer = n.children();\n\t\t\tvar inner = outer.children();\n\t\n\t\t\t// Numbers below, in order, are:\n\t\t\t// inner.offsetWidth, inner.clientWidth, outer.offsetWidth, outer.clientWidth\n\t\t\t//\n\t\t\t// IE6 XP:                           100 100 100  83\n\t\t\t// IE7 Vista:                        100 100 100  83\n\t\t\t// IE 8+ Windows:                     83  83 100  83\n\t\t\t// Evergreen Windows:                 83  83 100  83\n\t\t\t// Evergreen Mac with scrollbars:     85  85 100  85\n\t\t\t// Evergreen Mac without scrollbars: 100 100 100 100\n\t\n\t\t\t// Get scrollbar width\n\t\t\tbrowser.barWidth = outer[0].offsetWidth - outer[0].clientWidth;\n\t\n\t\t\t// IE6/7 will oversize a width 100% element inside a scrolling element, to\n\t\t\t// include the width of the scrollbar, while other browsers ensure the inner\n\t\t\t// element is contained without forcing scrolling\n\t\t\tbrowser.bScrollOversize = inner[0].offsetWidth === 100 && outer[0].clientWidth !== 100;\n\t\n\t\t\t// In rtl text layout, some browsers (most, but not all) will place the\n\t\t\t// scrollbar on the left, rather than the right.\n\t\t\tbrowser.bScrollbarLeft = Math.round( inner.offset().left ) !== 
1;\n\t\n\t\t\t// IE8- don't provide height and width for getBoundingClientRect\n\t\t\tbrowser.bBounding = n[0].getBoundingClientRect().width ? true : false;\n\t\n\t\t\tn.remove();\n\t\t}\n\t\n\t\t$.extend( settings.oBrowser, DataTable.__browser );\n\t\tsettings.oScroll.iBarWidth = DataTable.__browser.barWidth;\n\t}\n\t\n\t\n\t/**\n\t * Array.prototype reduce[Right] method, used for browsers which don't support\n\t * JS 1.6. Done this way to reduce code size, since we iterate either way\n\t *  @param {object} settings dataTables settings object\n\t *  @memberof DataTable#oApi\n\t */\n\tfunction _fnReduce ( that, fn, init, start, end, inc )\n\t{\n\t\tvar\n\t\t\ti = start,\n\t\t\tvalue,\n\t\t\tisSet = false;\n\t\n\t\tif ( init !== undefined ) {\n\t\t\tvalue = init;\n\t\t\tisSet = true;\n\t\t}\n\t\n\t\twhile ( i !== end ) {\n\t\t\tif ( ! that.hasOwnProperty(i) ) {\n\t\t\t\tcontinue;\n\t\t\t}\n\t\n\t\t\tvalue = isSet ?\n\t\t\t\tfn( value, that[i], i, that ) :\n\t\t\t\tthat[i];\n\t\n\t\t\tisSet = true;\n\t\t\ti += inc;\n\t\t}\n\t\n\t\treturn value;\n\t}\n\t\n\t/**\n\t * Add a column to the list used for the table with default values\n\t *  @param {object} oSettings dataTables settings object\n\t *  @param {node} nTh The th element for this column\n\t *  @memberof DataTable#oApi\n\t */\n\tfunction _fnAddColumn( oSettings, nTh )\n\t{\n\t\t// Add column to aoColumns array\n\t\tvar oDefaults = DataTable.defaults.column;\n\t\tvar iCol = oSettings.aoColumns.length;\n\t\tvar oCol = $.extend( {}, DataTable.models.oColumn, oDefaults, {\n\t\t\t\"nTh\": nTh ? nTh : document.createElement('th'),\n\t\t\t\"sTitle\":    oDefaults.sTitle    ? oDefaults.sTitle    : nTh ? nTh.innerHTML : '',\n\t\t\t\"aDataSort\": oDefaults.aDataSort ? oDefaults.aDataSort : [iCol],\n\t\t\t\"mData\": oDefaults.mData ? oDefaults.mData : iCol,\n\t\t\tidx: iCol\n\t\t} );\n\t\toSettings.aoColumns.push( oCol );\n\t\n\t\t// Add search object for column specific search. 
Note that the `searchCols[ iCol ]`\n\t\t// passed into extend can be undefined. This allows the user to give a default\n\t\t// with only some of the parameters defined, and also not give a default\n\t\tvar searchCols = oSettings.aoPreSearchCols;\n\t\tsearchCols[ iCol ] = $.extend( {}, DataTable.models.oSearch, searchCols[ iCol ] );\n\t\n\t\t// Use the default column options function to initialise classes etc\n\t\t_fnColumnOptions( oSettings, iCol, $(nTh).data() );\n\t}\n\t\n\t\n\t/**\n\t * Apply options for a column\n\t *  @param {object} oSettings dataTables settings object\n\t *  @param {int} iCol column index to consider\n\t *  @param {object} oOptions object with sType, bVisible and bSearchable etc\n\t *  @memberof DataTable#oApi\n\t */\n\tfunction _fnColumnOptions( oSettings, iCol, oOptions )\n\t{\n\t\tvar oCol = oSettings.aoColumns[ iCol ];\n\t\tvar oClasses = oSettings.oClasses;\n\t\tvar th = $(oCol.nTh);\n\t\n\t\t// Try to get width information from the DOM. We can't get it from CSS\n\t\t// as we'd need to parse the CSS stylesheet. `width` option can override\n\t\tif ( ! 
oCol.sWidthOrig ) {\n\t\t\t// Width attribute\n\t\t\toCol.sWidthOrig = th.attr('width') || null;\n\t\n\t\t\t// Style attribute\n\t\t\tvar t = (th.attr('style') || '').match(/width:\\s*(\\d+[pxem%]+)/);\n\t\t\tif ( t ) {\n\t\t\t\toCol.sWidthOrig = t[1];\n\t\t\t}\n\t\t}\n\t\n\t\t/* User specified column options */\n\t\tif ( oOptions !== undefined && oOptions !== null )\n\t\t{\n\t\t\t// Backwards compatibility\n\t\t\t_fnCompatCols( oOptions );\n\t\n\t\t\t// Map camel case parameters to their Hungarian counterparts\n\t\t\t_fnCamelToHungarian( DataTable.defaults.column, oOptions );\n\t\n\t\t\t/* Backwards compatibility for mDataProp */\n\t\t\tif ( oOptions.mDataProp !== undefined && !oOptions.mData )\n\t\t\t{\n\t\t\t\toOptions.mData = oOptions.mDataProp;\n\t\t\t}\n\t\n\t\t\tif ( oOptions.sType )\n\t\t\t{\n\t\t\t\toCol._sManualType = oOptions.sType;\n\t\t\t}\n\t\n\t\t\t// `class` is a reserved word in Javascript, so we need to provide\n\t\t\t// the ability to use a valid name for the camel case input\n\t\t\tif ( oOptions.className && ! oOptions.sClass )\n\t\t\t{\n\t\t\t\toOptions.sClass = oOptions.className;\n\t\t\t}\n\t\n\t\t\t$.extend( oCol, oOptions );\n\t\t\t_fnMap( oCol, oOptions, \"sWidth\", \"sWidthOrig\" );\n\t\n\t\t\t/* iDataSort to be applied (backwards compatibility), but aDataSort will take\n\t\t\t * priority if defined\n\t\t\t */\n\t\t\tif ( oOptions.iDataSort !== undefined )\n\t\t\t{\n\t\t\t\toCol.aDataSort = [ oOptions.iDataSort ];\n\t\t\t}\n\t\t\t_fnMap( oCol, oOptions, \"aDataSort\" );\n\t\t}\n\t\n\t\t/* Cache the data get and set functions for speed */\n\t\tvar mDataSrc = oCol.mData;\n\t\tvar mData = _fnGetObjectDataFn( mDataSrc );\n\t\tvar mRender = oCol.mRender ? 
_fnGetObjectDataFn( oCol.mRender ) : null;\n\t\n\t\tvar attrTest = function( src ) {\n\t\t\treturn typeof src === 'string' && src.indexOf('@') !== -1;\n\t\t};\n\t\toCol._bAttrSrc = $.isPlainObject( mDataSrc ) && (\n\t\t\tattrTest(mDataSrc.sort) || attrTest(mDataSrc.type) || attrTest(mDataSrc.filter)\n\t\t);\n\t\toCol._setter = null;\n\t\n\t\toCol.fnGetData = function (rowData, type, meta) {\n\t\t\tvar innerData = mData( rowData, type, undefined, meta );\n\t\n\t\t\treturn mRender && type ?\n\t\t\t\tmRender( innerData, type, rowData, meta ) :\n\t\t\t\tinnerData;\n\t\t};\n\t\toCol.fnSetData = function ( rowData, val, meta ) {\n\t\t\treturn _fnSetObjectDataFn( mDataSrc )( rowData, val, meta );\n\t\t};\n\t\n\t\t// Indicate if DataTables should read DOM data as an object or array\n\t\t// Used in _fnGetRowElements\n\t\tif ( typeof mDataSrc !== 'number' ) {\n\t\t\toSettings._rowReadObject = true;\n\t\t}\n\t\n\t\t/* Feature sorting overrides column specific when off */\n\t\tif ( !oSettings.oFeatures.bSort )\n\t\t{\n\t\t\toCol.bSortable = false;\n\t\t\tth.addClass( oClasses.sSortableNone ); // Have to add class here as order event isn't called\n\t\t}\n\t\n\t\t/* Check that the class assignment is correct for sorting */\n\t\tvar bAsc = $.inArray('asc', oCol.asSorting) !== -1;\n\t\tvar bDesc = $.inArray('desc', oCol.asSorting) !== -1;\n\t\tif ( !oCol.bSortable || (!bAsc && !bDesc) )\n\t\t{\n\t\t\toCol.sSortingClass = oClasses.sSortableNone;\n\t\t\toCol.sSortingClassJUI = \"\";\n\t\t}\n\t\telse if ( bAsc && !bDesc )\n\t\t{\n\t\t\toCol.sSortingClass = oClasses.sSortableAsc;\n\t\t\toCol.sSortingClassJUI = oClasses.sSortJUIAscAllowed;\n\t\t}\n\t\telse if ( !bAsc && bDesc )\n\t\t{\n\t\t\toCol.sSortingClass = oClasses.sSortableDesc;\n\t\t\toCol.sSortingClassJUI = oClasses.sSortJUIDescAllowed;\n\t\t}\n\t\telse\n\t\t{\n\t\t\toCol.sSortingClass = oClasses.sSortable;\n\t\t\toCol.sSortingClassJUI = oClasses.sSortJUI;\n\t\t}\n\t}\n\t\n\t\n\t/**\n\t * Adjust the table column widths for new 
data. Note: you would probably want to\n\t * do a redraw after calling this function!\n\t *  @param {object} settings dataTables settings object\n\t *  @memberof DataTable#oApi\n\t */\n\tfunction _fnAdjustColumnSizing ( settings )\n\t{\n\t\t/* Not interested in doing column width calculation if auto-width is disabled */\n\t\tif ( settings.oFeatures.bAutoWidth !== false )\n\t\t{\n\t\t\tvar columns = settings.aoColumns;\n\t\n\t\t\t_fnCalculateColumnWidths( settings );\n\t\t\tfor ( var i=0 , iLen=columns.length ; i<iLen ; i++ )\n\t\t\t{\n\t\t\t\tcolumns[i].nTh.style.width = columns[i].sWidth;\n\t\t\t}\n\t\t}\n\t\n\t\tvar scroll = settings.oScroll;\n\t\tif ( scroll.sY !== '' || scroll.sX !== '')\n\t\t{\n\t\t\t_fnScrollDraw( settings );\n\t\t}\n\t\n\t\t_fnCallbackFire( settings, null, 'column-sizing', [settings] );\n\t}\n\t\n\t\n\t/**\n\t * Covert the index of a visible column to the index in the data array (take account\n\t * of hidden columns)\n\t *  @param {object} oSettings dataTables settings object\n\t *  @param {int} iMatch Visible column index to lookup\n\t *  @returns {int} i the data index\n\t *  @memberof DataTable#oApi\n\t */\n\tfunction _fnVisibleToColumnIndex( oSettings, iMatch )\n\t{\n\t\tvar aiVis = _fnGetColumns( oSettings, 'bVisible' );\n\t\n\t\treturn typeof aiVis[iMatch] === 'number' ?\n\t\t\taiVis[iMatch] :\n\t\t\tnull;\n\t}\n\t\n\t\n\t/**\n\t * Covert the index of an index in the data array and convert it to the visible\n\t *   column index (take account of hidden columns)\n\t *  @param {int} iMatch Column index to lookup\n\t *  @param {object} oSettings dataTables settings object\n\t *  @returns {int} i the data index\n\t *  @memberof DataTable#oApi\n\t */\n\tfunction _fnColumnIndexToVisible( oSettings, iMatch )\n\t{\n\t\tvar aiVis = _fnGetColumns( oSettings, 'bVisible' );\n\t\tvar iPos = $.inArray( iMatch, aiVis );\n\t\n\t\treturn iPos !== -1 ? 
iPos : null;\n\t}\n\t\n\t\n\t/**\n\t * Get the number of visible columns\n\t *  @param {object} oSettings dataTables settings object\n\t *  @returns {int} i the number of visible columns\n\t *  @memberof DataTable#oApi\n\t */\n\tfunction _fnVisbleColumns( oSettings )\n\t{\n\t\tvar vis = 0;\n\t\n\t\t// No reduce in IE8, use a loop for now\n\t\t$.each( oSettings.aoColumns, function ( i, col ) {\n\t\t\tif ( col.bVisible && $(col.nTh).css('display') !== 'none' ) {\n\t\t\t\tvis++;\n\t\t\t}\n\t\t} );\n\t\n\t\treturn vis;\n\t}\n\t\n\t\n\t/**\n\t * Get an array of column indexes that match a given property\n\t *  @param {object} oSettings dataTables settings object\n\t *  @param {string} sParam Parameter in aoColumns to look for - typically\n\t *    bVisible or bSearchable\n\t *  @returns {array} Array of indexes with matched properties\n\t *  @memberof DataTable#oApi\n\t */\n\tfunction _fnGetColumns( oSettings, sParam )\n\t{\n\t\tvar a = [];\n\t\n\t\t$.map( oSettings.aoColumns, function(val, i) {\n\t\t\tif ( val[sParam] ) {\n\t\t\t\ta.push( i );\n\t\t\t}\n\t\t} );\n\t\n\t\treturn a;\n\t}\n\t\n\t\n\t/**\n\t * Calculate the 'type' of a column\n\t *  @param {object} settings dataTables settings object\n\t *  @memberof DataTable#oApi\n\t */\n\tfunction _fnColumnTypes ( settings )\n\t{\n\t\tvar columns = settings.aoColumns;\n\t\tvar data = settings.aoData;\n\t\tvar types = DataTable.ext.type.detect;\n\t\tvar i, ien, j, jen, k, ken;\n\t\tvar col, cell, detectedType, cache;\n\t\n\t\t// For each column, spin over the \n\t\tfor ( i=0, ien=columns.length ; i<ien ; i++ ) {\n\t\t\tcol = columns[i];\n\t\t\tcache = [];\n\t\n\t\t\tif ( ! col.sType && col._sManualType ) {\n\t\t\t\tcol.sType = col._sManualType;\n\t\t\t}\n\t\t\telse if ( ! 
col.sType ) {\n\t\t\t\tfor ( j=0, jen=types.length ; j<jen ; j++ ) {\n\t\t\t\t\tfor ( k=0, ken=data.length ; k<ken ; k++ ) {\n\t\t\t\t\t\t// Use a cache array so we only need to get the type data\n\t\t\t\t\t\t// from the formatter once (when using multiple detectors)\n\t\t\t\t\t\tif ( cache[k] === undefined ) {\n\t\t\t\t\t\t\tcache[k] = _fnGetCellData( settings, k, i, 'type' );\n\t\t\t\t\t\t}\n\t\n\t\t\t\t\t\tdetectedType = types[j]( cache[k], settings );\n\t\n\t\t\t\t\t\t// If null, then this type can't apply to this column, so\n\t\t\t\t\t\t// rather than testing all cells, break out. There is an\n\t\t\t\t\t\t// exception for the last type which is `html`. We need to\n\t\t\t\t\t\t// scan all rows since it is possible to mix string and HTML\n\t\t\t\t\t\t// types\n\t\t\t\t\t\tif ( ! detectedType && j !== types.length-1 ) {\n\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t}\n\t\n\t\t\t\t\t\t// Only a single match is needed for html type since it is\n\t\t\t\t\t\t// bottom of the pile and very similar to string\n\t\t\t\t\t\tif ( detectedType === 'html' ) {\n\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\n\t\t\t\t\t// Type is valid for all data points in the column - use this\n\t\t\t\t\t// type\n\t\t\t\t\tif ( detectedType ) {\n\t\t\t\t\t\tcol.sType = detectedType;\n\t\t\t\t\t\tbreak;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\n\t\t\t\t// Fall back - if no type was detected, always use string\n\t\t\t\tif ( ! col.sType ) {\n\t\t\t\t\tcol.sType = 'string';\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\t\n\t\n\t/**\n\t * Take the column definitions and static columns arrays and calculate how\n\t * they relate to column indexes. 
The callback function will then apply the\n\t * definition found for a column to a suitable configuration object.\n\t *  @param {object} oSettings dataTables settings object\n\t *  @param {array} aoColDefs The aoColumnDefs array that is to be applied\n\t *  @param {array} aoCols The aoColumns array that defines columns individually\n\t *  @param {function} fn Callback function - takes two parameters, the calculated\n\t *    column index and the definition for that column.\n\t *  @memberof DataTable#oApi\n\t */\n\tfunction _fnApplyColumnDefs( oSettings, aoColDefs, aoCols, fn )\n\t{\n\t\tvar i, iLen, j, jLen, k, kLen, def;\n\t\tvar columns = oSettings.aoColumns;\n\t\n\t\t// Column definitions with aTargets\n\t\tif ( aoColDefs )\n\t\t{\n\t\t\t/* Loop over the definitions array - loop in reverse so first instance has priority */\n\t\t\tfor ( i=aoColDefs.length-1 ; i>=0 ; i-- )\n\t\t\t{\n\t\t\t\tdef = aoColDefs[i];\n\t\n\t\t\t\t/* Each definition can target multiple columns, as it is an array */\n\t\t\t\tvar aTargets = def.targets !== undefined ?\n\t\t\t\t\tdef.targets :\n\t\t\t\t\tdef.aTargets;\n\t\n\t\t\t\tif ( ! 
$.isArray( aTargets ) )\n\t\t\t\t{\n\t\t\t\t\taTargets = [ aTargets ];\n\t\t\t\t}\n\t\n\t\t\t\tfor ( j=0, jLen=aTargets.length ; j<jLen ; j++ )\n\t\t\t\t{\n\t\t\t\t\tif ( typeof aTargets[j] === 'number' && aTargets[j] >= 0 )\n\t\t\t\t\t{\n\t\t\t\t\t\t/* Add columns that we don't yet know about */\n\t\t\t\t\t\twhile( columns.length <= aTargets[j] )\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\t_fnAddColumn( oSettings );\n\t\t\t\t\t\t}\n\t\n\t\t\t\t\t\t/* Integer, basic index */\n\t\t\t\t\t\tfn( aTargets[j], def );\n\t\t\t\t\t}\n\t\t\t\t\telse if ( typeof aTargets[j] === 'number' && aTargets[j] < 0 )\n\t\t\t\t\t{\n\t\t\t\t\t\t/* Negative integer, right to left column counting */\n\t\t\t\t\t\tfn( columns.length+aTargets[j], def );\n\t\t\t\t\t}\n\t\t\t\t\telse if ( typeof aTargets[j] === 'string' )\n\t\t\t\t\t{\n\t\t\t\t\t\t/* Class name matching on TH element */\n\t\t\t\t\t\tfor ( k=0, kLen=columns.length ; k<kLen ; k++ )\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tif ( aTargets[j] == \"_all\" ||\n\t\t\t\t\t\t\t     $(columns[k].nTh).hasClass( aTargets[j] ) )\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tfn( k, def );\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\n\t\t// Statically defined columns array\n\t\tif ( aoCols )\n\t\t{\n\t\t\tfor ( i=0, iLen=aoCols.length ; i<iLen ; i++ )\n\t\t\t{\n\t\t\t\tfn( i, aoCols[i] );\n\t\t\t}\n\t\t}\n\t}\n\t\n\t/**\n\t * Add a data array to the table, creating DOM node etc. This is the parallel to\n\t * _fnGatherData, but for adding rows from a Javascript source, rather than a\n\t * DOM source.\n\t *  @param {object} oSettings dataTables settings object\n\t *  @param {array} aData data array to be added\n\t *  @param {node} [nTr] TR element to add to the table - optional. 
If not given,\n\t *    DataTables will create a row automatically\n\t *  @param {array} [anTds] Array of TD|TH elements for the row - must be given\n\t *    if nTr is.\n\t *  @returns {int} >=0 if successful (index of new aoData entry), -1 if failed\n\t *  @memberof DataTable#oApi\n\t */\n\tfunction _fnAddData ( oSettings, aDataIn, nTr, anTds )\n\t{\n\t\t/* Create the object for storing information about this new row */\n\t\tvar iRow = oSettings.aoData.length;\n\t\tvar oData = $.extend( true, {}, DataTable.models.oRow, {\n\t\t\tsrc: nTr ? 'dom' : 'data',\n\t\t\tidx: iRow\n\t\t} );\n\t\n\t\toData._aData = aDataIn;\n\t\toSettings.aoData.push( oData );\n\t\n\t\t/* Create the cells */\n\t\tvar nTd, sThisType;\n\t\tvar columns = oSettings.aoColumns;\n\t\n\t\t// Invalidate the column types as the new data needs to be revalidated\n\t\tfor ( var i=0, iLen=columns.length ; i<iLen ; i++ )\n\t\t{\n\t\t\tcolumns[i].sType = null;\n\t\t}\n\t\n\t\t/* Add to the display array */\n\t\toSettings.aiDisplayMaster.push( iRow );\n\t\n\t\tvar id = oSettings.rowIdFn( aDataIn );\n\t\tif ( id !== undefined ) {\n\t\t\toSettings.aIds[ id ] = oData;\n\t\t}\n\t\n\t\t/* Create the DOM information, or register it if already present */\n\t\tif ( nTr || ! oSettings.oFeatures.bDeferRender )\n\t\t{\n\t\t\t_fnCreateTr( oSettings, iRow, nTr, anTds );\n\t\t}\n\t\n\t\treturn iRow;\n\t}\n\t\n\t\n\t/**\n\t * Add one or more TR elements to the table. Generally we'd expect to\n\t * use this for reading data from a DOM sourced table, but it could be\n\t * used for an TR element. Note that if a TR is given, it is used (i.e.\n\t * it is not cloned).\n\t *  @param {object} settings dataTables settings object\n\t *  @param {array|node|jQuery} trs The TR element(s) to add to the table\n\t *  @returns {array} Array of indexes for the added rows\n\t *  @memberof DataTable#oApi\n\t */\n\tfunction _fnAddTr( settings, trs )\n\t{\n\t\tvar row;\n\t\n\t\t// Allow an individual node to be passed in\n\t\tif ( ! 
(trs instanceof $) ) {\n\t\t\ttrs = $(trs);\n\t\t}\n\t\n\t\treturn trs.map( function (i, el) {\n\t\t\trow = _fnGetRowElements( settings, el );\n\t\t\treturn _fnAddData( settings, row.data, el, row.cells );\n\t\t} );\n\t}\n\t\n\t\n\t/**\n\t * Take a TR element and convert it to an index in aoData\n\t *  @param {object} oSettings dataTables settings object\n\t *  @param {node} n the TR element to find\n\t *  @returns {int} index if the node is found, null if not\n\t *  @memberof DataTable#oApi\n\t */\n\tfunction _fnNodeToDataIndex( oSettings, n )\n\t{\n\t\treturn (n._DT_RowIndex!==undefined) ? n._DT_RowIndex : null;\n\t}\n\t\n\t\n\t/**\n\t * Take a TD element and convert it into a column data index (not the visible index)\n\t *  @param {object} oSettings dataTables settings object\n\t *  @param {int} iRow The row number the TD/TH can be found in\n\t *  @param {node} n The TD/TH element to find\n\t *  @returns {int} index if the node is found, -1 if not\n\t *  @memberof DataTable#oApi\n\t */\n\tfunction _fnNodeToColumnIndex( oSettings, iRow, n )\n\t{\n\t\treturn $.inArray( n, oSettings.aoData[ iRow ].anCells );\n\t}\n\t\n\t\n\t/**\n\t * Get the data for a given cell from the internal cache, taking into account data mapping\n\t *  @param {object} settings dataTables settings object\n\t *  @param {int} rowIdx aoData row id\n\t *  @param {int} colIdx Column index\n\t *  @param {string} type data get type ('display', 'type' 'filter' 'sort')\n\t *  @returns {*} Cell data\n\t *  @memberof DataTable#oApi\n\t */\n\tfunction _fnGetCellData( settings, rowIdx, colIdx, type )\n\t{\n\t\tvar draw           = settings.iDraw;\n\t\tvar col            = settings.aoColumns[colIdx];\n\t\tvar rowData        = settings.aoData[rowIdx]._aData;\n\t\tvar defaultContent = col.sDefaultContent;\n\t\tvar cellData       = col.fnGetData( rowData, type, {\n\t\t\tsettings: settings,\n\t\t\trow:      rowIdx,\n\t\t\tcol:      colIdx\n\t\t} );\n\t\n\t\tif ( cellData === undefined ) {\n\t\t\tif ( 
settings.iDrawError != draw && defaultContent === null ) {\n\t\t\t\t_fnLog( settings, 0, \"Requested unknown parameter \"+\n\t\t\t\t\t(typeof col.mData=='function' ? '{function}' : \"'\"+col.mData+\"'\")+\n\t\t\t\t\t\" for row \"+rowIdx+\", column \"+colIdx, 4 );\n\t\t\t\tsettings.iDrawError = draw;\n\t\t\t}\n\t\t\treturn defaultContent;\n\t\t}\n\t\n\t\t// When the data source is null and a specific data type is requested (i.e.\n\t\t// not the original data), we can use default column data\n\t\tif ( (cellData === rowData || cellData === null) && defaultContent !== null && type !== undefined ) {\n\t\t\tcellData = defaultContent;\n\t\t}\n\t\telse if ( typeof cellData === 'function' ) {\n\t\t\t// If the data source is a function, then we run it and use the return,\n\t\t\t// executing in the scope of the data object (for instances)\n\t\t\treturn cellData.call( rowData );\n\t\t}\n\t\n\t\tif ( cellData === null && type == 'display' ) {\n\t\t\treturn '';\n\t\t}\n\t\treturn cellData;\n\t}\n\t\n\t\n\t/**\n\t * Set the value for a specific cell, into the internal data cache\n\t *  @param {object} settings dataTables settings object\n\t *  @param {int} rowIdx aoData row id\n\t *  @param {int} colIdx Column index\n\t *  @param {*} val Value to set\n\t *  @memberof DataTable#oApi\n\t */\n\tfunction _fnSetCellData( settings, rowIdx, colIdx, val )\n\t{\n\t\tvar col     = settings.aoColumns[colIdx];\n\t\tvar rowData = settings.aoData[rowIdx]._aData;\n\t\n\t\tcol.fnSetData( rowData, val, {\n\t\t\tsettings: settings,\n\t\t\trow:      rowIdx,\n\t\t\tcol:      colIdx\n\t\t}  );\n\t}\n\t\n\t\n\t// Private variable that is used to match action syntax in the data property object\n\tvar __reArray = /\\[.*?\\]$/;\n\tvar __reFn = /\\(\\)$/;\n\t\n\t/**\n\t * Split string on periods, taking into account escaped periods\n\t * @param  {string} str String to split\n\t * @return {array} Split string\n\t */\n\tfunction _fnSplitObjNotation( str )\n\t{\n\t\treturn $.map( 
str.match(/(\\\\.|[^\\.])+/g) || [''], function ( s ) {\n\t\t\treturn s.replace(/\\\\./g, '.');\n\t\t} );\n\t}\n\t\n\t\n\t/**\n\t * Return a function that can be used to get data from a source object, taking\n\t * into account the ability to use nested objects as a source\n\t *  @param {string|int|function} mSource The data source for the object\n\t *  @returns {function} Data get function\n\t *  @memberof DataTable#oApi\n\t */\n\tfunction _fnGetObjectDataFn( mSource )\n\t{\n\t\tif ( $.isPlainObject( mSource ) )\n\t\t{\n\t\t\t/* Build an object of get functions, and wrap them in a single call */\n\t\t\tvar o = {};\n\t\t\t$.each( mSource, function (key, val) {\n\t\t\t\tif ( val ) {\n\t\t\t\t\to[key] = _fnGetObjectDataFn( val );\n\t\t\t\t}\n\t\t\t} );\n\t\n\t\t\treturn function (data, type, row, meta) {\n\t\t\t\tvar t = o[type] || o._;\n\t\t\t\treturn t !== undefined ?\n\t\t\t\t\tt(data, type, row, meta) :\n\t\t\t\t\tdata;\n\t\t\t};\n\t\t}\n\t\telse if ( mSource === null )\n\t\t{\n\t\t\t/* Give an empty string for rendering / sorting etc */\n\t\t\treturn function (data) { // type, row and meta also passed, but not used\n\t\t\t\treturn data;\n\t\t\t};\n\t\t}\n\t\telse if ( typeof mSource === 'function' )\n\t\t{\n\t\t\treturn function (data, type, row, meta) {\n\t\t\t\treturn mSource( data, type, row, meta );\n\t\t\t};\n\t\t}\n\t\telse if ( typeof mSource === 'string' && (mSource.indexOf('.') !== -1 ||\n\t\t\t      mSource.indexOf('[') !== -1 || mSource.indexOf('(') !== -1) )\n\t\t{\n\t\t\t/* If there is a . in the source string then the data source is in a\n\t\t\t * nested object so we loop over the data for each level to get the next\n\t\t\t * level down. On each loop we test for undefined, and if found immediately\n\t\t\t * return. 
This allows entire objects to be missing and sDefaultContent to\n\t\t\t * be used if defined, rather than throwing an error\n\t\t\t */\n\t\t\tvar fetchData = function (data, type, src) {\n\t\t\t\tvar arrayNotation, funcNotation, out, innerSrc;\n\t\n\t\t\t\tif ( src !== \"\" )\n\t\t\t\t{\n\t\t\t\t\tvar a = _fnSplitObjNotation( src );\n\t\n\t\t\t\t\tfor ( var i=0, iLen=a.length ; i<iLen ; i++ )\n\t\t\t\t\t{\n\t\t\t\t\t\t// Check if we are dealing with special notation\n\t\t\t\t\t\tarrayNotation = a[i].match(__reArray);\n\t\t\t\t\t\tfuncNotation = a[i].match(__reFn);\n\t\n\t\t\t\t\t\tif ( arrayNotation )\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\t// Array notation\n\t\t\t\t\t\t\ta[i] = a[i].replace(__reArray, '');\n\t\n\t\t\t\t\t\t\t// Condition allows simply [] to be passed in\n\t\t\t\t\t\t\tif ( a[i] !== \"\" ) {\n\t\t\t\t\t\t\t\tdata = data[ a[i] ];\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tout = [];\n\t\n\t\t\t\t\t\t\t// Get the remainder of the nested object to get\n\t\t\t\t\t\t\ta.splice( 0, i+1 );\n\t\t\t\t\t\t\tinnerSrc = a.join('.');\n\t\n\t\t\t\t\t\t\t// Traverse each entry in the array getting the properties requested\n\t\t\t\t\t\t\tif ( $.isArray( data ) ) {\n\t\t\t\t\t\t\t\tfor ( var j=0, jLen=data.length ; j<jLen ; j++ ) {\n\t\t\t\t\t\t\t\t\tout.push( fetchData( data[j], type, innerSrc ) );\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\n\t\t\t\t\t\t\t// If a string is given in between the array notation indicators, that\n\t\t\t\t\t\t\t// is used to join the strings together, otherwise an array is returned\n\t\t\t\t\t\t\tvar join = arrayNotation[0].substring(1, arrayNotation[0].length-1);\n\t\t\t\t\t\t\tdata = (join===\"\") ? 
out : out.join(join);\n\t\n\t\t\t\t\t\t\t// The inner call to fetchData has already traversed through the remainder\n\t\t\t\t\t\t\t// of the source requested, so we exit from the loop\n\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t}\n\t\t\t\t\t\telse if ( funcNotation )\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\t// Function call\n\t\t\t\t\t\t\ta[i] = a[i].replace(__reFn, '');\n\t\t\t\t\t\t\tdata = data[ a[i] ]();\n\t\t\t\t\t\t\tcontinue;\n\t\t\t\t\t\t}\n\t\n\t\t\t\t\t\tif ( data === null || data[ a[i] ] === undefined )\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\treturn undefined;\n\t\t\t\t\t\t}\n\t\t\t\t\t\tdata = data[ a[i] ];\n\t\t\t\t\t}\n\t\t\t\t}\n\t\n\t\t\t\treturn data;\n\t\t\t};\n\t\n\t\t\treturn function (data, type) { // row and meta also passed, but not used\n\t\t\t\treturn fetchData( data, type, mSource );\n\t\t\t};\n\t\t}\n\t\telse\n\t\t{\n\t\t\t/* Array or flat object mapping */\n\t\t\treturn function (data, type) { // row and meta also passed, but not used\n\t\t\t\treturn data[mSource];\n\t\t\t};\n\t\t}\n\t}\n\t\n\t\n\t/**\n\t * Return a function that can be used to set data from a source object, taking\n\t * into account the ability to use nested objects as a source\n\t *  @param {string|int|function} mSource The data source for the object\n\t *  @returns {function} Data set function\n\t *  @memberof DataTable#oApi\n\t */\n\tfunction _fnSetObjectDataFn( mSource )\n\t{\n\t\tif ( $.isPlainObject( mSource ) )\n\t\t{\n\t\t\t/* Unlike get, only the underscore (global) option is used for for\n\t\t\t * setting data since we don't know the type here. 
This is why an object\n\t\t\t * option is not documented for `mData` (which is read/write), but it is\n\t\t\t * for `mRender` which is read only.\n\t\t\t */\n\t\t\treturn _fnSetObjectDataFn( mSource._ );\n\t\t}\n\t\telse if ( mSource === null )\n\t\t{\n\t\t\t/* Nothing to do when the data source is null */\n\t\t\treturn function () {};\n\t\t}\n\t\telse if ( typeof mSource === 'function' )\n\t\t{\n\t\t\treturn function (data, val, meta) {\n\t\t\t\tmSource( data, 'set', val, meta );\n\t\t\t};\n\t\t}\n\t\telse if ( typeof mSource === 'string' && (mSource.indexOf('.') !== -1 ||\n\t\t\t      mSource.indexOf('[') !== -1 || mSource.indexOf('(') !== -1) )\n\t\t{\n\t\t\t/* Like the get, we need to get data from a nested object */\n\t\t\tvar setData = function (data, val, src) {\n\t\t\t\tvar a = _fnSplitObjNotation( src ), b;\n\t\t\t\tvar aLast = a[a.length-1];\n\t\t\t\tvar arrayNotation, funcNotation, o, innerSrc;\n\t\n\t\t\t\tfor ( var i=0, iLen=a.length-1 ; i<iLen ; i++ )\n\t\t\t\t{\n\t\t\t\t\t// Check if we are dealing with an array notation request\n\t\t\t\t\tarrayNotation = a[i].match(__reArray);\n\t\t\t\t\tfuncNotation = a[i].match(__reFn);\n\t\n\t\t\t\t\tif ( arrayNotation )\n\t\t\t\t\t{\n\t\t\t\t\t\ta[i] = a[i].replace(__reArray, '');\n\t\t\t\t\t\tdata[ a[i] ] = [];\n\t\n\t\t\t\t\t\t// Get the remainder of the nested object to set so we can recurse\n\t\t\t\t\t\tb = a.slice();\n\t\t\t\t\t\tb.splice( 0, i+1 );\n\t\t\t\t\t\tinnerSrc = b.join('.');\n\t\n\t\t\t\t\t\t// Traverse each entry in the array setting the properties requested\n\t\t\t\t\t\tif ( $.isArray( val ) )\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tfor ( var j=0, jLen=val.length ; j<jLen ; j++ )\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\to = {};\n\t\t\t\t\t\t\t\tsetData( o, val[j], innerSrc );\n\t\t\t\t\t\t\t\tdata[ a[i] ].push( o );\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\telse\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\t// We've been asked to save data to an array, but it\n\t\t\t\t\t\t\t// isn't array data to be saved. 
Best that can be done\n\t\t\t\t\t\t\t// is to just save the value.\n\t\t\t\t\t\t\tdata[ a[i] ] = val;\n\t\t\t\t\t\t}\n\t\n\t\t\t\t\t\t// The inner call to setData has already traversed through the remainder\n\t\t\t\t\t\t// of the source and has set the data, thus we can exit here\n\t\t\t\t\t\treturn;\n\t\t\t\t\t}\n\t\t\t\t\telse if ( funcNotation )\n\t\t\t\t\t{\n\t\t\t\t\t\t// Function call\n\t\t\t\t\t\ta[i] = a[i].replace(__reFn, '');\n\t\t\t\t\t\tdata = data[ a[i] ]( val );\n\t\t\t\t\t}\n\t\n\t\t\t\t\t// If the nested object doesn't currently exist - since we are\n\t\t\t\t\t// trying to set the value - create it\n\t\t\t\t\tif ( data[ a[i] ] === null || data[ a[i] ] === undefined )\n\t\t\t\t\t{\n\t\t\t\t\t\tdata[ a[i] ] = {};\n\t\t\t\t\t}\n\t\t\t\t\tdata = data[ a[i] ];\n\t\t\t\t}\n\t\n\t\t\t\t// Last item in the input - i.e, the actual set\n\t\t\t\tif ( aLast.match(__reFn ) )\n\t\t\t\t{\n\t\t\t\t\t// Function call\n\t\t\t\t\tdata = data[ aLast.replace(__reFn, '') ]( val );\n\t\t\t\t}\n\t\t\t\telse\n\t\t\t\t{\n\t\t\t\t\t// If array notation is used, we just want to strip it and use the property name\n\t\t\t\t\t// and assign the value. 
If it isn't used, then we get the result we want anyway\n\t\t\t\t\tdata[ aLast.replace(__reArray, '') ] = val;\n\t\t\t\t}\n\t\t\t};\n\t\n\t\t\treturn function (data, val) { // meta is also passed in, but not used\n\t\t\t\treturn setData( data, val, mSource );\n\t\t\t};\n\t\t}\n\t\telse\n\t\t{\n\t\t\t/* Array or flat object mapping */\n\t\t\treturn function (data, val) { // meta is also passed in, but not used\n\t\t\t\tdata[mSource] = val;\n\t\t\t};\n\t\t}\n\t}\n\t\n\t\n\t/**\n\t * Return an array with the full table data\n\t *  @param {object} oSettings dataTables settings object\n\t *  @returns array {array} aData Master data array\n\t *  @memberof DataTable#oApi\n\t */\n\tfunction _fnGetDataMaster ( settings )\n\t{\n\t\treturn _pluck( settings.aoData, '_aData' );\n\t}\n\t\n\t\n\t/**\n\t * Nuke the table\n\t *  @param {object} oSettings dataTables settings object\n\t *  @memberof DataTable#oApi\n\t */\n\tfunction _fnClearTable( settings )\n\t{\n\t\tsettings.aoData.length = 0;\n\t\tsettings.aiDisplayMaster.length = 0;\n\t\tsettings.aiDisplay.length = 0;\n\t\tsettings.aIds = {};\n\t}\n\t\n\t\n\t /**\n\t * Take an array of integers (index array) and remove a target integer (value - not\n\t * the key!)\n\t *  @param {array} a Index array to target\n\t *  @param {int} iTarget value to find\n\t *  @memberof DataTable#oApi\n\t */\n\tfunction _fnDeleteIndex( a, iTarget, splice )\n\t{\n\t\tvar iTargetIndex = -1;\n\t\n\t\tfor ( var i=0, iLen=a.length ; i<iLen ; i++ )\n\t\t{\n\t\t\tif ( a[i] == iTarget )\n\t\t\t{\n\t\t\t\tiTargetIndex = i;\n\t\t\t}\n\t\t\telse if ( a[i] > iTarget )\n\t\t\t{\n\t\t\t\ta[i]--;\n\t\t\t}\n\t\t}\n\t\n\t\tif ( iTargetIndex != -1 && splice === undefined )\n\t\t{\n\t\t\ta.splice( iTargetIndex, 1 );\n\t\t}\n\t}\n\t\n\t\n\t/**\n\t * Mark cached data as invalid such that a re-read of the data will occur when\n\t * the cached data is next requested. 
Also update from the data source object.\n\t *\n\t * @param {object} settings DataTables settings object\n\t * @param {int}    rowIdx   Row index to invalidate\n\t * @param {string} [src]    Source to invalidate from: undefined, 'auto', 'dom'\n\t *     or 'data'\n\t * @param {int}    [colIdx] Column index to invalidate. If undefined the whole\n\t *     row will be invalidated\n\t * @memberof DataTable#oApi\n\t *\n\t * @todo For the modularisation of v1.11 this will need to become a callback, so\n\t *   the sort and filter methods can subscribe to it. That will required\n\t *   initialisation options for sorting, which is why it is not already baked in\n\t */\n\tfunction _fnInvalidate( settings, rowIdx, src, colIdx )\n\t{\n\t\tvar row = settings.aoData[ rowIdx ];\n\t\tvar i, ien;\n\t\tvar cellWrite = function ( cell, col ) {\n\t\t\t// This is very frustrating, but in IE if you just write directly\n\t\t\t// to innerHTML, and elements that are overwritten are GC'ed,\n\t\t\t// even if there is a reference to them elsewhere\n\t\t\twhile ( cell.childNodes.length ) {\n\t\t\t\tcell.removeChild( cell.firstChild );\n\t\t\t}\n\t\n\t\t\tcell.innerHTML = _fnGetCellData( settings, rowIdx, col, 'display' );\n\t\t};\n\t\n\t\t// Are we reading last data from DOM or the data object?\n\t\tif ( src === 'dom' || ((! src || src === 'auto') && row.src === 'dom') ) {\n\t\t\t// Read the data from the DOM\n\t\t\trow._aData = _fnGetRowElements(\n\t\t\t\t\tsettings, row, colIdx, colIdx === undefined ? 
undefined : row._aData\n\t\t\t\t)\n\t\t\t\t.data;\n\t\t}\n\t\telse {\n\t\t\t// Reading from data object, update the DOM\n\t\t\tvar cells = row.anCells;\n\t\n\t\t\tif ( cells ) {\n\t\t\t\tif ( colIdx !== undefined ) {\n\t\t\t\t\tcellWrite( cells[colIdx], colIdx );\n\t\t\t\t}\n\t\t\t\telse {\n\t\t\t\t\tfor ( i=0, ien=cells.length ; i<ien ; i++ ) {\n\t\t\t\t\t\tcellWrite( cells[i], i );\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\n\t\t// For both row and cell invalidation, the cached data for sorting and\n\t\t// filtering is nulled out\n\t\trow._aSortData = null;\n\t\trow._aFilterData = null;\n\t\n\t\t// Invalidate the type for a specific column (if given) or all columns since\n\t\t// the data might have changed\n\t\tvar cols = settings.aoColumns;\n\t\tif ( colIdx !== undefined ) {\n\t\t\tcols[ colIdx ].sType = null;\n\t\t}\n\t\telse {\n\t\t\tfor ( i=0, ien=cols.length ; i<ien ; i++ ) {\n\t\t\t\tcols[i].sType = null;\n\t\t\t}\n\t\n\t\t\t// Update DataTables special `DT_*` attributes for the row\n\t\t\t_fnRowAttributes( settings, row );\n\t\t}\n\t}\n\t\n\t\n\t/**\n\t * Build a data source object from an HTML row, reading the contents of the\n\t * cells that are in the row.\n\t *\n\t * @param {object} settings DataTables settings object\n\t * @param {node|object} TR element from which to read data or existing row\n\t *   object from which to re-read the data from the cells\n\t * @param {int} [colIdx] Optional column index\n\t * @param {array|object} [d] Data source object. 
If `colIdx` is given then this\n\t *   parameter should also be given and will be used to write the data into.\n\t *   Only the column in question will be written\n\t * @returns {object} Object with two parameters: `data` the data read, in\n\t *   document order, and `cells` and array of nodes (they can be useful to the\n\t *   caller, so rather than needing a second traversal to get them, just return\n\t *   them from here).\n\t * @memberof DataTable#oApi\n\t */\n\tfunction _fnGetRowElements( settings, row, colIdx, d )\n\t{\n\t\tvar\n\t\t\ttds = [],\n\t\t\ttd = row.firstChild,\n\t\t\tname, col, o, i=0, contents,\n\t\t\tcolumns = settings.aoColumns,\n\t\t\tobjectRead = settings._rowReadObject;\n\t\n\t\t// Allow the data object to be passed in, or construct\n\t\td = d !== undefined ?\n\t\t\td :\n\t\t\tobjectRead ?\n\t\t\t\t{} :\n\t\t\t\t[];\n\t\n\t\tvar attr = function ( str, td  ) {\n\t\t\tif ( typeof str === 'string' ) {\n\t\t\t\tvar idx = str.indexOf('@');\n\t\n\t\t\t\tif ( idx !== -1 ) {\n\t\t\t\t\tvar attr = str.substring( idx+1 );\n\t\t\t\t\tvar setter = _fnSetObjectDataFn( str );\n\t\t\t\t\tsetter( d, td.getAttribute( attr ) );\n\t\t\t\t}\n\t\t\t}\n\t\t};\n\t\n\t\t// Read data from a cell and store into the data object\n\t\tvar cellProcess = function ( cell ) {\n\t\t\tif ( colIdx === undefined || colIdx === i ) {\n\t\t\t\tcol = columns[i];\n\t\t\t\tcontents = $.trim(cell.innerHTML);\n\t\n\t\t\t\tif ( col && col._bAttrSrc ) {\n\t\t\t\t\tvar setter = _fnSetObjectDataFn( col.mData._ );\n\t\t\t\t\tsetter( d, contents );\n\t\n\t\t\t\t\tattr( col.mData.sort, cell );\n\t\t\t\t\tattr( col.mData.type, cell );\n\t\t\t\t\tattr( col.mData.filter, cell );\n\t\t\t\t}\n\t\t\t\telse {\n\t\t\t\t\t// Depending on the `data` option for the columns the data can\n\t\t\t\t\t// be read to either an object or an array.\n\t\t\t\t\tif ( objectRead ) {\n\t\t\t\t\t\tif ( ! 
col._setter ) {\n\t\t\t\t\t\t\t// Cache the setter function\n\t\t\t\t\t\t\tcol._setter = _fnSetObjectDataFn( col.mData );\n\t\t\t\t\t\t}\n\t\t\t\t\t\tcol._setter( d, contents );\n\t\t\t\t\t}\n\t\t\t\t\telse {\n\t\t\t\t\t\td[i] = contents;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\n\t\t\ti++;\n\t\t};\n\t\n\t\tif ( td ) {\n\t\t\t// `tr` element was passed in\n\t\t\twhile ( td ) {\n\t\t\t\tname = td.nodeName.toUpperCase();\n\t\n\t\t\t\tif ( name == \"TD\" || name == \"TH\" ) {\n\t\t\t\t\tcellProcess( td );\n\t\t\t\t\ttds.push( td );\n\t\t\t\t}\n\t\n\t\t\t\ttd = td.nextSibling;\n\t\t\t}\n\t\t}\n\t\telse {\n\t\t\t// Existing row object passed in\n\t\t\ttds = row.anCells;\n\t\n\t\t\tfor ( var j=0, jen=tds.length ; j<jen ; j++ ) {\n\t\t\t\tcellProcess( tds[j] );\n\t\t\t}\n\t\t}\n\t\n\t\t// Read the ID from the DOM if present\n\t\tvar rowNode = row.firstChild ? row : row.nTr;\n\t\n\t\tif ( rowNode ) {\n\t\t\tvar id = rowNode.getAttribute( 'id' );\n\t\n\t\t\tif ( id ) {\n\t\t\t\t_fnSetObjectDataFn( settings.rowId )( d, id );\n\t\t\t}\n\t\t}\n\t\n\t\treturn {\n\t\t\tdata: d,\n\t\t\tcells: tds\n\t\t};\n\t}\n\t/**\n\t * Create a new TR element (and it's TD children) for a row\n\t *  @param {object} oSettings dataTables settings object\n\t *  @param {int} iRow Row to consider\n\t *  @param {node} [nTrIn] TR element to add to the table - optional. 
If not given,\n\t *    DataTables will create a row automatically\n\t *  @param {array} [anTds] Array of TD|TH elements for the row - must be given\n\t *    if nTr is.\n\t *  @memberof DataTable#oApi\n\t */\n\tfunction _fnCreateTr ( oSettings, iRow, nTrIn, anTds )\n\t{\n\t\tvar\n\t\t\trow = oSettings.aoData[iRow],\n\t\t\trowData = row._aData,\n\t\t\tcells = [],\n\t\t\tnTr, nTd, oCol,\n\t\t\ti, iLen;\n\t\n\t\tif ( row.nTr === null )\n\t\t{\n\t\t\tnTr = nTrIn || document.createElement('tr');\n\t\n\t\t\trow.nTr = nTr;\n\t\t\trow.anCells = cells;\n\t\n\t\t\t/* Use a private property on the node to allow reserve mapping from the node\n\t\t\t * to the aoData array for fast look up\n\t\t\t */\n\t\t\tnTr._DT_RowIndex = iRow;\n\t\n\t\t\t/* Special parameters can be given by the data source to be used on the row */\n\t\t\t_fnRowAttributes( oSettings, row );\n\t\n\t\t\t/* Process each column */\n\t\t\tfor ( i=0, iLen=oSettings.aoColumns.length ; i<iLen ; i++ )\n\t\t\t{\n\t\t\t\toCol = oSettings.aoColumns[i];\n\t\n\t\t\t\tnTd = nTrIn ? anTds[i] : document.createElement( oCol.sCellType );\n\t\t\t\tnTd._DT_CellIndex = {\n\t\t\t\t\trow: iRow,\n\t\t\t\t\tcolumn: i\n\t\t\t\t};\n\t\t\t\t\n\t\t\t\tcells.push( nTd );\n\t\n\t\t\t\t// Need to create the HTML if new, or if a rendering function is defined\n\t\t\t\tif ( (!nTrIn || oCol.mRender || oCol.mData !== i) &&\n\t\t\t\t\t (!$.isPlainObject(oCol.mData) || oCol.mData._ !== i+'.display')\n\t\t\t\t) {\n\t\t\t\t\tnTd.innerHTML = _fnGetCellData( oSettings, iRow, i, 'display' );\n\t\t\t\t}\n\t\n\t\t\t\t/* Add user defined class */\n\t\t\t\tif ( oCol.sClass )\n\t\t\t\t{\n\t\t\t\t\tnTd.className += ' '+oCol.sClass;\n\t\t\t\t}\n\t\n\t\t\t\t// Visibility - add or remove as required\n\t\t\t\tif ( oCol.bVisible && ! nTrIn )\n\t\t\t\t{\n\t\t\t\t\tnTr.appendChild( nTd );\n\t\t\t\t}\n\t\t\t\telse if ( ! 
oCol.bVisible && nTrIn )\n\t\t\t\t{\n\t\t\t\t\tnTd.parentNode.removeChild( nTd );\n\t\t\t\t}\n\t\n\t\t\t\tif ( oCol.fnCreatedCell )\n\t\t\t\t{\n\t\t\t\t\toCol.fnCreatedCell.call( oSettings.oInstance,\n\t\t\t\t\t\tnTd, _fnGetCellData( oSettings, iRow, i ), rowData, iRow, i\n\t\t\t\t\t);\n\t\t\t\t}\n\t\t\t}\n\t\n\t\t\t_fnCallbackFire( oSettings, 'aoRowCreatedCallback', null, [nTr, rowData, iRow] );\n\t\t}\n\t\n\t\t// Remove once webkit bug 131819 and Chromium bug 365619 have been resolved\n\t\t// and deployed\n\t\trow.nTr.setAttribute( 'role', 'row' );\n\t}\n\t\n\t\n\t/**\n\t * Add attributes to a row based on the special `DT_*` parameters in a data\n\t * source object.\n\t *  @param {object} settings DataTables settings object\n\t *  @param {object} DataTables row object for the row to be modified\n\t *  @memberof DataTable#oApi\n\t */\n\tfunction _fnRowAttributes( settings, row )\n\t{\n\t\tvar tr = row.nTr;\n\t\tvar data = row._aData;\n\t\n\t\tif ( tr ) {\n\t\t\tvar id = settings.rowIdFn( data );\n\t\n\t\t\tif ( id ) {\n\t\t\t\ttr.id = id;\n\t\t\t}\n\t\n\t\t\tif ( data.DT_RowClass ) {\n\t\t\t\t// Remove any classes added by DT_RowClass before\n\t\t\t\tvar a = data.DT_RowClass.split(' ');\n\t\t\t\trow.__rowc = row.__rowc ?\n\t\t\t\t\t_unique( row.__rowc.concat( a ) ) :\n\t\t\t\t\ta;\n\t\n\t\t\t\t$(tr)\n\t\t\t\t\t.removeClass( row.__rowc.join(' ') )\n\t\t\t\t\t.addClass( data.DT_RowClass );\n\t\t\t}\n\t\n\t\t\tif ( data.DT_RowAttr ) {\n\t\t\t\t$(tr).attr( data.DT_RowAttr );\n\t\t\t}\n\t\n\t\t\tif ( data.DT_RowData ) {\n\t\t\t\t$(tr).data( data.DT_RowData );\n\t\t\t}\n\t\t}\n\t}\n\t\n\t\n\t/**\n\t * Create the HTML header for the table\n\t *  @param {object} oSettings dataTables settings object\n\t *  @memberof DataTable#oApi\n\t */\n\tfunction _fnBuildHead( oSettings )\n\t{\n\t\tvar i, ien, cell, row, column;\n\t\tvar thead = oSettings.nTHead;\n\t\tvar tfoot = oSettings.nTFoot;\n\t\tvar createHeader = $('th, td', thead).length === 0;\n\t\tvar classes = 
oSettings.oClasses;\n\t\tvar columns = oSettings.aoColumns;\n\t\n\t\tif ( createHeader ) {\n\t\t\trow = $('<tr/>').appendTo( thead );\n\t\t}\n\t\n\t\tfor ( i=0, ien=columns.length ; i<ien ; i++ ) {\n\t\t\tcolumn = columns[i];\n\t\t\tcell = $( column.nTh ).addClass( column.sClass );\n\t\n\t\t\tif ( createHeader ) {\n\t\t\t\tcell.appendTo( row );\n\t\t\t}\n\t\n\t\t\t// 1.11 move into sorting\n\t\t\tif ( oSettings.oFeatures.bSort ) {\n\t\t\t\tcell.addClass( column.sSortingClass );\n\t\n\t\t\t\tif ( column.bSortable !== false ) {\n\t\t\t\t\tcell\n\t\t\t\t\t\t.attr( 'tabindex', oSettings.iTabIndex )\n\t\t\t\t\t\t.attr( 'aria-controls', oSettings.sTableId );\n\t\n\t\t\t\t\t_fnSortAttachListener( oSettings, column.nTh, i );\n\t\t\t\t}\n\t\t\t}\n\t\n\t\t\tif ( column.sTitle != cell[0].innerHTML ) {\n\t\t\t\tcell.html( column.sTitle );\n\t\t\t}\n\t\n\t\t\t_fnRenderer( oSettings, 'header' )(\n\t\t\t\toSettings, cell, column, classes\n\t\t\t);\n\t\t}\n\t\n\t\tif ( createHeader ) {\n\t\t\t_fnDetectHeader( oSettings.aoHeader, thead );\n\t\t}\n\t\t\n\t\t/* ARIA role for the rows */\n\t \t$(thead).find('>tr').attr('role', 'row');\n\t\n\t\t/* Deal with the footer - add classes if required */\n\t\t$(thead).find('>tr>th, >tr>td').addClass( classes.sHeaderTH );\n\t\t$(tfoot).find('>tr>th, >tr>td').addClass( classes.sFooterTH );\n\t\n\t\t// Cache the footer cells. Note that we only take the cells from the first\n\t\t// row in the footer. If there is more than one row the user wants to\n\t\t// interact with, they need to use the table().foot() method. 
Note also this\n\t\t// allows cells to be used for multiple columns using colspan\n\t\tif ( tfoot !== null ) {\n\t\t\tvar cells = oSettings.aoFooter[0];\n\t\n\t\t\tfor ( i=0, ien=cells.length ; i<ien ; i++ ) {\n\t\t\t\tcolumn = columns[i];\n\t\t\t\tcolumn.nTf = cells[i].cell;\n\t\n\t\t\t\tif ( column.sClass ) {\n\t\t\t\t\t$(column.nTf).addClass( column.sClass );\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\t\n\t\n\t/**\n\t * Draw the header (or footer) element based on the column visibility states. The\n\t * methodology here is to use the layout array from _fnDetectHeader, modified for\n\t * the instantaneous column visibility, to construct the new layout. The grid is\n\t * traversed over cell at a time in a rows x columns grid fashion, although each\n\t * cell insert can cover multiple elements in the grid - which is tracks using the\n\t * aApplied array. Cell inserts in the grid will only occur where there isn't\n\t * already a cell in that position.\n\t *  @param {object} oSettings dataTables settings object\n\t *  @param array {objects} aoSource Layout array from _fnDetectHeader\n\t *  @param {boolean} [bIncludeHidden=false] If true then include the hidden columns in the calc,\n\t *  @memberof DataTable#oApi\n\t */\n\tfunction _fnDrawHead( oSettings, aoSource, bIncludeHidden )\n\t{\n\t\tvar i, iLen, j, jLen, k, kLen, n, nLocalTr;\n\t\tvar aoLocal = [];\n\t\tvar aApplied = [];\n\t\tvar iColumns = oSettings.aoColumns.length;\n\t\tvar iRowspan, iColspan;\n\t\n\t\tif ( ! 
aoSource )\n\t\t{\n\t\t\treturn;\n\t\t}\n\t\n\t\tif (  bIncludeHidden === undefined )\n\t\t{\n\t\t\tbIncludeHidden = false;\n\t\t}\n\t\n\t\t/* Make a copy of the master layout array, but without the visible columns in it */\n\t\tfor ( i=0, iLen=aoSource.length ; i<iLen ; i++ )\n\t\t{\n\t\t\taoLocal[i] = aoSource[i].slice();\n\t\t\taoLocal[i].nTr = aoSource[i].nTr;\n\t\n\t\t\t/* Remove any columns which are currently hidden */\n\t\t\tfor ( j=iColumns-1 ; j>=0 ; j-- )\n\t\t\t{\n\t\t\t\tif ( !oSettings.aoColumns[j].bVisible && !bIncludeHidden )\n\t\t\t\t{\n\t\t\t\t\taoLocal[i].splice( j, 1 );\n\t\t\t\t}\n\t\t\t}\n\t\n\t\t\t/* Prep the applied array - it needs an element for each row */\n\t\t\taApplied.push( [] );\n\t\t}\n\t\n\t\tfor ( i=0, iLen=aoLocal.length ; i<iLen ; i++ )\n\t\t{\n\t\t\tnLocalTr = aoLocal[i].nTr;\n\t\n\t\t\t/* All cells are going to be replaced, so empty out the row */\n\t\t\tif ( nLocalTr )\n\t\t\t{\n\t\t\t\twhile( (n = nLocalTr.firstChild) )\n\t\t\t\t{\n\t\t\t\t\tnLocalTr.removeChild( n );\n\t\t\t\t}\n\t\t\t}\n\t\n\t\t\tfor ( j=0, jLen=aoLocal[i].length ; j<jLen ; j++ )\n\t\t\t{\n\t\t\t\tiRowspan = 1;\n\t\t\t\tiColspan = 1;\n\t\n\t\t\t\t/* Check to see if there is already a cell (row/colspan) covering our target\n\t\t\t\t * insert point. 
If there is, then there is nothing to do.\n\t\t\t\t */\n\t\t\t\tif ( aApplied[i][j] === undefined )\n\t\t\t\t{\n\t\t\t\t\tnLocalTr.appendChild( aoLocal[i][j].cell );\n\t\t\t\t\taApplied[i][j] = 1;\n\t\n\t\t\t\t\t/* Expand the cell to cover as many rows as needed */\n\t\t\t\t\twhile ( aoLocal[i+iRowspan] !== undefined &&\n\t\t\t\t\t        aoLocal[i][j].cell == aoLocal[i+iRowspan][j].cell )\n\t\t\t\t\t{\n\t\t\t\t\t\taApplied[i+iRowspan][j] = 1;\n\t\t\t\t\t\tiRowspan++;\n\t\t\t\t\t}\n\t\n\t\t\t\t\t/* Expand the cell to cover as many columns as needed */\n\t\t\t\t\twhile ( aoLocal[i][j+iColspan] !== undefined &&\n\t\t\t\t\t        aoLocal[i][j].cell == aoLocal[i][j+iColspan].cell )\n\t\t\t\t\t{\n\t\t\t\t\t\t/* Must update the applied array over the rows for the columns */\n\t\t\t\t\t\tfor ( k=0 ; k<iRowspan ; k++ )\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\taApplied[i+k][j+iColspan] = 1;\n\t\t\t\t\t\t}\n\t\t\t\t\t\tiColspan++;\n\t\t\t\t\t}\n\t\n\t\t\t\t\t/* Do the actual expansion in the DOM */\n\t\t\t\t\t$(aoLocal[i][j].cell)\n\t\t\t\t\t\t.attr('rowspan', iRowspan)\n\t\t\t\t\t\t.attr('colspan', iColspan);\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\t\n\t\n\t/**\n\t * Insert the required TR nodes into the table for display\n\t *  @param {object} oSettings dataTables settings object\n\t *  @memberof DataTable#oApi\n\t */\n\tfunction _fnDraw( oSettings )\n\t{\n\t\t/* Provide a pre-callback function which can be used to cancel the draw is false is returned */\n\t\tvar aPreDraw = _fnCallbackFire( oSettings, 'aoPreDrawCallback', 'preDraw', [oSettings] );\n\t\tif ( $.inArray( false, aPreDraw ) !== -1 )\n\t\t{\n\t\t\t_fnProcessingDisplay( oSettings, false );\n\t\t\treturn;\n\t\t}\n\t\n\t\tvar i, iLen, n;\n\t\tvar anRows = [];\n\t\tvar iRowCount = 0;\n\t\tvar asStripeClasses = oSettings.asStripeClasses;\n\t\tvar iStripes = asStripeClasses.length;\n\t\tvar iOpenRows = oSettings.aoOpenRows.length;\n\t\tvar oLang = oSettings.oLanguage;\n\t\tvar iInitDisplayStart = oSettings.iInitDisplayStart;\n\t\tvar 
bServerSide = _fnDataSource( oSettings ) == 'ssp';\n\t\tvar aiDisplay = oSettings.aiDisplay;\n\t\n\t\toSettings.bDrawing = true;\n\t\n\t\t/* Check and see if we have an initial draw position from state saving */\n\t\tif ( iInitDisplayStart !== undefined && iInitDisplayStart !== -1 )\n\t\t{\n\t\t\toSettings._iDisplayStart = bServerSide ?\n\t\t\t\tiInitDisplayStart :\n\t\t\t\tiInitDisplayStart >= oSettings.fnRecordsDisplay() ?\n\t\t\t\t\t0 :\n\t\t\t\t\tiInitDisplayStart;\n\t\n\t\t\toSettings.iInitDisplayStart = -1;\n\t\t}\n\t\n\t\tvar iDisplayStart = oSettings._iDisplayStart;\n\t\tvar iDisplayEnd = oSettings.fnDisplayEnd();\n\t\n\t\t/* Server-side processing draw intercept */\n\t\tif ( oSettings.bDeferLoading )\n\t\t{\n\t\t\toSettings.bDeferLoading = false;\n\t\t\toSettings.iDraw++;\n\t\t\t_fnProcessingDisplay( oSettings, false );\n\t\t}\n\t\telse if ( !bServerSide )\n\t\t{\n\t\t\toSettings.iDraw++;\n\t\t}\n\t\telse if ( !oSettings.bDestroying && !_fnAjaxUpdate( oSettings ) )\n\t\t{\n\t\t\treturn;\n\t\t}\n\t\n\t\tif ( aiDisplay.length !== 0 )\n\t\t{\n\t\t\tvar iStart = bServerSide ? 0 : iDisplayStart;\n\t\t\tvar iEnd = bServerSide ? 
oSettings.aoData.length : iDisplayEnd;\n\t\n\t\t\tfor ( var j=iStart ; j<iEnd ; j++ )\n\t\t\t{\n\t\t\t\tvar iDataIndex = aiDisplay[j];\n\t\t\t\tvar aoData = oSettings.aoData[ iDataIndex ];\n\t\t\t\tif ( aoData.nTr === null )\n\t\t\t\t{\n\t\t\t\t\t_fnCreateTr( oSettings, iDataIndex );\n\t\t\t\t}\n\t\n\t\t\t\tvar nRow = aoData.nTr;\n\t\n\t\t\t\t/* Remove the old striping classes and then add the new one */\n\t\t\t\tif ( iStripes !== 0 )\n\t\t\t\t{\n\t\t\t\t\tvar sStripe = asStripeClasses[ iRowCount % iStripes ];\n\t\t\t\t\tif ( aoData._sRowStripe != sStripe )\n\t\t\t\t\t{\n\t\t\t\t\t\t$(nRow).removeClass( aoData._sRowStripe ).addClass( sStripe );\n\t\t\t\t\t\taoData._sRowStripe = sStripe;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\n\t\t\t\t// Row callback functions - might want to manipulate the row\n\t\t\t\t// iRowCount and j are not currently documented. Are they at all\n\t\t\t\t// useful?\n\t\t\t\t_fnCallbackFire( oSettings, 'aoRowCallback', null,\n\t\t\t\t\t[nRow, aoData._aData, iRowCount, j] );\n\t\n\t\t\t\tanRows.push( nRow );\n\t\t\t\tiRowCount++;\n\t\t\t}\n\t\t}\n\t\telse\n\t\t{\n\t\t\t/* Table is empty - create a row with an empty message in it */\n\t\t\tvar sZero = oLang.sZeroRecords;\n\t\t\tif ( oSettings.iDraw == 1 &&  _fnDataSource( oSettings ) == 'ajax' )\n\t\t\t{\n\t\t\t\tsZero = oLang.sLoadingRecords;\n\t\t\t}\n\t\t\telse if ( oLang.sEmptyTable && oSettings.fnRecordsTotal() === 0 )\n\t\t\t{\n\t\t\t\tsZero = oLang.sEmptyTable;\n\t\t\t}\n\t\n\t\t\tanRows[ 0 ] = $( '<tr/>', { 'class': iStripes ? 
asStripeClasses[0] : '' } )\n\t\t\t\t.append( $('<td />', {\n\t\t\t\t\t'valign':  'top',\n\t\t\t\t\t'colSpan': _fnVisbleColumns( oSettings ),\n\t\t\t\t\t'class':   oSettings.oClasses.sRowEmpty\n\t\t\t\t} ).html( sZero ) )[0];\n\t\t}\n\t\n\t\t/* Header and footer callbacks */\n\t\t_fnCallbackFire( oSettings, 'aoHeaderCallback', 'header', [ $(oSettings.nTHead).children('tr')[0],\n\t\t\t_fnGetDataMaster( oSettings ), iDisplayStart, iDisplayEnd, aiDisplay ] );\n\t\n\t\t_fnCallbackFire( oSettings, 'aoFooterCallback', 'footer', [ $(oSettings.nTFoot).children('tr')[0],\n\t\t\t_fnGetDataMaster( oSettings ), iDisplayStart, iDisplayEnd, aiDisplay ] );\n\t\n\t\tvar body = $(oSettings.nTBody);\n\t\n\t\tbody.children().detach();\n\t\tbody.append( $(anRows) );\n\t\n\t\t/* Call all required callback functions for the end of a draw */\n\t\t_fnCallbackFire( oSettings, 'aoDrawCallback', 'draw', [oSettings] );\n\t\n\t\t/* Draw is complete, sorting and filtering must be as well */\n\t\toSettings.bSorted = false;\n\t\toSettings.bFiltered = false;\n\t\toSettings.bDrawing = false;\n\t}\n\t\n\t\n\t/**\n\t * Redraw the table - taking account of the various features which are enabled\n\t *  @param {object} oSettings dataTables settings object\n\t *  @param {boolean} [holdPosition] Keep the current paging position. 
By default\n\t *    the paging is reset to the first page\n\t *  @memberof DataTable#oApi\n\t */\n\tfunction _fnReDraw( settings, holdPosition )\n\t{\n\t\tvar\n\t\t\tfeatures = settings.oFeatures,\n\t\t\tsort     = features.bSort,\n\t\t\tfilter   = features.bFilter;\n\t\n\t\tif ( sort ) {\n\t\t\t_fnSort( settings );\n\t\t}\n\t\n\t\tif ( filter ) {\n\t\t\t_fnFilterComplete( settings, settings.oPreviousSearch );\n\t\t}\n\t\telse {\n\t\t\t// No filtering, so we want to just use the display master\n\t\t\tsettings.aiDisplay = settings.aiDisplayMaster.slice();\n\t\t}\n\t\n\t\tif ( holdPosition !== true ) {\n\t\t\tsettings._iDisplayStart = 0;\n\t\t}\n\t\n\t\t// Let any modules know about the draw hold position state (used by\n\t\t// scrolling internally)\n\t\tsettings._drawHold = holdPosition;\n\t\n\t\t_fnDraw( settings );\n\t\n\t\tsettings._drawHold = false;\n\t}\n\t\n\t\n\t/**\n\t * Add the options to the page HTML for the table\n\t *  @param {object} oSettings dataTables settings object\n\t *  @memberof DataTable#oApi\n\t */\n\tfunction _fnAddOptionsHtml ( oSettings )\n\t{\n\t\tvar classes = oSettings.oClasses;\n\t\tvar table = $(oSettings.nTable);\n\t\tvar holding = $('<div/>').insertBefore( table ); // Holding element for speed\n\t\tvar features = oSettings.oFeatures;\n\t\n\t\t// All DataTables are wrapped in a div\n\t\tvar insert = $('<div/>', {\n\t\t\tid:      oSettings.sTableId+'_wrapper',\n\t\t\t'class': classes.sWrapper + (oSettings.nTFoot ? 
'' : ' '+classes.sNoFooter)\n\t\t} );\n\t\n\t\toSettings.nHolding = holding[0];\n\t\toSettings.nTableWrapper = insert[0];\n\t\toSettings.nTableReinsertBefore = oSettings.nTable.nextSibling;\n\t\n\t\t/* Loop over the user set positioning and place the elements as needed */\n\t\tvar aDom = oSettings.sDom.split('');\n\t\tvar featureNode, cOption, nNewNode, cNext, sAttr, j;\n\t\tfor ( var i=0 ; i<aDom.length ; i++ )\n\t\t{\n\t\t\tfeatureNode = null;\n\t\t\tcOption = aDom[i];\n\t\n\t\t\tif ( cOption == '<' )\n\t\t\t{\n\t\t\t\t/* New container div */\n\t\t\t\tnNewNode = $('<div/>')[0];\n\t\n\t\t\t\t/* Check to see if we should append an id and/or a class name to the container */\n\t\t\t\tcNext = aDom[i+1];\n\t\t\t\tif ( cNext == \"'\" || cNext == '\"' )\n\t\t\t\t{\n\t\t\t\t\tsAttr = \"\";\n\t\t\t\t\tj = 2;\n\t\t\t\t\twhile ( aDom[i+j] != cNext )\n\t\t\t\t\t{\n\t\t\t\t\t\tsAttr += aDom[i+j];\n\t\t\t\t\t\tj++;\n\t\t\t\t\t}\n\t\n\t\t\t\t\t/* Replace jQuery UI constants @todo depreciated */\n\t\t\t\t\tif ( sAttr == \"H\" )\n\t\t\t\t\t{\n\t\t\t\t\t\tsAttr = classes.sJUIHeader;\n\t\t\t\t\t}\n\t\t\t\t\telse if ( sAttr == \"F\" )\n\t\t\t\t\t{\n\t\t\t\t\t\tsAttr = classes.sJUIFooter;\n\t\t\t\t\t}\n\t\n\t\t\t\t\t/* The attribute can be in the format of \"#id.class\", \"#id\" or \"class\" This logic\n\t\t\t\t\t * breaks the string into parts and applies them as needed\n\t\t\t\t\t */\n\t\t\t\t\tif ( sAttr.indexOf('.') != -1 )\n\t\t\t\t\t{\n\t\t\t\t\t\tvar aSplit = sAttr.split('.');\n\t\t\t\t\t\tnNewNode.id = aSplit[0].substr(1, aSplit[0].length-1);\n\t\t\t\t\t\tnNewNode.className = aSplit[1];\n\t\t\t\t\t}\n\t\t\t\t\telse if ( sAttr.charAt(0) == \"#\" )\n\t\t\t\t\t{\n\t\t\t\t\t\tnNewNode.id = sAttr.substr(1, sAttr.length-1);\n\t\t\t\t\t}\n\t\t\t\t\telse\n\t\t\t\t\t{\n\t\t\t\t\t\tnNewNode.className = sAttr;\n\t\t\t\t\t}\n\t\n\t\t\t\t\ti += j; /* Move along the position array */\n\t\t\t\t}\n\t\n\t\t\t\tinsert.append( nNewNode );\n\t\t\t\tinsert = $(nNewNode);\n\t\t\t}\n\t\t\telse if ( 
cOption == '>' )\n\t\t\t{\n\t\t\t\t/* End container div */\n\t\t\t\tinsert = insert.parent();\n\t\t\t}\n\t\t\t// @todo Move options into their own plugins?\n\t\t\telse if ( cOption == 'l' && features.bPaginate && features.bLengthChange )\n\t\t\t{\n\t\t\t\t/* Length */\n\t\t\t\tfeatureNode = _fnFeatureHtmlLength( oSettings );\n\t\t\t}\n\t\t\telse if ( cOption == 'f' && features.bFilter )\n\t\t\t{\n\t\t\t\t/* Filter */\n\t\t\t\tfeatureNode = _fnFeatureHtmlFilter( oSettings );\n\t\t\t}\n\t\t\telse if ( cOption == 'r' && features.bProcessing )\n\t\t\t{\n\t\t\t\t/* pRocessing */\n\t\t\t\tfeatureNode = _fnFeatureHtmlProcessing( oSettings );\n\t\t\t}\n\t\t\telse if ( cOption == 't' )\n\t\t\t{\n\t\t\t\t/* Table */\n\t\t\t\tfeatureNode = _fnFeatureHtmlTable( oSettings );\n\t\t\t}\n\t\t\telse if ( cOption ==  'i' && features.bInfo )\n\t\t\t{\n\t\t\t\t/* Info */\n\t\t\t\tfeatureNode = _fnFeatureHtmlInfo( oSettings );\n\t\t\t}\n\t\t\telse if ( cOption == 'p' && features.bPaginate )\n\t\t\t{\n\t\t\t\t/* Pagination */\n\t\t\t\tfeatureNode = _fnFeatureHtmlPaginate( oSettings );\n\t\t\t}\n\t\t\telse if ( DataTable.ext.feature.length !== 0 )\n\t\t\t{\n\t\t\t\t/* Plug-in features */\n\t\t\t\tvar aoFeatures = DataTable.ext.feature;\n\t\t\t\tfor ( var k=0, kLen=aoFeatures.length ; k<kLen ; k++ )\n\t\t\t\t{\n\t\t\t\t\tif ( cOption == aoFeatures[k].cFeature )\n\t\t\t\t\t{\n\t\t\t\t\t\tfeatureNode = aoFeatures[k].fnInit( oSettings );\n\t\t\t\t\t\tbreak;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\n\t\t\t/* Add to the 2D features array */\n\t\t\tif ( featureNode )\n\t\t\t{\n\t\t\t\tvar aanFeatures = oSettings.aanFeatures;\n\t\n\t\t\t\tif ( ! 
aanFeatures[cOption] )\n\t\t\t\t{\n\t\t\t\t\taanFeatures[cOption] = [];\n\t\t\t\t}\n\t\n\t\t\t\taanFeatures[cOption].push( featureNode );\n\t\t\t\tinsert.append( featureNode );\n\t\t\t}\n\t\t}\n\t\n\t\t/* Built our DOM structure - replace the holding div with what we want */\n\t\tholding.replaceWith( insert );\n\t\toSettings.nHolding = null;\n\t}\n\t\n\t\n\t/**\n\t * Use the DOM source to create up an array of header cells. The idea here is to\n\t * create a layout grid (array) of rows x columns, which contains a reference\n\t * to the cell that that point in the grid (regardless of col/rowspan), such that\n\t * any column / row could be removed and the new grid constructed\n\t *  @param array {object} aLayout Array to store the calculated layout in\n\t *  @param {node} nThead The header/footer element for the table\n\t *  @memberof DataTable#oApi\n\t */\n\tfunction _fnDetectHeader ( aLayout, nThead )\n\t{\n\t\tvar nTrs = $(nThead).children('tr');\n\t\tvar nTr, nCell;\n\t\tvar i, k, l, iLen, jLen, iColShifted, iColumn, iColspan, iRowspan;\n\t\tvar bUnique;\n\t\tvar fnShiftCol = function ( a, i, j ) {\n\t\t\tvar k = a[i];\n\t                while ( k[j] ) {\n\t\t\t\tj++;\n\t\t\t}\n\t\t\treturn j;\n\t\t};\n\t\n\t\taLayout.splice( 0, aLayout.length );\n\t\n\t\t/* We know how many rows there are in the layout - so prep it */\n\t\tfor ( i=0, iLen=nTrs.length ; i<iLen ; i++ )\n\t\t{\n\t\t\taLayout.push( [] );\n\t\t}\n\t\n\t\t/* Calculate a layout array */\n\t\tfor ( i=0, iLen=nTrs.length ; i<iLen ; i++ )\n\t\t{\n\t\t\tnTr = nTrs[i];\n\t\t\tiColumn = 0;\n\t\n\t\t\t/* For every cell in the row... 
*/\n\t\t\tnCell = nTr.firstChild;\n\t\t\twhile ( nCell ) {\n\t\t\t\tif ( nCell.nodeName.toUpperCase() == \"TD\" ||\n\t\t\t\t     nCell.nodeName.toUpperCase() == \"TH\" )\n\t\t\t\t{\n\t\t\t\t\t/* Get the col and rowspan attributes from the DOM and sanitise them */\n\t\t\t\t\tiColspan = nCell.getAttribute('colspan') * 1;\n\t\t\t\t\tiRowspan = nCell.getAttribute('rowspan') * 1;\n\t\t\t\t\tiColspan = (!iColspan || iColspan===0 || iColspan===1) ? 1 : iColspan;\n\t\t\t\t\tiRowspan = (!iRowspan || iRowspan===0 || iRowspan===1) ? 1 : iRowspan;\n\t\n\t\t\t\t\t/* There might be colspan cells already in this row, so shift our target\n\t\t\t\t\t * accordingly\n\t\t\t\t\t */\n\t\t\t\t\tiColShifted = fnShiftCol( aLayout, i, iColumn );\n\t\n\t\t\t\t\t/* Cache calculation for unique columns */\n\t\t\t\t\tbUnique = iColspan === 1 ? true : false;\n\t\n\t\t\t\t\t/* If there is col / rowspan, copy the information into the layout grid */\n\t\t\t\t\tfor ( l=0 ; l<iColspan ; l++ )\n\t\t\t\t\t{\n\t\t\t\t\t\tfor ( k=0 ; k<iRowspan ; k++ )\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\taLayout[i+k][iColShifted+l] = {\n\t\t\t\t\t\t\t\t\"cell\": nCell,\n\t\t\t\t\t\t\t\t\"unique\": bUnique\n\t\t\t\t\t\t\t};\n\t\t\t\t\t\t\taLayout[i+k].nTr = nTr;\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tnCell = nCell.nextSibling;\n\t\t\t}\n\t\t}\n\t}\n\t\n\t\n\t/**\n\t * Get an array of unique th elements, one for each column\n\t *  @param {object} oSettings dataTables settings object\n\t *  @param {node} nHeader automatically detect the layout from this node - optional\n\t *  @param {array} aLayout thead/tfoot layout from _fnDetectHeader - optional\n\t *  @returns array {node} aReturn list of unique th's\n\t *  @memberof DataTable#oApi\n\t */\n\tfunction _fnGetUniqueThs ( oSettings, nHeader, aLayout )\n\t{\n\t\tvar aReturn = [];\n\t\tif ( !aLayout )\n\t\t{\n\t\t\taLayout = oSettings.aoHeader;\n\t\t\tif ( nHeader )\n\t\t\t{\n\t\t\t\taLayout = [];\n\t\t\t\t_fnDetectHeader( aLayout, nHeader );\n\t\t\t}\n\t\t}\n\t\n\t\tfor 
( var i=0, iLen=aLayout.length ; i<iLen ; i++ )\n\t\t{\n\t\t\tfor ( var j=0, jLen=aLayout[i].length ; j<jLen ; j++ )\n\t\t\t{\n\t\t\t\tif ( aLayout[i][j].unique &&\n\t\t\t\t\t (!aReturn[j] || !oSettings.bSortCellsTop) )\n\t\t\t\t{\n\t\t\t\t\taReturn[j] = aLayout[i][j].cell;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\n\t\treturn aReturn;\n\t}\n\t\n\t/**\n\t * Create an Ajax call based on the table's settings, taking into account that\n\t * parameters can have multiple forms, and backwards compatibility.\n\t *\n\t * @param {object} oSettings dataTables settings object\n\t * @param {array} data Data to send to the server, required by\n\t *     DataTables - may be augmented by developer callbacks\n\t * @param {function} fn Callback function to run when data is obtained\n\t */\n\tfunction _fnBuildAjax( oSettings, data, fn )\n\t{\n\t\t// Compatibility with 1.9-, allow fnServerData and event to manipulate\n\t\t_fnCallbackFire( oSettings, 'aoServerParams', 'serverParams', [data] );\n\t\n\t\t// Convert to object based for 1.10+ if using the old array scheme which can\n\t\t// come from server-side processing or serverParams\n\t\tif ( data && $.isArray(data) ) {\n\t\t\tvar tmp = {};\n\t\t\tvar rbracket = /(.*?)\\[\\]$/;\n\t\n\t\t\t$.each( data, function (key, val) {\n\t\t\t\tvar match = val.name.match(rbracket);\n\t\n\t\t\t\tif ( match ) {\n\t\t\t\t\t// Support for arrays\n\t\t\t\t\tvar name = match[0];\n\t\n\t\t\t\t\tif ( ! 
tmp[ name ] ) {\n\t\t\t\t\t\ttmp[ name ] = [];\n\t\t\t\t\t}\n\t\t\t\t\ttmp[ name ].push( val.value );\n\t\t\t\t}\n\t\t\t\telse {\n\t\t\t\t\ttmp[val.name] = val.value;\n\t\t\t\t}\n\t\t\t} );\n\t\t\tdata = tmp;\n\t\t}\n\t\n\t\tvar ajaxData;\n\t\tvar ajax = oSettings.ajax;\n\t\tvar instance = oSettings.oInstance;\n\t\tvar callback = function ( json ) {\n\t\t\t_fnCallbackFire( oSettings, null, 'xhr', [oSettings, json, oSettings.jqXHR] );\n\t\t\tfn( json );\n\t\t};\n\t\n\t\tif ( $.isPlainObject( ajax ) && ajax.data )\n\t\t{\n\t\t\tajaxData = ajax.data;\n\t\n\t\t\tvar newData = $.isFunction( ajaxData ) ?\n\t\t\t\tajaxData( data, oSettings ) :  // fn can manipulate data or return\n\t\t\t\tajaxData;                      // an object object or array to merge\n\t\n\t\t\t// If the function returned something, use that alone\n\t\t\tdata = $.isFunction( ajaxData ) && newData ?\n\t\t\t\tnewData :\n\t\t\t\t$.extend( true, data, newData );\n\t\n\t\t\t// Remove the data property as we've resolved it already and don't want\n\t\t\t// jQuery to do it again (it is restored at the end of the function)\n\t\t\tdelete ajax.data;\n\t\t}\n\t\n\t\tvar baseAjax = {\n\t\t\t\"data\": data,\n\t\t\t\"success\": function (json) {\n\t\t\t\tvar error = json.error || json.sError;\n\t\t\t\tif ( error ) {\n\t\t\t\t\t_fnLog( oSettings, 0, error );\n\t\t\t\t}\n\t\n\t\t\t\toSettings.json = json;\n\t\t\t\tcallback( json );\n\t\t\t},\n\t\t\t\"dataType\": \"json\",\n\t\t\t\"cache\": false,\n\t\t\t\"type\": oSettings.sServerMethod,\n\t\t\t\"error\": function (xhr, error, thrown) {\n\t\t\t\tvar ret = _fnCallbackFire( oSettings, null, 'xhr', [oSettings, null, oSettings.jqXHR] );\n\t\n\t\t\t\tif ( $.inArray( true, ret ) === -1 ) {\n\t\t\t\t\tif ( error == \"parsererror\" ) {\n\t\t\t\t\t\t_fnLog( oSettings, 0, 'Invalid JSON response', 1 );\n\t\t\t\t\t}\n\t\t\t\t\telse if ( xhr.readyState === 4 ) {\n\t\t\t\t\t\t_fnLog( oSettings, 0, 'Ajax error', 7 );\n\t\t\t\t\t}\n\t\t\t\t}\n\t\n\t\t\t\t_fnProcessingDisplay( 
oSettings, false );\n\t\t\t}\n\t\t};\n\t\n\t\t// Store the data submitted for the API\n\t\toSettings.oAjaxData = data;\n\t\n\t\t// Allow plug-ins and external processes to modify the data\n\t\t_fnCallbackFire( oSettings, null, 'preXhr', [oSettings, data] );\n\t\n\t\tif ( oSettings.fnServerData )\n\t\t{\n\t\t\t// DataTables 1.9- compatibility\n\t\t\toSettings.fnServerData.call( instance,\n\t\t\t\toSettings.sAjaxSource,\n\t\t\t\t$.map( data, function (val, key) { // Need to convert back to 1.9 trad format\n\t\t\t\t\treturn { name: key, value: val };\n\t\t\t\t} ),\n\t\t\t\tcallback,\n\t\t\t\toSettings\n\t\t\t);\n\t\t}\n\t\telse if ( oSettings.sAjaxSource || typeof ajax === 'string' )\n\t\t{\n\t\t\t// DataTables 1.9- compatibility\n\t\t\toSettings.jqXHR = $.ajax( $.extend( baseAjax, {\n\t\t\t\turl: ajax || oSettings.sAjaxSource\n\t\t\t} ) );\n\t\t}\n\t\telse if ( $.isFunction( ajax ) )\n\t\t{\n\t\t\t// Is a function - let the caller define what needs to be done\n\t\t\toSettings.jqXHR = ajax.call( instance, data, callback, oSettings );\n\t\t}\n\t\telse\n\t\t{\n\t\t\t// Object to extend the base settings\n\t\t\toSettings.jqXHR = $.ajax( $.extend( baseAjax, ajax ) );\n\t\n\t\t\t// Restore for next time around\n\t\t\tajax.data = ajaxData;\n\t\t}\n\t}\n\t\n\t\n\t/**\n\t * Update the table using an Ajax call\n\t *  @param {object} settings dataTables settings object\n\t *  @returns {boolean} Block the table drawing or not\n\t *  @memberof DataTable#oApi\n\t */\n\tfunction _fnAjaxUpdate( settings )\n\t{\n\t\tif ( settings.bAjaxDataGet ) {\n\t\t\tsettings.iDraw++;\n\t\t\t_fnProcessingDisplay( settings, true );\n\t\n\t\t\t_fnBuildAjax(\n\t\t\t\tsettings,\n\t\t\t\t_fnAjaxParameters( settings ),\n\t\t\t\tfunction(json) {\n\t\t\t\t\t_fnAjaxUpdateDraw( settings, json );\n\t\t\t\t}\n\t\t\t);\n\t\n\t\t\treturn false;\n\t\t}\n\t\treturn true;\n\t}\n\t\n\t\n\t/**\n\t * Build up the parameters in an object needed for a server-side processing\n\t * request. 
Note that this is basically done twice, is different ways - a modern\n\t * method which is used by default in DataTables 1.10 which uses objects and\n\t * arrays, or the 1.9- method with is name / value pairs. 1.9 method is used if\n\t * the sAjaxSource option is used in the initialisation, or the legacyAjax\n\t * option is set.\n\t *  @param {object} oSettings dataTables settings object\n\t *  @returns {bool} block the table drawing or not\n\t *  @memberof DataTable#oApi\n\t */\n\tfunction _fnAjaxParameters( settings )\n\t{\n\t\tvar\n\t\t\tcolumns = settings.aoColumns,\n\t\t\tcolumnCount = columns.length,\n\t\t\tfeatures = settings.oFeatures,\n\t\t\tpreSearch = settings.oPreviousSearch,\n\t\t\tpreColSearch = settings.aoPreSearchCols,\n\t\t\ti, data = [], dataProp, column, columnSearch,\n\t\t\tsort = _fnSortFlatten( settings ),\n\t\t\tdisplayStart = settings._iDisplayStart,\n\t\t\tdisplayLength = features.bPaginate !== false ?\n\t\t\t\tsettings._iDisplayLength :\n\t\t\t\t-1;\n\t\n\t\tvar param = function ( name, value ) {\n\t\t\tdata.push( { 'name': name, 'value': value } );\n\t\t};\n\t\n\t\t// DataTables 1.9- compatible method\n\t\tparam( 'sEcho',          settings.iDraw );\n\t\tparam( 'iColumns',       columnCount );\n\t\tparam( 'sColumns',       _pluck( columns, 'sName' ).join(',') );\n\t\tparam( 'iDisplayStart',  displayStart );\n\t\tparam( 'iDisplayLength', displayLength );\n\t\n\t\t// DataTables 1.10+ method\n\t\tvar d = {\n\t\t\tdraw:    settings.iDraw,\n\t\t\tcolumns: [],\n\t\t\torder:   [],\n\t\t\tstart:   displayStart,\n\t\t\tlength:  displayLength,\n\t\t\tsearch:  {\n\t\t\t\tvalue: preSearch.sSearch,\n\t\t\t\tregex: preSearch.bRegex\n\t\t\t}\n\t\t};\n\t\n\t\tfor ( i=0 ; i<columnCount ; i++ ) {\n\t\t\tcolumn = columns[i];\n\t\t\tcolumnSearch = preColSearch[i];\n\t\t\tdataProp = typeof column.mData==\"function\" ? 
'function' : column.mData ;\n\t\n\t\t\td.columns.push( {\n\t\t\t\tdata:       dataProp,\n\t\t\t\tname:       column.sName,\n\t\t\t\tsearchable: column.bSearchable,\n\t\t\t\torderable:  column.bSortable,\n\t\t\t\tsearch:     {\n\t\t\t\t\tvalue: columnSearch.sSearch,\n\t\t\t\t\tregex: columnSearch.bRegex\n\t\t\t\t}\n\t\t\t} );\n\t\n\t\t\tparam( \"mDataProp_\"+i, dataProp );\n\t\n\t\t\tif ( features.bFilter ) {\n\t\t\t\tparam( 'sSearch_'+i,     columnSearch.sSearch );\n\t\t\t\tparam( 'bRegex_'+i,      columnSearch.bRegex );\n\t\t\t\tparam( 'bSearchable_'+i, column.bSearchable );\n\t\t\t}\n\t\n\t\t\tif ( features.bSort ) {\n\t\t\t\tparam( 'bSortable_'+i, column.bSortable );\n\t\t\t}\n\t\t}\n\t\n\t\tif ( features.bFilter ) {\n\t\t\tparam( 'sSearch', preSearch.sSearch );\n\t\t\tparam( 'bRegex', preSearch.bRegex );\n\t\t}\n\t\n\t\tif ( features.bSort ) {\n\t\t\t$.each( sort, function ( i, val ) {\n\t\t\t\td.order.push( { column: val.col, dir: val.dir } );\n\t\n\t\t\t\tparam( 'iSortCol_'+i, val.col );\n\t\t\t\tparam( 'sSortDir_'+i, val.dir );\n\t\t\t} );\n\t\n\t\t\tparam( 'iSortingCols', sort.length );\n\t\t}\n\t\n\t\t// If the legacy.ajax parameter is null, then we automatically decide which\n\t\t// form to use, based on sAjaxSource\n\t\tvar legacy = DataTable.ext.legacy.ajax;\n\t\tif ( legacy === null ) {\n\t\t\treturn settings.sAjaxSource ? data : d;\n\t\t}\n\t\n\t\t// Otherwise, if legacy has been specified then we use that to decide on the\n\t\t// form\n\t\treturn legacy ? 
data : d;\n\t}\n\t\n\t\n\t/**\n\t * Data the data from the server (nuking the old) and redraw the table\n\t *  @param {object} oSettings dataTables settings object\n\t *  @param {object} json json data return from the server.\n\t *  @param {string} json.sEcho Tracking flag for DataTables to match requests\n\t *  @param {int} json.iTotalRecords Number of records in the data set, not accounting for filtering\n\t *  @param {int} json.iTotalDisplayRecords Number of records in the data set, accounting for filtering\n\t *  @param {array} json.aaData The data to display on this page\n\t *  @param {string} [json.sColumns] Column ordering (sName, comma separated)\n\t *  @memberof DataTable#oApi\n\t */\n\tfunction _fnAjaxUpdateDraw ( settings, json )\n\t{\n\t\t// v1.10 uses camelCase variables, while 1.9 uses Hungarian notation.\n\t\t// Support both\n\t\tvar compat = function ( old, modern ) {\n\t\t\treturn json[old] !== undefined ? json[old] : json[modern];\n\t\t};\n\t\n\t\tvar data = _fnAjaxDataSrc( settings, json );\n\t\tvar draw            = compat( 'sEcho',                'draw' );\n\t\tvar recordsTotal    = compat( 'iTotalRecords',        'recordsTotal' );\n\t\tvar recordsFiltered = compat( 'iTotalDisplayRecords', 'recordsFiltered' );\n\t\n\t\tif ( draw ) {\n\t\t\t// Protect against out of sequence returns\n\t\t\tif ( draw*1 < settings.iDraw ) {\n\t\t\t\treturn;\n\t\t\t}\n\t\t\tsettings.iDraw = draw * 1;\n\t\t}\n\t\n\t\t_fnClearTable( settings );\n\t\tsettings._iRecordsTotal   = parseInt(recordsTotal, 10);\n\t\tsettings._iRecordsDisplay = parseInt(recordsFiltered, 10);\n\t\n\t\tfor ( var i=0, ien=data.length ; i<ien ; i++ ) {\n\t\t\t_fnAddData( settings, data[i] );\n\t\t}\n\t\tsettings.aiDisplay = settings.aiDisplayMaster.slice();\n\t\n\t\tsettings.bAjaxDataGet = false;\n\t\t_fnDraw( settings );\n\t\n\t\tif ( ! 
settings._bInitComplete ) {\n\t\t\t_fnInitComplete( settings, json );\n\t\t}\n\t\n\t\tsettings.bAjaxDataGet = true;\n\t\t_fnProcessingDisplay( settings, false );\n\t}\n\t\n\t\n\t/**\n\t * Get the data from the JSON data source to use for drawing a table. Using\n\t * `_fnGetObjectDataFn` allows the data to be sourced from a property of the\n\t * source object, or from a processing function.\n\t *  @param {object} oSettings dataTables settings object\n\t *  @param  {object} json Data source object / array from the server\n\t *  @return {array} Array of data to use\n\t */\n\tfunction _fnAjaxDataSrc ( oSettings, json )\n\t{\n\t\tvar dataSrc = $.isPlainObject( oSettings.ajax ) && oSettings.ajax.dataSrc !== undefined ?\n\t\t\toSettings.ajax.dataSrc :\n\t\t\toSettings.sAjaxDataProp; // Compatibility with 1.9-.\n\t\n\t\t// Compatibility with 1.9-. In order to read from aaData, check if the\n\t\t// default has been changed, if not, check for aaData\n\t\tif ( dataSrc === 'data' ) {\n\t\t\treturn json.aaData || json[dataSrc];\n\t\t}\n\t\n\t\treturn dataSrc !== \"\" ?\n\t\t\t_fnGetObjectDataFn( dataSrc )( json ) :\n\t\t\tjson;\n\t}\n\t\n\t/**\n\t * Generate the node required for filtering text\n\t *  @returns {node} Filter control element\n\t *  @param {object} oSettings dataTables settings object\n\t *  @memberof DataTable#oApi\n\t */\n\tfunction _fnFeatureHtmlFilter ( settings )\n\t{\n\t\tvar classes = settings.oClasses;\n\t\tvar tableId = settings.sTableId;\n\t\tvar language = settings.oLanguage;\n\t\tvar previousSearch = settings.oPreviousSearch;\n\t\tvar features = settings.aanFeatures;\n\t\tvar input = '<input type=\"search\" class=\"'+classes.sFilterInput+'\"/>';\n\t\n\t\tvar str = language.sSearch;\n\t\tstr = str.match(/_INPUT_/) ?\n\t\t\tstr.replace('_INPUT_', input) :\n\t\t\tstr+input;\n\t\n\t\tvar filter = $('<div/>', {\n\t\t\t\t'id': ! features.f ? 
tableId+'_filter' : null,\n\t\t\t\t'class': classes.sFilter\n\t\t\t} )\n\t\t\t.append( $('<label/>' ).append( str ) );\n\t\n\t\tvar searchFn = function() {\n\t\t\t/* Update all other filter input elements for the new display */\n\t\t\tvar n = features.f;\n\t\t\tvar val = !this.value ? \"\" : this.value; // mental IE8 fix :-(\n\t\n\t\t\t/* Now do the filter */\n\t\t\tif ( val != previousSearch.sSearch ) {\n\t\t\t\t_fnFilterComplete( settings, {\n\t\t\t\t\t\"sSearch\": val,\n\t\t\t\t\t\"bRegex\": previousSearch.bRegex,\n\t\t\t\t\t\"bSmart\": previousSearch.bSmart ,\n\t\t\t\t\t\"bCaseInsensitive\": previousSearch.bCaseInsensitive\n\t\t\t\t} );\n\t\n\t\t\t\t// Need to redraw, without resorting\n\t\t\t\tsettings._iDisplayStart = 0;\n\t\t\t\t_fnDraw( settings );\n\t\t\t}\n\t\t};\n\t\n\t\tvar searchDelay = settings.searchDelay !== null ?\n\t\t\tsettings.searchDelay :\n\t\t\t_fnDataSource( settings ) === 'ssp' ?\n\t\t\t\t400 :\n\t\t\t\t0;\n\t\n\t\tvar jqFilter = $('input', filter)\n\t\t\t.val( previousSearch.sSearch )\n\t\t\t.attr( 'placeholder', language.sSearchPlaceholder )\n\t\t\t.bind(\n\t\t\t\t'keyup.DT search.DT input.DT paste.DT cut.DT',\n\t\t\t\tsearchDelay ?\n\t\t\t\t\t_fnThrottle( searchFn, searchDelay ) :\n\t\t\t\t\tsearchFn\n\t\t\t)\n\t\t\t.bind( 'keypress.DT', function(e) {\n\t\t\t\t/* Prevent form submission */\n\t\t\t\tif ( e.keyCode == 13 ) {\n\t\t\t\t\treturn false;\n\t\t\t\t}\n\t\t\t} )\n\t\t\t.attr('aria-controls', tableId);\n\t\n\t\t// Update the input elements whenever the table is filtered\n\t\t$(settings.nTable).on( 'search.dt.DT', function ( ev, s ) {\n\t\t\tif ( settings === s ) {\n\t\t\t\t// IE9 throws an 'unknown error' if document.activeElement is used\n\t\t\t\t// inside an iframe or frame...\n\t\t\t\ttry {\n\t\t\t\t\tif ( jqFilter[0] !== document.activeElement ) {\n\t\t\t\t\t\tjqFilter.val( previousSearch.sSearch );\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tcatch ( e ) {}\n\t\t\t}\n\t\t} );\n\t\n\t\treturn filter[0];\n\t}\n\t\n\t\n\t/**\n\t * Filter the 
table using both the global filter and column based filtering\n\t *  @param {object} oSettings dataTables settings object\n\t *  @param {object} oSearch search information\n\t *  @param {int} [iForce] force a research of the master array (1) or not (undefined or 0)\n\t *  @memberof DataTable#oApi\n\t */\n\tfunction _fnFilterComplete ( oSettings, oInput, iForce )\n\t{\n\t\tvar oPrevSearch = oSettings.oPreviousSearch;\n\t\tvar aoPrevSearch = oSettings.aoPreSearchCols;\n\t\tvar fnSaveFilter = function ( oFilter ) {\n\t\t\t/* Save the filtering values */\n\t\t\toPrevSearch.sSearch = oFilter.sSearch;\n\t\t\toPrevSearch.bRegex = oFilter.bRegex;\n\t\t\toPrevSearch.bSmart = oFilter.bSmart;\n\t\t\toPrevSearch.bCaseInsensitive = oFilter.bCaseInsensitive;\n\t\t};\n\t\tvar fnRegex = function ( o ) {\n\t\t\t// Backwards compatibility with the bEscapeRegex option\n\t\t\treturn o.bEscapeRegex !== undefined ? !o.bEscapeRegex : o.bRegex;\n\t\t};\n\t\n\t\t// Resolve any column types that are unknown due to addition or invalidation\n\t\t// @todo As per sort - can this be moved into an event handler?\n\t\t_fnColumnTypes( oSettings );\n\t\n\t\t/* In server-side processing all filtering is done by the server, so no point hanging around here */\n\t\tif ( _fnDataSource( oSettings ) != 'ssp' )\n\t\t{\n\t\t\t/* Global filter */\n\t\t\t_fnFilter( oSettings, oInput.sSearch, iForce, fnRegex(oInput), oInput.bSmart, oInput.bCaseInsensitive );\n\t\t\tfnSaveFilter( oInput );\n\t\n\t\t\t/* Now do the individual column filter */\n\t\t\tfor ( var i=0 ; i<aoPrevSearch.length ; i++ )\n\t\t\t{\n\t\t\t\t_fnFilterColumn( oSettings, aoPrevSearch[i].sSearch, i, fnRegex(aoPrevSearch[i]),\n\t\t\t\t\taoPrevSearch[i].bSmart, aoPrevSearch[i].bCaseInsensitive );\n\t\t\t}\n\t\n\t\t\t/* Custom filtering */\n\t\t\t_fnFilterCustom( oSettings );\n\t\t}\n\t\telse\n\t\t{\n\t\t\tfnSaveFilter( oInput );\n\t\t}\n\t\n\t\t/* Tell the draw function we have been filtering */\n\t\toSettings.bFiltered = 
true;\n\t\t_fnCallbackFire( oSettings, null, 'search', [oSettings] );\n\t}\n\t\n\t\n\t/**\n\t * Apply custom filtering functions\n\t *  @param {object} oSettings dataTables settings object\n\t *  @memberof DataTable#oApi\n\t */\n\tfunction _fnFilterCustom( settings )\n\t{\n\t\tvar filters = DataTable.ext.search;\n\t\tvar displayRows = settings.aiDisplay;\n\t\tvar row, rowIdx;\n\t\n\t\tfor ( var i=0, ien=filters.length ; i<ien ; i++ ) {\n\t\t\tvar rows = [];\n\t\n\t\t\t// Loop over each row and see if it should be included\n\t\t\tfor ( var j=0, jen=displayRows.length ; j<jen ; j++ ) {\n\t\t\t\trowIdx = displayRows[ j ];\n\t\t\t\trow = settings.aoData[ rowIdx ];\n\t\n\t\t\t\tif ( filters[i]( settings, row._aFilterData, rowIdx, row._aData, j ) ) {\n\t\t\t\t\trows.push( rowIdx );\n\t\t\t\t}\n\t\t\t}\n\t\n\t\t\t// So the array reference doesn't break set the results into the\n\t\t\t// existing array\n\t\t\tdisplayRows.length = 0;\n\t\t\t$.merge( displayRows, rows );\n\t\t}\n\t}\n\t\n\t\n\t/**\n\t * Filter the table on a per-column basis\n\t *  @param {object} oSettings dataTables settings object\n\t *  @param {string} sInput string to filter on\n\t *  @param {int} iColumn column to filter\n\t *  @param {bool} bRegex treat search string as a regular expression or not\n\t *  @param {bool} bSmart use smart filtering or not\n\t *  @param {bool} bCaseInsensitive Do case insenstive matching or not\n\t *  @memberof DataTable#oApi\n\t */\n\tfunction _fnFilterColumn ( settings, searchStr, colIdx, regex, smart, caseInsensitive )\n\t{\n\t\tif ( searchStr === '' ) {\n\t\t\treturn;\n\t\t}\n\t\n\t\tvar data;\n\t\tvar display = settings.aiDisplay;\n\t\tvar rpSearch = _fnFilterCreateSearch( searchStr, regex, smart, caseInsensitive );\n\t\n\t\tfor ( var i=display.length-1 ; i>=0 ; i-- ) {\n\t\t\tdata = settings.aoData[ display[i] ]._aFilterData[ colIdx ];\n\t\n\t\t\tif ( ! 
rpSearch.test( data ) ) {\n\t\t\t\tdisplay.splice( i, 1 );\n\t\t\t}\n\t\t}\n\t}\n\t\n\t\n\t/**\n\t * Filter the data table based on user input and draw the table\n\t *  @param {object} settings dataTables settings object\n\t *  @param {string} input string to filter on\n\t *  @param {int} force optional - force a research of the master array (1) or not (undefined or 0)\n\t *  @param {bool} regex treat as a regular expression or not\n\t *  @param {bool} smart perform smart filtering or not\n\t *  @param {bool} caseInsensitive Do case insenstive matching or not\n\t *  @memberof DataTable#oApi\n\t */\n\tfunction _fnFilter( settings, input, force, regex, smart, caseInsensitive )\n\t{\n\t\tvar rpSearch = _fnFilterCreateSearch( input, regex, smart, caseInsensitive );\n\t\tvar prevSearch = settings.oPreviousSearch.sSearch;\n\t\tvar displayMaster = settings.aiDisplayMaster;\n\t\tvar display, invalidated, i;\n\t\n\t\t// Need to take account of custom filtering functions - always filter\n\t\tif ( DataTable.ext.search.length !== 0 ) {\n\t\t\tforce = true;\n\t\t}\n\t\n\t\t// Check if any of the rows were invalidated\n\t\tinvalidated = _fnFilterData( settings );\n\t\n\t\t// If the input is blank - we just want the full data set\n\t\tif ( input.length <= 0 ) {\n\t\t\tsettings.aiDisplay = displayMaster.slice();\n\t\t}\n\t\telse {\n\t\t\t// New search - start from the master array\n\t\t\tif ( invalidated ||\n\t\t\t\t force ||\n\t\t\t\t prevSearch.length > input.length ||\n\t\t\t\t input.indexOf(prevSearch) !== 0 ||\n\t\t\t\t settings.bSorted // On resort, the display master needs to be\n\t\t\t\t                  // re-filtered since indexes will have changed\n\t\t\t) {\n\t\t\t\tsettings.aiDisplay = displayMaster.slice();\n\t\t\t}\n\t\n\t\t\t// Search the display array\n\t\t\tdisplay = settings.aiDisplay;\n\t\n\t\t\tfor ( i=display.length-1 ; i>=0 ; i-- ) {\n\t\t\t\tif ( ! 
rpSearch.test( settings.aoData[ display[i] ]._sFilterRow ) ) {\n\t\t\t\t\tdisplay.splice( i, 1 );\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\t\n\t\n\t/**\n\t * Build a regular expression object suitable for searching a table\n\t *  @param {string} sSearch string to search for\n\t *  @param {bool} bRegex treat as a regular expression or not\n\t *  @param {bool} bSmart perform smart filtering or not\n\t *  @param {bool} bCaseInsensitive Do case insensitive matching or not\n\t *  @returns {RegExp} constructed object\n\t *  @memberof DataTable#oApi\n\t */\n\tfunction _fnFilterCreateSearch( search, regex, smart, caseInsensitive )\n\t{\n\t\tsearch = regex ?\n\t\t\tsearch :\n\t\t\t_fnEscapeRegex( search );\n\t\t\n\t\tif ( smart ) {\n\t\t\t/* For smart filtering we want to allow the search to work regardless of\n\t\t\t * word order. We also want double quoted text to be preserved, so word\n\t\t\t * order is important - a la google. So this is what we want to\n\t\t\t * generate:\n\t\t\t * \n\t\t\t * ^(?=.*?\\bone\\b)(?=.*?\\btwo three\\b)(?=.*?\\bfour\\b).*$\n\t\t\t */\n\t\t\tvar a = $.map( search.match( /\"[^\"]+\"|[^ ]+/g ) || [''], function ( word ) {\n\t\t\t\tif ( word.charAt(0) === '\"' ) {\n\t\t\t\t\tvar m = word.match( /^\"(.*)\"$/ );\n\t\t\t\t\tword = m ? m[1] : word;\n\t\t\t\t}\n\t\n\t\t\t\treturn word.replace('\"', '');\n\t\t\t} );\n\t\n\t\t\tsearch = '^(?=.*?'+a.join( ')(?=.*?' )+').*$';\n\t\t}\n\t\n\t\treturn new RegExp( search, caseInsensitive ? 
'i' : '' );\n\t}\n\t\n\t\n\t/**\n\t * Escape a string such that it can be used in a regular expression\n\t *  @param {string} sVal string to escape\n\t *  @returns {string} escaped string\n\t *  @memberof DataTable#oApi\n\t */\n\tvar _fnEscapeRegex = DataTable.util.escapeRegex;\n\t\n\tvar __filter_div = $('<div>')[0];\n\tvar __filter_div_textContent = __filter_div.textContent !== undefined;\n\t\n\t// Update the filtering data for each row if needed (by invalidation or first run)\n\tfunction _fnFilterData ( settings )\n\t{\n\t\tvar columns = settings.aoColumns;\n\t\tvar column;\n\t\tvar i, j, ien, jen, filterData, cellData, row;\n\t\tvar fomatters = DataTable.ext.type.search;\n\t\tvar wasInvalidated = false;\n\t\n\t\tfor ( i=0, ien=settings.aoData.length ; i<ien ; i++ ) {\n\t\t\trow = settings.aoData[i];\n\t\n\t\t\tif ( ! row._aFilterData ) {\n\t\t\t\tfilterData = [];\n\t\n\t\t\t\tfor ( j=0, jen=columns.length ; j<jen ; j++ ) {\n\t\t\t\t\tcolumn = columns[j];\n\t\n\t\t\t\t\tif ( column.bSearchable ) {\n\t\t\t\t\t\tcellData = _fnGetCellData( settings, i, j, 'filter' );\n\t\n\t\t\t\t\t\tif ( fomatters[ column.sType ] ) {\n\t\t\t\t\t\t\tcellData = fomatters[ column.sType ]( cellData );\n\t\t\t\t\t\t}\n\t\n\t\t\t\t\t\t// Search in DataTables 1.10 is string based. In 1.11 this\n\t\t\t\t\t\t// should be altered to also allow strict type checking.\n\t\t\t\t\t\tif ( cellData === null ) {\n\t\t\t\t\t\t\tcellData = '';\n\t\t\t\t\t\t}\n\t\n\t\t\t\t\t\tif ( typeof cellData !== 'string' && cellData.toString ) {\n\t\t\t\t\t\t\tcellData = cellData.toString();\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\telse {\n\t\t\t\t\t\tcellData = '';\n\t\t\t\t\t}\n\t\n\t\t\t\t\t// If it looks like there is an HTML entity in the string,\n\t\t\t\t\t// attempt to decode it so sorting works as expected. 
Note that\n\t\t\t\t\t// we could use a single line of jQuery to do this, but the DOM\n\t\t\t\t\t// method used here is much faster http://jsperf.com/html-decode\n\t\t\t\t\tif ( cellData.indexOf && cellData.indexOf('&') !== -1 ) {\n\t\t\t\t\t\t__filter_div.innerHTML = cellData;\n\t\t\t\t\t\tcellData = __filter_div_textContent ?\n\t\t\t\t\t\t\t__filter_div.textContent :\n\t\t\t\t\t\t\t__filter_div.innerText;\n\t\t\t\t\t}\n\t\n\t\t\t\t\tif ( cellData.replace ) {\n\t\t\t\t\t\tcellData = cellData.replace(/[\\r\\n]/g, '');\n\t\t\t\t\t}\n\t\n\t\t\t\t\tfilterData.push( cellData );\n\t\t\t\t}\n\t\n\t\t\t\trow._aFilterData = filterData;\n\t\t\t\trow._sFilterRow = filterData.join('  ');\n\t\t\t\twasInvalidated = true;\n\t\t\t}\n\t\t}\n\t\n\t\treturn wasInvalidated;\n\t}\n\t\n\t\n\t/**\n\t * Convert from the internal Hungarian notation to camelCase for external\n\t * interaction\n\t *  @param {object} obj Object to convert\n\t *  @returns {object} Inverted object\n\t *  @memberof DataTable#oApi\n\t */\n\tfunction _fnSearchToCamel ( obj )\n\t{\n\t\treturn {\n\t\t\tsearch:          obj.sSearch,\n\t\t\tsmart:           obj.bSmart,\n\t\t\tregex:           obj.bRegex,\n\t\t\tcaseInsensitive: obj.bCaseInsensitive\n\t\t};\n\t}\n\t\n\t\n\t\n\t/**\n\t * Convert from camelCase notation to the internal Hungarian. 
We could use the\n\t * Hungarian convert function here, but this is cleaner\n\t *  @param {object} obj Object to convert\n\t *  @returns {object} Inverted object\n\t *  @memberof DataTable#oApi\n\t */\n\tfunction _fnSearchToHung ( obj )\n\t{\n\t\treturn {\n\t\t\tsSearch:          obj.search,\n\t\t\tbSmart:           obj.smart,\n\t\t\tbRegex:           obj.regex,\n\t\t\tbCaseInsensitive: obj.caseInsensitive\n\t\t};\n\t}\n\t\n\t/**\n\t * Generate the node required for the info display\n\t *  @param {object} oSettings dataTables settings object\n\t *  @returns {node} Information element\n\t *  @memberof DataTable#oApi\n\t */\n\tfunction _fnFeatureHtmlInfo ( settings )\n\t{\n\t\tvar\n\t\t\ttid = settings.sTableId,\n\t\t\tnodes = settings.aanFeatures.i,\n\t\t\tn = $('<div/>', {\n\t\t\t\t'class': settings.oClasses.sInfo,\n\t\t\t\t'id': ! nodes ? tid+'_info' : null\n\t\t\t} );\n\t\n\t\tif ( ! nodes ) {\n\t\t\t// Update display on each draw\n\t\t\tsettings.aoDrawCallback.push( {\n\t\t\t\t\"fn\": _fnUpdateInfo,\n\t\t\t\t\"sName\": \"information\"\n\t\t\t} );\n\t\n\t\t\tn\n\t\t\t\t.attr( 'role', 'status' )\n\t\t\t\t.attr( 'aria-live', 'polite' );\n\t\n\t\t\t// Table is described by our info div\n\t\t\t$(settings.nTable).attr( 'aria-describedby', tid+'_info' );\n\t\t}\n\t\n\t\treturn n[0];\n\t}\n\t\n\t\n\t/**\n\t * Update the information elements in the display\n\t *  @param {object} settings dataTables settings object\n\t *  @memberof DataTable#oApi\n\t */\n\tfunction _fnUpdateInfo ( settings )\n\t{\n\t\t/* Show information about the table */\n\t\tvar nodes = settings.aanFeatures.i;\n\t\tif ( nodes.length === 0 ) {\n\t\t\treturn;\n\t\t}\n\t\n\t\tvar\n\t\t\tlang  = settings.oLanguage,\n\t\t\tstart = settings._iDisplayStart+1,\n\t\t\tend   = settings.fnDisplayEnd(),\n\t\t\tmax   = settings.fnRecordsTotal(),\n\t\t\ttotal = settings.fnRecordsDisplay(),\n\t\t\tout   = total ?\n\t\t\t\tlang.sInfo :\n\t\t\t\tlang.sInfoEmpty;\n\t\n\t\tif ( total !== max ) {\n\t\t\t/* Record set 
after filtering */\n\t\t\tout += ' ' + lang.sInfoFiltered;\n\t\t}\n\t\n\t\t// Convert the macros\n\t\tout += lang.sInfoPostFix;\n\t\tout = _fnInfoMacros( settings, out );\n\t\n\t\tvar callback = lang.fnInfoCallback;\n\t\tif ( callback !== null ) {\n\t\t\tout = callback.call( settings.oInstance,\n\t\t\t\tsettings, start, end, max, total, out\n\t\t\t);\n\t\t}\n\t\n\t\t$(nodes).html( out );\n\t}\n\t\n\t\n\tfunction _fnInfoMacros ( settings, str )\n\t{\n\t\t// When infinite scrolling, we are always starting at 1. _iDisplayStart is used only\n\t\t// internally\n\t\tvar\n\t\t\tformatter  = settings.fnFormatNumber,\n\t\t\tstart      = settings._iDisplayStart+1,\n\t\t\tlen        = settings._iDisplayLength,\n\t\t\tvis        = settings.fnRecordsDisplay(),\n\t\t\tall        = len === -1;\n\t\n\t\treturn str.\n\t\t\treplace(/_START_/g, formatter.call( settings, start ) ).\n\t\t\treplace(/_END_/g,   formatter.call( settings, settings.fnDisplayEnd() ) ).\n\t\t\treplace(/_MAX_/g,   formatter.call( settings, settings.fnRecordsTotal() ) ).\n\t\t\treplace(/_TOTAL_/g, formatter.call( settings, vis ) ).\n\t\t\treplace(/_PAGE_/g,  formatter.call( settings, all ? 1 : Math.ceil( start / len ) ) ).\n\t\t\treplace(/_PAGES_/g, formatter.call( settings, all ? 1 : Math.ceil( vis / len ) ) );\n\t}\n\t\n\t\n\t\n\t/**\n\t * Draw the table for the first time, adding all required features\n\t *  @param {object} settings dataTables settings object\n\t *  @memberof DataTable#oApi\n\t */\n\tfunction _fnInitialise ( settings )\n\t{\n\t\tvar i, iLen, iAjaxStart=settings.iInitDisplayStart;\n\t\tvar columns = settings.aoColumns, column;\n\t\tvar features = settings.oFeatures;\n\t\tvar deferLoading = settings.bDeferLoading; // value modified by the draw\n\t\n\t\t/* Ensure that the table data is fully initialised */\n\t\tif ( ! 
settings.bInitialised ) {\n\t\t\tsetTimeout( function(){ _fnInitialise( settings ); }, 200 );\n\t\t\treturn;\n\t\t}\n\t\n\t\t/* Show the display HTML options */\n\t\t_fnAddOptionsHtml( settings );\n\t\n\t\t/* Build and draw the header / footer for the table */\n\t\t_fnBuildHead( settings );\n\t\t_fnDrawHead( settings, settings.aoHeader );\n\t\t_fnDrawHead( settings, settings.aoFooter );\n\t\n\t\t/* Okay to show that something is going on now */\n\t\t_fnProcessingDisplay( settings, true );\n\t\n\t\t/* Calculate sizes for columns */\n\t\tif ( features.bAutoWidth ) {\n\t\t\t_fnCalculateColumnWidths( settings );\n\t\t}\n\t\n\t\tfor ( i=0, iLen=columns.length ; i<iLen ; i++ ) {\n\t\t\tcolumn = columns[i];\n\t\n\t\t\tif ( column.sWidth ) {\n\t\t\t\tcolumn.nTh.style.width = _fnStringToCss( column.sWidth );\n\t\t\t}\n\t\t}\n\t\n\t\t_fnCallbackFire( settings, null, 'preInit', [settings] );\n\t\n\t\t// If there is default sorting required - let's do it. The sort function\n\t\t// will do the drawing for us. Otherwise we draw the table regardless of the\n\t\t// Ajax source - this allows the table to look initialised for Ajax sourcing\n\t\t// data (show 'loading' message possibly)\n\t\t_fnReDraw( settings );\n\t\n\t\t// Server-side processing init complete is done by _fnAjaxUpdateDraw\n\t\tvar dataSrc = _fnDataSource( settings );\n\t\tif ( dataSrc != 'ssp' || deferLoading ) {\n\t\t\t// if there is an ajax source load the data\n\t\t\tif ( dataSrc == 'ajax' ) {\n\t\t\t\t_fnBuildAjax( settings, [], function(json) {\n\t\t\t\t\tvar aData = _fnAjaxDataSrc( settings, json );\n\t\n\t\t\t\t\t// Got the data - add it to the table\n\t\t\t\t\tfor ( i=0 ; i<aData.length ; i++ ) {\n\t\t\t\t\t\t_fnAddData( settings, aData[i] );\n\t\t\t\t\t}\n\t\n\t\t\t\t\t// Reset the init display for cookie saving. We've already done\n\t\t\t\t\t// a filter, and therefore cleared it before. 
So we need to make\n\t\t\t\t\t// it appear 'fresh'\n\t\t\t\t\tsettings.iInitDisplayStart = iAjaxStart;\n\t\n\t\t\t\t\t_fnReDraw( settings );\n\t\n\t\t\t\t\t_fnProcessingDisplay( settings, false );\n\t\t\t\t\t_fnInitComplete( settings, json );\n\t\t\t\t}, settings );\n\t\t\t}\n\t\t\telse {\n\t\t\t\t_fnProcessingDisplay( settings, false );\n\t\t\t\t_fnInitComplete( settings );\n\t\t\t}\n\t\t}\n\t}\n\t\n\t\n\t/**\n\t * Draw the table for the first time, adding all required features\n\t *  @param {object} oSettings dataTables settings object\n\t *  @param {object} [json] JSON from the server that completed the table, if using Ajax source\n\t *    with client-side processing (optional)\n\t *  @memberof DataTable#oApi\n\t */\n\tfunction _fnInitComplete ( settings, json )\n\t{\n\t\tsettings._bInitComplete = true;\n\t\n\t\t// When data was added after the initialisation (data or Ajax) we need to\n\t\t// calculate the column sizing\n\t\tif ( json || settings.oInit.aaData ) {\n\t\t\t_fnAdjustColumnSizing( settings );\n\t\t}\n\t\n\t\t_fnCallbackFire( settings, null, 'plugin-init', [settings, json] );\n\t\t_fnCallbackFire( settings, 'aoInitComplete', 'init', [settings, json] );\n\t}\n\t\n\t\n\tfunction _fnLengthChange ( settings, val )\n\t{\n\t\tvar len = parseInt( val, 10 );\n\t\tsettings._iDisplayLength = len;\n\t\n\t\t_fnLengthOverflow( settings );\n\t\n\t\t// Fire length change event\n\t\t_fnCallbackFire( settings, null, 'length', [settings, len] );\n\t}\n\t\n\t\n\t/**\n\t * Generate the node required for user display length changing\n\t *  @param {object} settings dataTables settings object\n\t *  @returns {node} Display length feature node\n\t *  @memberof DataTable#oApi\n\t */\n\tfunction _fnFeatureHtmlLength ( settings )\n\t{\n\t\tvar\n\t\t\tclasses  = settings.oClasses,\n\t\t\ttableId  = settings.sTableId,\n\t\t\tmenu     = settings.aLengthMenu,\n\t\t\td2       = $.isArray( menu[0] ),\n\t\t\tlengths  = d2 ? menu[0] : menu,\n\t\t\tlanguage = d2 ? 
menu[1] : menu;\n\t\n\t\tvar select = $('<select/>', {\n\t\t\t'name':          tableId+'_length',\n\t\t\t'aria-controls': tableId,\n\t\t\t'class':         classes.sLengthSelect\n\t\t} );\n\t\n\t\tfor ( var i=0, ien=lengths.length ; i<ien ; i++ ) {\n\t\t\tselect[0][ i ] = new Option( language[i], lengths[i] );\n\t\t}\n\t\n\t\tvar div = $('<div><label/></div>').addClass( classes.sLength );\n\t\tif ( ! settings.aanFeatures.l ) {\n\t\t\tdiv[0].id = tableId+'_length';\n\t\t}\n\t\n\t\tdiv.children().append(\n\t\t\tsettings.oLanguage.sLengthMenu.replace( '_MENU_', select[0].outerHTML )\n\t\t);\n\t\n\t\t// Can't use `select` variable as user might provide their own and the\n\t\t// reference is broken by the use of outerHTML\n\t\t$('select', div)\n\t\t\t.val( settings._iDisplayLength )\n\t\t\t.bind( 'change.DT', function(e) {\n\t\t\t\t_fnLengthChange( settings, $(this).val() );\n\t\t\t\t_fnDraw( settings );\n\t\t\t} );\n\t\n\t\t// Update node value whenever anything changes the table's length\n\t\t$(settings.nTable).bind( 'length.dt.DT', function (e, s, len) {\n\t\t\tif ( settings === s ) {\n\t\t\t\t$('select', div).val( len );\n\t\t\t}\n\t\t} );\n\t\n\t\treturn div[0];\n\t}\n\t\n\t\n\t\n\t/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\n\t * Note that most of the paging logic is done in\n\t * DataTable.ext.pager\n\t */\n\t\n\t/**\n\t * Generate the node required for default pagination\n\t *  @param {object} oSettings dataTables settings object\n\t *  @returns {node} Pagination feature node\n\t *  @memberof DataTable#oApi\n\t */\n\tfunction _fnFeatureHtmlPaginate ( settings )\n\t{\n\t\tvar\n\t\t\ttype   = settings.sPaginationType,\n\t\t\tplugin = DataTable.ext.pager[ type ],\n\t\t\tmodern = typeof plugin === 'function',\n\t\t\tredraw = function( settings ) {\n\t\t\t\t_fnDraw( settings );\n\t\t\t},\n\t\t\tnode = $('<div/>').addClass( settings.oClasses.sPaging + type )[0],\n\t\t\tfeatures = settings.aanFeatures;\n\t\n\t\tif ( ! 
modern ) {\n\t\t\tplugin.fnInit( settings, node, redraw );\n\t\t}\n\t\n\t\t/* Add a draw callback for the pagination on first instance, to update the paging display */\n\t\tif ( ! features.p )\n\t\t{\n\t\t\tnode.id = settings.sTableId+'_paginate';\n\t\n\t\t\tsettings.aoDrawCallback.push( {\n\t\t\t\t\"fn\": function( settings ) {\n\t\t\t\t\tif ( modern ) {\n\t\t\t\t\t\tvar\n\t\t\t\t\t\t\tstart      = settings._iDisplayStart,\n\t\t\t\t\t\t\tlen        = settings._iDisplayLength,\n\t\t\t\t\t\t\tvisRecords = settings.fnRecordsDisplay(),\n\t\t\t\t\t\t\tall        = len === -1,\n\t\t\t\t\t\t\tpage = all ? 0 : Math.ceil( start / len ),\n\t\t\t\t\t\t\tpages = all ? 1 : Math.ceil( visRecords / len ),\n\t\t\t\t\t\t\tbuttons = plugin(page, pages),\n\t\t\t\t\t\t\ti, ien;\n\t\n\t\t\t\t\t\tfor ( i=0, ien=features.p.length ; i<ien ; i++ ) {\n\t\t\t\t\t\t\t_fnRenderer( settings, 'pageButton' )(\n\t\t\t\t\t\t\t\tsettings, features.p[i], i, buttons, page, pages\n\t\t\t\t\t\t\t);\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\telse {\n\t\t\t\t\t\tplugin.fnUpdate( settings, redraw );\n\t\t\t\t\t}\n\t\t\t\t},\n\t\t\t\t\"sName\": \"pagination\"\n\t\t\t} );\n\t\t}\n\t\n\t\treturn node;\n\t}\n\t\n\t\n\t/**\n\t * Alter the display settings to change the page\n\t *  @param {object} settings DataTables settings object\n\t *  @param {string|int} action Paging action to take: \"first\", \"previous\",\n\t *    \"next\" or \"last\" or page number to jump to (integer)\n\t *  @param [bool] redraw Automatically draw the update or not\n\t *  @returns {bool} true page has changed, false - no change\n\t *  @memberof DataTable#oApi\n\t */\n\tfunction _fnPageChange ( settings, action, redraw )\n\t{\n\t\tvar\n\t\t\tstart     = settings._iDisplayStart,\n\t\t\tlen       = settings._iDisplayLength,\n\t\t\trecords   = settings.fnRecordsDisplay();\n\t\n\t\tif ( records === 0 || len === -1 )\n\t\t{\n\t\t\tstart = 0;\n\t\t}\n\t\telse if ( typeof action === \"number\" )\n\t\t{\n\t\t\tstart = action * len;\n\t\n\t\t\tif ( 
start > records )\n\t\t\t{\n\t\t\t\tstart = 0;\n\t\t\t}\n\t\t}\n\t\telse if ( action == \"first\" )\n\t\t{\n\t\t\tstart = 0;\n\t\t}\n\t\telse if ( action == \"previous\" )\n\t\t{\n\t\t\tstart = len >= 0 ?\n\t\t\t\tstart - len :\n\t\t\t\t0;\n\t\n\t\t\tif ( start < 0 )\n\t\t\t{\n\t\t\t  start = 0;\n\t\t\t}\n\t\t}\n\t\telse if ( action == \"next\" )\n\t\t{\n\t\t\tif ( start + len < records )\n\t\t\t{\n\t\t\t\tstart += len;\n\t\t\t}\n\t\t}\n\t\telse if ( action == \"last\" )\n\t\t{\n\t\t\tstart = Math.floor( (records-1) / len) * len;\n\t\t}\n\t\telse\n\t\t{\n\t\t\t_fnLog( settings, 0, \"Unknown paging action: \"+action, 5 );\n\t\t}\n\t\n\t\tvar changed = settings._iDisplayStart !== start;\n\t\tsettings._iDisplayStart = start;\n\t\n\t\tif ( changed ) {\n\t\t\t_fnCallbackFire( settings, null, 'page', [settings] );\n\t\n\t\t\tif ( redraw ) {\n\t\t\t\t_fnDraw( settings );\n\t\t\t}\n\t\t}\n\t\n\t\treturn changed;\n\t}\n\t\n\t\n\t\n\t/**\n\t * Generate the node required for the processing node\n\t *  @param {object} settings dataTables settings object\n\t *  @returns {node} Processing element\n\t *  @memberof DataTable#oApi\n\t */\n\tfunction _fnFeatureHtmlProcessing ( settings )\n\t{\n\t\treturn $('<div/>', {\n\t\t\t\t'id': ! settings.aanFeatures.r ? settings.sTableId+'_processing' : null,\n\t\t\t\t'class': settings.oClasses.sProcessing\n\t\t\t} )\n\t\t\t.html( settings.oLanguage.sProcessing )\n\t\t\t.insertBefore( settings.nTable )[0];\n\t}\n\t\n\t\n\t/**\n\t * Display or hide the processing indicator\n\t *  @param {object} settings dataTables settings object\n\t *  @param {bool} show Show the processing indicator (true) or not (false)\n\t *  @memberof DataTable#oApi\n\t */\n\tfunction _fnProcessingDisplay ( settings, show )\n\t{\n\t\tif ( settings.oFeatures.bProcessing ) {\n\t\t\t$(settings.aanFeatures.r).css( 'display', show ? 
'block' : 'none' );\n\t\t}\n\t\n\t\t_fnCallbackFire( settings, null, 'processing', [settings, show] );\n\t}\n\t\n\t/**\n\t * Add any control elements for the table - specifically scrolling\n\t *  @param {object} settings dataTables settings object\n\t *  @returns {node} Node to add to the DOM\n\t *  @memberof DataTable#oApi\n\t */\n\tfunction _fnFeatureHtmlTable ( settings )\n\t{\n\t\tvar table = $(settings.nTable);\n\t\n\t\t// Add the ARIA grid role to the table\n\t\ttable.attr( 'role', 'grid' );\n\t\n\t\t// Scrolling from here on in\n\t\tvar scroll = settings.oScroll;\n\t\n\t\tif ( scroll.sX === '' && scroll.sY === '' ) {\n\t\t\treturn settings.nTable;\n\t\t}\n\t\n\t\tvar scrollX = scroll.sX;\n\t\tvar scrollY = scroll.sY;\n\t\tvar classes = settings.oClasses;\n\t\tvar caption = table.children('caption');\n\t\tvar captionSide = caption.length ? caption[0]._captionSide : null;\n\t\tvar headerClone = $( table[0].cloneNode(false) );\n\t\tvar footerClone = $( table[0].cloneNode(false) );\n\t\tvar footer = table.children('tfoot');\n\t\tvar _div = '<div/>';\n\t\tvar size = function ( s ) {\n\t\t\treturn !s ? null : _fnStringToCss( s );\n\t\t};\n\t\n\t\tif ( ! 
footer.length ) {\n\t\t\tfooter = null;\n\t\t}\n\t\n\t\t/*\n\t\t * The HTML structure that we want to generate in this function is:\n\t\t *  div - scroller\n\t\t *    div - scroll head\n\t\t *      div - scroll head inner\n\t\t *        table - scroll head table\n\t\t *          thead - thead\n\t\t *    div - scroll body\n\t\t *      table - table (master table)\n\t\t *        thead - thead clone for sizing\n\t\t *        tbody - tbody\n\t\t *    div - scroll foot\n\t\t *      div - scroll foot inner\n\t\t *        table - scroll foot table\n\t\t *          tfoot - tfoot\n\t\t */\n\t\tvar scroller = $( _div, { 'class': classes.sScrollWrapper } )\n\t\t\t.append(\n\t\t\t\t$(_div, { 'class': classes.sScrollHead } )\n\t\t\t\t\t.css( {\n\t\t\t\t\t\toverflow: 'hidden',\n\t\t\t\t\t\tposition: 'relative',\n\t\t\t\t\t\tborder: 0,\n\t\t\t\t\t\twidth: scrollX ? size(scrollX) : '100%'\n\t\t\t\t\t} )\n\t\t\t\t\t.append(\n\t\t\t\t\t\t$(_div, { 'class': classes.sScrollHeadInner } )\n\t\t\t\t\t\t\t.css( {\n\t\t\t\t\t\t\t\t'box-sizing': 'content-box',\n\t\t\t\t\t\t\t\twidth: scroll.sXInner || '100%'\n\t\t\t\t\t\t\t} )\n\t\t\t\t\t\t\t.append(\n\t\t\t\t\t\t\t\theaderClone\n\t\t\t\t\t\t\t\t\t.removeAttr('id')\n\t\t\t\t\t\t\t\t\t.css( 'margin-left', 0 )\n\t\t\t\t\t\t\t\t\t.append( captionSide === 'top' ? caption : null )\n\t\t\t\t\t\t\t\t\t.append(\n\t\t\t\t\t\t\t\t\t\ttable.children('thead')\n\t\t\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\t)\n\t\t\t\t\t)\n\t\t\t)\n\t\t\t.append(\n\t\t\t\t$(_div, { 'class': classes.sScrollBody } )\n\t\t\t\t\t.css( {\n\t\t\t\t\t\tposition: 'relative',\n\t\t\t\t\t\toverflow: 'auto',\n\t\t\t\t\t\twidth: size( scrollX )\n\t\t\t\t\t} )\n\t\t\t\t\t.append( table )\n\t\t\t);\n\t\n\t\tif ( footer ) {\n\t\t\tscroller.append(\n\t\t\t\t$(_div, { 'class': classes.sScrollFoot } )\n\t\t\t\t\t.css( {\n\t\t\t\t\t\toverflow: 'hidden',\n\t\t\t\t\t\tborder: 0,\n\t\t\t\t\t\twidth: scrollX ? 
size(scrollX) : '100%'\n\t\t\t\t\t} )\n\t\t\t\t\t.append(\n\t\t\t\t\t\t$(_div, { 'class': classes.sScrollFootInner } )\n\t\t\t\t\t\t\t.append(\n\t\t\t\t\t\t\t\tfooterClone\n\t\t\t\t\t\t\t\t\t.removeAttr('id')\n\t\t\t\t\t\t\t\t\t.css( 'margin-left', 0 )\n\t\t\t\t\t\t\t\t\t.append( captionSide === 'bottom' ? caption : null )\n\t\t\t\t\t\t\t\t\t.append(\n\t\t\t\t\t\t\t\t\t\ttable.children('tfoot')\n\t\t\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\t)\n\t\t\t\t\t)\n\t\t\t);\n\t\t}\n\t\n\t\tvar children = scroller.children();\n\t\tvar scrollHead = children[0];\n\t\tvar scrollBody = children[1];\n\t\tvar scrollFoot = footer ? children[2] : null;\n\t\n\t\t// When the body is scrolled, then we also want to scroll the headers\n\t\tif ( scrollX ) {\n\t\t\t$(scrollBody).on( 'scroll.DT', function (e) {\n\t\t\t\tvar scrollLeft = this.scrollLeft;\n\t\n\t\t\t\tscrollHead.scrollLeft = scrollLeft;\n\t\n\t\t\t\tif ( footer ) {\n\t\t\t\t\tscrollFoot.scrollLeft = scrollLeft;\n\t\t\t\t}\n\t\t\t} );\n\t\t}\n\t\n\t\t$(scrollBody).css(\n\t\t\tscrollY && scroll.bCollapse ? 'max-height' : 'height', \n\t\t\tscrollY\n\t\t);\n\t\n\t\tsettings.nScrollHead = scrollHead;\n\t\tsettings.nScrollBody = scrollBody;\n\t\tsettings.nScrollFoot = scrollFoot;\n\t\n\t\t// On redraw - align columns\n\t\tsettings.aoDrawCallback.push( {\n\t\t\t\"fn\": _fnScrollDraw,\n\t\t\t\"sName\": \"scrolling\"\n\t\t} );\n\t\n\t\treturn scroller[0];\n\t}\n\t\n\t\n\t\n\t/**\n\t * Update the header, footer and body tables for resizing - i.e. column\n\t * alignment.\n\t *\n\t * Welcome to the most horrible function DataTables. The process that this\n\t * function follows is basically:\n\t *   1. Re-create the table inside the scrolling div\n\t *   2. Take live measurements from the DOM\n\t *   3. Apply the measurements to align the columns\n\t *   4. 
Clean up\n\t *\n\t *  @param {object} settings dataTables settings object\n\t *  @memberof DataTable#oApi\n\t */\n\tfunction _fnScrollDraw ( settings )\n\t{\n\t\t// Given that this is such a monster function, a lot of variables are use\n\t\t// to try and keep the minimised size as small as possible\n\t\tvar\n\t\t\tscroll         = settings.oScroll,\n\t\t\tscrollX        = scroll.sX,\n\t\t\tscrollXInner   = scroll.sXInner,\n\t\t\tscrollY        = scroll.sY,\n\t\t\tbarWidth       = scroll.iBarWidth,\n\t\t\tdivHeader      = $(settings.nScrollHead),\n\t\t\tdivHeaderStyle = divHeader[0].style,\n\t\t\tdivHeaderInner = divHeader.children('div'),\n\t\t\tdivHeaderInnerStyle = divHeaderInner[0].style,\n\t\t\tdivHeaderTable = divHeaderInner.children('table'),\n\t\t\tdivBodyEl      = settings.nScrollBody,\n\t\t\tdivBody        = $(divBodyEl),\n\t\t\tdivBodyStyle   = divBodyEl.style,\n\t\t\tdivFooter      = $(settings.nScrollFoot),\n\t\t\tdivFooterInner = divFooter.children('div'),\n\t\t\tdivFooterTable = divFooterInner.children('table'),\n\t\t\theader         = $(settings.nTHead),\n\t\t\ttable          = $(settings.nTable),\n\t\t\ttableEl        = table[0],\n\t\t\ttableStyle     = tableEl.style,\n\t\t\tfooter         = settings.nTFoot ? 
$(settings.nTFoot) : null,\n\t\t\tbrowser        = settings.oBrowser,\n\t\t\tie67           = browser.bScrollOversize,\n\t\t\tdtHeaderCells  = _pluck( settings.aoColumns, 'nTh' ),\n\t\t\theaderTrgEls, footerTrgEls,\n\t\t\theaderSrcEls, footerSrcEls,\n\t\t\theaderCopy, footerCopy,\n\t\t\theaderWidths=[], footerWidths=[],\n\t\t\theaderContent=[], footerContent=[],\n\t\t\tidx, correction, sanityWidth,\n\t\t\tzeroOut = function(nSizer) {\n\t\t\t\tvar style = nSizer.style;\n\t\t\t\tstyle.paddingTop = \"0\";\n\t\t\t\tstyle.paddingBottom = \"0\";\n\t\t\t\tstyle.borderTopWidth = \"0\";\n\t\t\t\tstyle.borderBottomWidth = \"0\";\n\t\t\t\tstyle.height = 0;\n\t\t\t};\n\t\n\t\t// If the scrollbar visibility has changed from the last draw, we need to\n\t\t// adjust the column sizes as the table width will have changed to account\n\t\t// for the scrollbar\n\t\tvar scrollBarVis = divBodyEl.scrollHeight > divBodyEl.clientHeight;\n\t\t\n\t\tif ( settings.scrollBarVis !== scrollBarVis && settings.scrollBarVis !== undefined ) {\n\t\t\tsettings.scrollBarVis = scrollBarVis;\n\t\t\t_fnAdjustColumnSizing( settings );\n\t\t\treturn; // adjust column sizing will call this function again\n\t\t}\n\t\telse {\n\t\t\tsettings.scrollBarVis = scrollBarVis;\n\t\t}\n\t\n\t\t/*\n\t\t * 1. 
Re-create the table inside the scrolling div\n\t\t */\n\t\n\t\t// Remove the old minimised thead and tfoot elements in the inner table\n\t\ttable.children('thead, tfoot').remove();\n\t\n\t\tif ( footer ) {\n\t\t\tfooterCopy = footer.clone().prependTo( table );\n\t\t\tfooterTrgEls = footer.find('tr'); // the original tfoot is in its own table and must be sized\n\t\t\tfooterSrcEls = footerCopy.find('tr');\n\t\t}\n\t\n\t\t// Clone the current header and footer elements and then place it into the inner table\n\t\theaderCopy = header.clone().prependTo( table );\n\t\theaderTrgEls = header.find('tr'); // original header is in its own table\n\t\theaderSrcEls = headerCopy.find('tr');\n\t\theaderCopy.find('th, td').removeAttr('tabindex');\n\t\n\t\n\t\t/*\n\t\t * 2. Take live measurements from the DOM - do not alter the DOM itself!\n\t\t */\n\t\n\t\t// Remove old sizing and apply the calculated column widths\n\t\t// Get the unique column headers in the newly created (cloned) header. We want to apply the\n\t\t// calculated sizes to this header\n\t\tif ( ! scrollX )\n\t\t{\n\t\t\tdivBodyStyle.width = '100%';\n\t\t\tdivHeader[0].style.width = '100%';\n\t\t}\n\t\n\t\t$.each( _fnGetUniqueThs( settings, headerCopy ), function ( i, el ) {\n\t\t\tidx = _fnVisibleToColumnIndex( settings, i );\n\t\t\tel.style.width = settings.aoColumns[idx].sWidth;\n\t\t} );\n\t\n\t\tif ( footer ) {\n\t\t\t_fnApplyToChildren( function(n) {\n\t\t\t\tn.style.width = \"\";\n\t\t\t}, footerSrcEls );\n\t\t}\n\t\n\t\t// Size the table as a whole\n\t\tsanityWidth = table.outerWidth();\n\t\tif ( scrollX === \"\" ) {\n\t\t\t// No x scrolling\n\t\t\ttableStyle.width = \"100%\";\n\t\n\t\t\t// IE7 will make the width of the table when 100% include the scrollbar\n\t\t\t// - which is shouldn't. 
When there is a scrollbar we need to take this\n\t\t\t// into account.\n\t\t\tif ( ie67 && (table.find('tbody').height() > divBodyEl.offsetHeight ||\n\t\t\t\tdivBody.css('overflow-y') == \"scroll\")\n\t\t\t) {\n\t\t\t\ttableStyle.width = _fnStringToCss( table.outerWidth() - barWidth);\n\t\t\t}\n\t\n\t\t\t// Recalculate the sanity width\n\t\t\tsanityWidth = table.outerWidth();\n\t\t}\n\t\telse if ( scrollXInner !== \"\" ) {\n\t\t\t// legacy x scroll inner has been given - use it\n\t\t\ttableStyle.width = _fnStringToCss(scrollXInner);\n\t\n\t\t\t// Recalculate the sanity width\n\t\t\tsanityWidth = table.outerWidth();\n\t\t}\n\t\n\t\t// Hidden header should have zero height, so remove padding and borders. Then\n\t\t// set the width based on the real headers\n\t\n\t\t// Apply all styles in one pass\n\t\t_fnApplyToChildren( zeroOut, headerSrcEls );\n\t\n\t\t// Read all widths in next pass\n\t\t_fnApplyToChildren( function(nSizer) {\n\t\t\theaderContent.push( nSizer.innerHTML );\n\t\t\theaderWidths.push( _fnStringToCss( $(nSizer).css('width') ) );\n\t\t}, headerSrcEls );\n\t\n\t\t// Apply all widths in final pass\n\t\t_fnApplyToChildren( function(nToSize, i) {\n\t\t\t// Only apply widths to the DataTables detected header cells - this\n\t\t\t// prevents complex headers from having contradictory sizes applied\n\t\t\tif ( $.inArray( nToSize, dtHeaderCells ) !== -1 ) {\n\t\t\t\tnToSize.style.width = headerWidths[i];\n\t\t\t}\n\t\t}, headerTrgEls );\n\t\n\t\t$(headerSrcEls).height(0);\n\t\n\t\t/* Same again with the footer if we have one */\n\t\tif ( footer )\n\t\t{\n\t\t\t_fnApplyToChildren( zeroOut, footerSrcEls );\n\t\n\t\t\t_fnApplyToChildren( function(nSizer) {\n\t\t\t\tfooterContent.push( nSizer.innerHTML );\n\t\t\t\tfooterWidths.push( _fnStringToCss( $(nSizer).css('width') ) );\n\t\t\t}, footerSrcEls );\n\t\n\t\t\t_fnApplyToChildren( function(nToSize, i) {\n\t\t\t\tnToSize.style.width = footerWidths[i];\n\t\t\t}, footerTrgEls 
);\n\t\n\t\t\t$(footerSrcEls).height(0);\n\t\t}\n\t\n\t\n\t\t/*\n\t\t * 3. Apply the measurements\n\t\t */\n\t\n\t\t// \"Hide\" the header and footer that we used for the sizing. We need to keep\n\t\t// the content of the cell so that the width applied to the header and body\n\t\t// both match, but we want to hide it completely. We want to also fix their\n\t\t// width to what they currently are\n\t\t_fnApplyToChildren( function(nSizer, i) {\n\t\t\tnSizer.innerHTML = '<div class=\"dataTables_sizing\" style=\"height:0;overflow:hidden;\">'+headerContent[i]+'</div>';\n\t\t\tnSizer.style.width = headerWidths[i];\n\t\t}, headerSrcEls );\n\t\n\t\tif ( footer )\n\t\t{\n\t\t\t_fnApplyToChildren( function(nSizer, i) {\n\t\t\t\tnSizer.innerHTML = '<div class=\"dataTables_sizing\" style=\"height:0;overflow:hidden;\">'+footerContent[i]+'</div>';\n\t\t\t\tnSizer.style.width = footerWidths[i];\n\t\t\t}, footerSrcEls );\n\t\t}\n\t\n\t\t// Sanity check that the table is of a sensible width. If not then we are going to get\n\t\t// misalignment - try to prevent this by not allowing the table to shrink below its min width\n\t\tif ( table.outerWidth() < sanityWidth )\n\t\t{\n\t\t\t// The min width depends upon if we have a vertical scrollbar visible or not */\n\t\t\tcorrection = ((divBodyEl.scrollHeight > divBodyEl.offsetHeight ||\n\t\t\t\tdivBody.css('overflow-y') == \"scroll\")) ?\n\t\t\t\t\tsanityWidth+barWidth :\n\t\t\t\t\tsanityWidth;\n\t\n\t\t\t// IE6/7 are a law unto themselves...\n\t\t\tif ( ie67 && (divBodyEl.scrollHeight >\n\t\t\t\tdivBodyEl.offsetHeight || divBody.css('overflow-y') == \"scroll\")\n\t\t\t) {\n\t\t\t\ttableStyle.width = _fnStringToCss( correction-barWidth );\n\t\t\t}\n\t\n\t\t\t// And give the user a warning that we've stopped the table getting too small\n\t\t\tif ( scrollX === \"\" || scrollXInner !== \"\" ) {\n\t\t\t\t_fnLog( settings, 1, 'Possible column misalignment', 6 );\n\t\t\t}\n\t\t}\n\t\telse\n\t\t{\n\t\t\tcorrection = '100%';\n\t\t}\n\t\n\t\t// 
Apply to the container elements\n\t\tdivBodyStyle.width = _fnStringToCss( correction );\n\t\tdivHeaderStyle.width = _fnStringToCss( correction );\n\t\n\t\tif ( footer ) {\n\t\t\tsettings.nScrollFoot.style.width = _fnStringToCss( correction );\n\t\t}\n\t\n\t\n\t\t/*\n\t\t * 4. Clean up\n\t\t */\n\t\tif ( ! scrollY ) {\n\t\t\t/* IE7< puts a vertical scrollbar in place (when it shouldn't be) due to subtracting\n\t\t\t * the scrollbar height from the visible display, rather than adding it on. We need to\n\t\t\t * set the height in order to sort this. Don't want to do it in any other browsers.\n\t\t\t */\n\t\t\tif ( ie67 ) {\n\t\t\t\tdivBodyStyle.height = _fnStringToCss( tableEl.offsetHeight+barWidth );\n\t\t\t}\n\t\t}\n\t\n\t\t/* Finally set the width's of the header and footer tables */\n\t\tvar iOuterWidth = table.outerWidth();\n\t\tdivHeaderTable[0].style.width = _fnStringToCss( iOuterWidth );\n\t\tdivHeaderInnerStyle.width = _fnStringToCss( iOuterWidth );\n\t\n\t\t// Figure out if there are scrollbar present - if so then we need a the header and footer to\n\t\t// provide a bit more space to allow \"overflow\" scrolling (i.e. past the scrollbar)\n\t\tvar bScrolling = table.height() > divBodyEl.clientHeight || divBody.css('overflow-y') == \"scroll\";\n\t\tvar padding = 'padding' + (browser.bScrollbarLeft ? 'Left' : 'Right' );\n\t\tdivHeaderInnerStyle[ padding ] = bScrolling ? barWidth+\"px\" : \"0px\";\n\t\n\t\tif ( footer ) {\n\t\t\tdivFooterTable[0].style.width = _fnStringToCss( iOuterWidth );\n\t\t\tdivFooterInner[0].style.width = _fnStringToCss( iOuterWidth );\n\t\t\tdivFooterInner[0].style[padding] = bScrolling ? 
barWidth+\"px\" : \"0px\";\n\t\t}\n\t\n\t\t// Correct DOM ordering for colgroup - comes before the thead\n\t\ttable.children('colgroup').insertBefore( table.children('thead') );\n\t\n\t\t/* Adjust the position of the header in case we loose the y-scrollbar */\n\t\tdivBody.scroll();\n\t\n\t\t// If sorting or filtering has occurred, jump the scrolling back to the top\n\t\t// only if we aren't holding the position\n\t\tif ( (settings.bSorted || settings.bFiltered) && ! settings._drawHold ) {\n\t\t\tdivBodyEl.scrollTop = 0;\n\t\t}\n\t}\n\t\n\t\n\t\n\t/**\n\t * Apply a given function to the display child nodes of an element array (typically\n\t * TD children of TR rows\n\t *  @param {function} fn Method to apply to the objects\n\t *  @param array {nodes} an1 List of elements to look through for display children\n\t *  @param array {nodes} an2 Another list (identical structure to the first) - optional\n\t *  @memberof DataTable#oApi\n\t */\n\tfunction _fnApplyToChildren( fn, an1, an2 )\n\t{\n\t\tvar index=0, i=0, iLen=an1.length;\n\t\tvar nNode1, nNode2;\n\t\n\t\twhile ( i < iLen ) {\n\t\t\tnNode1 = an1[i].firstChild;\n\t\t\tnNode2 = an2 ? an2[i].firstChild : null;\n\t\n\t\t\twhile ( nNode1 ) {\n\t\t\t\tif ( nNode1.nodeType === 1 ) {\n\t\t\t\t\tif ( an2 ) {\n\t\t\t\t\t\tfn( nNode1, nNode2, index );\n\t\t\t\t\t}\n\t\t\t\t\telse {\n\t\t\t\t\t\tfn( nNode1, index );\n\t\t\t\t\t}\n\t\n\t\t\t\t\tindex++;\n\t\t\t\t}\n\t\n\t\t\t\tnNode1 = nNode1.nextSibling;\n\t\t\t\tnNode2 = an2 ? 
nNode2.nextSibling : null;\n\t\t\t}\n\t\n\t\t\ti++;\n\t\t}\n\t}\n\t\n\t\n\t\n\tvar __re_html_remove = /<.*?>/g;\n\t\n\t\n\t/**\n\t * Calculate the width of columns for the table\n\t *  @param {object} oSettings dataTables settings object\n\t *  @memberof DataTable#oApi\n\t */\n\tfunction _fnCalculateColumnWidths ( oSettings )\n\t{\n\t\tvar\n\t\t\ttable = oSettings.nTable,\n\t\t\tcolumns = oSettings.aoColumns,\n\t\t\tscroll = oSettings.oScroll,\n\t\t\tscrollY = scroll.sY,\n\t\t\tscrollX = scroll.sX,\n\t\t\tscrollXInner = scroll.sXInner,\n\t\t\tcolumnCount = columns.length,\n\t\t\tvisibleColumns = _fnGetColumns( oSettings, 'bVisible' ),\n\t\t\theaderCells = $('th', oSettings.nTHead),\n\t\t\ttableWidthAttr = table.getAttribute('width'), // from DOM element\n\t\t\ttableContainer = table.parentNode,\n\t\t\tuserInputs = false,\n\t\t\ti, column, columnIdx, width, outerWidth,\n\t\t\tbrowser = oSettings.oBrowser,\n\t\t\tie67 = browser.bScrollOversize;\n\t\n\t\tvar styleWidth = table.style.width;\n\t\tif ( styleWidth && styleWidth.indexOf('%') !== -1 ) {\n\t\t\ttableWidthAttr = styleWidth;\n\t\t}\n\t\n\t\t/* Convert any user input sizes into pixel sizes */\n\t\tfor ( i=0 ; i<visibleColumns.length ; i++ ) {\n\t\t\tcolumn = columns[ visibleColumns[i] ];\n\t\n\t\t\tif ( column.sWidth !== null ) {\n\t\t\t\tcolumn.sWidth = _fnConvertToWidth( column.sWidthOrig, tableContainer );\n\t\n\t\t\t\tuserInputs = true;\n\t\t\t}\n\t\t}\n\t\n\t\t/* If the number of columns in the DOM equals the number that we have to\n\t\t * process in DataTables, then we can use the offsets that are created by\n\t\t * the web- browser. No custom sizes can be set in order for this to happen,\n\t\t * nor scrolling used\n\t\t */\n\t\tif ( ie67 || ! userInputs && ! scrollX && ! 
scrollY &&\n\t\t     columnCount == _fnVisbleColumns( oSettings ) &&\n\t\t     columnCount == headerCells.length\n\t\t) {\n\t\t\tfor ( i=0 ; i<columnCount ; i++ ) {\n\t\t\t\tvar colIdx = _fnVisibleToColumnIndex( oSettings, i );\n\t\n\t\t\t\tif ( colIdx !== null ) {\n\t\t\t\t\tcolumns[ colIdx ].sWidth = _fnStringToCss( headerCells.eq(i).width() );\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\telse\n\t\t{\n\t\t\t// Otherwise construct a single row, worst case, table with the widest\n\t\t\t// node in the data, assign any user defined widths, then insert it into\n\t\t\t// the DOM and allow the browser to do all the hard work of calculating\n\t\t\t// table widths\n\t\t\tvar tmpTable = $(table).clone() // don't use cloneNode - IE8 will remove events on the main table\n\t\t\t\t.css( 'visibility', 'hidden' )\n\t\t\t\t.removeAttr( 'id' );\n\t\n\t\t\t// Clean up the table body\n\t\t\ttmpTable.find('tbody tr').remove();\n\t\t\tvar tr = $('<tr/>').appendTo( tmpTable.find('tbody') );\n\t\n\t\t\t// Clone the table header and footer - we can't use the header / footer\n\t\t\t// from the cloned table, since if scrolling is active, the table's\n\t\t\t// real header and footer are contained in different table tags\n\t\t\ttmpTable.find('thead, tfoot').remove();\n\t\t\ttmpTable\n\t\t\t\t.append( $(oSettings.nTHead).clone() )\n\t\t\t\t.append( $(oSettings.nTFoot).clone() );\n\t\n\t\t\t// Remove any assigned widths from the footer (from scrolling)\n\t\t\ttmpTable.find('tfoot th, tfoot td').css('width', '');\n\t\n\t\t\t// Apply custom sizing to the cloned header\n\t\t\theaderCells = _fnGetUniqueThs( oSettings, tmpTable.find('thead')[0] );\n\t\n\t\t\tfor ( i=0 ; i<visibleColumns.length ; i++ ) {\n\t\t\t\tcolumn = columns[ visibleColumns[i] ];\n\t\n\t\t\t\theaderCells[i].style.width = column.sWidthOrig !== null && column.sWidthOrig !== '' ?\n\t\t\t\t\t_fnStringToCss( column.sWidthOrig ) :\n\t\t\t\t\t'';\n\t\n\t\t\t\t// For scrollX we need to force the column width otherwise the\n\t\t\t\t// browser will 
collapse it. If this width is smaller than the\n\t\t\t\t// width the column requires, then it will have no effect\n\t\t\t\tif ( column.sWidthOrig && scrollX ) {\n\t\t\t\t\t$( headerCells[i] ).append( $('<div/>').css( {\n\t\t\t\t\t\twidth: column.sWidthOrig,\n\t\t\t\t\t\tmargin: 0,\n\t\t\t\t\t\tpadding: 0,\n\t\t\t\t\t\tborder: 0,\n\t\t\t\t\t\theight: 1\n\t\t\t\t\t} ) );\n\t\t\t\t}\n\t\t\t}\n\t\n\t\t\t// Find the widest cell for each column and put it into the table\n\t\t\tif ( oSettings.aoData.length ) {\n\t\t\t\tfor ( i=0 ; i<visibleColumns.length ; i++ ) {\n\t\t\t\t\tcolumnIdx = visibleColumns[i];\n\t\t\t\t\tcolumn = columns[ columnIdx ];\n\t\n\t\t\t\t\t$( _fnGetWidestNode( oSettings, columnIdx ) )\n\t\t\t\t\t\t.clone( false )\n\t\t\t\t\t\t.append( column.sContentPadding )\n\t\t\t\t\t\t.appendTo( tr );\n\t\t\t\t}\n\t\t\t}\n\t\n\t\t\t// Tidy the temporary table - remove name attributes so there aren't\n\t\t\t// duplicated in the dom (radio elements for example)\n\t\t\t$('[name]', tmpTable).removeAttr('name');\n\t\n\t\t\t// Table has been built, attach to the document so we can work with it.\n\t\t\t// A holding element is used, positioned at the top of the container\n\t\t\t// with minimal height, so it has no effect on if the container scrolls\n\t\t\t// or not. Otherwise it might trigger scrolling when it actually isn't\n\t\t\t// needed\n\t\t\tvar holder = $('<div/>').css( scrollX || scrollY ?\n\t\t\t\t\t{\n\t\t\t\t\t\tposition: 'absolute',\n\t\t\t\t\t\ttop: 0,\n\t\t\t\t\t\tleft: 0,\n\t\t\t\t\t\theight: 1,\n\t\t\t\t\t\tright: 0,\n\t\t\t\t\t\toverflow: 'hidden'\n\t\t\t\t\t} :\n\t\t\t\t\t{}\n\t\t\t\t)\n\t\t\t\t.append( tmpTable )\n\t\t\t\t.appendTo( tableContainer );\n\t\n\t\t\t// When scrolling (X or Y) we want to set the width of the table as \n\t\t\t// appropriate. However, when not scrolling leave the table width as it\n\t\t\t// is. 
This results in slightly different, but I think correct behaviour\n\t\t\tif ( scrollX && scrollXInner ) {\n\t\t\t\ttmpTable.width( scrollXInner );\n\t\t\t}\n\t\t\telse if ( scrollX ) {\n\t\t\t\ttmpTable.css( 'width', 'auto' );\n\t\t\t\ttmpTable.removeAttr('width');\n\t\n\t\t\t\t// If there is no width attribute or style, then allow the table to\n\t\t\t\t// collapse\n\t\t\t\tif ( tmpTable.width() < tableContainer.clientWidth && tableWidthAttr ) {\n\t\t\t\t\ttmpTable.width( tableContainer.clientWidth );\n\t\t\t\t}\n\t\t\t}\n\t\t\telse if ( scrollY ) {\n\t\t\t\ttmpTable.width( tableContainer.clientWidth );\n\t\t\t}\n\t\t\telse if ( tableWidthAttr ) {\n\t\t\t\ttmpTable.width( tableWidthAttr );\n\t\t\t}\n\t\n\t\t\t// Get the width of each column in the constructed table - we need to\n\t\t\t// know the inner width (so it can be assigned to the other table's\n\t\t\t// cells) and the outer width so we can calculate the full width of the\n\t\t\t// table. This is safe since DataTables requires a unique cell for each\n\t\t\t// column, but if ever a header can span multiple columns, this will\n\t\t\t// need to be modified.\n\t\t\tvar total = 0;\n\t\t\tfor ( i=0 ; i<visibleColumns.length ; i++ ) {\n\t\t\t\tvar cell = $(headerCells[i]);\n\t\t\t\tvar border = cell.outerWidth() - cell.width();\n\t\n\t\t\t\t// Use getBounding... 
where possible (not IE8-) because it can give\n\t\t\t\t// sub-pixel accuracy, which we then want to round up!\n\t\t\t\tvar bounding = browser.bBounding ?\n\t\t\t\t\tMath.ceil( headerCells[i].getBoundingClientRect().width ) :\n\t\t\t\t\tcell.outerWidth();\n\t\n\t\t\t\t// Total is tracked to remove any sub-pixel errors as the outerWidth\n\t\t\t\t// of the table might not equal the total given here (IE!).\n\t\t\t\ttotal += bounding;\n\t\n\t\t\t\t// Width for each column to use\n\t\t\t\tcolumns[ visibleColumns[i] ].sWidth = _fnStringToCss( bounding - border );\n\t\t\t}\n\t\n\t\t\ttable.style.width = _fnStringToCss( total );\n\t\n\t\t\t// Finished with the table - ditch it\n\t\t\tholder.remove();\n\t\t}\n\t\n\t\t// If there is a width attr, we want to attach an event listener which\n\t\t// allows the table sizing to automatically adjust when the window is\n\t\t// resized. Use the width attr rather than CSS, since we can't know if the\n\t\t// CSS is a relative value or absolute - DOM read is always px.\n\t\tif ( tableWidthAttr ) {\n\t\t\ttable.style.width = _fnStringToCss( tableWidthAttr );\n\t\t}\n\t\n\t\tif ( (tableWidthAttr || scrollX) && ! oSettings._reszEvt ) {\n\t\t\tvar bindResize = function () {\n\t\t\t\t$(window).bind('resize.DT-'+oSettings.sInstance, _fnThrottle( function () {\n\t\t\t\t\t_fnAdjustColumnSizing( oSettings );\n\t\t\t\t} ) );\n\t\t\t};\n\t\n\t\t\t// IE6/7 will crash if we bind a resize event handler on page load.\n\t\t\t// To be removed in 1.11 which drops IE6/7 support\n\t\t\tif ( ie67 ) {\n\t\t\t\tsetTimeout( bindResize, 1000 );\n\t\t\t}\n\t\t\telse {\n\t\t\t\tbindResize();\n\t\t\t}\n\t\n\t\t\toSettings._reszEvt = true;\n\t\t}\n\t}\n\t\n\t\n\t/**\n\t * Throttle the calls to a function. 
Arguments and context are maintained for\n\t * the throttled function\n\t *  @param {function} fn Function to be called\n\t *  @param {int} [freq=200] call frequency in mS\n\t *  @returns {function} wrapped function\n\t *  @memberof DataTable#oApi\n\t */\n\tvar _fnThrottle = DataTable.util.throttle;\n\t\n\t\n\t/**\n\t * Convert a CSS unit width to pixels (e.g. 2em)\n\t *  @param {string} width width to be converted\n\t *  @param {node} parent parent to get the with for (required for relative widths) - optional\n\t *  @returns {int} width in pixels\n\t *  @memberof DataTable#oApi\n\t */\n\tfunction _fnConvertToWidth ( width, parent )\n\t{\n\t\tif ( ! width ) {\n\t\t\treturn 0;\n\t\t}\n\t\n\t\tvar n = $('<div/>')\n\t\t\t.css( 'width', _fnStringToCss( width ) )\n\t\t\t.appendTo( parent || document.body );\n\t\n\t\tvar val = n[0].offsetWidth;\n\t\tn.remove();\n\t\n\t\treturn val;\n\t}\n\t\n\t\n\t/**\n\t * Get the widest node\n\t *  @param {object} settings dataTables settings object\n\t *  @param {int} colIdx column of interest\n\t *  @returns {node} widest table node\n\t *  @memberof DataTable#oApi\n\t */\n\tfunction _fnGetWidestNode( settings, colIdx )\n\t{\n\t\tvar idx = _fnGetMaxLenString( settings, colIdx );\n\t\tif ( idx < 0 ) {\n\t\t\treturn null;\n\t\t}\n\t\n\t\tvar data = settings.aoData[ idx ];\n\t\treturn ! data.nTr ? 
// Might not have been created when deferred rendering\n\t\t\t$('<td/>').html( _fnGetCellData( settings, idx, colIdx, 'display' ) )[0] :\n\t\t\tdata.anCells[ colIdx ];\n\t}\n\t\n\t\n\t/**\n\t * Get the maximum strlen for each data column\n\t *  @param {object} settings dataTables settings object\n\t *  @param {int} colIdx column of interest\n\t *  @returns {string} max string length for each column\n\t *  @memberof DataTable#oApi\n\t */\n\tfunction _fnGetMaxLenString( settings, colIdx )\n\t{\n\t\tvar s, max=-1, maxIdx = -1;\n\t\n\t\tfor ( var i=0, ien=settings.aoData.length ; i<ien ; i++ ) {\n\t\t\ts = _fnGetCellData( settings, i, colIdx, 'display' )+'';\n\t\t\ts = s.replace( __re_html_remove, '' );\n\t\t\ts = s.replace( /&nbsp;/g, ' ' );\n\t\n\t\t\tif ( s.length > max ) {\n\t\t\t\tmax = s.length;\n\t\t\t\tmaxIdx = i;\n\t\t\t}\n\t\t}\n\t\n\t\treturn maxIdx;\n\t}\n\t\n\t\n\t/**\n\t * Append a CSS unit (only if required) to a string\n\t *  @param {string} value to css-ify\n\t *  @returns {string} value with css unit\n\t *  @memberof DataTable#oApi\n\t */\n\tfunction _fnStringToCss( s )\n\t{\n\t\tif ( s === null ) {\n\t\t\treturn '0px';\n\t\t}\n\t\n\t\tif ( typeof s == 'number' ) {\n\t\t\treturn s < 0 ?\n\t\t\t\t'0px' :\n\t\t\t\ts+'px';\n\t\t}\n\t\n\t\t// Check it has a unit character already\n\t\treturn s.match(/\\d$/) ?\n\t\t\ts+'px' :\n\t\t\ts;\n\t}\n\t\n\t\n\t\n\tfunction _fnSortFlatten ( settings )\n\t{\n\t\tvar\n\t\t\ti, iLen, k, kLen,\n\t\t\taSort = [],\n\t\t\taiOrig = [],\n\t\t\taoColumns = settings.aoColumns,\n\t\t\taDataSort, iCol, sType, srcCol,\n\t\t\tfixed = settings.aaSortingFixed,\n\t\t\tfixedObj = $.isPlainObject( fixed ),\n\t\t\tnestedSort = [],\n\t\t\tadd = function ( a ) {\n\t\t\t\tif ( a.length && ! 
$.isArray( a[0] ) ) {\n\t\t\t\t\t// 1D array\n\t\t\t\t\tnestedSort.push( a );\n\t\t\t\t}\n\t\t\t\telse {\n\t\t\t\t\t// 2D array\n\t\t\t\t\t$.merge( nestedSort, a );\n\t\t\t\t}\n\t\t\t};\n\t\n\t\t// Build the sort array, with pre-fix and post-fix options if they have been\n\t\t// specified\n\t\tif ( $.isArray( fixed ) ) {\n\t\t\tadd( fixed );\n\t\t}\n\t\n\t\tif ( fixedObj && fixed.pre ) {\n\t\t\tadd( fixed.pre );\n\t\t}\n\t\n\t\tadd( settings.aaSorting );\n\t\n\t\tif (fixedObj && fixed.post ) {\n\t\t\tadd( fixed.post );\n\t\t}\n\t\n\t\tfor ( i=0 ; i<nestedSort.length ; i++ )\n\t\t{\n\t\t\tsrcCol = nestedSort[i][0];\n\t\t\taDataSort = aoColumns[ srcCol ].aDataSort;\n\t\n\t\t\tfor ( k=0, kLen=aDataSort.length ; k<kLen ; k++ )\n\t\t\t{\n\t\t\t\tiCol = aDataSort[k];\n\t\t\t\tsType = aoColumns[ iCol ].sType || 'string';\n\t\n\t\t\t\tif ( nestedSort[i]._idx === undefined ) {\n\t\t\t\t\tnestedSort[i]._idx = $.inArray( nestedSort[i][1], aoColumns[iCol].asSorting );\n\t\t\t\t}\n\t\n\t\t\t\taSort.push( {\n\t\t\t\t\tsrc:       srcCol,\n\t\t\t\t\tcol:       iCol,\n\t\t\t\t\tdir:       nestedSort[i][1],\n\t\t\t\t\tindex:     nestedSort[i]._idx,\n\t\t\t\t\ttype:      sType,\n\t\t\t\t\tformatter: DataTable.ext.type.order[ sType+\"-pre\" ]\n\t\t\t\t} );\n\t\t\t}\n\t\t}\n\t\n\t\treturn aSort;\n\t}\n\t\n\t/**\n\t * Change the order of the table\n\t *  @param {object} oSettings dataTables settings object\n\t *  @memberof DataTable#oApi\n\t *  @todo This really needs split up!\n\t */\n\tfunction _fnSort ( oSettings )\n\t{\n\t\tvar\n\t\t\ti, ien, iLen, j, jLen, k, kLen,\n\t\t\tsDataType, nTh,\n\t\t\taiOrig = [],\n\t\t\toExtSort = DataTable.ext.type.order,\n\t\t\taoData = oSettings.aoData,\n\t\t\taoColumns = oSettings.aoColumns,\n\t\t\taDataSort, data, iCol, sType, oSort,\n\t\t\tformatters = 0,\n\t\t\tsortCol,\n\t\t\tdisplayMaster = oSettings.aiDisplayMaster,\n\t\t\taSort;\n\t\n\t\t// Resolve any column types that are unknown due to addition or invalidation\n\t\t// @todo Can this be 
moved into a 'data-ready' handler which is called when\n\t\t//   data is going to be used in the table?\n\t\t_fnColumnTypes( oSettings );\n\t\n\t\taSort = _fnSortFlatten( oSettings );\n\t\n\t\tfor ( i=0, ien=aSort.length ; i<ien ; i++ ) {\n\t\t\tsortCol = aSort[i];\n\t\n\t\t\t// Track if we can use the fast sort algorithm\n\t\t\tif ( sortCol.formatter ) {\n\t\t\t\tformatters++;\n\t\t\t}\n\t\n\t\t\t// Load the data needed for the sort, for each cell\n\t\t\t_fnSortData( oSettings, sortCol.col );\n\t\t}\n\t\n\t\t/* No sorting required if server-side or no sorting array */\n\t\tif ( _fnDataSource( oSettings ) != 'ssp' && aSort.length !== 0 )\n\t\t{\n\t\t\t// Create a value - key array of the current row positions such that we can use their\n\t\t\t// current position during the sort, if values match, in order to perform stable sorting\n\t\t\tfor ( i=0, iLen=displayMaster.length ; i<iLen ; i++ ) {\n\t\t\t\taiOrig[ displayMaster[i] ] = i;\n\t\t\t}\n\t\n\t\t\t/* Do the sort - here we want multi-column sorting based on a given data source (column)\n\t\t\t * and sorting function (from oSort) in a certain direction. It's reasonably complex to\n\t\t\t * follow on it's own, but this is what we want (example two column sorting):\n\t\t\t *  fnLocalSorting = function(a,b){\n\t\t\t *    var iTest;\n\t\t\t *    iTest = oSort['string-asc']('data11', 'data12');\n\t\t\t *      if (iTest !== 0)\n\t\t\t *        return iTest;\n\t\t\t *    iTest = oSort['numeric-desc']('data21', 'data22');\n\t\t\t *    if (iTest !== 0)\n\t\t\t *      return iTest;\n\t\t\t *    return oSort['numeric-asc']( aiOrig[a], aiOrig[b] );\n\t\t\t *  }\n\t\t\t * Basically we have a test for each sorting column, if the data in that column is equal,\n\t\t\t * test the next column. 
If all columns match, then we use a numeric sort on the row\n\t\t\t * positions in the original data array to provide a stable sort.\n\t\t\t *\n\t\t\t * Note - I know it seems excessive to have two sorting methods, but the first is around\n\t\t\t * 15% faster, so the second is only maintained for backwards compatibility with sorting\n\t\t\t * methods which do not have a pre-sort formatting function.\n\t\t\t */\n\t\t\tif ( formatters === aSort.length ) {\n\t\t\t\t// All sort types have formatting functions\n\t\t\t\tdisplayMaster.sort( function ( a, b ) {\n\t\t\t\t\tvar\n\t\t\t\t\t\tx, y, k, test, sort,\n\t\t\t\t\t\tlen=aSort.length,\n\t\t\t\t\t\tdataA = aoData[a]._aSortData,\n\t\t\t\t\t\tdataB = aoData[b]._aSortData;\n\t\n\t\t\t\t\tfor ( k=0 ; k<len ; k++ ) {\n\t\t\t\t\t\tsort = aSort[k];\n\t\n\t\t\t\t\t\tx = dataA[ sort.col ];\n\t\t\t\t\t\ty = dataB[ sort.col ];\n\t\n\t\t\t\t\t\ttest = x<y ? -1 : x>y ? 1 : 0;\n\t\t\t\t\t\tif ( test !== 0 ) {\n\t\t\t\t\t\t\treturn sort.dir === 'asc' ? test : -test;\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\n\t\t\t\t\tx = aiOrig[a];\n\t\t\t\t\ty = aiOrig[b];\n\t\t\t\t\treturn x<y ? -1 : x>y ? 
1 : 0;\n\t\t\t\t} );\n\t\t\t}\n\t\t\telse {\n\t\t\t\t// Depreciated - remove in 1.11 (providing a plug-in option)\n\t\t\t\t// Not all sort types have formatting methods, so we have to call their sorting\n\t\t\t\t// methods.\n\t\t\t\tdisplayMaster.sort( function ( a, b ) {\n\t\t\t\t\tvar\n\t\t\t\t\t\tx, y, k, l, test, sort, fn,\n\t\t\t\t\t\tlen=aSort.length,\n\t\t\t\t\t\tdataA = aoData[a]._aSortData,\n\t\t\t\t\t\tdataB = aoData[b]._aSortData;\n\t\n\t\t\t\t\tfor ( k=0 ; k<len ; k++ ) {\n\t\t\t\t\t\tsort = aSort[k];\n\t\n\t\t\t\t\t\tx = dataA[ sort.col ];\n\t\t\t\t\t\ty = dataB[ sort.col ];\n\t\n\t\t\t\t\t\tfn = oExtSort[ sort.type+\"-\"+sort.dir ] || oExtSort[ \"string-\"+sort.dir ];\n\t\t\t\t\t\ttest = fn( x, y );\n\t\t\t\t\t\tif ( test !== 0 ) {\n\t\t\t\t\t\t\treturn test;\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\n\t\t\t\t\tx = aiOrig[a];\n\t\t\t\t\ty = aiOrig[b];\n\t\t\t\t\treturn x<y ? -1 : x>y ? 1 : 0;\n\t\t\t\t} );\n\t\t\t}\n\t\t}\n\t\n\t\t/* Tell the draw function that we have sorted the data */\n\t\toSettings.bSorted = true;\n\t}\n\t\n\t\n\tfunction _fnSortAria ( settings )\n\t{\n\t\tvar label;\n\t\tvar nextSort;\n\t\tvar columns = settings.aoColumns;\n\t\tvar aSort = _fnSortFlatten( settings );\n\t\tvar oAria = settings.oLanguage.oAria;\n\t\n\t\t// ARIA attributes - need to loop all columns, to update all (removing old\n\t\t// attributes as needed)\n\t\tfor ( var i=0, iLen=columns.length ; i<iLen ; i++ )\n\t\t{\n\t\t\tvar col = columns[i];\n\t\t\tvar asSorting = col.asSorting;\n\t\t\tvar sTitle = col.sTitle.replace( /<.*?>/g, \"\" );\n\t\t\tvar th = col.nTh;\n\t\n\t\t\t// IE7 is throwing an error when setting these properties with jQuery's\n\t\t\t// attr() and removeAttr() methods...\n\t\t\tth.removeAttribute('aria-sort');\n\t\n\t\t\t/* In ARIA only the first sorting column can be marked as sorting - no multi-sort option */\n\t\t\tif ( col.bSortable ) {\n\t\t\t\tif ( aSort.length > 0 && aSort[0].col == i ) {\n\t\t\t\t\tth.setAttribute('aria-sort', 
aSort[0].dir==\"asc\" ? \"ascending\" : \"descending\" );\n\t\t\t\t\tnextSort = asSorting[ aSort[0].index+1 ] || asSorting[0];\n\t\t\t\t}\n\t\t\t\telse {\n\t\t\t\t\tnextSort = asSorting[0];\n\t\t\t\t}\n\t\n\t\t\t\tlabel = sTitle + ( nextSort === \"asc\" ?\n\t\t\t\t\toAria.sSortAscending :\n\t\t\t\t\toAria.sSortDescending\n\t\t\t\t);\n\t\t\t}\n\t\t\telse {\n\t\t\t\tlabel = sTitle;\n\t\t\t}\n\t\n\t\t\tth.setAttribute('aria-label', label);\n\t\t}\n\t}\n\t\n\t\n\t/**\n\t * Function to run on user sort request\n\t *  @param {object} settings dataTables settings object\n\t *  @param {node} attachTo node to attach the handler to\n\t *  @param {int} colIdx column sorting index\n\t *  @param {boolean} [append=false] Append the requested sort to the existing\n\t *    sort if true (i.e. multi-column sort)\n\t *  @param {function} [callback] callback function\n\t *  @memberof DataTable#oApi\n\t */\n\tfunction _fnSortListener ( settings, colIdx, append, callback )\n\t{\n\t\tvar col = settings.aoColumns[ colIdx ];\n\t\tvar sorting = settings.aaSorting;\n\t\tvar asSorting = col.asSorting;\n\t\tvar nextSortIdx;\n\t\tvar next = function ( a, overflow ) {\n\t\t\tvar idx = a._idx;\n\t\t\tif ( idx === undefined ) {\n\t\t\t\tidx = $.inArray( a[1], asSorting );\n\t\t\t}\n\t\n\t\t\treturn idx+1 < asSorting.length ?\n\t\t\t\tidx+1 :\n\t\t\t\toverflow ?\n\t\t\t\t\tnull :\n\t\t\t\t\t0;\n\t\t};\n\t\n\t\t// Convert to 2D array if needed\n\t\tif ( typeof sorting[0] === 'number' ) {\n\t\t\tsorting = settings.aaSorting = [ sorting ];\n\t\t}\n\t\n\t\t// If appending the sort then we are multi-column sorting\n\t\tif ( append && settings.oFeatures.bSortMulti ) {\n\t\t\t// Are we already doing some kind of sort on this column?\n\t\t\tvar sortIdx = $.inArray( colIdx, _pluck(sorting, '0') );\n\t\n\t\t\tif ( sortIdx !== -1 ) {\n\t\t\t\t// Yes, modify the sort\n\t\t\t\tnextSortIdx = next( sorting[sortIdx], true );\n\t\n\t\t\t\tif ( nextSortIdx === null && sorting.length === 1 ) {\n\t\t\t\t\tnextSortIdx 
= 0; // can't remove sorting completely\n\t\t\t\t}\n\t\n\t\t\t\tif ( nextSortIdx === null ) {\n\t\t\t\t\tsorting.splice( sortIdx, 1 );\n\t\t\t\t}\n\t\t\t\telse {\n\t\t\t\t\tsorting[sortIdx][1] = asSorting[ nextSortIdx ];\n\t\t\t\t\tsorting[sortIdx]._idx = nextSortIdx;\n\t\t\t\t}\n\t\t\t}\n\t\t\telse {\n\t\t\t\t// No sort on this column yet\n\t\t\t\tsorting.push( [ colIdx, asSorting[0], 0 ] );\n\t\t\t\tsorting[sorting.length-1]._idx = 0;\n\t\t\t}\n\t\t}\n\t\telse if ( sorting.length && sorting[0][0] == colIdx ) {\n\t\t\t// Single column - already sorting on this column, modify the sort\n\t\t\tnextSortIdx = next( sorting[0] );\n\t\n\t\t\tsorting.length = 1;\n\t\t\tsorting[0][1] = asSorting[ nextSortIdx ];\n\t\t\tsorting[0]._idx = nextSortIdx;\n\t\t}\n\t\telse {\n\t\t\t// Single column - sort only on this column\n\t\t\tsorting.length = 0;\n\t\t\tsorting.push( [ colIdx, asSorting[0] ] );\n\t\t\tsorting[0]._idx = 0;\n\t\t}\n\t\n\t\t// Run the sort by calling a full redraw\n\t\t_fnReDraw( settings );\n\t\n\t\t// callback used for async user interaction\n\t\tif ( typeof callback == 'function' ) {\n\t\t\tcallback( settings );\n\t\t}\n\t}\n\t\n\t\n\t/**\n\t * Attach a sort handler (click) to a node\n\t *  @param {object} settings dataTables settings object\n\t *  @param {node} attachTo node to attach the handler to\n\t *  @param {int} colIdx column sorting index\n\t *  @param {function} [callback] callback function\n\t *  @memberof DataTable#oApi\n\t */\n\tfunction _fnSortAttachListener ( settings, attachTo, colIdx, callback )\n\t{\n\t\tvar col = settings.aoColumns[ colIdx ];\n\t\n\t\t_fnBindAction( attachTo, {}, function (e) {\n\t\t\t/* If the column is not sortable - don't to anything */\n\t\t\tif ( col.bSortable === false ) {\n\t\t\t\treturn;\n\t\t\t}\n\t\n\t\t\t// If processing is enabled use a timeout to allow the processing\n\t\t\t// display to be shown - otherwise to it synchronously\n\t\t\tif ( settings.oFeatures.bProcessing ) {\n\t\t\t\t_fnProcessingDisplay( 
settings, true );\n\t\n\t\t\t\tsetTimeout( function() {\n\t\t\t\t\t_fnSortListener( settings, colIdx, e.shiftKey, callback );\n\t\n\t\t\t\t\t// In server-side processing, the draw callback will remove the\n\t\t\t\t\t// processing display\n\t\t\t\t\tif ( _fnDataSource( settings ) !== 'ssp' ) {\n\t\t\t\t\t\t_fnProcessingDisplay( settings, false );\n\t\t\t\t\t}\n\t\t\t\t}, 0 );\n\t\t\t}\n\t\t\telse {\n\t\t\t\t_fnSortListener( settings, colIdx, e.shiftKey, callback );\n\t\t\t}\n\t\t} );\n\t}\n\t\n\t\n\t/**\n\t * Set the sorting classes on table's body, Note: it is safe to call this function\n\t * when bSort and bSortClasses are false\n\t *  @param {object} oSettings dataTables settings object\n\t *  @memberof DataTable#oApi\n\t */\n\tfunction _fnSortingClasses( settings )\n\t{\n\t\tvar oldSort = settings.aLastSort;\n\t\tvar sortClass = settings.oClasses.sSortColumn;\n\t\tvar sort = _fnSortFlatten( settings );\n\t\tvar features = settings.oFeatures;\n\t\tvar i, ien, colIdx;\n\t\n\t\tif ( features.bSort && features.bSortClasses ) {\n\t\t\t// Remove old sorting classes\n\t\t\tfor ( i=0, ien=oldSort.length ; i<ien ; i++ ) {\n\t\t\t\tcolIdx = oldSort[i].src;\n\t\n\t\t\t\t// Remove column sorting\n\t\t\t\t$( _pluck( settings.aoData, 'anCells', colIdx ) )\n\t\t\t\t\t.removeClass( sortClass + (i<2 ? i+1 : 3) );\n\t\t\t}\n\t\n\t\t\t// Add new column sorting\n\t\t\tfor ( i=0, ien=sort.length ; i<ien ; i++ ) {\n\t\t\t\tcolIdx = sort[i].src;\n\t\n\t\t\t\t$( _pluck( settings.aoData, 'anCells', colIdx ) )\n\t\t\t\t\t.addClass( sortClass + (i<2 ? 
i+1 : 3) );\n\t\t\t}\n\t\t}\n\t\n\t\tsettings.aLastSort = sort;\n\t}\n\t\n\t\n\t// Get the data to sort a column, be it from cache, fresh (populating the\n\t// cache), or from a sort formatter\n\tfunction _fnSortData( settings, idx )\n\t{\n\t\t// Custom sorting function - provided by the sort data type\n\t\tvar column = settings.aoColumns[ idx ];\n\t\tvar customSort = DataTable.ext.order[ column.sSortDataType ];\n\t\tvar customData;\n\t\n\t\tif ( customSort ) {\n\t\t\tcustomData = customSort.call( settings.oInstance, settings, idx,\n\t\t\t\t_fnColumnIndexToVisible( settings, idx )\n\t\t\t);\n\t\t}\n\t\n\t\t// Use / populate cache\n\t\tvar row, cellData;\n\t\tvar formatter = DataTable.ext.type.order[ column.sType+\"-pre\" ];\n\t\n\t\tfor ( var i=0, ien=settings.aoData.length ; i<ien ; i++ ) {\n\t\t\trow = settings.aoData[i];\n\t\n\t\t\tif ( ! row._aSortData ) {\n\t\t\t\trow._aSortData = [];\n\t\t\t}\n\t\n\t\t\tif ( ! row._aSortData[idx] || customSort ) {\n\t\t\t\tcellData = customSort ?\n\t\t\t\t\tcustomData[i] : // If there was a custom sort function, use data from there\n\t\t\t\t\t_fnGetCellData( settings, i, idx, 'sort' );\n\t\n\t\t\t\trow._aSortData[ idx ] = formatter ?\n\t\t\t\t\tformatter( cellData ) :\n\t\t\t\t\tcellData;\n\t\t\t}\n\t\t}\n\t}\n\t\n\t\n\t\n\t/**\n\t * Save the state of a table\n\t *  @param {object} oSettings dataTables settings object\n\t *  @memberof DataTable#oApi\n\t */\n\tfunction _fnSaveState ( settings )\n\t{\n\t\tif ( !settings.oFeatures.bStateSave || settings.bDestroying )\n\t\t{\n\t\t\treturn;\n\t\t}\n\t\n\t\t/* Store the interesting variables */\n\t\tvar state = {\n\t\t\ttime:    +new Date(),\n\t\t\tstart:   settings._iDisplayStart,\n\t\t\tlength:  settings._iDisplayLength,\n\t\t\torder:   $.extend( true, [], settings.aaSorting ),\n\t\t\tsearch:  _fnSearchToCamel( settings.oPreviousSearch ),\n\t\t\tcolumns: $.map( settings.aoColumns, function ( col, i ) {\n\t\t\t\treturn {\n\t\t\t\t\tvisible: col.bVisible,\n\t\t\t\t\tsearch: 
_fnSearchToCamel( settings.aoPreSearchCols[i] )\n\t\t\t\t};\n\t\t\t} )\n\t\t};\n\t\n\t\t_fnCallbackFire( settings, \"aoStateSaveParams\", 'stateSaveParams', [settings, state] );\n\t\n\t\tsettings.oSavedState = state;\n\t\tsettings.fnStateSaveCallback.call( settings.oInstance, settings, state );\n\t}\n\t\n\t\n\t/**\n\t * Attempt to load a saved table state\n\t *  @param {object} oSettings dataTables settings object\n\t *  @param {object} oInit DataTables init object so we can override settings\n\t *  @memberof DataTable#oApi\n\t */\n\tfunction _fnLoadState ( settings, oInit )\n\t{\n\t\tvar i, ien;\n\t\tvar columns = settings.aoColumns;\n\t\n\t\tif ( ! settings.oFeatures.bStateSave ) {\n\t\t\treturn;\n\t\t}\n\t\n\t\tvar state = settings.fnStateLoadCallback.call( settings.oInstance, settings );\n\t\tif ( ! state || ! state.time ) {\n\t\t\treturn;\n\t\t}\n\t\n\t\t/* Allow custom and plug-in manipulation functions to alter the saved data set and\n\t\t * cancelling of loading by returning false\n\t\t */\n\t\tvar abStateLoad = _fnCallbackFire( settings, 'aoStateLoadParams', 'stateLoadParams', [settings, state] );\n\t\tif ( $.inArray( false, abStateLoad ) !== -1 ) {\n\t\t\treturn;\n\t\t}\n\t\n\t\t/* Reject old data */\n\t\tvar duration = settings.iStateDuration;\n\t\tif ( duration > 0 && state.time < +new Date() - (duration*1000) ) {\n\t\t\treturn;\n\t\t}\n\t\n\t\t// Number of columns have changed - all bets are off, no restore of settings\n\t\tif ( columns.length !== state.columns.length ) {\n\t\t\treturn;\n\t\t}\n\t\n\t\t// Store the saved state so it might be accessed at any time\n\t\tsettings.oLoadedState = $.extend( true, {}, state );\n\t\n\t\t// Restore key features - todo - for 1.11 this needs to be done by\n\t\t// subscribed events\n\t\tif ( state.start !== undefined ) {\n\t\t\tsettings._iDisplayStart    = state.start;\n\t\t\tsettings.iInitDisplayStart = state.start;\n\t\t}\n\t\tif ( state.length !== undefined ) {\n\t\t\tsettings._iDisplayLength   = 
state.length;\n\t\t}\n\t\n\t\t// Order\n\t\tif ( state.order !== undefined ) {\n\t\t\tsettings.aaSorting = [];\n\t\t\t$.each( state.order, function ( i, col ) {\n\t\t\t\tsettings.aaSorting.push( col[0] >= columns.length ?\n\t\t\t\t\t[ 0, col[1] ] :\n\t\t\t\t\tcol\n\t\t\t\t);\n\t\t\t} );\n\t\t}\n\t\n\t\t// Search\n\t\tif ( state.search !== undefined ) {\n\t\t\t$.extend( settings.oPreviousSearch, _fnSearchToHung( state.search ) );\n\t\t}\n\t\n\t\t// Columns\n\t\tfor ( i=0, ien=state.columns.length ; i<ien ; i++ ) {\n\t\t\tvar col = state.columns[i];\n\t\n\t\t\t// Visibility\n\t\t\tif ( col.visible !== undefined ) {\n\t\t\t\tcolumns[i].bVisible = col.visible;\n\t\t\t}\n\t\n\t\t\t// Search\n\t\t\tif ( col.search !== undefined ) {\n\t\t\t\t$.extend( settings.aoPreSearchCols[i], _fnSearchToHung( col.search ) );\n\t\t\t}\n\t\t}\n\t\n\t\t_fnCallbackFire( settings, 'aoStateLoaded', 'stateLoaded', [settings, state] );\n\t}\n\t\n\t\n\t/**\n\t * Return the settings object for a particular table\n\t *  @param {node} table table we are using as a dataTable\n\t *  @returns {object} Settings object - or null if not found\n\t *  @memberof DataTable#oApi\n\t */\n\tfunction _fnSettingsFromNode ( table )\n\t{\n\t\tvar settings = DataTable.settings;\n\t\tvar idx = $.inArray( table, _pluck( settings, 'nTable' ) );\n\t\n\t\treturn idx !== -1 ?\n\t\t\tsettings[ idx ] :\n\t\t\tnull;\n\t}\n\t\n\t\n\t/**\n\t * Log an error message\n\t *  @param {object} settings dataTables settings object\n\t *  @param {int} level log error messages, or display them to the user\n\t *  @param {string} msg error message\n\t *  @param {int} tn Technical note id to get more information about the error.\n\t *  @memberof DataTable#oApi\n\t */\n\tfunction _fnLog( settings, level, msg, tn )\n\t{\n\t\tmsg = 'DataTables warning: '+\n\t\t\t(settings ? 'table id='+settings.sTableId+' - ' : '')+msg;\n\t\n\t\tif ( tn ) {\n\t\t\tmsg += '. 
For more information about this error, please see '+\n\t\t\t'http://datatables.net/tn/'+tn;\n\t\t}\n\t\n\t\tif ( ! level  ) {\n\t\t\t// Backwards compatibility pre 1.10\n\t\t\tvar ext = DataTable.ext;\n\t\t\tvar type = ext.sErrMode || ext.errMode;\n\t\n\t\t\tif ( settings ) {\n\t\t\t\t_fnCallbackFire( settings, null, 'error', [ settings, tn, msg ] );\n\t\t\t}\n\t\n\t\t\tif ( type == 'alert' ) {\n\t\t\t\talert( msg );\n\t\t\t}\n\t\t\telse if ( type == 'throw' ) {\n\t\t\t\tthrow new Error(msg);\n\t\t\t}\n\t\t\telse if ( typeof type == 'function' ) {\n\t\t\t\ttype( settings, tn, msg );\n\t\t\t}\n\t\t}\n\t\telse if ( window.console && console.log ) {\n\t\t\tconsole.log( msg );\n\t\t}\n\t}\n\t\n\t\n\t/**\n\t * See if a property is defined on one object, if so assign it to the other object\n\t *  @param {object} ret target object\n\t *  @param {object} src source object\n\t *  @param {string} name property\n\t *  @param {string} [mappedName] name to map too - optional, name used if not given\n\t *  @memberof DataTable#oApi\n\t */\n\tfunction _fnMap( ret, src, name, mappedName )\n\t{\n\t\tif ( $.isArray( name ) ) {\n\t\t\t$.each( name, function (i, val) {\n\t\t\t\tif ( $.isArray( val ) ) {\n\t\t\t\t\t_fnMap( ret, src, val[0], val[1] );\n\t\t\t\t}\n\t\t\t\telse {\n\t\t\t\t\t_fnMap( ret, src, val );\n\t\t\t\t}\n\t\t\t} );\n\t\n\t\t\treturn;\n\t\t}\n\t\n\t\tif ( mappedName === undefined ) {\n\t\t\tmappedName = name;\n\t\t}\n\t\n\t\tif ( src[name] !== undefined ) {\n\t\t\tret[mappedName] = src[name];\n\t\t}\n\t}\n\t\n\t\n\t/**\n\t * Extend objects - very similar to jQuery.extend, but deep copy objects, and\n\t * shallow copy arrays. 
The reason we need to do this, is that we don't want to\n\t * deep copy array init values (such as aaSorting) since the dev wouldn't be\n\t * able to override them, but we do want to deep copy arrays.\n\t *  @param {object} out Object to extend\n\t *  @param {object} extender Object from which the properties will be applied to\n\t *      out\n\t *  @param {boolean} breakRefs If true, then arrays will be sliced to take an\n\t *      independent copy with the exception of the `data` or `aaData` parameters\n\t *      if they are present. This is so you can pass in a collection to\n\t *      DataTables and have that used as your data source without breaking the\n\t *      references\n\t *  @returns {object} out Reference, just for convenience - out === the return.\n\t *  @memberof DataTable#oApi\n\t *  @todo This doesn't take account of arrays inside the deep copied objects.\n\t */\n\tfunction _fnExtend( out, extender, breakRefs )\n\t{\n\t\tvar val;\n\t\n\t\tfor ( var prop in extender ) {\n\t\t\tif ( extender.hasOwnProperty(prop) ) {\n\t\t\t\tval = extender[prop];\n\t\n\t\t\t\tif ( $.isPlainObject( val ) ) {\n\t\t\t\t\tif ( ! 
$.isPlainObject( out[prop] ) ) {\n\t\t\t\t\t\tout[prop] = {};\n\t\t\t\t\t}\n\t\t\t\t\t$.extend( true, out[prop], val );\n\t\t\t\t}\n\t\t\t\telse if ( breakRefs && prop !== 'data' && prop !== 'aaData' && $.isArray(val) ) {\n\t\t\t\t\tout[prop] = val.slice();\n\t\t\t\t}\n\t\t\t\telse {\n\t\t\t\t\tout[prop] = val;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\n\t\treturn out;\n\t}\n\t\n\t\n\t/**\n\t * Bind an event handers to allow a click or return key to activate the callback.\n\t * This is good for accessibility since a return on the keyboard will have the\n\t * same effect as a click, if the element has focus.\n\t *  @param {element} n Element to bind the action to\n\t *  @param {object} oData Data object to pass to the triggered function\n\t *  @param {function} fn Callback function for when the event is triggered\n\t *  @memberof DataTable#oApi\n\t */\n\tfunction _fnBindAction( n, oData, fn )\n\t{\n\t\t$(n)\n\t\t\t.bind( 'click.DT', oData, function (e) {\n\t\t\t\t\tn.blur(); // Remove focus outline for mouse users\n\t\t\t\t\tfn(e);\n\t\t\t\t} )\n\t\t\t.bind( 'keypress.DT', oData, function (e){\n\t\t\t\t\tif ( e.which === 13 ) {\n\t\t\t\t\t\te.preventDefault();\n\t\t\t\t\t\tfn(e);\n\t\t\t\t\t}\n\t\t\t\t} )\n\t\t\t.bind( 'selectstart.DT', function () {\n\t\t\t\t\t/* Take the brutal approach to cancelling text selection */\n\t\t\t\t\treturn false;\n\t\t\t\t} );\n\t}\n\t\n\t\n\t/**\n\t * Register a callback function. Easily allows a callback function to be added to\n\t * an array store of callback functions that can then all be called together.\n\t *  @param {object} oSettings dataTables settings object\n\t *  @param {string} sStore Name of the array storage for the callbacks in oSettings\n\t *  @param {function} fn Function to be called back\n\t *  @param {string} sName Identifying name for the callback (i.e. 
a label)\n\t *  @memberof DataTable#oApi\n\t */\n\tfunction _fnCallbackReg( oSettings, sStore, fn, sName )\n\t{\n\t\tif ( fn )\n\t\t{\n\t\t\toSettings[sStore].push( {\n\t\t\t\t\"fn\": fn,\n\t\t\t\t\"sName\": sName\n\t\t\t} );\n\t\t}\n\t}\n\t\n\t\n\t/**\n\t * Fire callback functions and trigger events. Note that the loop over the\n\t * callback array store is done backwards! Further note that you do not want to\n\t * fire off triggers in time sensitive applications (for example cell creation)\n\t * as its slow.\n\t *  @param {object} settings dataTables settings object\n\t *  @param {string} callbackArr Name of the array storage for the callbacks in\n\t *      oSettings\n\t *  @param {string} eventName Name of the jQuery custom event to trigger. If\n\t *      null no trigger is fired\n\t *  @param {array} args Array of arguments to pass to the callback function /\n\t *      trigger\n\t *  @memberof DataTable#oApi\n\t */\n\tfunction _fnCallbackFire( settings, callbackArr, eventName, args )\n\t{\n\t\tvar ret = [];\n\t\n\t\tif ( callbackArr ) {\n\t\t\tret = $.map( settings[callbackArr].slice().reverse(), function (val, i) {\n\t\t\t\treturn val.fn.apply( settings.oInstance, args );\n\t\t\t} );\n\t\t}\n\t\n\t\tif ( eventName !== null ) {\n\t\t\tvar e = $.Event( eventName+'.dt' );\n\t\n\t\t\t$(settings.nTable).trigger( e, args );\n\t\n\t\t\tret.push( e.result );\n\t\t}\n\t\n\t\treturn ret;\n\t}\n\t\n\t\n\tfunction _fnLengthOverflow ( settings )\n\t{\n\t\tvar\n\t\t\tstart = settings._iDisplayStart,\n\t\t\tend = settings.fnDisplayEnd(),\n\t\t\tlen = settings._iDisplayLength;\n\t\n\t\t/* If we have space to show extra rows (backing up from the end point - then do so */\n\t\tif ( start >= end )\n\t\t{\n\t\t\tstart = end - len;\n\t\t}\n\t\n\t\t// Keep the start record on the current page\n\t\tstart -= (start % len);\n\t\n\t\tif ( len === -1 || start < 0 )\n\t\t{\n\t\t\tstart = 0;\n\t\t}\n\t\n\t\tsettings._iDisplayStart = start;\n\t}\n\t\n\t\n\tfunction _fnRenderer( settings, 
type )\n\t{\n\t\tvar renderer = settings.renderer;\n\t\tvar host = DataTable.ext.renderer[type];\n\t\n\t\tif ( $.isPlainObject( renderer ) && renderer[type] ) {\n\t\t\t// Specific renderer for this type. If available use it, otherwise use\n\t\t\t// the default.\n\t\t\treturn host[renderer[type]] || host._;\n\t\t}\n\t\telse if ( typeof renderer === 'string' ) {\n\t\t\t// Common renderer - if there is one available for this type use it,\n\t\t\t// otherwise use the default\n\t\t\treturn host[renderer] || host._;\n\t\t}\n\t\n\t\t// Use the default\n\t\treturn host._;\n\t}\n\t\n\t\n\t/**\n\t * Detect the data source being used for the table. Used to simplify the code\n\t * a little (ajax) and to make it compress a little smaller.\n\t *\n\t *  @param {object} settings dataTables settings object\n\t *  @returns {string} Data source\n\t *  @memberof DataTable#oApi\n\t */\n\tfunction _fnDataSource ( settings )\n\t{\n\t\tif ( settings.oFeatures.bServerSide ) {\n\t\t\treturn 'ssp';\n\t\t}\n\t\telse if ( settings.ajax || settings.sAjaxSource ) {\n\t\t\treturn 'ajax';\n\t\t}\n\t\treturn 'dom';\n\t}\n\t\n\n\t\n\t\n\t/**\n\t * Computed structure of the DataTables API, defined by the options passed to\n\t * `DataTable.Api.register()` when building the API.\n\t *\n\t * The structure is built in order to speed creation and extension of the Api\n\t * objects since the extensions are effectively pre-parsed.\n\t *\n\t * The array is an array of objects with the following structure, where this\n\t * base array represents the Api prototype base:\n\t *\n\t *     [\n\t *       {\n\t *         name:      'data'                -- string   - Property name\n\t *         val:       function () {},       -- function - Api method (or undefined if just an object\n\t *         methodExt: [ ... ],              -- array    - Array of Api object definitions to extend the method result\n\t *         propExt:   [ ... 
]               -- array    - Array of Api object definitions to extend the property\n\t *       },\n\t *       {\n\t *         name:     'row'\n\t *         val:       {},\n\t *         methodExt: [ ... ],\n\t *         propExt:   [\n\t *           {\n\t *             name:      'data'\n\t *             val:       function () {},\n\t *             methodExt: [ ... ],\n\t *             propExt:   [ ... ]\n\t *           },\n\t *           ...\n\t *         ]\n\t *       }\n\t *     ]\n\t *\n\t * @type {Array}\n\t * @ignore\n\t */\n\tvar __apiStruct = [];\n\t\n\t\n\t/**\n\t * `Array.prototype` reference.\n\t *\n\t * @type object\n\t * @ignore\n\t */\n\tvar __arrayProto = Array.prototype;\n\t\n\t\n\t/**\n\t * Abstraction for `context` parameter of the `Api` constructor to allow it to\n\t * take several different forms for ease of use.\n\t *\n\t * Each of the input parameter types will be converted to a DataTables settings\n\t * object where possible.\n\t *\n\t * @param  {string|node|jQuery|object} mixed DataTable identifier. Can be one\n\t *   of:\n\t *\n\t *   * `string` - jQuery selector. Any DataTables' matching the given selector\n\t *     with be found and used.\n\t *   * `node` - `TABLE` node which has already been formed into a DataTable.\n\t *   * `jQuery` - A jQuery object of `TABLE` nodes.\n\t *   * `object` - DataTables settings object\n\t *   * `DataTables.Api` - API instance\n\t * @return {array|null} Matching DataTables settings objects. `null` or\n\t *   `undefined` is returned if no matching DataTable is found.\n\t * @ignore\n\t */\n\tvar _toSettings = function ( mixed )\n\t{\n\t\tvar idx, jq;\n\t\tvar settings = DataTable.settings;\n\t\tvar tables = $.map( settings, function (el, i) {\n\t\t\treturn el.nTable;\n\t\t} );\n\t\n\t\tif ( ! 
mixed ) {\n\t\t\treturn [];\n\t\t}\n\t\telse if ( mixed.nTable && mixed.oApi ) {\n\t\t\t// DataTables settings object\n\t\t\treturn [ mixed ];\n\t\t}\n\t\telse if ( mixed.nodeName && mixed.nodeName.toLowerCase() === 'table' ) {\n\t\t\t// Table node\n\t\t\tidx = $.inArray( mixed, tables );\n\t\t\treturn idx !== -1 ? [ settings[idx] ] : null;\n\t\t}\n\t\telse if ( mixed && typeof mixed.settings === 'function' ) {\n\t\t\treturn mixed.settings().toArray();\n\t\t}\n\t\telse if ( typeof mixed === 'string' ) {\n\t\t\t// jQuery selector\n\t\t\tjq = $(mixed);\n\t\t}\n\t\telse if ( mixed instanceof $ ) {\n\t\t\t// jQuery object (also DataTables instance)\n\t\t\tjq = mixed;\n\t\t}\n\t\n\t\tif ( jq ) {\n\t\t\treturn jq.map( function(i) {\n\t\t\t\tidx = $.inArray( this, tables );\n\t\t\t\treturn idx !== -1 ? settings[idx] : null;\n\t\t\t} ).toArray();\n\t\t}\n\t};\n\t\n\t\n\t/**\n\t * DataTables API class - used to control and interface with  one or more\n\t * DataTables enhanced tables.\n\t *\n\t * The API class is heavily based on jQuery, presenting a chainable interface\n\t * that you can use to interact with tables. Each instance of the API class has\n\t * a \"context\" - i.e. the tables that it will operate on. This could be a single\n\t * table, all tables on a page or a sub-set thereof.\n\t *\n\t * Additionally the API is designed to allow you to easily work with the data in\n\t * the tables, retrieving and manipulating it as required. This is done by\n\t * presenting the API class as an array like interface. The contents of the\n\t * array depend upon the actions requested by each method (for example\n\t * `rows().nodes()` will return an array of nodes, while `rows().data()` will\n\t * return an array of objects or arrays depending upon your table's\n\t * configuration). 
The API object has a number of array like methods (`push`,\n\t * `pop`, `reverse` etc) as well as additional helper methods (`each`, `pluck`,\n\t * `unique` etc) to assist your working with the data held in a table.\n\t *\n\t * Most methods (those which return an Api instance) are chainable, which means\n\t * the return from a method call also has all of the methods available that the\n\t * top level object had. For example, these two calls are equivalent:\n\t *\n\t *     // Not chained\n\t *     api.row.add( {...} );\n\t *     api.draw();\n\t *\n\t *     // Chained\n\t *     api.row.add( {...} ).draw();\n\t *\n\t * @class DataTable.Api\n\t * @param {array|object|string|jQuery} context DataTable identifier. This is\n\t *   used to define which DataTables enhanced tables this API will operate on.\n\t *   Can be one of:\n\t *\n\t *   * `string` - jQuery selector. Any DataTables' matching the given selector\n\t *     with be found and used.\n\t *   * `node` - `TABLE` node which has already been formed into a DataTable.\n\t *   * `jQuery` - A jQuery object of `TABLE` nodes.\n\t *   * `object` - DataTables settings object\n\t * @param {array} [data] Data to initialise the Api instance with.\n\t *\n\t * @example\n\t *   // Direct initialisation during DataTables construction\n\t *   var api = $('#example').DataTable();\n\t *\n\t * @example\n\t *   // Initialisation using a DataTables jQuery object\n\t *   var api = $('#example').dataTable().api();\n\t *\n\t * @example\n\t *   // Initialisation as a constructor\n\t *   var api = new $.fn.DataTable.Api( 'table.dataTable' );\n\t */\n\t_Api = function ( context, data )\n\t{\n\t\tif ( ! 
(this instanceof _Api) ) {\n\t\t\treturn new _Api( context, data );\n\t\t}\n\t\n\t\tvar settings = [];\n\t\tvar ctxSettings = function ( o ) {\n\t\t\tvar a = _toSettings( o );\n\t\t\tif ( a ) {\n\t\t\t\tsettings = settings.concat( a );\n\t\t\t}\n\t\t};\n\t\n\t\tif ( $.isArray( context ) ) {\n\t\t\tfor ( var i=0, ien=context.length ; i<ien ; i++ ) {\n\t\t\t\tctxSettings( context[i] );\n\t\t\t}\n\t\t}\n\t\telse {\n\t\t\tctxSettings( context );\n\t\t}\n\t\n\t\t// Remove duplicates\n\t\tthis.context = _unique( settings );\n\t\n\t\t// Initial data\n\t\tif ( data ) {\n\t\t\t$.merge( this, data );\n\t\t}\n\t\n\t\t// selector\n\t\tthis.selector = {\n\t\t\trows: null,\n\t\t\tcols: null,\n\t\t\topts: null\n\t\t};\n\t\n\t\t_Api.extend( this, this, __apiStruct );\n\t};\n\t\n\tDataTable.Api = _Api;\n\t\n\t// Don't destroy the existing prototype, just extend it. Required for jQuery 2's\n\t// isPlainObject.\n\t$.extend( _Api.prototype, {\n\t\tany: function ()\n\t\t{\n\t\t\treturn this.count() !== 0;\n\t\t},\n\t\n\t\n\t\tconcat:  __arrayProto.concat,\n\t\n\t\n\t\tcontext: [], // array of table settings objects\n\t\n\t\n\t\tcount: function ()\n\t\t{\n\t\t\treturn this.flatten().length;\n\t\t},\n\t\n\t\n\t\teach: function ( fn )\n\t\t{\n\t\t\tfor ( var i=0, ien=this.length ; i<ien; i++ ) {\n\t\t\t\tfn.call( this, this[i], i, this );\n\t\t\t}\n\t\n\t\t\treturn this;\n\t\t},\n\t\n\t\n\t\teq: function ( idx )\n\t\t{\n\t\t\tvar ctx = this.context;\n\t\n\t\t\treturn ctx.length > idx ?\n\t\t\t\tnew _Api( ctx[idx], this[idx] ) :\n\t\t\t\tnull;\n\t\t},\n\t\n\t\n\t\tfilter: function ( fn )\n\t\t{\n\t\t\tvar a = [];\n\t\n\t\t\tif ( __arrayProto.filter ) {\n\t\t\t\ta = __arrayProto.filter.call( this, fn, this );\n\t\t\t}\n\t\t\telse {\n\t\t\t\t// Compatibility for browsers without EMCA-252-5 (JS 1.6)\n\t\t\t\tfor ( var i=0, ien=this.length ; i<ien ; i++ ) {\n\t\t\t\t\tif ( fn.call( this, this[i], i, this ) ) {\n\t\t\t\t\t\ta.push( this[i] );\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\n\t\t\treturn 
new _Api( this.context, a );\n\t\t},\n\t\n\t\n\t\tflatten: function ()\n\t\t{\n\t\t\tvar a = [];\n\t\t\treturn new _Api( this.context, a.concat.apply( a, this.toArray() ) );\n\t\t},\n\t\n\t\n\t\tjoin:    __arrayProto.join,\n\t\n\t\n\t\tindexOf: __arrayProto.indexOf || function (obj, start)\n\t\t{\n\t\t\tfor ( var i=(start || 0), ien=this.length ; i<ien ; i++ ) {\n\t\t\t\tif ( this[i] === obj ) {\n\t\t\t\t\treturn i;\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn -1;\n\t\t},\n\t\n\t\titerator: function ( flatten, type, fn, alwaysNew ) {\n\t\t\tvar\n\t\t\t\ta = [], ret,\n\t\t\t\ti, ien, j, jen,\n\t\t\t\tcontext = this.context,\n\t\t\t\trows, items, item,\n\t\t\t\tselector = this.selector;\n\t\n\t\t\t// Argument shifting\n\t\t\tif ( typeof flatten === 'string' ) {\n\t\t\t\talwaysNew = fn;\n\t\t\t\tfn = type;\n\t\t\t\ttype = flatten;\n\t\t\t\tflatten = false;\n\t\t\t}\n\t\n\t\t\tfor ( i=0, ien=context.length ; i<ien ; i++ ) {\n\t\t\t\tvar apiInst = new _Api( context[i] );\n\t\n\t\t\t\tif ( type === 'table' ) {\n\t\t\t\t\tret = fn.call( apiInst, context[i], i );\n\t\n\t\t\t\t\tif ( ret !== undefined ) {\n\t\t\t\t\t\ta.push( ret );\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\telse if ( type === 'columns' || type === 'rows' ) {\n\t\t\t\t\t// this has same length as context - one entry for each table\n\t\t\t\t\tret = fn.call( apiInst, context[i], this[i], i );\n\t\n\t\t\t\t\tif ( ret !== undefined ) {\n\t\t\t\t\t\ta.push( ret );\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\telse if ( type === 'column' || type === 'column-rows' || type === 'row' || type === 'cell' ) {\n\t\t\t\t\t// columns and rows share the same structure.\n\t\t\t\t\t// 'this' is an array of column indexes for each context\n\t\t\t\t\titems = this[i];\n\t\n\t\t\t\t\tif ( type === 'column-rows' ) {\n\t\t\t\t\t\trows = _selector_row_indexes( context[i], selector.opts );\n\t\t\t\t\t}\n\t\n\t\t\t\t\tfor ( j=0, jen=items.length ; j<jen ; j++ ) {\n\t\t\t\t\t\titem = items[j];\n\t\n\t\t\t\t\t\tif ( type === 'cell' ) {\n\t\t\t\t\t\t\tret = fn.call( 
apiInst, context[i], item.row, item.column, i, j );\n\t\t\t\t\t\t}\n\t\t\t\t\t\telse {\n\t\t\t\t\t\t\tret = fn.call( apiInst, context[i], item, i, j, rows );\n\t\t\t\t\t\t}\n\t\n\t\t\t\t\t\tif ( ret !== undefined ) {\n\t\t\t\t\t\t\ta.push( ret );\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\n\t\t\tif ( a.length || alwaysNew ) {\n\t\t\t\tvar api = new _Api( context, flatten ? a.concat.apply( [], a ) : a );\n\t\t\t\tvar apiSelector = api.selector;\n\t\t\t\tapiSelector.rows = selector.rows;\n\t\t\t\tapiSelector.cols = selector.cols;\n\t\t\t\tapiSelector.opts = selector.opts;\n\t\t\t\treturn api;\n\t\t\t}\n\t\t\treturn this;\n\t\t},\n\t\n\t\n\t\tlastIndexOf: __arrayProto.lastIndexOf || function (obj, start)\n\t\t{\n\t\t\t// Bit cheeky...\n\t\t\treturn this.indexOf.apply( this.toArray.reverse(), arguments );\n\t\t},\n\t\n\t\n\t\tlength:  0,\n\t\n\t\n\t\tmap: function ( fn )\n\t\t{\n\t\t\tvar a = [];\n\t\n\t\t\tif ( __arrayProto.map ) {\n\t\t\t\ta = __arrayProto.map.call( this, fn, this );\n\t\t\t}\n\t\t\telse {\n\t\t\t\t// Compatibility for browsers without EMCA-252-5 (JS 1.6)\n\t\t\t\tfor ( var i=0, ien=this.length ; i<ien ; i++ ) {\n\t\t\t\t\ta.push( fn.call( this, this[i], i ) );\n\t\t\t\t}\n\t\t\t}\n\t\n\t\t\treturn new _Api( this.context, a );\n\t\t},\n\t\n\t\n\t\tpluck: function ( prop )\n\t\t{\n\t\t\treturn this.map( function ( el ) {\n\t\t\t\treturn el[ prop ];\n\t\t\t} );\n\t\t},\n\t\n\t\tpop:     __arrayProto.pop,\n\t\n\t\n\t\tpush:    __arrayProto.push,\n\t\n\t\n\t\t// Does not return an API instance\n\t\treduce: __arrayProto.reduce || function ( fn, init )\n\t\t{\n\t\t\treturn _fnReduce( this, fn, init, 0, this.length, 1 );\n\t\t},\n\t\n\t\n\t\treduceRight: __arrayProto.reduceRight || function ( fn, init )\n\t\t{\n\t\t\treturn _fnReduce( this, fn, init, this.length-1, -1, -1 );\n\t\t},\n\t\n\t\n\t\treverse: __arrayProto.reverse,\n\t\n\t\n\t\t// Object with rows, columns and opts\n\t\tselector: null,\n\t\n\t\n\t\tshift:   
__arrayProto.shift,\n\t\n\t\n\t\tsort:    __arrayProto.sort, // ? name - order?\n\t\n\t\n\t\tsplice:  __arrayProto.splice,\n\t\n\t\n\t\ttoArray: function ()\n\t\t{\n\t\t\treturn __arrayProto.slice.call( this );\n\t\t},\n\t\n\t\n\t\tto$: function ()\n\t\t{\n\t\t\treturn $( this );\n\t\t},\n\t\n\t\n\t\ttoJQuery: function ()\n\t\t{\n\t\t\treturn $( this );\n\t\t},\n\t\n\t\n\t\tunique: function ()\n\t\t{\n\t\t\treturn new _Api( this.context, _unique(this) );\n\t\t},\n\t\n\t\n\t\tunshift: __arrayProto.unshift\n\t} );\n\t\n\t\n\t_Api.extend = function ( scope, obj, ext )\n\t{\n\t\t// Only extend API instances and static properties of the API\n\t\tif ( ! ext.length || ! obj || ( ! (obj instanceof _Api) && ! obj.__dt_wrapper ) ) {\n\t\t\treturn;\n\t\t}\n\t\n\t\tvar\n\t\t\ti, ien,\n\t\t\tj, jen,\n\t\t\tstruct, inner,\n\t\t\tmethodScoping = function ( scope, fn, struc ) {\n\t\t\t\treturn function () {\n\t\t\t\t\tvar ret = fn.apply( scope, arguments );\n\t\n\t\t\t\t\t// Method extension\n\t\t\t\t\t_Api.extend( ret, ret, struc.methodExt );\n\t\t\t\t\treturn ret;\n\t\t\t\t};\n\t\t\t};\n\t\n\t\tfor ( i=0, ien=ext.length ; i<ien ; i++ ) {\n\t\t\tstruct = ext[i];\n\t\n\t\t\t// Value\n\t\t\tobj[ struct.name ] = typeof struct.val === 'function' ?\n\t\t\t\tmethodScoping( scope, struct.val, struct ) :\n\t\t\t\t$.isPlainObject( struct.val ) ?\n\t\t\t\t\t{} :\n\t\t\t\t\tstruct.val;\n\t\n\t\t\tobj[ struct.name ].__dt_wrapper = true;\n\t\n\t\t\t// Property extension\n\t\t\t_Api.extend( scope, obj[ struct.name ], struct.propExt );\n\t\t}\n\t};\n\t\n\t\n\t// @todo - Is there need for an augment function?\n\t// _Api.augment = function ( inst, name )\n\t// {\n\t// \t// Find src object in the structure from the name\n\t// \tvar parts = name.split('.');\n\t\n\t// \t_Api.extend( inst, obj );\n\t// };\n\t\n\t\n\t//     [\n\t//       {\n\t//         name:      'data'                -- string   - Property name\n\t//         val:       function () {},       -- function - Api method (or undefined if 
just an object\n\t//         methodExt: [ ... ],              -- array    - Array of Api object definitions to extend the method result\n\t//         propExt:   [ ... ]               -- array    - Array of Api object definitions to extend the property\n\t//       },\n\t//       {\n\t//         name:     'row'\n\t//         val:       {},\n\t//         methodExt: [ ... ],\n\t//         propExt:   [\n\t//           {\n\t//             name:      'data'\n\t//             val:       function () {},\n\t//             methodExt: [ ... ],\n\t//             propExt:   [ ... ]\n\t//           },\n\t//           ...\n\t//         ]\n\t//       }\n\t//     ]\n\t\n\t_Api.register = _api_register = function ( name, val )\n\t{\n\t\tif ( $.isArray( name ) ) {\n\t\t\tfor ( var j=0, jen=name.length ; j<jen ; j++ ) {\n\t\t\t\t_Api.register( name[j], val );\n\t\t\t}\n\t\t\treturn;\n\t\t}\n\t\n\t\tvar\n\t\t\ti, ien,\n\t\t\their = name.split('.'),\n\t\t\tstruct = __apiStruct,\n\t\t\tkey, method;\n\t\n\t\tvar find = function ( src, name ) {\n\t\t\tfor ( var i=0, ien=src.length ; i<ien ; i++ ) {\n\t\t\t\tif ( src[i].name === name ) {\n\t\t\t\t\treturn src[i];\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn null;\n\t\t};\n\t\n\t\tfor ( i=0, ien=heir.length ; i<ien ; i++ ) {\n\t\t\tmethod = heir[i].indexOf('()') !== -1;\n\t\t\tkey = method ?\n\t\t\t\their[i].replace('()', '') :\n\t\t\t\their[i];\n\t\n\t\t\tvar src = find( struct, key );\n\t\t\tif ( ! 
src ) {\n\t\t\t\tsrc = {\n\t\t\t\t\tname:      key,\n\t\t\t\t\tval:       {},\n\t\t\t\t\tmethodExt: [],\n\t\t\t\t\tpropExt:   []\n\t\t\t\t};\n\t\t\t\tstruct.push( src );\n\t\t\t}\n\t\n\t\t\tif ( i === ien-1 ) {\n\t\t\t\tsrc.val = val;\n\t\t\t}\n\t\t\telse {\n\t\t\t\tstruct = method ?\n\t\t\t\t\tsrc.methodExt :\n\t\t\t\t\tsrc.propExt;\n\t\t\t}\n\t\t}\n\t};\n\t\n\t\n\t_Api.registerPlural = _api_registerPlural = function ( pluralName, singularName, val ) {\n\t\t_Api.register( pluralName, val );\n\t\n\t\t_Api.register( singularName, function () {\n\t\t\tvar ret = val.apply( this, arguments );\n\t\n\t\t\tif ( ret === this ) {\n\t\t\t\t// Returned item is the API instance that was passed in, return it\n\t\t\t\treturn this;\n\t\t\t}\n\t\t\telse if ( ret instanceof _Api ) {\n\t\t\t\t// New API instance returned, want the value from the first item\n\t\t\t\t// in the returned array for the singular result.\n\t\t\t\treturn ret.length ?\n\t\t\t\t\t$.isArray( ret[0] ) ?\n\t\t\t\t\t\tnew _Api( ret.context, ret[0] ) : // Array results are 'enhanced'\n\t\t\t\t\t\tret[0] :\n\t\t\t\t\tundefined;\n\t\t\t}\n\t\n\t\t\t// Non-API return - just fire it back\n\t\t\treturn ret;\n\t\t} );\n\t};\n\t\n\t\n\t/**\n\t * Selector for HTML tables. 
Apply the given selector to the give array of\n\t * DataTables settings objects.\n\t *\n\t * @param {string|integer} [selector] jQuery selector string or integer\n\t * @param  {array} Array of DataTables settings objects to be filtered\n\t * @return {array}\n\t * @ignore\n\t */\n\tvar __table_selector = function ( selector, a )\n\t{\n\t\t// Integer is used to pick out a table by index\n\t\tif ( typeof selector === 'number' ) {\n\t\t\treturn [ a[ selector ] ];\n\t\t}\n\t\n\t\t// Perform a jQuery selector on the table nodes\n\t\tvar nodes = $.map( a, function (el, i) {\n\t\t\treturn el.nTable;\n\t\t} );\n\t\n\t\treturn $(nodes)\n\t\t\t.filter( selector )\n\t\t\t.map( function (i) {\n\t\t\t\t// Need to translate back from the table node to the settings\n\t\t\t\tvar idx = $.inArray( this, nodes );\n\t\t\t\treturn a[ idx ];\n\t\t\t} )\n\t\t\t.toArray();\n\t};\n\t\n\t\n\t\n\t/**\n\t * Context selector for the API's context (i.e. the tables the API instance\n\t * refers to.\n\t *\n\t * @name    DataTable.Api#tables\n\t * @param {string|integer} [selector] Selector to pick which tables the iterator\n\t *   should operate on. If not given, all tables in the current context are\n\t *   used. 
This can be given as a jQuery selector (for example `':gt(0)'`) to\n\t *   select multiple tables or as an integer to select a single table.\n\t * @returns {DataTable.Api} Returns a new API instance if a selector is given.\n\t */\n\t_api_register( 'tables()', function ( selector ) {\n\t\t// A new instance is created if there was a selector specified\n\t\treturn selector ?\n\t\t\tnew _Api( __table_selector( selector, this.context ) ) :\n\t\t\tthis;\n\t} );\n\t\n\t\n\t_api_register( 'table()', function ( selector ) {\n\t\tvar tables = this.tables( selector );\n\t\tvar ctx = tables.context;\n\t\n\t\t// Truncate to the first matched table\n\t\treturn ctx.length ?\n\t\t\tnew _Api( ctx[0] ) :\n\t\t\ttables;\n\t} );\n\t\n\t\n\t_api_registerPlural( 'tables().nodes()', 'table().node()' , function () {\n\t\treturn this.iterator( 'table', function ( ctx ) {\n\t\t\treturn ctx.nTable;\n\t\t}, 1 );\n\t} );\n\t\n\t\n\t_api_registerPlural( 'tables().body()', 'table().body()' , function () {\n\t\treturn this.iterator( 'table', function ( ctx ) {\n\t\t\treturn ctx.nTBody;\n\t\t}, 1 );\n\t} );\n\t\n\t\n\t_api_registerPlural( 'tables().header()', 'table().header()' , function () {\n\t\treturn this.iterator( 'table', function ( ctx ) {\n\t\t\treturn ctx.nTHead;\n\t\t}, 1 );\n\t} );\n\t\n\t\n\t_api_registerPlural( 'tables().footer()', 'table().footer()' , function () {\n\t\treturn this.iterator( 'table', function ( ctx ) {\n\t\t\treturn ctx.nTFoot;\n\t\t}, 1 );\n\t} );\n\t\n\t\n\t_api_registerPlural( 'tables().containers()', 'table().container()' , function () {\n\t\treturn this.iterator( 'table', function ( ctx ) {\n\t\t\treturn ctx.nTableWrapper;\n\t\t}, 1 );\n\t} );\n\t\n\t\n\t\n\t/**\n\t * Redraw the tables in the current context.\n\t */\n\t_api_register( 'draw()', function ( paging ) {\n\t\treturn this.iterator( 'table', function ( settings ) {\n\t\t\tif ( paging === 'page' ) {\n\t\t\t\t_fnDraw( settings );\n\t\t\t}\n\t\t\telse {\n\t\t\t\tif ( typeof paging === 'string' ) 
{\n\t\t\t\t\tpaging = paging === 'full-hold' ?\n\t\t\t\t\t\tfalse :\n\t\t\t\t\t\ttrue;\n\t\t\t\t}\n\t\n\t\t\t\t_fnReDraw( settings, paging===false );\n\t\t\t}\n\t\t} );\n\t} );\n\t\n\t\n\t\n\t/**\n\t * Get the current page index.\n\t *\n\t * @return {integer} Current page index (zero based)\n\t *//**\n\t * Set the current page.\n\t *\n\t * Note that if you attempt to show a page which does not exist, DataTables will\n\t * not throw an error, but rather reset the paging.\n\t *\n\t * @param {integer|string} action The paging action to take. This can be one of:\n\t *  * `integer` - The page index to jump to\n\t *  * `string` - An action to take:\n\t *    * `first` - Jump to first page.\n\t *    * `next` - Jump to the next page\n\t *    * `previous` - Jump to previous page\n\t *    * `last` - Jump to the last page.\n\t * @returns {DataTables.Api} this\n\t */\n\t_api_register( 'page()', function ( action ) {\n\t\tif ( action === undefined ) {\n\t\t\treturn this.page.info().page; // not an expensive call\n\t\t}\n\t\n\t\t// else, have an action to take on all tables\n\t\treturn this.iterator( 'table', function ( settings ) {\n\t\t\t_fnPageChange( settings, action );\n\t\t} );\n\t} );\n\t\n\t\n\t/**\n\t * Paging information for the first table in the current context.\n\t *\n\t * If you require paging information for another table, use the `table()` method\n\t * with a suitable selector.\n\t *\n\t * @return {object} Object with the following properties set:\n\t *  * `page` - Current page index (zero based - i.e. the first page is `0`)\n\t *  * `pages` - Total number of pages\n\t *  * `start` - Display index for the first record shown on the current page\n\t *  * `end` - Display index for the last record shown on the current page\n\t *  * `length` - Display length (number of records). 
Note that generally `start\n\t *    + length = end`, but this is not always true, for example if there are\n\t *    only 2 records to show on the final page, with a length of 10.\n\t *  * `recordsTotal` - Full data set length\n\t *  * `recordsDisplay` - Data set length once the current filtering criterion\n\t *    are applied.\n\t */\n\t_api_register( 'page.info()', function ( action ) {\n\t\tif ( this.context.length === 0 ) {\n\t\t\treturn undefined;\n\t\t}\n\t\n\t\tvar\n\t\t\tsettings   = this.context[0],\n\t\t\tstart      = settings._iDisplayStart,\n\t\t\tlen        = settings.oFeatures.bPaginate ? settings._iDisplayLength : -1,\n\t\t\tvisRecords = settings.fnRecordsDisplay(),\n\t\t\tall        = len === -1;\n\t\n\t\treturn {\n\t\t\t\"page\":           all ? 0 : Math.floor( start / len ),\n\t\t\t\"pages\":          all ? 1 : Math.ceil( visRecords / len ),\n\t\t\t\"start\":          start,\n\t\t\t\"end\":            settings.fnDisplayEnd(),\n\t\t\t\"length\":         len,\n\t\t\t\"recordsTotal\":   settings.fnRecordsTotal(),\n\t\t\t\"recordsDisplay\": visRecords,\n\t\t\t\"serverSide\":     _fnDataSource( settings ) === 'ssp'\n\t\t};\n\t} );\n\t\n\t\n\t/**\n\t * Get the current page length.\n\t *\n\t * @return {integer} Current page length. Note `-1` indicates that all records\n\t *   are to be shown.\n\t *//**\n\t * Set the current page length.\n\t *\n\t * @param {integer} Page length to set. 
Use `-1` to show all records.\n\t * @returns {DataTables.Api} this\n\t */\n\t_api_register( 'page.len()', function ( len ) {\n\t\t// Note that we can't call this function 'length()' because `length`\n\t\t// is a Javascript property of functions which defines how many arguments\n\t\t// the function expects.\n\t\tif ( len === undefined ) {\n\t\t\treturn this.context.length !== 0 ?\n\t\t\t\tthis.context[0]._iDisplayLength :\n\t\t\t\tundefined;\n\t\t}\n\t\n\t\t// else, set the page length\n\t\treturn this.iterator( 'table', function ( settings ) {\n\t\t\t_fnLengthChange( settings, len );\n\t\t} );\n\t} );\n\t\n\t\n\t\n\tvar __reload = function ( settings, holdPosition, callback ) {\n\t\t// Use the draw event to trigger a callback\n\t\tif ( callback ) {\n\t\t\tvar api = new _Api( settings );\n\t\n\t\t\tapi.one( 'draw', function () {\n\t\t\t\tcallback( api.ajax.json() );\n\t\t\t} );\n\t\t}\n\t\n\t\tif ( _fnDataSource( settings ) == 'ssp' ) {\n\t\t\t_fnReDraw( settings, holdPosition );\n\t\t}\n\t\telse {\n\t\t\t_fnProcessingDisplay( settings, true );\n\t\n\t\t\t// Cancel an existing request\n\t\t\tvar xhr = settings.jqXHR;\n\t\t\tif ( xhr && xhr.readyState !== 4 ) {\n\t\t\t\txhr.abort();\n\t\t\t}\n\t\n\t\t\t// Trigger xhr\n\t\t\t_fnBuildAjax( settings, [], function( json ) {\n\t\t\t\t_fnClearTable( settings );\n\t\n\t\t\t\tvar data = _fnAjaxDataSrc( settings, json );\n\t\t\t\tfor ( var i=0, ien=data.length ; i<ien ; i++ ) {\n\t\t\t\t\t_fnAddData( settings, data[i] );\n\t\t\t\t}\n\t\n\t\t\t\t_fnReDraw( settings, holdPosition );\n\t\t\t\t_fnProcessingDisplay( settings, false );\n\t\t\t} );\n\t\t}\n\t};\n\t\n\t\n\t/**\n\t * Get the JSON response from the last Ajax request that DataTables made to the\n\t * server. 
Note that this returns the JSON from the first table in the current\n\t * context.\n\t *\n\t * @return {object} JSON received from the server.\n\t */\n\t_api_register( 'ajax.json()', function () {\n\t\tvar ctx = this.context;\n\t\n\t\tif ( ctx.length > 0 ) {\n\t\t\treturn ctx[0].json;\n\t\t}\n\t\n\t\t// else return undefined;\n\t} );\n\t\n\t\n\t/**\n\t * Get the data submitted in the last Ajax request\n\t */\n\t_api_register( 'ajax.params()', function () {\n\t\tvar ctx = this.context;\n\t\n\t\tif ( ctx.length > 0 ) {\n\t\t\treturn ctx[0].oAjaxData;\n\t\t}\n\t\n\t\t// else return undefined;\n\t} );\n\t\n\t\n\t/**\n\t * Reload tables from the Ajax data source. Note that this function will\n\t * automatically re-draw the table when the remote data has been loaded.\n\t *\n\t * @param {boolean} [reset=true] Reset (default) or hold the current paging\n\t *   position. A full re-sort and re-filter is performed when this method is\n\t *   called, which is why the pagination reset is the default action.\n\t * @returns {DataTables.Api} this\n\t */\n\t_api_register( 'ajax.reload()', function ( callback, resetPaging ) {\n\t\treturn this.iterator( 'table', function (settings) {\n\t\t\t__reload( settings, resetPaging===false, callback );\n\t\t} );\n\t} );\n\t\n\t\n\t/**\n\t * Get the current Ajax URL. Note that this returns the URL from the first\n\t * table in the current context.\n\t *\n\t * @return {string} Current Ajax source URL\n\t *//**\n\t * Set the Ajax URL. 
Note that this will set the URL for all tables in the\n\t * current context.\n\t *\n\t * @param {string} url URL to set.\n\t * @returns {DataTables.Api} this\n\t */\n\t_api_register( 'ajax.url()', function ( url ) {\n\t\tvar ctx = this.context;\n\t\n\t\tif ( url === undefined ) {\n\t\t\t// get\n\t\t\tif ( ctx.length === 0 ) {\n\t\t\t\treturn undefined;\n\t\t\t}\n\t\t\tctx = ctx[0];\n\t\n\t\t\treturn ctx.ajax ?\n\t\t\t\t$.isPlainObject( ctx.ajax ) ?\n\t\t\t\t\tctx.ajax.url :\n\t\t\t\t\tctx.ajax :\n\t\t\t\tctx.sAjaxSource;\n\t\t}\n\t\n\t\t// set\n\t\treturn this.iterator( 'table', function ( settings ) {\n\t\t\tif ( $.isPlainObject( settings.ajax ) ) {\n\t\t\t\tsettings.ajax.url = url;\n\t\t\t}\n\t\t\telse {\n\t\t\t\tsettings.ajax = url;\n\t\t\t}\n\t\t\t// No need to consider sAjaxSource here since DataTables gives priority\n\t\t\t// to `ajax` over `sAjaxSource`. So setting `ajax` here, renders any\n\t\t\t// value of `sAjaxSource` redundant.\n\t\t} );\n\t} );\n\t\n\t\n\t/**\n\t * Load data from the newly set Ajax URL. Note that this method is only\n\t * available when `ajax.url()` is used to set a URL. Additionally, this method\n\t * has the same effect as calling `ajax.reload()` but is provided for\n\t * convenience when setting a new URL. 
Like `ajax.reload()` it will\n\t * automatically redraw the table once the remote data has been loaded.\n\t *\n\t * @returns {DataTables.Api} this\n\t */\n\t_api_register( 'ajax.url().load()', function ( callback, resetPaging ) {\n\t\t// Same as a reload, but makes sense to present it for easy access after a\n\t\t// url change\n\t\treturn this.iterator( 'table', function ( ctx ) {\n\t\t\t__reload( ctx, resetPaging===false, callback );\n\t\t} );\n\t} );\n\t\n\t\n\t\n\t\n\tvar _selector_run = function ( type, selector, selectFn, settings, opts )\n\t{\n\t\tvar\n\t\t\tout = [], res,\n\t\t\ta, i, ien, j, jen,\n\t\t\tselectorType = typeof selector;\n\t\n\t\t// Can't just check for isArray here, as an API or jQuery instance might be\n\t\t// given with their array like look\n\t\tif ( ! selector || selectorType === 'string' || selectorType === 'function' || selector.length === undefined ) {\n\t\t\tselector = [ selector ];\n\t\t}\n\t\n\t\tfor ( i=0, ien=selector.length ; i<ien ; i++ ) {\n\t\t\ta = selector[i] && selector[i].split ?\n\t\t\t\tselector[i].split(',') :\n\t\t\t\t[ selector[i] ];\n\t\n\t\t\tfor ( j=0, jen=a.length ; j<jen ; j++ ) {\n\t\t\t\tres = selectFn( typeof a[j] === 'string' ? $.trim(a[j]) : a[j] );\n\t\n\t\t\t\tif ( res && res.length ) {\n\t\t\t\t\tout = out.concat( res );\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\n\t\t// selector extensions\n\t\tvar ext = _ext.selector[ type ];\n\t\tif ( ext.length ) {\n\t\t\tfor ( i=0, ien=ext.length ; i<ien ; i++ ) {\n\t\t\t\tout = ext[i]( settings, opts, out );\n\t\t\t}\n\t\t}\n\t\n\t\treturn _unique( out );\n\t};\n\t\n\t\n\tvar _selector_opts = function ( opts )\n\t{\n\t\tif ( ! 
opts ) {\n\t\t\topts = {};\n\t\t}\n\t\n\t\t// Backwards compatibility for 1.9- which used the terminology filter rather\n\t\t// than search\n\t\tif ( opts.filter && opts.search === undefined ) {\n\t\t\topts.search = opts.filter;\n\t\t}\n\t\n\t\treturn $.extend( {\n\t\t\tsearch: 'none',\n\t\t\torder: 'current',\n\t\t\tpage: 'all'\n\t\t}, opts );\n\t};\n\t\n\t\n\tvar _selector_first = function ( inst )\n\t{\n\t\t// Reduce the API instance to the first item found\n\t\tfor ( var i=0, ien=inst.length ; i<ien ; i++ ) {\n\t\t\tif ( inst[i].length > 0 ) {\n\t\t\t\t// Assign the first element to the first item in the instance\n\t\t\t\t// and truncate the instance and context\n\t\t\t\tinst[0] = inst[i];\n\t\t\t\tinst[0].length = 1;\n\t\t\t\tinst.length = 1;\n\t\t\t\tinst.context = [ inst.context[i] ];\n\t\n\t\t\t\treturn inst;\n\t\t\t}\n\t\t}\n\t\n\t\t// Not found - return an empty instance\n\t\tinst.length = 0;\n\t\treturn inst;\n\t};\n\t\n\t\n\tvar _selector_row_indexes = function ( settings, opts )\n\t{\n\t\tvar\n\t\t\ti, ien, tmp, a=[],\n\t\t\tdisplayFiltered = settings.aiDisplay,\n\t\t\tdisplayMaster = settings.aiDisplayMaster;\n\t\n\t\tvar\n\t\t\tsearch = opts.search,  // none, applied, removed\n\t\t\torder  = opts.order,   // applied, current, index (original - compatibility with 1.9)\n\t\t\tpage   = opts.page;    // all, current\n\t\n\t\tif ( _fnDataSource( settings ) == 'ssp' ) {\n\t\t\t// In server-side processing mode, most options are irrelevant since\n\t\t\t// rows not shown don't exist and the index order is the applied order\n\t\t\t// Removed is a special case - for consistency just return an empty\n\t\t\t// array\n\t\t\treturn search === 'removed' ?\n\t\t\t\t[] :\n\t\t\t\t_range( 0, displayMaster.length );\n\t\t}\n\t\telse if ( page == 'current' ) {\n\t\t\t// Current page implies that order=current and fitler=applied, since it is\n\t\t\t// fairly senseless otherwise, regardless of what order and search actually\n\t\t\t// are\n\t\t\tfor ( 
i=settings._iDisplayStart, ien=settings.fnDisplayEnd() ; i<ien ; i++ ) {\n\t\t\t\ta.push( displayFiltered[i] );\n\t\t\t}\n\t\t}\n\t\telse if ( order == 'current' || order == 'applied' ) {\n\t\t\ta = search == 'none' ?\n\t\t\t\tdisplayMaster.slice() :                      // no search\n\t\t\t\tsearch == 'applied' ?\n\t\t\t\t\tdisplayFiltered.slice() :                // applied search\n\t\t\t\t\t$.map( displayMaster, function (el, i) { // removed search\n\t\t\t\t\t\treturn $.inArray( el, displayFiltered ) === -1 ? el : null;\n\t\t\t\t\t} );\n\t\t}\n\t\telse if ( order == 'index' || order == 'original' ) {\n\t\t\tfor ( i=0, ien=settings.aoData.length ; i<ien ; i++ ) {\n\t\t\t\tif ( search == 'none' ) {\n\t\t\t\t\ta.push( i );\n\t\t\t\t}\n\t\t\t\telse { // applied | removed\n\t\t\t\t\ttmp = $.inArray( i, displayFiltered );\n\t\n\t\t\t\t\tif ((tmp === -1 && search == 'removed') ||\n\t\t\t\t\t\t(tmp >= 0   && search == 'applied') )\n\t\t\t\t\t{\n\t\t\t\t\t\ta.push( i );\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\n\t\treturn a;\n\t};\n\t\n\t\n\t/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\n\t * Rows\n\t *\n\t * {}          - no selector - use all available rows\n\t * {integer}   - row aoData index\n\t * {node}      - TR node\n\t * {string}    - jQuery selector to apply to the TR elements\n\t * {array}     - jQuery array of nodes, or simply an array of TR nodes\n\t *\n\t */\n\t\n\t\n\tvar __row_selector = function ( settings, selector, opts )\n\t{\n\t\tvar run = function ( sel ) {\n\t\t\tvar selInt = _intVal( sel );\n\t\t\tvar i, ien;\n\t\n\t\t\t// Short cut - selector is a number and no options provided (default is\n\t\t\t// all records, so no need to check if the index is in there, since it\n\t\t\t// must be - dev error if the index doesn't exist).\n\t\t\tif ( selInt !== null && ! 
opts ) {\n\t\t\t\treturn [ selInt ];\n\t\t\t}\n\t\n\t\t\tvar rows = _selector_row_indexes( settings, opts );\n\t\n\t\t\tif ( selInt !== null && $.inArray( selInt, rows ) !== -1 ) {\n\t\t\t\t// Selector - integer\n\t\t\t\treturn [ selInt ];\n\t\t\t}\n\t\t\telse if ( ! sel ) {\n\t\t\t\t// Selector - none\n\t\t\t\treturn rows;\n\t\t\t}\n\t\n\t\t\t// Selector - function\n\t\t\tif ( typeof sel === 'function' ) {\n\t\t\t\treturn $.map( rows, function (idx) {\n\t\t\t\t\tvar row = settings.aoData[ idx ];\n\t\t\t\t\treturn sel( idx, row._aData, row.nTr ) ? idx : null;\n\t\t\t\t} );\n\t\t\t}\n\t\n\t\t\t// Get nodes in the order from the `rows` array with null values removed\n\t\t\tvar nodes = _removeEmpty(\n\t\t\t\t_pluck_order( settings.aoData, rows, 'nTr' )\n\t\t\t);\n\t\n\t\t\t// Selector - node\n\t\t\tif ( sel.nodeName ) {\n\t\t\t\tif ( sel._DT_RowIndex !== undefined ) {\n\t\t\t\t\treturn [ sel._DT_RowIndex ]; // Property added by DT for fast lookup\n\t\t\t\t}\n\t\t\t\telse if ( sel._DT_CellIndex ) {\n\t\t\t\t\treturn [ sel._DT_CellIndex.row ];\n\t\t\t\t}\n\t\t\t\telse {\n\t\t\t\t\tvar host = $(sel).closest('*[data-dt-row]');\n\t\t\t\t\treturn host.length ?\n\t\t\t\t\t\t[ host.data('dt-row') ] :\n\t\t\t\t\t\t[];\n\t\t\t\t}\n\t\t\t}\n\t\n\t\t\t// ID selector. Want to always be able to select rows by id, regardless\n\t\t\t// of if the tr element has been created or not, so can't rely upon\n\t\t\t// jQuery here - hence a custom implementation. This does not match\n\t\t\t// Sizzle's fast selector or HTML4 - in HTML5 the ID can be anything,\n\t\t\t// but to select it using a CSS selector engine (like Sizzle or\n\t\t\t// querySelect) it would need to need to be escaped for some characters.\n\t\t\t// DataTables simplifies this for row selectors since you can select\n\t\t\t// only a row. 
A # indicates an id any anything that follows is the id -\n\t\t\t// unescaped.\n\t\t\tif ( typeof sel === 'string' && sel.charAt(0) === '#' ) {\n\t\t\t\t// get row index from id\n\t\t\t\tvar rowObj = settings.aIds[ sel.replace( /^#/, '' ) ];\n\t\t\t\tif ( rowObj !== undefined ) {\n\t\t\t\t\treturn [ rowObj.idx ];\n\t\t\t\t}\n\t\n\t\t\t\t// need to fall through to jQuery in case there is DOM id that\n\t\t\t\t// matches\n\t\t\t}\n\t\n\t\t\t// Selector - jQuery selector string, array of nodes or jQuery object/\n\t\t\t// As jQuery's .filter() allows jQuery objects to be passed in filter,\n\t\t\t// it also allows arrays, so this will cope with all three options\n\t\t\treturn $(nodes)\n\t\t\t\t.filter( sel )\n\t\t\t\t.map( function () {\n\t\t\t\t\treturn this._DT_RowIndex;\n\t\t\t\t} )\n\t\t\t\t.toArray();\n\t\t};\n\t\n\t\treturn _selector_run( 'row', selector, run, settings, opts );\n\t};\n\t\n\t\n\t_api_register( 'rows()', function ( selector, opts ) {\n\t\t// argument shifting\n\t\tif ( selector === undefined ) {\n\t\t\tselector = '';\n\t\t}\n\t\telse if ( $.isPlainObject( selector ) ) {\n\t\t\topts = selector;\n\t\t\tselector = '';\n\t\t}\n\t\n\t\topts = _selector_opts( opts );\n\t\n\t\tvar inst = this.iterator( 'table', function ( settings ) {\n\t\t\treturn __row_selector( settings, selector, opts );\n\t\t}, 1 );\n\t\n\t\t// Want argument shifting here and in __row_selector?\n\t\tinst.selector.rows = selector;\n\t\tinst.selector.opts = opts;\n\t\n\t\treturn inst;\n\t} );\n\t\n\t_api_register( 'rows().nodes()', function () {\n\t\treturn this.iterator( 'row', function ( settings, row ) {\n\t\t\treturn settings.aoData[ row ].nTr || undefined;\n\t\t}, 1 );\n\t} );\n\t\n\t_api_register( 'rows().data()', function () {\n\t\treturn this.iterator( true, 'rows', function ( settings, rows ) {\n\t\t\treturn _pluck_order( settings.aoData, rows, '_aData' );\n\t\t}, 1 );\n\t} );\n\t\n\t_api_registerPlural( 'rows().cache()', 'row().cache()', function ( type ) {\n\t\treturn 
this.iterator( 'row', function ( settings, row ) {\n\t\t\tvar r = settings.aoData[ row ];\n\t\t\treturn type === 'search' ? r._aFilterData : r._aSortData;\n\t\t}, 1 );\n\t} );\n\t\n\t_api_registerPlural( 'rows().invalidate()', 'row().invalidate()', function ( src ) {\n\t\treturn this.iterator( 'row', function ( settings, row ) {\n\t\t\t_fnInvalidate( settings, row, src );\n\t\t} );\n\t} );\n\t\n\t_api_registerPlural( 'rows().indexes()', 'row().index()', function () {\n\t\treturn this.iterator( 'row', function ( settings, row ) {\n\t\t\treturn row;\n\t\t}, 1 );\n\t} );\n\t\n\t_api_registerPlural( 'rows().ids()', 'row().id()', function ( hash ) {\n\t\tvar a = [];\n\t\tvar context = this.context;\n\t\n\t\t// `iterator` will drop undefined values, but in this case we want them\n\t\tfor ( var i=0, ien=context.length ; i<ien ; i++ ) {\n\t\t\tfor ( var j=0, jen=this[i].length ; j<jen ; j++ ) {\n\t\t\t\tvar id = context[i].rowIdFn( context[i].aoData[ this[i][j] ]._aData );\n\t\t\t\ta.push( (hash === true ? 
'#' : '' )+ id );\n\t\t\t}\n\t\t}\n\t\n\t\treturn new _Api( context, a );\n\t} );\n\t\n\t_api_registerPlural( 'rows().remove()', 'row().remove()', function () {\n\t\tvar that = this;\n\t\n\t\tthis.iterator( 'row', function ( settings, row, thatIdx ) {\n\t\t\tvar data = settings.aoData;\n\t\t\tvar rowData = data[ row ];\n\t\t\tvar i, ien, j, jen;\n\t\t\tvar loopRow, loopCells;\n\t\n\t\t\tdata.splice( row, 1 );\n\t\n\t\t\t// Update the cached indexes\n\t\t\tfor ( i=0, ien=data.length ; i<ien ; i++ ) {\n\t\t\t\tloopRow = data[i];\n\t\t\t\tloopCells = loopRow.anCells;\n\t\n\t\t\t\t// Rows\n\t\t\t\tif ( loopRow.nTr !== null ) {\n\t\t\t\t\tloopRow.nTr._DT_RowIndex = i;\n\t\t\t\t}\n\t\n\t\t\t\t// Cells\n\t\t\t\tif ( loopCells !== null ) {\n\t\t\t\t\tfor ( j=0, jen=loopCells.length ; j<jen ; j++ ) {\n\t\t\t\t\t\tloopCells[j]._DT_CellIndex.row = i;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\n\t\t\t// Delete from the display arrays\n\t\t\t_fnDeleteIndex( settings.aiDisplayMaster, row );\n\t\t\t_fnDeleteIndex( settings.aiDisplay, row );\n\t\t\t_fnDeleteIndex( that[ thatIdx ], row, false ); // maintain local indexes\n\t\n\t\t\t// Check for an 'overflow' they case for displaying the table\n\t\t\t_fnLengthOverflow( settings );\n\t\n\t\t\t// Remove the row's ID reference if there is one\n\t\t\tvar id = settings.rowIdFn( rowData._aData );\n\t\t\tif ( id !== undefined ) {\n\t\t\t\tdelete settings.aIds[ id ];\n\t\t\t}\n\t\t} );\n\t\n\t\tthis.iterator( 'table', function ( settings ) {\n\t\t\tfor ( var i=0, ien=settings.aoData.length ; i<ien ; i++ ) {\n\t\t\t\tsettings.aoData[i].idx = i;\n\t\t\t}\n\t\t} );\n\t\n\t\treturn this;\n\t} );\n\t\n\t\n\t_api_register( 'rows.add()', function ( rows ) {\n\t\tvar newRows = this.iterator( 'table', function ( settings ) {\n\t\t\t\tvar row, i, ien;\n\t\t\t\tvar out = [];\n\t\n\t\t\t\tfor ( i=0, ien=rows.length ; i<ien ; i++ ) {\n\t\t\t\t\trow = rows[i];\n\t\n\t\t\t\t\tif ( row.nodeName && row.nodeName.toUpperCase() === 'TR' ) {\n\t\t\t\t\t\tout.push( 
_fnAddTr( settings, row )[0] );\n\t\t\t\t\t}\n\t\t\t\t\telse {\n\t\t\t\t\t\tout.push( _fnAddData( settings, row ) );\n\t\t\t\t\t}\n\t\t\t\t}\n\t\n\t\t\t\treturn out;\n\t\t\t}, 1 );\n\t\n\t\t// Return an Api.rows() extended instance, so rows().nodes() etc can be used\n\t\tvar modRows = this.rows( -1 );\n\t\tmodRows.pop();\n\t\t$.merge( modRows, newRows );\n\t\n\t\treturn modRows;\n\t} );\n\t\n\t\n\t\n\t\n\t\n\t/**\n\t *\n\t */\n\t_api_register( 'row()', function ( selector, opts ) {\n\t\treturn _selector_first( this.rows( selector, opts ) );\n\t} );\n\t\n\t\n\t_api_register( 'row().data()', function ( data ) {\n\t\tvar ctx = this.context;\n\t\n\t\tif ( data === undefined ) {\n\t\t\t// Get\n\t\t\treturn ctx.length && this.length ?\n\t\t\t\tctx[0].aoData[ this[0] ]._aData :\n\t\t\t\tundefined;\n\t\t}\n\t\n\t\t// Set\n\t\tctx[0].aoData[ this[0] ]._aData = data;\n\t\n\t\t// Automatically invalidate\n\t\t_fnInvalidate( ctx[0], this[0], 'data' );\n\t\n\t\treturn this;\n\t} );\n\t\n\t\n\t_api_register( 'row().node()', function () {\n\t\tvar ctx = this.context;\n\t\n\t\treturn ctx.length && this.length ?\n\t\t\tctx[0].aoData[ this[0] ].nTr || null :\n\t\t\tnull;\n\t} );\n\t\n\t\n\t_api_register( 'row.add()', function ( row ) {\n\t\t// Allow a jQuery object to be passed in - only a single row is added from\n\t\t// it though - the first element in the set\n\t\tif ( row instanceof $ && row.length ) {\n\t\t\trow = row[0];\n\t\t}\n\t\n\t\tvar rows = this.iterator( 'table', function ( settings ) {\n\t\t\tif ( row.nodeName && row.nodeName.toUpperCase() === 'TR' ) {\n\t\t\t\treturn _fnAddTr( settings, row )[0];\n\t\t\t}\n\t\t\treturn _fnAddData( settings, row );\n\t\t} );\n\t\n\t\t// Return an Api.rows() extended instance, with the newly added row selected\n\t\treturn this.row( rows[0] );\n\t} );\n\t\n\t\n\t\n\tvar __details_add = function ( ctx, row, data, klass )\n\t{\n\t\t// Convert to array of TR elements\n\t\tvar rows = [];\n\t\tvar addRow = function ( r, k ) {\n\t\t\t// 
Recursion to allow for arrays of jQuery objects\n\t\t\tif ( $.isArray( r ) || r instanceof $ ) {\n\t\t\t\tfor ( var i=0, ien=r.length ; i<ien ; i++ ) {\n\t\t\t\t\taddRow( r[i], k );\n\t\t\t\t}\n\t\t\t\treturn;\n\t\t\t}\n\t\n\t\t\t// If we get a TR element, then just add it directly - up to the dev\n\t\t\t// to add the correct number of columns etc\n\t\t\tif ( r.nodeName && r.nodeName.toLowerCase() === 'tr' ) {\n\t\t\t\trows.push( r );\n\t\t\t}\n\t\t\telse {\n\t\t\t\t// Otherwise create a row with a wrapper\n\t\t\t\tvar created = $('<tr><td/></tr>').addClass( k );\n\t\t\t\t$('td', created)\n\t\t\t\t\t.addClass( k )\n\t\t\t\t\t.html( r )\n\t\t\t\t\t[0].colSpan = _fnVisbleColumns( ctx );\n\t\n\t\t\t\trows.push( created[0] );\n\t\t\t}\n\t\t};\n\t\n\t\taddRow( data, klass );\n\t\n\t\tif ( row._details ) {\n\t\t\trow._details.remove();\n\t\t}\n\t\n\t\trow._details = $(rows);\n\t\n\t\t// If the children were already shown, that state should be retained\n\t\tif ( row._detailsShow ) {\n\t\t\trow._details.insertAfter( row.nTr );\n\t\t}\n\t};\n\t\n\t\n\tvar __details_remove = function ( api, idx )\n\t{\n\t\tvar ctx = api.context;\n\t\n\t\tif ( ctx.length ) {\n\t\t\tvar row = ctx[0].aoData[ idx !== undefined ? 
idx : api[0] ];\n\t\n\t\t\tif ( row && row._details ) {\n\t\t\t\trow._details.remove();\n\t\n\t\t\t\trow._detailsShow = undefined;\n\t\t\t\trow._details = undefined;\n\t\t\t}\n\t\t}\n\t};\n\t\n\t\n\tvar __details_display = function ( api, show ) {\n\t\tvar ctx = api.context;\n\t\n\t\tif ( ctx.length && api.length ) {\n\t\t\tvar row = ctx[0].aoData[ api[0] ];\n\t\n\t\t\tif ( row._details ) {\n\t\t\t\trow._detailsShow = show;\n\t\n\t\t\t\tif ( show ) {\n\t\t\t\t\trow._details.insertAfter( row.nTr );\n\t\t\t\t}\n\t\t\t\telse {\n\t\t\t\t\trow._details.detach();\n\t\t\t\t}\n\t\n\t\t\t\t__details_events( ctx[0] );\n\t\t\t}\n\t\t}\n\t};\n\t\n\t\n\tvar __details_events = function ( settings )\n\t{\n\t\tvar api = new _Api( settings );\n\t\tvar namespace = '.dt.DT_details';\n\t\tvar drawEvent = 'draw'+namespace;\n\t\tvar colvisEvent = 'column-visibility'+namespace;\n\t\tvar destroyEvent = 'destroy'+namespace;\n\t\tvar data = settings.aoData;\n\t\n\t\tapi.off( drawEvent +' '+ colvisEvent +' '+ destroyEvent );\n\t\n\t\tif ( _pluck( data, '_details' ).length > 0 ) {\n\t\t\t// On each draw, insert the required elements into the document\n\t\t\tapi.on( drawEvent, function ( e, ctx ) {\n\t\t\t\tif ( settings !== ctx ) {\n\t\t\t\t\treturn;\n\t\t\t\t}\n\t\n\t\t\t\tapi.rows( {page:'current'} ).eq(0).each( function (idx) {\n\t\t\t\t\t// Internal data grab\n\t\t\t\t\tvar row = data[ idx ];\n\t\n\t\t\t\t\tif ( row._detailsShow ) {\n\t\t\t\t\t\trow._details.insertAfter( row.nTr );\n\t\t\t\t\t}\n\t\t\t\t} );\n\t\t\t} );\n\t\n\t\t\t// Column visibility change - update the colspan\n\t\t\tapi.on( colvisEvent, function ( e, ctx, idx, vis ) {\n\t\t\t\tif ( settings !== ctx ) {\n\t\t\t\t\treturn;\n\t\t\t\t}\n\t\n\t\t\t\t// Update the colspan for the details rows (note, only if it already has\n\t\t\t\t// a colspan)\n\t\t\t\tvar row, visible = _fnVisbleColumns( ctx );\n\t\n\t\t\t\tfor ( var i=0, ien=data.length ; i<ien ; i++ ) {\n\t\t\t\t\trow = data[i];\n\t\n\t\t\t\t\tif ( row._details ) 
{\n\t\t\t\t\t\trow._details.children('td[colspan]').attr('colspan', visible );\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} );\n\t\n\t\t\t// Table destroyed - nuke any child rows\n\t\t\tapi.on( destroyEvent, function ( e, ctx ) {\n\t\t\t\tif ( settings !== ctx ) {\n\t\t\t\t\treturn;\n\t\t\t\t}\n\t\n\t\t\t\tfor ( var i=0, ien=data.length ; i<ien ; i++ ) {\n\t\t\t\t\tif ( data[i]._details ) {\n\t\t\t\t\t\t__details_remove( api, i );\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} );\n\t\t}\n\t};\n\t\n\t// Strings for the method names to help minification\n\tvar _emp = '';\n\tvar _child_obj = _emp+'row().child';\n\tvar _child_mth = _child_obj+'()';\n\t\n\t// data can be:\n\t//  tr\n\t//  string\n\t//  jQuery or array of any of the above\n\t_api_register( _child_mth, function ( data, klass ) {\n\t\tvar ctx = this.context;\n\t\n\t\tif ( data === undefined ) {\n\t\t\t// get\n\t\t\treturn ctx.length && this.length ?\n\t\t\t\tctx[0].aoData[ this[0] ]._details :\n\t\t\t\tundefined;\n\t\t}\n\t\telse if ( data === true ) {\n\t\t\t// show\n\t\t\tthis.child.show();\n\t\t}\n\t\telse if ( data === false ) {\n\t\t\t// remove\n\t\t\t__details_remove( this );\n\t\t}\n\t\telse if ( ctx.length && this.length ) {\n\t\t\t// set\n\t\t\t__details_add( ctx[0], ctx[0].aoData[ this[0] ], data, klass );\n\t\t}\n\t\n\t\treturn this;\n\t} );\n\t\n\t\n\t_api_register( [\n\t\t_child_obj+'.show()',\n\t\t_child_mth+'.show()' // only when `child()` was called with parameters (without\n\t], function ( show ) {   // it returns an object and this method is not executed)\n\t\t__details_display( this, true );\n\t\treturn this;\n\t} );\n\t\n\t\n\t_api_register( [\n\t\t_child_obj+'.hide()',\n\t\t_child_mth+'.hide()' // only when `child()` was called with parameters (without\n\t], function () {         // it returns an object and this method is not executed)\n\t\t__details_display( this, false );\n\t\treturn this;\n\t} );\n\t\n\t\n\t_api_register( [\n\t\t_child_obj+'.remove()',\n\t\t_child_mth+'.remove()' // only when `child()` was 
called with parameters (without\n\t], function () {           // it returns an object and this method is not executed)\n\t\t__details_remove( this );\n\t\treturn this;\n\t} );\n\t\n\t\n\t_api_register( _child_obj+'.isShown()', function () {\n\t\tvar ctx = this.context;\n\t\n\t\tif ( ctx.length && this.length ) {\n\t\t\t// _detailsShown as false or undefined will fall through to return false\n\t\t\treturn ctx[0].aoData[ this[0] ]._detailsShow || false;\n\t\t}\n\t\treturn false;\n\t} );\n\t\n\t\n\t\n\t/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\n\t * Columns\n\t *\n\t * {integer}           - column index (>=0 count from left, <0 count from right)\n\t * \"{integer}:visIdx\"  - visible column index (i.e. translate to column index)  (>=0 count from left, <0 count from right)\n\t * \"{integer}:visible\" - alias for {integer}:visIdx  (>=0 count from left, <0 count from right)\n\t * \"{string}:name\"     - column name\n\t * \"{string}\"          - jQuery selector on column header nodes\n\t *\n\t */\n\t\n\t// can be an array of these items, comma separated list, or an array of comma\n\t// separated lists\n\t\n\tvar __re_column_selector = /^(.+):(name|visIdx|visible)$/;\n\t\n\t\n\t// r1 and r2 are redundant - but it means that the parameters match for the\n\t// iterator callback in columns().data()\n\tvar __columnData = function ( settings, column, r1, r2, rows ) {\n\t\tvar a = [];\n\t\tfor ( var row=0, ien=rows.length ; row<ien ; row++ ) {\n\t\t\ta.push( _fnGetCellData( settings, rows[row], column ) );\n\t\t}\n\t\treturn a;\n\t};\n\t\n\t\n\tvar __column_selector = function ( settings, selector, opts )\n\t{\n\t\tvar\n\t\t\tcolumns = settings.aoColumns,\n\t\t\tnames = _pluck( columns, 'sName' ),\n\t\t\tnodes = _pluck( columns, 'nTh' );\n\t\n\t\tvar run = function ( s ) {\n\t\t\tvar selInt = _intVal( s );\n\t\n\t\t\t// Selector - all\n\t\t\tif ( s === '' ) {\n\t\t\t\treturn _range( columns.length );\n\t\t\t}\n\t\n\t\t\t// Selector - 
index\n\t\t\tif ( selInt !== null ) {\n\t\t\t\treturn [ selInt >= 0 ?\n\t\t\t\t\tselInt : // Count from left\n\t\t\t\t\tcolumns.length + selInt // Count from right (+ because its a negative value)\n\t\t\t\t];\n\t\t\t}\n\t\n\t\t\t// Selector = function\n\t\t\tif ( typeof s === 'function' ) {\n\t\t\t\tvar rows = _selector_row_indexes( settings, opts );\n\t\n\t\t\t\treturn $.map( columns, function (col, idx) {\n\t\t\t\t\treturn s(\n\t\t\t\t\t\t\tidx,\n\t\t\t\t\t\t\t__columnData( settings, idx, 0, 0, rows ),\n\t\t\t\t\t\t\tnodes[ idx ]\n\t\t\t\t\t\t) ? idx : null;\n\t\t\t\t} );\n\t\t\t}\n\t\n\t\t\t// jQuery or string selector\n\t\t\tvar match = typeof s === 'string' ?\n\t\t\t\ts.match( __re_column_selector ) :\n\t\t\t\t'';\n\t\n\t\t\tif ( match ) {\n\t\t\t\tswitch( match[2] ) {\n\t\t\t\t\tcase 'visIdx':\n\t\t\t\t\tcase 'visible':\n\t\t\t\t\t\tvar idx = parseInt( match[1], 10 );\n\t\t\t\t\t\t// Visible index given, convert to column index\n\t\t\t\t\t\tif ( idx < 0 ) {\n\t\t\t\t\t\t\t// Counting from the right\n\t\t\t\t\t\t\tvar visColumns = $.map( columns, function (col,i) {\n\t\t\t\t\t\t\t\treturn col.bVisible ? i : null;\n\t\t\t\t\t\t\t} );\n\t\t\t\t\t\t\treturn [ visColumns[ visColumns.length + idx ] ];\n\t\t\t\t\t\t}\n\t\t\t\t\t\t// Counting from the left\n\t\t\t\t\t\treturn [ _fnVisibleToColumnIndex( settings, idx ) ];\n\t\n\t\t\t\t\tcase 'name':\n\t\t\t\t\t\t// match by name. `names` is column index complete and in order\n\t\t\t\t\t\treturn $.map( names, function (name, i) {\n\t\t\t\t\t\t\treturn name === match[1] ? 
i : null;\n\t\t\t\t\t\t} );\n\t\n\t\t\t\t\tdefault:\n\t\t\t\t\t\treturn [];\n\t\t\t\t}\n\t\t\t}\n\t\n\t\t\t// Cell in the table body\n\t\t\tif ( s.nodeName && s._DT_CellIndex ) {\n\t\t\t\treturn [ s._DT_CellIndex.column ];\n\t\t\t}\n\t\n\t\t\t// jQuery selector on the TH elements for the columns\n\t\t\tvar jqResult = $( nodes )\n\t\t\t\t.filter( s )\n\t\t\t\t.map( function () {\n\t\t\t\t\treturn $.inArray( this, nodes ); // `nodes` is column index complete and in order\n\t\t\t\t} )\n\t\t\t\t.toArray();\n\t\n\t\t\tif ( jqResult.length || ! s.nodeName ) {\n\t\t\t\treturn jqResult;\n\t\t\t}\n\t\n\t\t\t// Otherwise a node which might have a `dt-column` data attribute, or be\n\t\t\t// a child or such an element\n\t\t\tvar host = $(s).closest('*[data-dt-column]');\n\t\t\treturn host.length ?\n\t\t\t\t[ host.data('dt-column') ] :\n\t\t\t\t[];\n\t\t};\n\t\n\t\treturn _selector_run( 'column', selector, run, settings, opts );\n\t};\n\t\n\t\n\tvar __setColumnVis = function ( settings, column, vis ) {\n\t\tvar\n\t\t\tcols = settings.aoColumns,\n\t\t\tcol  = cols[ column ],\n\t\t\tdata = settings.aoData,\n\t\t\trow, cells, i, ien, tr;\n\t\n\t\t// Get\n\t\tif ( vis === undefined ) {\n\t\t\treturn col.bVisible;\n\t\t}\n\t\n\t\t// Set\n\t\t// No change\n\t\tif ( col.bVisible === vis ) {\n\t\t\treturn;\n\t\t}\n\t\n\t\tif ( vis ) {\n\t\t\t// Insert column\n\t\t\t// Need to decide if we should use appendChild or insertBefore\n\t\t\tvar insertBefore = $.inArray( true, _pluck(cols, 'bVisible'), column+1 );\n\t\n\t\t\tfor ( i=0, ien=data.length ; i<ien ; i++ ) {\n\t\t\t\ttr = data[i].nTr;\n\t\t\t\tcells = data[i].anCells;\n\t\n\t\t\t\tif ( tr ) {\n\t\t\t\t\t// insertBefore can act like appendChild if 2nd arg is null\n\t\t\t\t\ttr.insertBefore( cells[ column ], cells[ insertBefore ] || null );\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\telse {\n\t\t\t// Remove column\n\t\t\t$( _pluck( settings.aoData, 'anCells', column ) ).detach();\n\t\t}\n\t\n\t\t// Common actions\n\t\tcol.bVisible = 
vis;\n\t\t_fnDrawHead( settings, settings.aoHeader );\n\t\t_fnDrawHead( settings, settings.aoFooter );\n\t\n\t\t_fnSaveState( settings );\n\t};\n\t\n\t\n\t_api_register( 'columns()', function ( selector, opts ) {\n\t\t// argument shifting\n\t\tif ( selector === undefined ) {\n\t\t\tselector = '';\n\t\t}\n\t\telse if ( $.isPlainObject( selector ) ) {\n\t\t\topts = selector;\n\t\t\tselector = '';\n\t\t}\n\t\n\t\topts = _selector_opts( opts );\n\t\n\t\tvar inst = this.iterator( 'table', function ( settings ) {\n\t\t\treturn __column_selector( settings, selector, opts );\n\t\t}, 1 );\n\t\n\t\t// Want argument shifting here and in _row_selector?\n\t\tinst.selector.cols = selector;\n\t\tinst.selector.opts = opts;\n\t\n\t\treturn inst;\n\t} );\n\t\n\t_api_registerPlural( 'columns().header()', 'column().header()', function ( selector, opts ) {\n\t\treturn this.iterator( 'column', function ( settings, column ) {\n\t\t\treturn settings.aoColumns[column].nTh;\n\t\t}, 1 );\n\t} );\n\t\n\t_api_registerPlural( 'columns().footer()', 'column().footer()', function ( selector, opts ) {\n\t\treturn this.iterator( 'column', function ( settings, column ) {\n\t\t\treturn settings.aoColumns[column].nTf;\n\t\t}, 1 );\n\t} );\n\t\n\t_api_registerPlural( 'columns().data()', 'column().data()', function () {\n\t\treturn this.iterator( 'column-rows', __columnData, 1 );\n\t} );\n\t\n\t_api_registerPlural( 'columns().dataSrc()', 'column().dataSrc()', function () {\n\t\treturn this.iterator( 'column', function ( settings, column ) {\n\t\t\treturn settings.aoColumns[column].mData;\n\t\t}, 1 );\n\t} );\n\t\n\t_api_registerPlural( 'columns().cache()', 'column().cache()', function ( type ) {\n\t\treturn this.iterator( 'column-rows', function ( settings, column, i, j, rows ) {\n\t\t\treturn _pluck_order( settings.aoData, rows,\n\t\t\t\ttype === 'search' ? 
'_aFilterData' : '_aSortData', column\n\t\t\t);\n\t\t}, 1 );\n\t} );\n\t\n\t_api_registerPlural( 'columns().nodes()', 'column().nodes()', function () {\n\t\treturn this.iterator( 'column-rows', function ( settings, column, i, j, rows ) {\n\t\t\treturn _pluck_order( settings.aoData, rows, 'anCells', column ) ;\n\t\t}, 1 );\n\t} );\n\t\n\t_api_registerPlural( 'columns().visible()', 'column().visible()', function ( vis, calc ) {\n\t\tvar ret = this.iterator( 'column', function ( settings, column ) {\n\t\t\tif ( vis === undefined ) {\n\t\t\t\treturn settings.aoColumns[ column ].bVisible;\n\t\t\t} // else\n\t\t\t__setColumnVis( settings, column, vis );\n\t\t} );\n\t\n\t\t// Group the column visibility changes\n\t\tif ( vis !== undefined ) {\n\t\t\t// Second loop once the first is done for events\n\t\t\tthis.iterator( 'column', function ( settings, column ) {\n\t\t\t\t_fnCallbackFire( settings, null, 'column-visibility', [settings, column, vis, calc] );\n\t\t\t} );\n\t\n\t\t\tif ( calc === undefined || calc ) {\n\t\t\t\tthis.columns.adjust();\n\t\t\t}\n\t\t}\n\t\n\t\treturn ret;\n\t} );\n\t\n\t_api_registerPlural( 'columns().indexes()', 'column().index()', function ( type ) {\n\t\treturn this.iterator( 'column', function ( settings, column ) {\n\t\t\treturn type === 'visible' ?\n\t\t\t\t_fnColumnIndexToVisible( settings, column ) :\n\t\t\t\tcolumn;\n\t\t}, 1 );\n\t} );\n\t\n\t_api_register( 'columns.adjust()', function () {\n\t\treturn this.iterator( 'table', function ( settings ) {\n\t\t\t_fnAdjustColumnSizing( settings );\n\t\t}, 1 );\n\t} );\n\t\n\t_api_register( 'column.index()', function ( type, idx ) {\n\t\tif ( this.context.length !== 0 ) {\n\t\t\tvar ctx = this.context[0];\n\t\n\t\t\tif ( type === 'fromVisible' || type === 'toData' ) {\n\t\t\t\treturn _fnVisibleToColumnIndex( ctx, idx );\n\t\t\t}\n\t\t\telse if ( type === 'fromData' || type === 'toVisible' ) {\n\t\t\t\treturn _fnColumnIndexToVisible( ctx, idx );\n\t\t\t}\n\t\t}\n\t} );\n\t\n\t_api_register( 
'column()', function ( selector, opts ) {\n\t\treturn _selector_first( this.columns( selector, opts ) );\n\t} );\n\t\n\t\n\t\n\tvar __cell_selector = function ( settings, selector, opts )\n\t{\n\t\tvar data = settings.aoData;\n\t\tvar rows = _selector_row_indexes( settings, opts );\n\t\tvar cells = _removeEmpty( _pluck_order( data, rows, 'anCells' ) );\n\t\tvar allCells = $( [].concat.apply([], cells) );\n\t\tvar row;\n\t\tvar columns = settings.aoColumns.length;\n\t\tvar a, i, ien, j, o, host;\n\t\n\t\tvar run = function ( s ) {\n\t\t\tvar fnSelector = typeof s === 'function';\n\t\n\t\t\tif ( s === null || s === undefined || fnSelector ) {\n\t\t\t\t// All cells and function selectors\n\t\t\t\ta = [];\n\t\n\t\t\t\tfor ( i=0, ien=rows.length ; i<ien ; i++ ) {\n\t\t\t\t\trow = rows[i];\n\t\n\t\t\t\t\tfor ( j=0 ; j<columns ; j++ ) {\n\t\t\t\t\t\to = {\n\t\t\t\t\t\t\trow: row,\n\t\t\t\t\t\t\tcolumn: j\n\t\t\t\t\t\t};\n\t\n\t\t\t\t\t\tif ( fnSelector ) {\n\t\t\t\t\t\t\t// Selector - function\n\t\t\t\t\t\t\thost = data[ row ];\n\t\n\t\t\t\t\t\t\tif ( s( o, _fnGetCellData(settings, row, j), host.anCells ? host.anCells[j] : null ) ) {\n\t\t\t\t\t\t\t\ta.push( o );\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\telse {\n\t\t\t\t\t\t\t// Selector - all\n\t\t\t\t\t\t\ta.push( o );\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\n\t\t\t\treturn a;\n\t\t\t}\n\t\t\t\n\t\t\t// Selector - index\n\t\t\tif ( $.isPlainObject( s ) ) {\n\t\t\t\treturn [s];\n\t\t\t}\n\t\n\t\t\t// Selector - jQuery filtered cells\n\t\t\tvar jqResult = allCells\n\t\t\t\t.filter( s )\n\t\t\t\t.map( function (i, el) {\n\t\t\t\t\treturn { // use a new object, in case someone changes the values\n\t\t\t\t\t\trow:    el._DT_CellIndex.row,\n\t\t\t\t\t\tcolumn: el._DT_CellIndex.column\n\t \t\t\t\t};\n\t\t\t\t} )\n\t\t\t\t.toArray();\n\t\n\t\t\tif ( jqResult.length || ! 
s.nodeName ) {\n\t\t\t\treturn jqResult;\n\t\t\t}\n\t\n\t\t\t// Otherwise the selector is a node, and there is one last option - the\n\t\t\t// element might be a child of an element which has dt-row and dt-column\n\t\t\t// data attributes\n\t\t\thost = $(s).closest('*[data-dt-row]');\n\t\t\treturn host.length ?\n\t\t\t\t[ {\n\t\t\t\t\trow: host.data('dt-row'),\n\t\t\t\t\tcolumn: host.data('dt-column')\n\t\t\t\t} ] :\n\t\t\t\t[];\n\t\t};\n\t\n\t\treturn _selector_run( 'cell', selector, run, settings, opts );\n\t};\n\t\n\t\n\t\n\t\n\t_api_register( 'cells()', function ( rowSelector, columnSelector, opts ) {\n\t\t// Argument shifting\n\t\tif ( $.isPlainObject( rowSelector ) ) {\n\t\t\t// Indexes\n\t\t\tif ( rowSelector.row === undefined ) {\n\t\t\t\t// Selector options in first parameter\n\t\t\t\topts = rowSelector;\n\t\t\t\trowSelector = null;\n\t\t\t}\n\t\t\telse {\n\t\t\t\t// Cell index objects in first parameter\n\t\t\t\topts = columnSelector;\n\t\t\t\tcolumnSelector = null;\n\t\t\t}\n\t\t}\n\t\tif ( $.isPlainObject( columnSelector ) ) {\n\t\t\topts = columnSelector;\n\t\t\tcolumnSelector = null;\n\t\t}\n\t\n\t\t// Cell selector\n\t\tif ( columnSelector === null || columnSelector === undefined ) {\n\t\t\treturn this.iterator( 'table', function ( settings ) {\n\t\t\t\treturn __cell_selector( settings, rowSelector, _selector_opts( opts ) );\n\t\t\t} );\n\t\t}\n\t\n\t\t// Row + column selector\n\t\tvar columns = this.columns( columnSelector, opts );\n\t\tvar rows = this.rows( rowSelector, opts );\n\t\tvar a, i, ien, j, jen;\n\t\n\t\tvar cells = this.iterator( 'table', function ( settings, idx ) {\n\t\t\ta = [];\n\t\n\t\t\tfor ( i=0, ien=rows[idx].length ; i<ien ; i++ ) {\n\t\t\t\tfor ( j=0, jen=columns[idx].length ; j<jen ; j++ ) {\n\t\t\t\t\ta.push( {\n\t\t\t\t\t\trow:    rows[idx][i],\n\t\t\t\t\t\tcolumn: columns[idx][j]\n\t\t\t\t\t} );\n\t\t\t\t}\n\t\t\t}\n\t\n\t\t\treturn a;\n\t\t}, 1 );\n\t\n\t\t$.extend( cells.selector, {\n\t\t\tcols: 
columnSelector,\n\t\t\trows: rowSelector,\n\t\t\topts: opts\n\t\t} );\n\t\n\t\treturn cells;\n\t} );\n\t\n\t\n\t_api_registerPlural( 'cells().nodes()', 'cell().node()', function () {\n\t\treturn this.iterator( 'cell', function ( settings, row, column ) {\n\t\t\tvar data = settings.aoData[ row ];\n\t\n\t\t\treturn data && data.anCells ?\n\t\t\t\tdata.anCells[ column ] :\n\t\t\t\tundefined;\n\t\t}, 1 );\n\t} );\n\t\n\t\n\t_api_register( 'cells().data()', function () {\n\t\treturn this.iterator( 'cell', function ( settings, row, column ) {\n\t\t\treturn _fnGetCellData( settings, row, column );\n\t\t}, 1 );\n\t} );\n\t\n\t\n\t_api_registerPlural( 'cells().cache()', 'cell().cache()', function ( type ) {\n\t\ttype = type === 'search' ? '_aFilterData' : '_aSortData';\n\t\n\t\treturn this.iterator( 'cell', function ( settings, row, column ) {\n\t\t\treturn settings.aoData[ row ][ type ][ column ];\n\t\t}, 1 );\n\t} );\n\t\n\t\n\t_api_registerPlural( 'cells().render()', 'cell().render()', function ( type ) {\n\t\treturn this.iterator( 'cell', function ( settings, row, column ) {\n\t\t\treturn _fnGetCellData( settings, row, column, type );\n\t\t}, 1 );\n\t} );\n\t\n\t\n\t_api_registerPlural( 'cells().indexes()', 'cell().index()', function () {\n\t\treturn this.iterator( 'cell', function ( settings, row, column ) {\n\t\t\treturn {\n\t\t\t\trow: row,\n\t\t\t\tcolumn: column,\n\t\t\t\tcolumnVisible: _fnColumnIndexToVisible( settings, column )\n\t\t\t};\n\t\t}, 1 );\n\t} );\n\t\n\t\n\t_api_registerPlural( 'cells().invalidate()', 'cell().invalidate()', function ( src ) {\n\t\treturn this.iterator( 'cell', function ( settings, row, column ) {\n\t\t\t_fnInvalidate( settings, row, src, column );\n\t\t} );\n\t} );\n\t\n\t\n\t\n\t_api_register( 'cell()', function ( rowSelector, columnSelector, opts ) {\n\t\treturn _selector_first( this.cells( rowSelector, columnSelector, opts ) );\n\t} );\n\t\n\t\n\t_api_register( 'cell().data()', function ( data ) {\n\t\tvar ctx = 
this.context;\n\t\tvar cell = this[0];\n\t\n\t\tif ( data === undefined ) {\n\t\t\t// Get\n\t\t\treturn ctx.length && cell.length ?\n\t\t\t\t_fnGetCellData( ctx[0], cell[0].row, cell[0].column ) :\n\t\t\t\tundefined;\n\t\t}\n\t\n\t\t// Set\n\t\t_fnSetCellData( ctx[0], cell[0].row, cell[0].column, data );\n\t\t_fnInvalidate( ctx[0], cell[0].row, 'data', cell[0].column );\n\t\n\t\treturn this;\n\t} );\n\t\n\t\n\t\n\t/**\n\t * Get current ordering (sorting) that has been applied to the table.\n\t *\n\t * @returns {array} 2D array containing the sorting information for the first\n\t *   table in the current context. Each element in the parent array represents\n\t *   a column being sorted upon (i.e. multi-sorting with two columns would have\n\t *   2 inner arrays). The inner arrays may have 2 or 3 elements. The first is\n\t *   the column index that the sorting condition applies to, the second is the\n\t *   direction of the sort (`desc` or `asc`) and, optionally, the third is the\n\t *   index of the sorting order from the `column.sorting` initialisation array.\n\t *//**\n\t * Set the ordering for the table.\n\t *\n\t * @param {integer} order Column index to sort upon.\n\t * @param {string} direction Direction of the sort to be applied (`asc` or `desc`)\n\t * @returns {DataTables.Api} this\n\t *//**\n\t * Set the ordering for the table.\n\t *\n\t * @param {array} order 1D array of sorting information to be applied.\n\t * @param {array} [...] 
Optional additional sorting conditions\n\t * @returns {DataTables.Api} this\n\t *//**\n\t * Set the ordering for the table.\n\t *\n\t * @param {array} order 2D array of sorting information to be applied.\n\t * @returns {DataTables.Api} this\n\t */\n\t_api_register( 'order()', function ( order, dir ) {\n\t\tvar ctx = this.context;\n\t\n\t\tif ( order === undefined ) {\n\t\t\t// get\n\t\t\treturn ctx.length !== 0 ?\n\t\t\t\tctx[0].aaSorting :\n\t\t\t\tundefined;\n\t\t}\n\t\n\t\t// set\n\t\tif ( typeof order === 'number' ) {\n\t\t\t// Simple column / direction passed in\n\t\t\torder = [ [ order, dir ] ];\n\t\t}\n\t\telse if ( order.length && ! $.isArray( order[0] ) ) {\n\t\t\t// Arguments passed in (list of 1D arrays)\n\t\t\torder = Array.prototype.slice.call( arguments );\n\t\t}\n\t\t// otherwise a 2D array was passed in\n\t\n\t\treturn this.iterator( 'table', function ( settings ) {\n\t\t\tsettings.aaSorting = order.slice();\n\t\t} );\n\t} );\n\t\n\t\n\t/**\n\t * Attach a sort listener to an element for a given column\n\t *\n\t * @param {node|jQuery|string} node Identifier for the element(s) to attach the\n\t *   listener to. This can take the form of a single DOM node, a jQuery\n\t *   collection of nodes or a jQuery selector which will identify the node(s).\n\t * @param {integer} column the column that a click on this node will sort on\n\t * @param {function} [callback] callback function when sort is run\n\t * @returns {DataTables.Api} this\n\t */\n\t_api_register( 'order.listener()', function ( node, column, callback ) {\n\t\treturn this.iterator( 'table', function ( settings ) {\n\t\t\t_fnSortAttachListener( settings, node, column, callback );\n\t\t} );\n\t} );\n\t\n\t\n\t_api_register( 'order.fixed()', function ( set ) {\n\t\tif ( ! 
set ) {\n\t\t\tvar ctx = this.context;\n\t\t\tvar fixed = ctx.length ?\n\t\t\t\tctx[0].aaSortingFixed :\n\t\t\t\tundefined;\n\t\n\t\t\treturn $.isArray( fixed ) ?\n\t\t\t\t{ pre: fixed } :\n\t\t\t\tfixed;\n\t\t}\n\t\n\t\treturn this.iterator( 'table', function ( settings ) {\n\t\t\tsettings.aaSortingFixed = $.extend( true, {}, set );\n\t\t} );\n\t} );\n\t\n\t\n\t// Order by the selected column(s)\n\t_api_register( [\n\t\t'columns().order()',\n\t\t'column().order()'\n\t], function ( dir ) {\n\t\tvar that = this;\n\t\n\t\treturn this.iterator( 'table', function ( settings, i ) {\n\t\t\tvar sort = [];\n\t\n\t\t\t$.each( that[i], function (j, col) {\n\t\t\t\tsort.push( [ col, dir ] );\n\t\t\t} );\n\t\n\t\t\tsettings.aaSorting = sort;\n\t\t} );\n\t} );\n\t\n\t\n\t\n\t_api_register( 'search()', function ( input, regex, smart, caseInsen ) {\n\t\tvar ctx = this.context;\n\t\n\t\tif ( input === undefined ) {\n\t\t\t// get\n\t\t\treturn ctx.length !== 0 ?\n\t\t\t\tctx[0].oPreviousSearch.sSearch :\n\t\t\t\tundefined;\n\t\t}\n\t\n\t\t// set\n\t\treturn this.iterator( 'table', function ( settings ) {\n\t\t\tif ( ! settings.oFeatures.bFilter ) {\n\t\t\t\treturn;\n\t\t\t}\n\t\n\t\t\t_fnFilterComplete( settings, $.extend( {}, settings.oPreviousSearch, {\n\t\t\t\t\"sSearch\": input+\"\",\n\t\t\t\t\"bRegex\":  regex === null ? false : regex,\n\t\t\t\t\"bSmart\":  smart === null ? true  : smart,\n\t\t\t\t\"bCaseInsensitive\": caseInsen === null ? true : caseInsen\n\t\t\t} ), 1 );\n\t\t} );\n\t} );\n\t\n\t\n\t_api_registerPlural(\n\t\t'columns().search()',\n\t\t'column().search()',\n\t\tfunction ( input, regex, smart, caseInsen ) {\n\t\t\treturn this.iterator( 'column', function ( settings, column ) {\n\t\t\t\tvar preSearch = settings.aoPreSearchCols;\n\t\n\t\t\t\tif ( input === undefined ) {\n\t\t\t\t\t// get\n\t\t\t\t\treturn preSearch[ column ].sSearch;\n\t\t\t\t}\n\t\n\t\t\t\t// set\n\t\t\t\tif ( ! 
settings.oFeatures.bFilter ) {\n\t\t\t\t\treturn;\n\t\t\t\t}\n\t\n\t\t\t\t$.extend( preSearch[ column ], {\n\t\t\t\t\t\"sSearch\": input+\"\",\n\t\t\t\t\t\"bRegex\":  regex === null ? false : regex,\n\t\t\t\t\t\"bSmart\":  smart === null ? true  : smart,\n\t\t\t\t\t\"bCaseInsensitive\": caseInsen === null ? true : caseInsen\n\t\t\t\t} );\n\t\n\t\t\t\t_fnFilterComplete( settings, settings.oPreviousSearch, 1 );\n\t\t\t} );\n\t\t}\n\t);\n\t\n\t/*\n\t * State API methods\n\t */\n\t\n\t_api_register( 'state()', function () {\n\t\treturn this.context.length ?\n\t\t\tthis.context[0].oSavedState :\n\t\t\tnull;\n\t} );\n\t\n\t\n\t_api_register( 'state.clear()', function () {\n\t\treturn this.iterator( 'table', function ( settings ) {\n\t\t\t// Save an empty object\n\t\t\tsettings.fnStateSaveCallback.call( settings.oInstance, settings, {} );\n\t\t} );\n\t} );\n\t\n\t\n\t_api_register( 'state.loaded()', function () {\n\t\treturn this.context.length ?\n\t\t\tthis.context[0].oLoadedState :\n\t\t\tnull;\n\t} );\n\t\n\t\n\t_api_register( 'state.save()', function () {\n\t\treturn this.iterator( 'table', function ( settings ) {\n\t\t\t_fnSaveState( settings );\n\t\t} );\n\t} );\n\t\n\t\n\t\n\t/**\n\t * Provide a common method for plug-ins to check the version of DataTables being\n\t * used, in order to ensure compatibility.\n\t *\n\t *  @param {string} version Version string to check for, in the format \"X.Y.Z\".\n\t *    Note that the formats \"X\" and \"X.Y\" are also acceptable.\n\t *  @returns {boolean} true if this version of DataTables is greater or equal to\n\t *    the required version, or false if this version of DataTales is not\n\t *    suitable\n\t *  @static\n\t *  @dtopt API-Static\n\t *\n\t *  @example\n\t *    alert( $.fn.dataTable.versionCheck( '1.9.0' ) );\n\t */\n\tDataTable.versionCheck = DataTable.fnVersionCheck = function( version )\n\t{\n\t\tvar aThis = DataTable.version.split('.');\n\t\tvar aThat = version.split('.');\n\t\tvar iThis, iThat;\n\t\n\t\tfor ( 
var i=0, iLen=aThat.length ; i<iLen ; i++ ) {\n\t\t\tiThis = parseInt( aThis[i], 10 ) || 0;\n\t\t\tiThat = parseInt( aThat[i], 10 ) || 0;\n\t\n\t\t\t// Parts are the same, keep comparing\n\t\t\tif (iThis === iThat) {\n\t\t\t\tcontinue;\n\t\t\t}\n\t\n\t\t\t// Parts are different, return immediately\n\t\t\treturn iThis > iThat;\n\t\t}\n\t\n\t\treturn true;\n\t};\n\t\n\t\n\t/**\n\t * Check if a `<table>` node is a DataTable table already or not.\n\t *\n\t *  @param {node|jquery|string} table Table node, jQuery object or jQuery\n\t *      selector for the table to test. Note that if more than more than one\n\t *      table is passed on, only the first will be checked\n\t *  @returns {boolean} true the table given is a DataTable, or false otherwise\n\t *  @static\n\t *  @dtopt API-Static\n\t *\n\t *  @example\n\t *    if ( ! $.fn.DataTable.isDataTable( '#example' ) ) {\n\t *      $('#example').dataTable();\n\t *    }\n\t */\n\tDataTable.isDataTable = DataTable.fnIsDataTable = function ( table )\n\t{\n\t\tvar t = $(table).get(0);\n\t\tvar is = false;\n\t\n\t\t$.each( DataTable.settings, function (i, o) {\n\t\t\tvar head = o.nScrollHead ? $('table', o.nScrollHead)[0] : null;\n\t\t\tvar foot = o.nScrollFoot ? 
$('table', o.nScrollFoot)[0] : null;\n\t\n\t\t\tif ( o.nTable === t || head === t || foot === t ) {\n\t\t\t\tis = true;\n\t\t\t}\n\t\t} );\n\t\n\t\treturn is;\n\t};\n\t\n\t\n\t/**\n\t * Get all DataTable tables that have been initialised - optionally you can\n\t * select to get only currently visible tables.\n\t *\n\t *  @param {boolean} [visible=false] Flag to indicate if you want all (default)\n\t *    or visible tables only.\n\t *  @returns {array} Array of `table` nodes (not DataTable instances) which are\n\t *    DataTables\n\t *  @static\n\t *  @dtopt API-Static\n\t *\n\t *  @example\n\t *    $.each( $.fn.dataTable.tables(true), function () {\n\t *      $(table).DataTable().columns.adjust();\n\t *    } );\n\t */\n\tDataTable.tables = DataTable.fnTables = function ( visible )\n\t{\n\t\tvar api = false;\n\t\n\t\tif ( $.isPlainObject( visible ) ) {\n\t\t\tapi = visible.api;\n\t\t\tvisible = visible.visible;\n\t\t}\n\t\n\t\tvar a = $.map( DataTable.settings, function (o) {\n\t\t\tif ( !visible || (visible && $(o.nTable).is(':visible')) ) {\n\t\t\t\treturn o.nTable;\n\t\t\t}\n\t\t} );\n\t\n\t\treturn api ?\n\t\t\tnew _Api( a ) :\n\t\t\ta;\n\t};\n\t\n\t\n\t/**\n\t * Convert from camel case parameters to Hungarian notation. This is made public\n\t * for the extensions to provide the same ability as DataTables core to accept\n\t * either the 1.9 style Hungarian notation, or the 1.10+ style camelCase\n\t * parameters.\n\t *\n\t *  @param {object} src The model object which holds all parameters that can be\n\t *    mapped.\n\t *  @param {object} user The object to convert from camel case to Hungarian.\n\t *  @param {boolean} force When set to `true`, properties which already have a\n\t *    Hungarian value in the `user` object will be overwritten. 
Otherwise they\n\t *    won't be.\n\t */\n\tDataTable.camelToHungarian = _fnCamelToHungarian;\n\t\n\t\n\t\n\t/**\n\t *\n\t */\n\t_api_register( '$()', function ( selector, opts ) {\n\t\tvar\n\t\t\trows   = this.rows( opts ).nodes(), // Get all rows\n\t\t\tjqRows = $(rows);\n\t\n\t\treturn $( [].concat(\n\t\t\tjqRows.filter( selector ).toArray(),\n\t\t\tjqRows.find( selector ).toArray()\n\t\t) );\n\t} );\n\t\n\t\n\t// jQuery functions to operate on the tables\n\t$.each( [ 'on', 'one', 'off' ], function (i, key) {\n\t\t_api_register( key+'()', function ( /* event, handler */ ) {\n\t\t\tvar args = Array.prototype.slice.call(arguments);\n\t\n\t\t\t// Add the `dt` namespace automatically if it isn't already present\n\t\t\tif ( ! args[0].match(/\\.dt\\b/) ) {\n\t\t\t\targs[0] += '.dt';\n\t\t\t}\n\t\n\t\t\tvar inst = $( this.tables().nodes() );\n\t\t\tinst[key].apply( inst, args );\n\t\t\treturn this;\n\t\t} );\n\t} );\n\t\n\t\n\t_api_register( 'clear()', function () {\n\t\treturn this.iterator( 'table', function ( settings ) {\n\t\t\t_fnClearTable( settings );\n\t\t} );\n\t} );\n\t\n\t\n\t_api_register( 'settings()', function () {\n\t\treturn new _Api( this.context, this.context );\n\t} );\n\t\n\t\n\t_api_register( 'init()', function () {\n\t\tvar ctx = this.context;\n\t\treturn ctx.length ? 
ctx[0].oInit : null;\n\t} );\n\t\n\t\n\t_api_register( 'data()', function () {\n\t\treturn this.iterator( 'table', function ( settings ) {\n\t\t\treturn _pluck( settings.aoData, '_aData' );\n\t\t} ).flatten();\n\t} );\n\t\n\t\n\t_api_register( 'destroy()', function ( remove ) {\n\t\tremove = remove || false;\n\t\n\t\treturn this.iterator( 'table', function ( settings ) {\n\t\t\tvar orig      = settings.nTableWrapper.parentNode;\n\t\t\tvar classes   = settings.oClasses;\n\t\t\tvar table     = settings.nTable;\n\t\t\tvar tbody     = settings.nTBody;\n\t\t\tvar thead     = settings.nTHead;\n\t\t\tvar tfoot     = settings.nTFoot;\n\t\t\tvar jqTable   = $(table);\n\t\t\tvar jqTbody   = $(tbody);\n\t\t\tvar jqWrapper = $(settings.nTableWrapper);\n\t\t\tvar rows      = $.map( settings.aoData, function (r) { return r.nTr; } );\n\t\t\tvar i, ien;\n\t\n\t\t\t// Flag to note that the table is currently being destroyed - no action\n\t\t\t// should be taken\n\t\t\tsettings.bDestroying = true;\n\t\n\t\t\t// Fire off the destroy callbacks for plug-ins etc\n\t\t\t_fnCallbackFire( settings, \"aoDestroyCallback\", \"destroy\", [settings] );\n\t\n\t\t\t// If not being removed from the document, make all columns visible\n\t\t\tif ( ! 
remove ) {\n\t\t\t\tnew _Api( settings ).columns().visible( true );\n\t\t\t}\n\t\n\t\t\t// Blitz all `DT` namespaced events (these are internal events, the\n\t\t\t// lowercase, `dt` events are user subscribed and they are responsible\n\t\t\t// for removing them\n\t\t\tjqWrapper.unbind('.DT').find(':not(tbody *)').unbind('.DT');\n\t\t\t$(window).unbind('.DT-'+settings.sInstance);\n\t\n\t\t\t// When scrolling we had to break the table up - restore it\n\t\t\tif ( table != thead.parentNode ) {\n\t\t\t\tjqTable.children('thead').detach();\n\t\t\t\tjqTable.append( thead );\n\t\t\t}\n\t\n\t\t\tif ( tfoot && table != tfoot.parentNode ) {\n\t\t\t\tjqTable.children('tfoot').detach();\n\t\t\t\tjqTable.append( tfoot );\n\t\t\t}\n\t\n\t\t\tsettings.aaSorting = [];\n\t\t\tsettings.aaSortingFixed = [];\n\t\t\t_fnSortingClasses( settings );\n\t\n\t\t\t$( rows ).removeClass( settings.asStripeClasses.join(' ') );\n\t\n\t\t\t$('th, td', thead).removeClass( classes.sSortable+' '+\n\t\t\t\tclasses.sSortableAsc+' '+classes.sSortableDesc+' '+classes.sSortableNone\n\t\t\t);\n\t\n\t\t\tif ( settings.bJUI ) {\n\t\t\t\t$('th span.'+classes.sSortIcon+ ', td span.'+classes.sSortIcon, thead).detach();\n\t\t\t\t$('th, td', thead).each( function () {\n\t\t\t\t\tvar wrapper = $('div.'+classes.sSortJUIWrapper, this);\n\t\t\t\t\t$(this).append( wrapper.contents() );\n\t\t\t\t\twrapper.detach();\n\t\t\t\t} );\n\t\t\t}\n\t\n\t\t\t// Add the TR elements back into the table in their original order\n\t\t\tjqTbody.children().detach();\n\t\t\tjqTbody.append( rows );\n\t\n\t\t\t// Remove the DataTables generated nodes, events and classes\n\t\t\tvar removedMethod = remove ? 'remove' : 'detach';\n\t\t\tjqTable[ removedMethod ]();\n\t\t\tjqWrapper[ removedMethod ]();\n\t\n\t\t\t// If we need to reattach the table to the document\n\t\t\tif ( ! 
remove && orig ) {\n\t\t\t\t// insertBefore acts like appendChild if !arg[1]\n\t\t\t\torig.insertBefore( table, settings.nTableReinsertBefore );\n\t\n\t\t\t\t// Restore the width of the original table - was read from the style property,\n\t\t\t\t// so we can restore directly to that\n\t\t\t\tjqTable\n\t\t\t\t\t.css( 'width', settings.sDestroyWidth )\n\t\t\t\t\t.removeClass( classes.sTable );\n\t\n\t\t\t\t// If the were originally stripe classes - then we add them back here.\n\t\t\t\t// Note this is not fool proof (for example if not all rows had stripe\n\t\t\t\t// classes - but it's a good effort without getting carried away\n\t\t\t\tien = settings.asDestroyStripes.length;\n\t\n\t\t\t\tif ( ien ) {\n\t\t\t\t\tjqTbody.children().each( function (i) {\n\t\t\t\t\t\t$(this).addClass( settings.asDestroyStripes[i % ien] );\n\t\t\t\t\t} );\n\t\t\t\t}\n\t\t\t}\n\t\n\t\t\t/* Remove the settings object from the settings array */\n\t\t\tvar idx = $.inArray( settings, DataTable.settings );\n\t\t\tif ( idx !== -1 ) {\n\t\t\t\tDataTable.settings.splice( idx, 1 );\n\t\t\t}\n\t\t} );\n\t} );\n\t\n\t\n\t// Add the `every()` method for rows, columns and cells in a compact form\n\t$.each( [ 'column', 'row', 'cell' ], function ( i, type ) {\n\t\t_api_register( type+'s().every()', function ( fn ) {\n\t\t\tvar opts = this.selector.opts;\n\t\t\tvar api = this;\n\t\n\t\t\treturn this.iterator( type, function ( settings, arg1, arg2, arg3, arg4 ) {\n\t\t\t\t// Rows and columns:\n\t\t\t\t//  arg1 - index\n\t\t\t\t//  arg2 - table counter\n\t\t\t\t//  arg3 - loop counter\n\t\t\t\t//  arg4 - undefined\n\t\t\t\t// Cells:\n\t\t\t\t//  arg1 - row index\n\t\t\t\t//  arg2 - column index\n\t\t\t\t//  arg3 - table counter\n\t\t\t\t//  arg4 - loop counter\n\t\t\t\tfn.call(\n\t\t\t\t\tapi[ type ](\n\t\t\t\t\t\targ1,\n\t\t\t\t\t\ttype==='cell' ? arg2 : opts,\n\t\t\t\t\t\ttype==='cell' ? 
opts : undefined\n\t\t\t\t\t),\n\t\t\t\t\targ1, arg2, arg3, arg4\n\t\t\t\t);\n\t\t\t} );\n\t\t} );\n\t} );\n\t\n\t\n\t// i18n method for extensions to be able to use the language object from the\n\t// DataTable\n\t_api_register( 'i18n()', function ( token, def, plural ) {\n\t\tvar ctx = this.context[0];\n\t\tvar resolved = _fnGetObjectDataFn( token )( ctx.oLanguage );\n\t\n\t\tif ( resolved === undefined ) {\n\t\t\tresolved = def;\n\t\t}\n\t\n\t\tif ( plural !== undefined && $.isPlainObject( resolved ) ) {\n\t\t\tresolved = resolved[ plural ] !== undefined ?\n\t\t\t\tresolved[ plural ] :\n\t\t\t\tresolved._;\n\t\t}\n\t\n\t\treturn resolved.replace( '%d', plural ); // nb: plural might be undefined,\n\t} );\n\n\t/**\n\t * Version string for plug-ins to check compatibility. Allowed format is\n\t * `a.b.c-d` where: a:int, b:int, c:int, d:string(dev|beta|alpha). `d` is used\n\t * only for non-release builds. See http://semver.org/ for more information.\n\t *  @member\n\t *  @type string\n\t *  @default Version number\n\t */\n\tDataTable.version = \"1.10.12\";\n\n\t/**\n\t * Private data store, containing all of the settings objects that are\n\t * created for the tables on a given page.\n\t *\n\t * Note that the `DataTable.settings` object is aliased to\n\t * `jQuery.fn.dataTableExt` through which it may be accessed and\n\t * manipulated, or `jQuery.fn.dataTable.settings`.\n\t *  @member\n\t *  @type array\n\t *  @default []\n\t *  @private\n\t */\n\tDataTable.settings = [];\n\n\t/**\n\t * Object models container, for the various models that DataTables has\n\t * available to it. 
These models define the objects that are used to hold\n\t * the active state and configuration of the table.\n\t *  @namespace\n\t */\n\tDataTable.models = {};\n\t\n\t\n\t\n\t/**\n\t * Template object for the way in which DataTables holds information about\n\t * search information for the global filter and individual column filters.\n\t *  @namespace\n\t */\n\tDataTable.models.oSearch = {\n\t\t/**\n\t\t * Flag to indicate if the filtering should be case insensitive or not\n\t\t *  @type boolean\n\t\t *  @default true\n\t\t */\n\t\t\"bCaseInsensitive\": true,\n\t\n\t\t/**\n\t\t * Applied search term\n\t\t *  @type string\n\t\t *  @default <i>Empty string</i>\n\t\t */\n\t\t\"sSearch\": \"\",\n\t\n\t\t/**\n\t\t * Flag to indicate if the search term should be interpreted as a\n\t\t * regular expression (true) or not (false) and therefore and special\n\t\t * regex characters escaped.\n\t\t *  @type boolean\n\t\t *  @default false\n\t\t */\n\t\t\"bRegex\": false,\n\t\n\t\t/**\n\t\t * Flag to indicate if DataTables is to use its smart filtering or not.\n\t\t *  @type boolean\n\t\t *  @default true\n\t\t */\n\t\t\"bSmart\": true\n\t};\n\t\n\t\n\t\n\t\n\t/**\n\t * Template object for the way in which DataTables holds information about\n\t * each individual row. This is the object format used for the settings\n\t * aoData array.\n\t *  @namespace\n\t */\n\tDataTable.models.oRow = {\n\t\t/**\n\t\t * TR element for the row\n\t\t *  @type node\n\t\t *  @default null\n\t\t */\n\t\t\"nTr\": null,\n\t\n\t\t/**\n\t\t * Array of TD elements for each row. This is null until the row has been\n\t\t * created.\n\t\t *  @type array nodes\n\t\t *  @default []\n\t\t */\n\t\t\"anCells\": null,\n\t\n\t\t/**\n\t\t * Data object from the original data source for the row. This is either\n\t\t * an array if using the traditional form of DataTables, or an object if\n\t\t * using mData options. 
The exact type will depend on the passed in\n\t\t * data from the data source, or will be an array if using DOM a data\n\t\t * source.\n\t\t *  @type array|object\n\t\t *  @default []\n\t\t */\n\t\t\"_aData\": [],\n\t\n\t\t/**\n\t\t * Sorting data cache - this array is ostensibly the same length as the\n\t\t * number of columns (although each index is generated only as it is\n\t\t * needed), and holds the data that is used for sorting each column in the\n\t\t * row. We do this cache generation at the start of the sort in order that\n\t\t * the formatting of the sort data need be done only once for each cell\n\t\t * per sort. This array should not be read from or written to by anything\n\t\t * other than the master sorting methods.\n\t\t *  @type array\n\t\t *  @default null\n\t\t *  @private\n\t\t */\n\t\t\"_aSortData\": null,\n\t\n\t\t/**\n\t\t * Per cell filtering data cache. As per the sort data cache, used to\n\t\t * increase the performance of the filtering in DataTables\n\t\t *  @type array\n\t\t *  @default null\n\t\t *  @private\n\t\t */\n\t\t\"_aFilterData\": null,\n\t\n\t\t/**\n\t\t * Filtering data cache. This is the same as the cell filtering cache, but\n\t\t * in this case a string rather than an array. This is easily computed with\n\t\t * a join on `_aFilterData`, but is provided as a cache so the join isn't\n\t\t * needed on every search (memory traded for performance)\n\t\t *  @type array\n\t\t *  @default null\n\t\t *  @private\n\t\t */\n\t\t\"_sFilterRow\": null,\n\t\n\t\t/**\n\t\t * Cache of the class name that DataTables has applied to the row, so we\n\t\t * can quickly look at this variable rather than needing to do a DOM check\n\t\t * on className for the nTr property.\n\t\t *  @type string\n\t\t *  @default <i>Empty string</i>\n\t\t *  @private\n\t\t */\n\t\t\"_sRowStripe\": \"\",\n\t\n\t\t/**\n\t\t * Denote if the original data source was from the DOM, or the data source\n\t\t * object. 
This is used for invalidating data, so DataTables can\n\t\t * automatically read data from the original source, unless uninstructed\n\t\t * otherwise.\n\t\t *  @type string\n\t\t *  @default null\n\t\t *  @private\n\t\t */\n\t\t\"src\": null,\n\t\n\t\t/**\n\t\t * Index in the aoData array. This saves an indexOf lookup when we have the\n\t\t * object, but want to know the index\n\t\t *  @type integer\n\t\t *  @default -1\n\t\t *  @private\n\t\t */\n\t\t\"idx\": -1\n\t};\n\t\n\t\n\t/**\n\t * Template object for the column information object in DataTables. This object\n\t * is held in the settings aoColumns array and contains all the information that\n\t * DataTables needs about each individual column.\n\t *\n\t * Note that this object is related to {@link DataTable.defaults.column}\n\t * but this one is the internal data store for DataTables's cache of columns.\n\t * It should NOT be manipulated outside of DataTables. Any configuration should\n\t * be done through the initialisation options.\n\t *  @namespace\n\t */\n\tDataTable.models.oColumn = {\n\t\t/**\n\t\t * Column index. This could be worked out on-the-fly with $.inArray, but it\n\t\t * is faster to just hold it as a variable\n\t\t *  @type integer\n\t\t *  @default null\n\t\t */\n\t\t\"idx\": null,\n\t\n\t\t/**\n\t\t * A list of the columns that sorting should occur on when this column\n\t\t * is sorted. That this property is an array allows multi-column sorting\n\t\t * to be defined for a column (for example first name / last name columns\n\t\t * would benefit from this). The values are integers pointing to the\n\t\t * columns to be sorted on (typically it will be a single integer pointing\n\t\t * at itself, but that doesn't need to be the case).\n\t\t *  @type array\n\t\t */\n\t\t\"aDataSort\": null,\n\t\n\t\t/**\n\t\t * Define the sorting directions that are applied to the column, in sequence\n\t\t * as the column is repeatedly sorted upon - i.e. 
the first value is used\n\t\t * as the sorting direction when the column if first sorted (clicked on).\n\t\t * Sort it again (click again) and it will move on to the next index.\n\t\t * Repeat until loop.\n\t\t *  @type array\n\t\t */\n\t\t\"asSorting\": null,\n\t\n\t\t/**\n\t\t * Flag to indicate if the column is searchable, and thus should be included\n\t\t * in the filtering or not.\n\t\t *  @type boolean\n\t\t */\n\t\t\"bSearchable\": null,\n\t\n\t\t/**\n\t\t * Flag to indicate if the column is sortable or not.\n\t\t *  @type boolean\n\t\t */\n\t\t\"bSortable\": null,\n\t\n\t\t/**\n\t\t * Flag to indicate if the column is currently visible in the table or not\n\t\t *  @type boolean\n\t\t */\n\t\t\"bVisible\": null,\n\t\n\t\t/**\n\t\t * Store for manual type assignment using the `column.type` option. This\n\t\t * is held in store so we can manipulate the column's `sType` property.\n\t\t *  @type string\n\t\t *  @default null\n\t\t *  @private\n\t\t */\n\t\t\"_sManualType\": null,\n\t\n\t\t/**\n\t\t * Flag to indicate if HTML5 data attributes should be used as the data\n\t\t * source for filtering or sorting. True is either are.\n\t\t *  @type boolean\n\t\t *  @default false\n\t\t *  @private\n\t\t */\n\t\t\"_bAttrSrc\": false,\n\t\n\t\t/**\n\t\t * Developer definable function that is called whenever a cell is created (Ajax source,\n\t\t * etc) or processed for input (DOM source). This can be used as a compliment to mRender\n\t\t * allowing you to modify the DOM element (add background colour for example) when the\n\t\t * element is available.\n\t\t *  @type function\n\t\t *  @param {element} nTd The TD node that has been created\n\t\t *  @param {*} sData The Data for the cell\n\t\t *  @param {array|object} oData The data for the whole row\n\t\t *  @param {int} iRow The row index for the aoData data store\n\t\t *  @default null\n\t\t */\n\t\t\"fnCreatedCell\": null,\n\t\n\t\t/**\n\t\t * Function to get data from a cell in a column. 
You should <b>never</b>\n\t\t * access data directly through _aData internally in DataTables - always use\n\t\t * the method attached to this property. It allows mData to function as\n\t\t * required. This function is automatically assigned by the column\n\t\t * initialisation method\n\t\t *  @type function\n\t\t *  @param {array|object} oData The data array/object for the array\n\t\t *    (i.e. aoData[]._aData)\n\t\t *  @param {string} sSpecific The specific data type you want to get -\n\t\t *    'display', 'type' 'filter' 'sort'\n\t\t *  @returns {*} The data for the cell from the given row's data\n\t\t *  @default null\n\t\t */\n\t\t\"fnGetData\": null,\n\t\n\t\t/**\n\t\t * Function to set data for a cell in the column. You should <b>never</b>\n\t\t * set the data directly to _aData internally in DataTables - always use\n\t\t * this method. It allows mData to function as required. This function\n\t\t * is automatically assigned by the column initialisation method\n\t\t *  @type function\n\t\t *  @param {array|object} oData The data array/object for the array\n\t\t *    (i.e. aoData[]._aData)\n\t\t *  @param {*} sValue Value to set\n\t\t *  @default null\n\t\t */\n\t\t\"fnSetData\": null,\n\t\n\t\t/**\n\t\t * Property to read the value for the cells in the column from the data\n\t\t * source array / object. If null, then the default content is used, if a\n\t\t * function is given then the return from the function is used.\n\t\t *  @type function|int|string|null\n\t\t *  @default null\n\t\t */\n\t\t\"mData\": null,\n\t\n\t\t/**\n\t\t * Partner property to mData which is used (only when defined) to get\n\t\t * the data - i.e. 
it is basically the same as mData, but without the\n\t\t * 'set' option, and also the data fed to it is the result from mData.\n\t\t * This is the rendering method to match the data method of mData.\n\t\t *  @type function|int|string|null\n\t\t *  @default null\n\t\t */\n\t\t\"mRender\": null,\n\t\n\t\t/**\n\t\t * Unique header TH/TD element for this column - this is what the sorting\n\t\t * listener is attached to (if sorting is enabled.)\n\t\t *  @type node\n\t\t *  @default null\n\t\t */\n\t\t\"nTh\": null,\n\t\n\t\t/**\n\t\t * Unique footer TH/TD element for this column (if there is one). Not used\n\t\t * in DataTables as such, but can be used for plug-ins to reference the\n\t\t * footer for each column.\n\t\t *  @type node\n\t\t *  @default null\n\t\t */\n\t\t\"nTf\": null,\n\t\n\t\t/**\n\t\t * The class to apply to all TD elements in the table's TBODY for the column\n\t\t *  @type string\n\t\t *  @default null\n\t\t */\n\t\t\"sClass\": null,\n\t\n\t\t/**\n\t\t * When DataTables calculates the column widths to assign to each column,\n\t\t * it finds the longest string in each column and then constructs a\n\t\t * temporary table and reads the widths from that. The problem with this\n\t\t * is that \"mmm\" is much wider then \"iiii\", but the latter is a longer\n\t\t * string - thus the calculation can go wrong (doing it properly and putting\n\t\t * it into an DOM object and measuring that is horribly(!) slow). Thus as\n\t\t * a \"work around\" we provide this option. It will append its value to the\n\t\t * text that is found to be the longest string for the column - i.e. 
padding.\n\t\t *  @type string\n\t\t */\n\t\t\"sContentPadding\": null,\n\t\n\t\t/**\n\t\t * Allows a default value to be given for a column's data, and will be used\n\t\t * whenever a null data source is encountered (this can be because mData\n\t\t * is set to null, or because the data source itself is null).\n\t\t *  @type string\n\t\t *  @default null\n\t\t */\n\t\t\"sDefaultContent\": null,\n\t\n\t\t/**\n\t\t * Name for the column, allowing reference to the column by name as well as\n\t\t * by index (needs a lookup to work by name).\n\t\t *  @type string\n\t\t */\n\t\t\"sName\": null,\n\t\n\t\t/**\n\t\t * Custom sorting data type - defines which of the available plug-ins in\n\t\t * afnSortData the custom sorting will use - if any is defined.\n\t\t *  @type string\n\t\t *  @default std\n\t\t */\n\t\t\"sSortDataType\": 'std',\n\t\n\t\t/**\n\t\t * Class to be applied to the header element when sorting on this column\n\t\t *  @type string\n\t\t *  @default null\n\t\t */\n\t\t\"sSortingClass\": null,\n\t\n\t\t/**\n\t\t * Class to be applied to the header element when sorting on this column -\n\t\t * when jQuery UI theming is used.\n\t\t *  @type string\n\t\t *  @default null\n\t\t */\n\t\t\"sSortingClassJUI\": null,\n\t\n\t\t/**\n\t\t * Title of the column - what is seen in the TH element (nTh).\n\t\t *  @type string\n\t\t */\n\t\t\"sTitle\": null,\n\t\n\t\t/**\n\t\t * Column sorting and filtering type\n\t\t *  @type string\n\t\t *  @default null\n\t\t */\n\t\t\"sType\": null,\n\t\n\t\t/**\n\t\t * Width of the column\n\t\t *  @type string\n\t\t *  @default null\n\t\t */\n\t\t\"sWidth\": null,\n\t\n\t\t/**\n\t\t * Width of the column when it was first \"encountered\"\n\t\t *  @type string\n\t\t *  @default null\n\t\t */\n\t\t\"sWidthOrig\": null\n\t};\n\t\n\t\n\t/*\n\t * Developer note: The properties of the object below are given in Hungarian\n\t * notation, that was used as the interface for DataTables prior to v1.10, however\n\t * from v1.10 onwards the primary 
interface is camel case. In order to avoid\n\t * breaking backwards compatibility utterly with this change, the Hungarian\n\t * version is still, internally the primary interface, but is is not documented\n\t * - hence the @name tags in each doc comment. This allows a Javascript function\n\t * to create a map from Hungarian notation to camel case (going the other direction\n\t * would require each property to be listed, which would at around 3K to the size\n\t * of DataTables, while this method is about a 0.5K hit.\n\t *\n\t * Ultimately this does pave the way for Hungarian notation to be dropped\n\t * completely, but that is a massive amount of work and will break current\n\t * installs (therefore is on-hold until v2).\n\t */\n\t\n\t/**\n\t * Initialisation options that can be given to DataTables at initialisation\n\t * time.\n\t *  @namespace\n\t */\n\tDataTable.defaults = {\n\t\t/**\n\t\t * An array of data to use for the table, passed in at initialisation which\n\t\t * will be used in preference to any data which is already in the DOM. 
This is\n\t\t * particularly useful for constructing tables purely in Javascript, for\n\t\t * example with a custom Ajax call.\n\t\t *  @type array\n\t\t *  @default null\n\t\t *\n\t\t *  @dtopt Option\n\t\t *  @name DataTable.defaults.data\n\t\t *\n\t\t *  @example\n\t\t *    // Using a 2D array data source\n\t\t *    $(document).ready( function () {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"data\": [\n\t\t *          ['Trident', 'Internet Explorer 4.0', 'Win 95+', 4, 'X'],\n\t\t *          ['Trident', 'Internet Explorer 5.0', 'Win 95+', 5, 'C'],\n\t\t *        ],\n\t\t *        \"columns\": [\n\t\t *          { \"title\": \"Engine\" },\n\t\t *          { \"title\": \"Browser\" },\n\t\t *          { \"title\": \"Platform\" },\n\t\t *          { \"title\": \"Version\" },\n\t\t *          { \"title\": \"Grade\" }\n\t\t *        ]\n\t\t *      } );\n\t\t *    } );\n\t\t *\n\t\t *  @example\n\t\t *    // Using an array of objects as a data source (`data`)\n\t\t *    $(document).ready( function () {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"data\": [\n\t\t *          {\n\t\t *            \"engine\":   \"Trident\",\n\t\t *            \"browser\":  \"Internet Explorer 4.0\",\n\t\t *            \"platform\": \"Win 95+\",\n\t\t *            \"version\":  4,\n\t\t *            \"grade\":    \"X\"\n\t\t *          },\n\t\t *          {\n\t\t *            \"engine\":   \"Trident\",\n\t\t *            \"browser\":  \"Internet Explorer 5.0\",\n\t\t *            \"platform\": \"Win 95+\",\n\t\t *            \"version\":  5,\n\t\t *            \"grade\":    \"C\"\n\t\t *          }\n\t\t *        ],\n\t\t *        \"columns\": [\n\t\t *          { \"title\": \"Engine\",   \"data\": \"engine\" },\n\t\t *          { \"title\": \"Browser\",  \"data\": \"browser\" },\n\t\t *          { \"title\": \"Platform\", \"data\": \"platform\" },\n\t\t *          { \"title\": \"Version\",  \"data\": \"version\" },\n\t\t *          { \"title\": \"Grade\",    
\"data\": \"grade\" }\n\t\t *        ]\n\t\t *      } );\n\t\t *    } );\n\t\t */\n\t\t\"aaData\": null,\n\t\n\t\n\t\t/**\n\t\t * If ordering is enabled, then DataTables will perform a first pass sort on\n\t\t * initialisation. You can define which column(s) the sort is performed\n\t\t * upon, and the sorting direction, with this variable. The `sorting` array\n\t\t * should contain an array for each column to be sorted initially containing\n\t\t * the column's index and a direction string ('asc' or 'desc').\n\t\t *  @type array\n\t\t *  @default [[0,'asc']]\n\t\t *\n\t\t *  @dtopt Option\n\t\t *  @name DataTable.defaults.order\n\t\t *\n\t\t *  @example\n\t\t *    // Sort by 3rd column first, and then 4th column\n\t\t *    $(document).ready( function() {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"order\": [[2,'asc'], [3,'desc']]\n\t\t *      } );\n\t\t *    } );\n\t\t *\n\t\t *    // No initial sorting\n\t\t *    $(document).ready( function() {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"order\": []\n\t\t *      } );\n\t\t *    } );\n\t\t */\n\t\t\"aaSorting\": [[0,'asc']],\n\t\n\t\n\t\t/**\n\t\t * This parameter is basically identical to the `sorting` parameter, but\n\t\t * cannot be overridden by user interaction with the table. What this means\n\t\t * is that you could have a column (visible or hidden) which the sorting\n\t\t * will always be forced on first - any sorting after that (from the user)\n\t\t * will then be performed as required. 
This can be useful for grouping rows\n\t\t * together.\n\t\t *  @type array\n\t\t *  @default null\n\t\t *\n\t\t *  @dtopt Option\n\t\t *  @name DataTable.defaults.orderFixed\n\t\t *\n\t\t *  @example\n\t\t *    $(document).ready( function() {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"orderFixed\": [[0,'asc']]\n\t\t *      } );\n\t\t *    } )\n\t\t */\n\t\t\"aaSortingFixed\": [],\n\t\n\t\n\t\t/**\n\t\t * DataTables can be instructed to load data to display in the table from a\n\t\t * Ajax source. This option defines how that Ajax call is made and where to.\n\t\t *\n\t\t * The `ajax` property has three different modes of operation, depending on\n\t\t * how it is defined. These are:\n\t\t *\n\t\t * * `string` - Set the URL from where the data should be loaded from.\n\t\t * * `object` - Define properties for `jQuery.ajax`.\n\t\t * * `function` - Custom data get function\n\t\t *\n\t\t * `string`\n\t\t * --------\n\t\t *\n\t\t * As a string, the `ajax` property simply defines the URL from which\n\t\t * DataTables will load data.\n\t\t *\n\t\t * `object`\n\t\t * --------\n\t\t *\n\t\t * As an object, the parameters in the object are passed to\n\t\t * [jQuery.ajax](http://api.jquery.com/jQuery.ajax/) allowing fine control\n\t\t * of the Ajax request. DataTables has a number of default parameters which\n\t\t * you can override using this option. Please refer to the jQuery\n\t\t * documentation for a full description of the options available, although\n\t\t * the following parameters provide additional options in DataTables or\n\t\t * require special consideration:\n\t\t *\n\t\t * * `data` - As with jQuery, `data` can be provided as an object, but it\n\t\t *   can also be used as a function to manipulate the data DataTables sends\n\t\t *   to the server. The function takes a single parameter, an object of\n\t\t *   parameters with the values that DataTables has readied for sending. 
An\n\t\t *   object may be returned which will be merged into the DataTables\n\t\t *   defaults, or you can add the items to the object that was passed in and\n\t\t *   not return anything from the function. This supersedes `fnServerParams`\n\t\t *   from DataTables 1.9-.\n\t\t *\n\t\t * * `dataSrc` - By default DataTables will look for the property `data` (or\n\t\t *   `aaData` for compatibility with DataTables 1.9-) when obtaining data\n\t\t *   from an Ajax source or for server-side processing - this parameter\n\t\t *   allows that property to be changed. You can use Javascript dotted\n\t\t *   object notation to get a data source for multiple levels of nesting, or\n\t\t *   it my be used as a function. As a function it takes a single parameter,\n\t\t *   the JSON returned from the server, which can be manipulated as\n\t\t *   required, with the returned value being that used by DataTables as the\n\t\t *   data source for the table. This supersedes `sAjaxDataProp` from\n\t\t *   DataTables 1.9-.\n\t\t *\n\t\t * * `success` - Should not be overridden it is used internally in\n\t\t *   DataTables. To manipulate / transform the data returned by the server\n\t\t *   use `ajax.dataSrc`, or use `ajax` as a function (see below).\n\t\t *\n\t\t * `function`\n\t\t * ----------\n\t\t *\n\t\t * As a function, making the Ajax call is left up to yourself allowing\n\t\t * complete control of the Ajax request. Indeed, if desired, a method other\n\t\t * than Ajax could be used to obtain the required data, such as Web storage\n\t\t * or an AIR database.\n\t\t *\n\t\t * The function is given four parameters and no return is required. The\n\t\t * parameters are:\n\t\t *\n\t\t * 1. _object_ - Data to send to the server\n\t\t * 2. _function_ - Callback function that must be executed when the required\n\t\t *    data has been obtained. That data should be passed into the callback\n\t\t *    as the only parameter\n\t\t * 3. 
_object_ - DataTables settings object for the table\n\t\t *\n\t\t * Note that this supersedes `fnServerData` from DataTables 1.9-.\n\t\t *\n\t\t *  @type string|object|function\n\t\t *  @default null\n\t\t *\n\t\t *  @dtopt Option\n\t\t *  @name DataTable.defaults.ajax\n\t\t *  @since 1.10.0\n\t\t *\n\t\t * @example\n\t\t *   // Get JSON data from a file via Ajax.\n\t\t *   // Note DataTables expects data in the form `{ data: [ ...data... ] }` by default).\n\t\t *   $('#example').dataTable( {\n\t\t *     \"ajax\": \"data.json\"\n\t\t *   } );\n\t\t *\n\t\t * @example\n\t\t *   // Get JSON data from a file via Ajax, using `dataSrc` to change\n\t\t *   // `data` to `tableData` (i.e. `{ tableData: [ ...data... ] }`)\n\t\t *   $('#example').dataTable( {\n\t\t *     \"ajax\": {\n\t\t *       \"url\": \"data.json\",\n\t\t *       \"dataSrc\": \"tableData\"\n\t\t *     }\n\t\t *   } );\n\t\t *\n\t\t * @example\n\t\t *   // Get JSON data from a file via Ajax, using `dataSrc` to read data\n\t\t *   // from a plain array rather than an array in an object\n\t\t *   $('#example').dataTable( {\n\t\t *     \"ajax\": {\n\t\t *       \"url\": \"data.json\",\n\t\t *       \"dataSrc\": \"\"\n\t\t *     }\n\t\t *   } );\n\t\t *\n\t\t * @example\n\t\t *   // Manipulate the data returned from the server - add a link to data\n\t\t *   // (note this can, should, be done using `render` for the column - this\n\t\t *   // is just a simple example of how the data can be manipulated).\n\t\t *   $('#example').dataTable( {\n\t\t *     \"ajax\": {\n\t\t *       \"url\": \"data.json\",\n\t\t *       \"dataSrc\": function ( json ) {\n\t\t *         for ( var i=0, ien=json.length ; i<ien ; i++ ) {\n\t\t *           json[i][0] = '<a href=\"/message/'+json[i][0]+'>View message</a>';\n\t\t *         }\n\t\t *         return json;\n\t\t *       }\n\t\t *     }\n\t\t *   } );\n\t\t *\n\t\t * @example\n\t\t *   // Add data to the request\n\t\t *   $('#example').dataTable( {\n\t\t *     \"ajax\": {\n\t\t 
*       \"url\": \"data.json\",\n\t\t *       \"data\": function ( d ) {\n\t\t *         return {\n\t\t *           \"extra_search\": $('#extra').val()\n\t\t *         };\n\t\t *       }\n\t\t *     }\n\t\t *   } );\n\t\t *\n\t\t * @example\n\t\t *   // Send request as POST\n\t\t *   $('#example').dataTable( {\n\t\t *     \"ajax\": {\n\t\t *       \"url\": \"data.json\",\n\t\t *       \"type\": \"POST\"\n\t\t *     }\n\t\t *   } );\n\t\t *\n\t\t * @example\n\t\t *   // Get the data from localStorage (could interface with a form for\n\t\t *   // adding, editing and removing rows).\n\t\t *   $('#example').dataTable( {\n\t\t *     \"ajax\": function (data, callback, settings) {\n\t\t *       callback(\n\t\t *         JSON.parse( localStorage.getItem('dataTablesData') )\n\t\t *       );\n\t\t *     }\n\t\t *   } );\n\t\t */\n\t\t\"ajax\": null,\n\t\n\t\n\t\t/**\n\t\t * This parameter allows you to readily specify the entries in the length drop\n\t\t * down menu that DataTables shows when pagination is enabled. 
It can be\n\t\t * either a 1D array of options which will be used for both the displayed\n\t\t * option and the value, or a 2D array which will use the array in the first\n\t\t * position as the value, and the array in the second position as the\n\t\t * displayed options (useful for language strings such as 'All').\n\t\t *\n\t\t * Note that the `pageLength` property will be automatically set to the\n\t\t * first value given in this array, unless `pageLength` is also provided.\n\t\t *  @type array\n\t\t *  @default [ 10, 25, 50, 100 ]\n\t\t *\n\t\t *  @dtopt Option\n\t\t *  @name DataTable.defaults.lengthMenu\n\t\t *\n\t\t *  @example\n\t\t *    $(document).ready( function() {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"lengthMenu\": [[10, 25, 50, -1], [10, 25, 50, \"All\"]]\n\t\t *      } );\n\t\t *    } );\n\t\t */\n\t\t\"aLengthMenu\": [ 10, 25, 50, 100 ],\n\t\n\t\n\t\t/**\n\t\t * The `columns` option in the initialisation parameter allows you to define\n\t\t * details about the way individual columns behave. For a full list of\n\t\t * column options that can be set, please see\n\t\t * {@link DataTable.defaults.column}. Note that if you use `columns` to\n\t\t * define your columns, you must have an entry in the array for every single\n\t\t * column that you have in your table (these can be null if you don't which\n\t\t * to specify any options).\n\t\t *  @member\n\t\t *\n\t\t *  @name DataTable.defaults.column\n\t\t */\n\t\t\"aoColumns\": null,\n\t\n\t\t/**\n\t\t * Very similar to `columns`, `columnDefs` allows you to target a specific\n\t\t * column, multiple columns, or all columns, using the `targets` property of\n\t\t * each object in the array. This allows great flexibility when creating\n\t\t * tables, as the `columnDefs` arrays can be of any length, targeting the\n\t\t * columns you specifically want. 
`columnDefs` may use any of the column\n\t\t * options available: {@link DataTable.defaults.column}, but it _must_\n\t\t * have `targets` defined in each object in the array. Values in the `targets`\n\t\t * array may be:\n\t\t *   <ul>\n\t\t *     <li>a string - class name will be matched on the TH for the column</li>\n\t\t *     <li>0 or a positive integer - column index counting from the left</li>\n\t\t *     <li>a negative integer - column index counting from the right</li>\n\t\t *     <li>the string \"_all\" - all columns (i.e. assign a default)</li>\n\t\t *   </ul>\n\t\t *  @member\n\t\t *\n\t\t *  @name DataTable.defaults.columnDefs\n\t\t */\n\t\t\"aoColumnDefs\": null,\n\t\n\t\n\t\t/**\n\t\t * Basically the same as `search`, this parameter defines the individual column\n\t\t * filtering state at initialisation time. The array must be of the same size\n\t\t * as the number of columns, and each element be an object with the parameters\n\t\t * `search` and `escapeRegex` (the latter is optional). 'null' is also\n\t\t * accepted and the default will be used.\n\t\t *  @type array\n\t\t *  @default []\n\t\t *\n\t\t *  @dtopt Option\n\t\t *  @name DataTable.defaults.searchCols\n\t\t *\n\t\t *  @example\n\t\t *    $(document).ready( function() {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"searchCols\": [\n\t\t *          null,\n\t\t *          { \"search\": \"My filter\" },\n\t\t *          null,\n\t\t *          { \"search\": \"^[0-9]\", \"escapeRegex\": false }\n\t\t *        ]\n\t\t *      } );\n\t\t *    } )\n\t\t */\n\t\t\"aoSearchCols\": [],\n\t\n\t\n\t\t/**\n\t\t * An array of CSS classes that should be applied to displayed rows. 
This\n\t\t * array may be of any length, and DataTables will apply each class\n\t\t * sequentially, looping when required.\n\t\t *  @type array\n\t\t *  @default null <i>Will take the values determined by the `oClasses.stripe*`\n\t\t *    options</i>\n\t\t *\n\t\t *  @dtopt Option\n\t\t *  @name DataTable.defaults.stripeClasses\n\t\t *\n\t\t *  @example\n\t\t *    $(document).ready( function() {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"stripeClasses\": [ 'strip1', 'strip2', 'strip3' ]\n\t\t *      } );\n\t\t *    } )\n\t\t */\n\t\t\"asStripeClasses\": null,\n\t\n\t\n\t\t/**\n\t\t * Enable or disable automatic column width calculation. This can be disabled\n\t\t * as an optimisation (it takes some time to calculate the widths) if the\n\t\t * tables widths are passed in using `columns`.\n\t\t *  @type boolean\n\t\t *  @default true\n\t\t *\n\t\t *  @dtopt Features\n\t\t *  @name DataTable.defaults.autoWidth\n\t\t *\n\t\t *  @example\n\t\t *    $(document).ready( function () {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"autoWidth\": false\n\t\t *      } );\n\t\t *    } );\n\t\t */\n\t\t\"bAutoWidth\": true,\n\t\n\t\n\t\t/**\n\t\t * Deferred rendering can provide DataTables with a huge speed boost when you\n\t\t * are using an Ajax or JS data source for the table. 
This option, when set to\n\t\t * true, will cause DataTables to defer the creation of the table elements for\n\t\t * each row until they are needed for a draw - saving a significant amount of\n\t\t * time.\n\t\t *  @type boolean\n\t\t *  @default false\n\t\t *\n\t\t *  @dtopt Features\n\t\t *  @name DataTable.defaults.deferRender\n\t\t *\n\t\t *  @example\n\t\t *    $(document).ready( function() {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"ajax\": \"sources/arrays.txt\",\n\t\t *        \"deferRender\": true\n\t\t *      } );\n\t\t *    } );\n\t\t */\n\t\t\"bDeferRender\": false,\n\t\n\t\n\t\t/**\n\t\t * Replace a DataTable which matches the given selector and replace it with\n\t\t * one which has the properties of the new initialisation object passed. If no\n\t\t * table matches the selector, then the new DataTable will be constructed as\n\t\t * per normal.\n\t\t *  @type boolean\n\t\t *  @default false\n\t\t *\n\t\t *  @dtopt Options\n\t\t *  @name DataTable.defaults.destroy\n\t\t *\n\t\t *  @example\n\t\t *    $(document).ready( function() {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"srollY\": \"200px\",\n\t\t *        \"paginate\": false\n\t\t *      } );\n\t\t *\n\t\t *      // Some time later....\n\t\t *      $('#example').dataTable( {\n\t\t *        \"filter\": false,\n\t\t *        \"destroy\": true\n\t\t *      } );\n\t\t *    } );\n\t\t */\n\t\t\"bDestroy\": false,\n\t\n\t\n\t\t/**\n\t\t * Enable or disable filtering of data. Filtering in DataTables is \"smart\" in\n\t\t * that it allows the end user to input multiple words (space separated) and\n\t\t * will match a row containing those words, even if not in the order that was\n\t\t * specified (this allow matching across multiple columns). 
Note that if you\n\t\t * wish to use filtering in DataTables this must remain 'true' - to remove the\n\t\t * default filtering input box and retain filtering abilities, please use\n\t\t * {@link DataTable.defaults.dom}.\n\t\t *  @type boolean\n\t\t *  @default true\n\t\t *\n\t\t *  @dtopt Features\n\t\t *  @name DataTable.defaults.searching\n\t\t *\n\t\t *  @example\n\t\t *    $(document).ready( function () {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"searching\": false\n\t\t *      } );\n\t\t *    } );\n\t\t */\n\t\t\"bFilter\": true,\n\t\n\t\n\t\t/**\n\t\t * Enable or disable the table information display. This shows information\n\t\t * about the data that is currently visible on the page, including information\n\t\t * about filtered data if that action is being performed.\n\t\t *  @type boolean\n\t\t *  @default true\n\t\t *\n\t\t *  @dtopt Features\n\t\t *  @name DataTable.defaults.info\n\t\t *\n\t\t *  @example\n\t\t *    $(document).ready( function () {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"info\": false\n\t\t *      } );\n\t\t *    } );\n\t\t */\n\t\t\"bInfo\": true,\n\t\n\t\n\t\t/**\n\t\t * Enable jQuery UI ThemeRoller support (required as ThemeRoller requires some\n\t\t * slightly different and additional mark-up from what DataTables has\n\t\t * traditionally used).\n\t\t *  @type boolean\n\t\t *  @default false\n\t\t *\n\t\t *  @dtopt Features\n\t\t *  @name DataTable.defaults.jQueryUI\n\t\t *\n\t\t *  @example\n\t\t *    $(document).ready( function() {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"jQueryUI\": true\n\t\t *      } );\n\t\t *    } );\n\t\t */\n\t\t\"bJQueryUI\": false,\n\t\n\t\n\t\t/**\n\t\t * Allows the end user to select the size of a formatted page from a select\n\t\t * menu (sizes are 10, 25, 50 and 100). 
Requires pagination (`paginate`).\n\t\t *  @type boolean\n\t\t *  @default true\n\t\t *\n\t\t *  @dtopt Features\n\t\t *  @name DataTable.defaults.lengthChange\n\t\t *\n\t\t *  @example\n\t\t *    $(document).ready( function () {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"lengthChange\": false\n\t\t *      } );\n\t\t *    } );\n\t\t */\n\t\t\"bLengthChange\": true,\n\t\n\t\n\t\t/**\n\t\t * Enable or disable pagination.\n\t\t *  @type boolean\n\t\t *  @default true\n\t\t *\n\t\t *  @dtopt Features\n\t\t *  @name DataTable.defaults.paging\n\t\t *\n\t\t *  @example\n\t\t *    $(document).ready( function () {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"paging\": false\n\t\t *      } );\n\t\t *    } );\n\t\t */\n\t\t\"bPaginate\": true,\n\t\n\t\n\t\t/**\n\t\t * Enable or disable the display of a 'processing' indicator when the table is\n\t\t * being processed (e.g. a sort). This is particularly useful for tables with\n\t\t * large amounts of data where it can take a noticeable amount of time to sort\n\t\t * the entries.\n\t\t *  @type boolean\n\t\t *  @default false\n\t\t *\n\t\t *  @dtopt Features\n\t\t *  @name DataTable.defaults.processing\n\t\t *\n\t\t *  @example\n\t\t *    $(document).ready( function () {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"processing\": true\n\t\t *      } );\n\t\t *    } );\n\t\t */\n\t\t\"bProcessing\": false,\n\t\n\t\n\t\t/**\n\t\t * Retrieve the DataTables object for the given selector. Note that if the\n\t\t * table has already been initialised, this parameter will cause DataTables\n\t\t * to simply return the object that has already been set up - it will not take\n\t\t * account of any changes you might have made to the initialisation object\n\t\t * passed to DataTables (setting this parameter to true is an acknowledgement\n\t\t * that you understand this). 
`destroy` can be used to reinitialise a table if\n\t\t * you need.\n\t\t *  @type boolean\n\t\t *  @default false\n\t\t *\n\t\t *  @dtopt Options\n\t\t *  @name DataTable.defaults.retrieve\n\t\t *\n\t\t *  @example\n\t\t *    $(document).ready( function() {\n\t\t *      initTable();\n\t\t *      tableActions();\n\t\t *    } );\n\t\t *\n\t\t *    function initTable ()\n\t\t *    {\n\t\t *      return $('#example').dataTable( {\n\t\t *        \"scrollY\": \"200px\",\n\t\t *        \"paginate\": false,\n\t\t *        \"retrieve\": true\n\t\t *      } );\n\t\t *    }\n\t\t *\n\t\t *    function tableActions ()\n\t\t *    {\n\t\t *      var table = initTable();\n\t\t *      // perform API operations with oTable\n\t\t *    }\n\t\t */\n\t\t\"bRetrieve\": false,\n\t\n\t\n\t\t/**\n\t\t * When vertical (y) scrolling is enabled, DataTables will force the height of\n\t\t * the table's viewport to the given height at all times (useful for layout).\n\t\t * However, this can look odd when filtering data down to a small data set,\n\t\t * and the footer is left \"floating\" further down. This parameter (when\n\t\t * enabled) will cause DataTables to collapse the table's viewport down when\n\t\t * the result set will fit within the given Y height.\n\t\t *  @type boolean\n\t\t *  @default false\n\t\t *\n\t\t *  @dtopt Options\n\t\t *  @name DataTable.defaults.scrollCollapse\n\t\t *\n\t\t *  @example\n\t\t *    $(document).ready( function() {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"scrollY\": \"200\",\n\t\t *        \"scrollCollapse\": true\n\t\t *      } );\n\t\t *    } );\n\t\t */\n\t\t\"bScrollCollapse\": false,\n\t\n\t\n\t\t/**\n\t\t * Configure DataTables to use server-side processing. 
Note that the\n\t\t * `ajax` parameter must also be given in order to give DataTables a\n\t\t * source to obtain the required data for each draw.\n\t\t *  @type boolean\n\t\t *  @default false\n\t\t *\n\t\t *  @dtopt Features\n\t\t *  @dtopt Server-side\n\t\t *  @name DataTable.defaults.serverSide\n\t\t *\n\t\t *  @example\n\t\t *    $(document).ready( function () {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"serverSide\": true,\n\t\t *        \"ajax\": \"xhr.php\"\n\t\t *      } );\n\t\t *    } );\n\t\t */\n\t\t\"bServerSide\": false,\n\t\n\t\n\t\t/**\n\t\t * Enable or disable sorting of columns. Sorting of individual columns can be\n\t\t * disabled by the `sortable` option for each column.\n\t\t *  @type boolean\n\t\t *  @default true\n\t\t *\n\t\t *  @dtopt Features\n\t\t *  @name DataTable.defaults.ordering\n\t\t *\n\t\t *  @example\n\t\t *    $(document).ready( function () {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"ordering\": false\n\t\t *      } );\n\t\t *    } );\n\t\t */\n\t\t\"bSort\": true,\n\t\n\t\n\t\t/**\n\t\t * Enable or display DataTables' ability to sort multiple columns at the\n\t\t * same time (activated by shift-click by the user).\n\t\t *  @type boolean\n\t\t *  @default true\n\t\t *\n\t\t *  @dtopt Options\n\t\t *  @name DataTable.defaults.orderMulti\n\t\t *\n\t\t *  @example\n\t\t *    // Disable multiple column sorting ability\n\t\t *    $(document).ready( function () {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"orderMulti\": false\n\t\t *      } );\n\t\t *    } );\n\t\t */\n\t\t\"bSortMulti\": true,\n\t\n\t\n\t\t/**\n\t\t * Allows control over whether DataTables should use the top (true) unique\n\t\t * cell that is found for a single column, or the bottom (false - default).\n\t\t * This is useful when using complex headers.\n\t\t *  @type boolean\n\t\t *  @default false\n\t\t *\n\t\t *  @dtopt Options\n\t\t *  @name DataTable.defaults.orderCellsTop\n\t\t *\n\t\t *  @example\n\t\t *    
$(document).ready( function() {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"orderCellsTop\": true\n\t\t *      } );\n\t\t *    } );\n\t\t */\n\t\t\"bSortCellsTop\": false,\n\t\n\t\n\t\t/**\n\t\t * Enable or disable the addition of the classes `sorting\\_1`, `sorting\\_2` and\n\t\t * `sorting\\_3` to the columns which are currently being sorted on. This is\n\t\t * presented as a feature switch as it can increase processing time (while\n\t\t * classes are removed and added) so for large data sets you might want to\n\t\t * turn this off.\n\t\t *  @type boolean\n\t\t *  @default true\n\t\t *\n\t\t *  @dtopt Features\n\t\t *  @name DataTable.defaults.orderClasses\n\t\t *\n\t\t *  @example\n\t\t *    $(document).ready( function () {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"orderClasses\": false\n\t\t *      } );\n\t\t *    } );\n\t\t */\n\t\t\"bSortClasses\": true,\n\t\n\t\n\t\t/**\n\t\t * Enable or disable state saving. When enabled HTML5 `localStorage` will be\n\t\t * used to save table display information such as pagination information,\n\t\t * display length, filtering and sorting. As such when the end user reloads\n\t\t * the page the display display will match what thy had previously set up.\n\t\t *\n\t\t * Due to the use of `localStorage` the default state saving is not supported\n\t\t * in IE6 or 7. 
If state saving is required in those browsers, use\n\t\t * `stateSaveCallback` to provide a storage solution such as cookies.\n\t\t *  @type boolean\n\t\t *  @default false\n\t\t *\n\t\t *  @dtopt Features\n\t\t *  @name DataTable.defaults.stateSave\n\t\t *\n\t\t *  @example\n\t\t *    $(document).ready( function () {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"stateSave\": true\n\t\t *      } );\n\t\t *    } );\n\t\t */\n\t\t\"bStateSave\": false,\n\t\n\t\n\t\t/**\n\t\t * This function is called when a TR element is created (and all TD child\n\t\t * elements have been inserted), or registered if using a DOM source, allowing\n\t\t * manipulation of the TR element (adding classes etc).\n\t\t *  @type function\n\t\t *  @param {node} row \"TR\" element for the current row\n\t\t *  @param {array} data Raw data array for this row\n\t\t *  @param {int} dataIndex The index of this row in the internal aoData array\n\t\t *\n\t\t *  @dtopt Callbacks\n\t\t *  @name DataTable.defaults.createdRow\n\t\t *\n\t\t *  @example\n\t\t *    $(document).ready( function() {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"createdRow\": function( row, data, dataIndex ) {\n\t\t *          // Bold the grade for all 'A' grade browsers\n\t\t *          if ( data[4] == \"A\" )\n\t\t *          {\n\t\t *            $('td:eq(4)', row).html( '<b>A</b>' );\n\t\t *          }\n\t\t *        }\n\t\t *      } );\n\t\t *    } );\n\t\t */\n\t\t\"fnCreatedRow\": null,\n\t\n\t\n\t\t/**\n\t\t * This function is called on every 'draw' event, and allows you to\n\t\t * dynamically modify any aspect you want about the created DOM.\n\t\t *  @type function\n\t\t *  @param {object} settings DataTables settings object\n\t\t *\n\t\t *  @dtopt Callbacks\n\t\t *  @name DataTable.defaults.drawCallback\n\t\t *\n\t\t *  @example\n\t\t *    $(document).ready( function() {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"drawCallback\": function( settings ) {\n\t\t *          alert( 
'DataTables has redrawn the table' );\n\t\t *        }\n\t\t *      } );\n\t\t *    } );\n\t\t */\n\t\t\"fnDrawCallback\": null,\n\t\n\t\n\t\t/**\n\t\t * Identical to fnHeaderCallback() but for the table footer this function\n\t\t * allows you to modify the table footer on every 'draw' event.\n\t\t *  @type function\n\t\t *  @param {node} foot \"TR\" element for the footer\n\t\t *  @param {array} data Full table data (as derived from the original HTML)\n\t\t *  @param {int} start Index for the current display starting point in the\n\t\t *    display array\n\t\t *  @param {int} end Index for the current display ending point in the\n\t\t *    display array\n\t\t *  @param {array int} display Index array to translate the visual position\n\t\t *    to the full data array\n\t\t *\n\t\t *  @dtopt Callbacks\n\t\t *  @name DataTable.defaults.footerCallback\n\t\t *\n\t\t *  @example\n\t\t *    $(document).ready( function() {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"footerCallback\": function( tfoot, data, start, end, display ) {\n\t\t *          tfoot.getElementsByTagName('th')[0].innerHTML = \"Starting index is \"+start;\n\t\t *        }\n\t\t *      } );\n\t\t *    } )\n\t\t */\n\t\t\"fnFooterCallback\": null,\n\t\n\t\n\t\t/**\n\t\t * When rendering large numbers in the information element for the table\n\t\t * (i.e. \"Showing 1 to 10 of 57 entries\") DataTables will render large numbers\n\t\t * to have a comma separator for the 'thousands' units (e.g. 1 million is\n\t\t * rendered as \"1,000,000\") to help readability for the end user. 
This\n\t\t * function will override the default method DataTables uses.\n\t\t *  @type function\n\t\t *  @member\n\t\t *  @param {int} toFormat number to be formatted\n\t\t *  @returns {string} formatted string for DataTables to show the number\n\t\t *\n\t\t *  @dtopt Callbacks\n\t\t *  @name DataTable.defaults.formatNumber\n\t\t *\n\t\t *  @example\n\t\t *    // Format a number using a single quote for the separator (note that\n\t\t *    // this can also be done with the language.thousands option)\n\t\t *    $(document).ready( function() {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"formatNumber\": function ( toFormat ) {\n\t\t *          return toFormat.toString().replace(\n\t\t *            /\\B(?=(\\d{3})+(?!\\d))/g, \"'\"\n\t\t *          );\n\t\t *        };\n\t\t *      } );\n\t\t *    } );\n\t\t */\n\t\t\"fnFormatNumber\": function ( toFormat ) {\n\t\t\treturn toFormat.toString().replace(\n\t\t\t\t/\\B(?=(\\d{3})+(?!\\d))/g,\n\t\t\t\tthis.oLanguage.sThousands\n\t\t\t);\n\t\t},\n\t\n\t\n\t\t/**\n\t\t * This function is called on every 'draw' event, and allows you to\n\t\t * dynamically modify the header row. 
This can be used to calculate and\n\t\t * display useful information about the table.\n\t\t *  @type function\n\t\t *  @param {node} head \"TR\" element for the header\n\t\t *  @param {array} data Full table data (as derived from the original HTML)\n\t\t *  @param {int} start Index for the current display starting point in the\n\t\t *    display array\n\t\t *  @param {int} end Index for the current display ending point in the\n\t\t *    display array\n\t\t *  @param {array int} display Index array to translate the visual position\n\t\t *    to the full data array\n\t\t *\n\t\t *  @dtopt Callbacks\n\t\t *  @name DataTable.defaults.headerCallback\n\t\t *\n\t\t *  @example\n\t\t *    $(document).ready( function() {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"fheaderCallback\": function( head, data, start, end, display ) {\n\t\t *          head.getElementsByTagName('th')[0].innerHTML = \"Displaying \"+(end-start)+\" records\";\n\t\t *        }\n\t\t *      } );\n\t\t *    } )\n\t\t */\n\t\t\"fnHeaderCallback\": null,\n\t\n\t\n\t\t/**\n\t\t * The information element can be used to convey information about the current\n\t\t * state of the table. Although the internationalisation options presented by\n\t\t * DataTables are quite capable of dealing with most customisations, there may\n\t\t * be times where you wish to customise the string further. 
This callback\n\t\t * allows you to do exactly that.\n\t\t *  @type function\n\t\t *  @param {object} oSettings DataTables settings object\n\t\t *  @param {int} start Starting position in data for the draw\n\t\t *  @param {int} end End position in data for the draw\n\t\t *  @param {int} max Total number of rows in the table (regardless of\n\t\t *    filtering)\n\t\t *  @param {int} total Total number of rows in the data set, after filtering\n\t\t *  @param {string} pre The string that DataTables has formatted using it's\n\t\t *    own rules\n\t\t *  @returns {string} The string to be displayed in the information element.\n\t\t *\n\t\t *  @dtopt Callbacks\n\t\t *  @name DataTable.defaults.infoCallback\n\t\t *\n\t\t *  @example\n\t\t *    $('#example').dataTable( {\n\t\t *      \"infoCallback\": function( settings, start, end, max, total, pre ) {\n\t\t *        return start +\" to \"+ end;\n\t\t *      }\n\t\t *    } );\n\t\t */\n\t\t\"fnInfoCallback\": null,\n\t\n\t\n\t\t/**\n\t\t * Called when the table has been initialised. Normally DataTables will\n\t\t * initialise sequentially and there will be no need for this function,\n\t\t * however, this does not hold true when using external language information\n\t\t * since that is obtained using an async XHR call.\n\t\t *  @type function\n\t\t *  @param {object} settings DataTables settings object\n\t\t *  @param {object} json The JSON object request from the server - only\n\t\t *    present if client-side Ajax sourced data is used\n\t\t *\n\t\t *  @dtopt Callbacks\n\t\t *  @name DataTable.defaults.initComplete\n\t\t *\n\t\t *  @example\n\t\t *    $(document).ready( function() {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"initComplete\": function(settings, json) {\n\t\t *          alert( 'DataTables has finished its initialisation.' 
);\n\t\t *        }\n\t\t *      } );\n\t\t *    } )\n\t\t */\n\t\t\"fnInitComplete\": null,\n\t\n\t\n\t\t/**\n\t\t * Called at the very start of each table draw and can be used to cancel the\n\t\t * draw by returning false, any other return (including undefined) results in\n\t\t * the full draw occurring).\n\t\t *  @type function\n\t\t *  @param {object} settings DataTables settings object\n\t\t *  @returns {boolean} False will cancel the draw, anything else (including no\n\t\t *    return) will allow it to complete.\n\t\t *\n\t\t *  @dtopt Callbacks\n\t\t *  @name DataTable.defaults.preDrawCallback\n\t\t *\n\t\t *  @example\n\t\t *    $(document).ready( function() {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"preDrawCallback\": function( settings ) {\n\t\t *          if ( $('#test').val() == 1 ) {\n\t\t *            return false;\n\t\t *          }\n\t\t *        }\n\t\t *      } );\n\t\t *    } );\n\t\t */\n\t\t\"fnPreDrawCallback\": null,\n\t\n\t\n\t\t/**\n\t\t * This function allows you to 'post process' each row after it have been\n\t\t * generated for each table draw, but before it is rendered on screen. 
This\n\t\t * function might be used for setting the row class name etc.\n\t\t *  @type function\n\t\t *  @param {node} row \"TR\" element for the current row\n\t\t *  @param {array} data Raw data array for this row\n\t\t *  @param {int} displayIndex The display index for the current table draw\n\t\t *  @param {int} displayIndexFull The index of the data in the full list of\n\t\t *    rows (after filtering)\n\t\t *\n\t\t *  @dtopt Callbacks\n\t\t *  @name DataTable.defaults.rowCallback\n\t\t *\n\t\t *  @example\n\t\t *    $(document).ready( function() {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"rowCallback\": function( row, data, displayIndex, displayIndexFull ) {\n\t\t *          // Bold the grade for all 'A' grade browsers\n\t\t *          if ( data[4] == \"A\" ) {\n\t\t *            $('td:eq(4)', row).html( '<b>A</b>' );\n\t\t *          }\n\t\t *        }\n\t\t *      } );\n\t\t *    } );\n\t\t */\n\t\t\"fnRowCallback\": null,\n\t\n\t\n\t\t/**\n\t\t * __Deprecated__ The functionality provided by this parameter has now been\n\t\t * superseded by that provided through `ajax`, which should be used instead.\n\t\t *\n\t\t * This parameter allows you to override the default function which obtains\n\t\t * the data from the server so something more suitable for your application.\n\t\t * For example you could use POST data, or pull information from a Gears or\n\t\t * AIR database.\n\t\t *  @type function\n\t\t *  @member\n\t\t *  @param {string} source HTTP source to obtain the data from (`ajax`)\n\t\t *  @param {array} data A key/value pair object containing the data to send\n\t\t *    to the server\n\t\t *  @param {function} callback to be called on completion of the data get\n\t\t *    process that will draw the data on the page.\n\t\t *  @param {object} settings DataTables settings object\n\t\t *\n\t\t *  @dtopt Callbacks\n\t\t *  @dtopt Server-side\n\t\t *  @name DataTable.defaults.serverData\n\t\t *\n\t\t *  @deprecated 1.10. 
Please use `ajax` for this functionality now.\n\t\t */\n\t\t\"fnServerData\": null,\n\t\n\t\n\t\t/**\n\t\t * __Deprecated__ The functionality provided by this parameter has now been\n\t\t * superseded by that provided through `ajax`, which should be used instead.\n\t\t *\n\t\t *  It is often useful to send extra data to the server when making an Ajax\n\t\t * request - for example custom filtering information, and this callback\n\t\t * function makes it trivial to send extra information to the server. The\n\t\t * passed in parameter is the data set that has been constructed by\n\t\t * DataTables, and you can add to this or modify it as you require.\n\t\t *  @type function\n\t\t *  @param {array} data Data array (array of objects which are name/value\n\t\t *    pairs) that has been constructed by DataTables and will be sent to the\n\t\t *    server. In the case of Ajax sourced data with server-side processing\n\t\t *    this will be an empty array, for server-side processing there will be a\n\t\t *    significant number of parameters!\n\t\t *  @returns {undefined} Ensure that you modify the data array passed in,\n\t\t *    as this is passed by reference.\n\t\t *\n\t\t *  @dtopt Callbacks\n\t\t *  @dtopt Server-side\n\t\t *  @name DataTable.defaults.serverParams\n\t\t *\n\t\t *  @deprecated 1.10. Please use `ajax` for this functionality now.\n\t\t */\n\t\t\"fnServerParams\": null,\n\t\n\t\n\t\t/**\n\t\t * Load the table state. With this function you can define from where, and how, the\n\t\t * state of a table is loaded. 
By default DataTables will load from `localStorage`\n\t\t * but you might wish to use a server-side database or cookies.\n\t\t *  @type function\n\t\t *  @member\n\t\t *  @param {object} settings DataTables settings object\n\t\t *  @return {object} The DataTables state object to be loaded\n\t\t *\n\t\t *  @dtopt Callbacks\n\t\t *  @name DataTable.defaults.stateLoadCallback\n\t\t *\n\t\t *  @example\n\t\t *    $(document).ready( function() {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"stateSave\": true,\n\t\t *        \"stateLoadCallback\": function (settings) {\n\t\t *          var o;\n\t\t *\n\t\t *          // Send an Ajax request to the server to get the data. Note that\n\t\t *          // this is a synchronous request.\n\t\t *          $.ajax( {\n\t\t *            \"url\": \"/state_load\",\n\t\t *            \"async\": false,\n\t\t *            \"dataType\": \"json\",\n\t\t *            \"success\": function (json) {\n\t\t *              o = json;\n\t\t *            }\n\t\t *          } );\n\t\t *\n\t\t *          return o;\n\t\t *        }\n\t\t *      } );\n\t\t *    } );\n\t\t */\n\t\t\"fnStateLoadCallback\": function ( settings ) {\n\t\t\ttry {\n\t\t\t\treturn JSON.parse(\n\t\t\t\t\t(settings.iStateDuration === -1 ? sessionStorage : localStorage).getItem(\n\t\t\t\t\t\t'DataTables_'+settings.sInstance+'_'+location.pathname\n\t\t\t\t\t)\n\t\t\t\t);\n\t\t\t} catch (e) {}\n\t\t},\n\t\n\t\n\t\t/**\n\t\t * Callback which allows modification of the saved state prior to loading that state.\n\t\t * This callback is called when the table is loading state from the stored data, but\n\t\t * prior to the settings object being modified by the saved state. 
Note that for\n\t\t * plug-in authors, you should use the `stateLoadParams` event to load parameters for\n\t\t * a plug-in.\n\t\t *  @type function\n\t\t *  @param {object} settings DataTables settings object\n\t\t *  @param {object} data The state object that is to be loaded\n\t\t *\n\t\t *  @dtopt Callbacks\n\t\t *  @name DataTable.defaults.stateLoadParams\n\t\t *\n\t\t *  @example\n\t\t *    // Remove a saved filter, so filtering is never loaded\n\t\t *    $(document).ready( function() {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"stateSave\": true,\n\t\t *        \"stateLoadParams\": function (settings, data) {\n\t\t *          data.oSearch.sSearch = \"\";\n\t\t *        }\n\t\t *      } );\n\t\t *    } );\n\t\t *\n\t\t *  @example\n\t\t *    // Disallow state loading by returning false\n\t\t *    $(document).ready( function() {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"stateSave\": true,\n\t\t *        \"stateLoadParams\": function (settings, data) {\n\t\t *          return false;\n\t\t *        }\n\t\t *      } );\n\t\t *    } );\n\t\t */\n\t\t\"fnStateLoadParams\": null,\n\t\n\t\n\t\t/**\n\t\t * Callback that is called when the state has been loaded from the state saving method\n\t\t * and the DataTables settings object has been modified as a result of the loaded state.\n\t\t *  @type function\n\t\t *  @param {object} settings DataTables settings object\n\t\t *  @param {object} data The state object that was loaded\n\t\t *\n\t\t *  @dtopt Callbacks\n\t\t *  @name DataTable.defaults.stateLoaded\n\t\t *\n\t\t *  @example\n\t\t *    // Show an alert with the filtering value that was saved\n\t\t *    $(document).ready( function() {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"stateSave\": true,\n\t\t *        \"stateLoaded\": function (settings, data) {\n\t\t *          alert( 'Saved filter was: '+data.oSearch.sSearch );\n\t\t *        }\n\t\t *      } );\n\t\t *    } );\n\t\t */\n\t\t\"fnStateLoaded\": 
null,\n\t\n\t\n\t\t/**\n\t\t * Save the table state. This function allows you to define where and how the state\n\t\t * information for the table is stored By default DataTables will use `localStorage`\n\t\t * but you might wish to use a server-side database or cookies.\n\t\t *  @type function\n\t\t *  @member\n\t\t *  @param {object} settings DataTables settings object\n\t\t *  @param {object} data The state object to be saved\n\t\t *\n\t\t *  @dtopt Callbacks\n\t\t *  @name DataTable.defaults.stateSaveCallback\n\t\t *\n\t\t *  @example\n\t\t *    $(document).ready( function() {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"stateSave\": true,\n\t\t *        \"stateSaveCallback\": function (settings, data) {\n\t\t *          // Send an Ajax request to the server with the state object\n\t\t *          $.ajax( {\n\t\t *            \"url\": \"/state_save\",\n\t\t *            \"data\": data,\n\t\t *            \"dataType\": \"json\",\n\t\t *            \"method\": \"POST\"\n\t\t *            \"success\": function () {}\n\t\t *          } );\n\t\t *        }\n\t\t *      } );\n\t\t *    } );\n\t\t */\n\t\t\"fnStateSaveCallback\": function ( settings, data ) {\n\t\t\ttry {\n\t\t\t\t(settings.iStateDuration === -1 ? sessionStorage : localStorage).setItem(\n\t\t\t\t\t'DataTables_'+settings.sInstance+'_'+location.pathname,\n\t\t\t\t\tJSON.stringify( data )\n\t\t\t\t);\n\t\t\t} catch (e) {}\n\t\t},\n\t\n\t\n\t\t/**\n\t\t * Callback which allows modification of the state to be saved. Called when the table\n\t\t * has changed state a new state save is required. This method allows modification of\n\t\t * the state saving object prior to actually doing the save, including addition or\n\t\t * other state properties or modification. 
Note that for plug-in authors, you should\n\t\t * use the `stateSaveParams` event to save parameters for a plug-in.\n\t\t *  @type function\n\t\t *  @param {object} settings DataTables settings object\n\t\t *  @param {object} data The state object to be saved\n\t\t *\n\t\t *  @dtopt Callbacks\n\t\t *  @name DataTable.defaults.stateSaveParams\n\t\t *\n\t\t *  @example\n\t\t *    // Remove a saved filter, so filtering is never saved\n\t\t *    $(document).ready( function() {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"stateSave\": true,\n\t\t *        \"stateSaveParams\": function (settings, data) {\n\t\t *          data.oSearch.sSearch = \"\";\n\t\t *        }\n\t\t *      } );\n\t\t *    } );\n\t\t */\n\t\t\"fnStateSaveParams\": null,\n\t\n\t\n\t\t/**\n\t\t * Duration for which the saved state information is considered valid. After this period\n\t\t * has elapsed the state will be returned to the default.\n\t\t * Value is given in seconds.\n\t\t *  @type int\n\t\t *  @default 7200 <i>(2 hours)</i>\n\t\t *\n\t\t *  @dtopt Options\n\t\t *  @name DataTable.defaults.stateDuration\n\t\t *\n\t\t *  @example\n\t\t *    $(document).ready( function() {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"stateDuration\": 60*60*24; // 1 day\n\t\t *      } );\n\t\t *    } )\n\t\t */\n\t\t\"iStateDuration\": 7200,\n\t\n\t\n\t\t/**\n\t\t * When enabled DataTables will not make a request to the server for the first\n\t\t * page draw - rather it will use the data already on the page (no sorting etc\n\t\t * will be applied to it), thus saving on an XHR at load time. `deferLoading`\n\t\t * is used to indicate that deferred loading is required, but it is also used\n\t\t * to tell DataTables how many records there are in the full table (allowing\n\t\t * the information element and pagination to be displayed correctly). 
In the case\n\t\t * where a filtering is applied to the table on initial load, this can be\n\t\t * indicated by giving the parameter as an array, where the first element is\n\t\t * the number of records available after filtering and the second element is the\n\t\t * number of records without filtering (allowing the table information element\n\t\t * to be shown correctly).\n\t\t *  @type int | array\n\t\t *  @default null\n\t\t *\n\t\t *  @dtopt Options\n\t\t *  @name DataTable.defaults.deferLoading\n\t\t *\n\t\t *  @example\n\t\t *    // 57 records available in the table, no filtering applied\n\t\t *    $(document).ready( function() {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"serverSide\": true,\n\t\t *        \"ajax\": \"scripts/server_processing.php\",\n\t\t *        \"deferLoading\": 57\n\t\t *      } );\n\t\t *    } );\n\t\t *\n\t\t *  @example\n\t\t *    // 57 records after filtering, 100 without filtering (an initial filter applied)\n\t\t *    $(document).ready( function() {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"serverSide\": true,\n\t\t *        \"ajax\": \"scripts/server_processing.php\",\n\t\t *        \"deferLoading\": [ 57, 100 ],\n\t\t *        \"search\": {\n\t\t *          \"search\": \"my_filter\"\n\t\t *        }\n\t\t *      } );\n\t\t *    } );\n\t\t */\n\t\t\"iDeferLoading\": null,\n\t\n\t\n\t\t/**\n\t\t * Number of rows to display on a single page when using pagination. 
If\n\t\t * feature enabled (`lengthChange`) then the end user will be able to override\n\t\t * this to a custom setting using a pop-up menu.\n\t\t *  @type int\n\t\t *  @default 10\n\t\t *\n\t\t *  @dtopt Options\n\t\t *  @name DataTable.defaults.pageLength\n\t\t *\n\t\t *  @example\n\t\t *    $(document).ready( function() {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"pageLength\": 50\n\t\t *      } );\n\t\t *    } )\n\t\t */\n\t\t\"iDisplayLength\": 10,\n\t\n\t\n\t\t/**\n\t\t * Define the starting point for data display when using DataTables with\n\t\t * pagination. Note that this parameter is the number of records, rather than\n\t\t * the page number, so if you have 10 records per page and want to start on\n\t\t * the third page, it should be \"20\".\n\t\t *  @type int\n\t\t *  @default 0\n\t\t *\n\t\t *  @dtopt Options\n\t\t *  @name DataTable.defaults.displayStart\n\t\t *\n\t\t *  @example\n\t\t *    $(document).ready( function() {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"displayStart\": 20\n\t\t *      } );\n\t\t *    } )\n\t\t */\n\t\t\"iDisplayStart\": 0,\n\t\n\t\n\t\t/**\n\t\t * By default DataTables allows keyboard navigation of the table (sorting, paging,\n\t\t * and filtering) by adding a `tabindex` attribute to the required elements. This\n\t\t * allows you to tab through the controls and press the enter key to activate them.\n\t\t * The tabindex is default 0, meaning that the tab follows the flow of the document.\n\t\t * You can overrule this using this parameter if you wish. 
Use a value of -1 to\n\t\t * disable built-in keyboard navigation.\n\t\t *  @type int\n\t\t *  @default 0\n\t\t *\n\t\t *  @dtopt Options\n\t\t *  @name DataTable.defaults.tabIndex\n\t\t *\n\t\t *  @example\n\t\t *    $(document).ready( function() {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"tabIndex\": 1\n\t\t *      } );\n\t\t *    } );\n\t\t */\n\t\t\"iTabIndex\": 0,\n\t\n\t\n\t\t/**\n\t\t * Classes that DataTables assigns to the various components and features\n\t\t * that it adds to the HTML table. This allows classes to be configured\n\t\t * during initialisation in addition to through the static\n\t\t * {@link DataTable.ext.oStdClasses} object).\n\t\t *  @namespace\n\t\t *  @name DataTable.defaults.classes\n\t\t */\n\t\t\"oClasses\": {},\n\t\n\t\n\t\t/**\n\t\t * All strings that DataTables uses in the user interface that it creates\n\t\t * are defined in this object, allowing you to modified them individually or\n\t\t * completely replace them all as required.\n\t\t *  @namespace\n\t\t *  @name DataTable.defaults.language\n\t\t */\n\t\t\"oLanguage\": {\n\t\t\t/**\n\t\t\t * Strings that are used for WAI-ARIA labels and controls only (these are not\n\t\t\t * actually visible on the page, but will be read by screenreaders, and thus\n\t\t\t * must be internationalised as well).\n\t\t\t *  @namespace\n\t\t\t *  @name DataTable.defaults.language.aria\n\t\t\t */\n\t\t\t\"oAria\": {\n\t\t\t\t/**\n\t\t\t\t * ARIA label that is added to the table headers when the column may be\n\t\t\t\t * sorted ascending by activing the column (click or return when focused).\n\t\t\t\t * Note that the column header is prefixed to this string.\n\t\t\t\t *  @type string\n\t\t\t\t *  @default : activate to sort column ascending\n\t\t\t\t *\n\t\t\t\t *  @dtopt Language\n\t\t\t\t *  @name DataTable.defaults.language.aria.sortAscending\n\t\t\t\t *\n\t\t\t\t *  @example\n\t\t\t\t *    $(document).ready( function() {\n\t\t\t\t *      $('#example').dataTable( {\n\t\t\t\t *        
\"language\": {\n\t\t\t\t *          \"aria\": {\n\t\t\t\t *            \"sortAscending\": \" - click/return to sort ascending\"\n\t\t\t\t *          }\n\t\t\t\t *        }\n\t\t\t\t *      } );\n\t\t\t\t *    } );\n\t\t\t\t */\n\t\t\t\t\"sSortAscending\": \": activate to sort column ascending\",\n\t\n\t\t\t\t/**\n\t\t\t\t * ARIA label that is added to the table headers when the column may be\n\t\t\t\t * sorted descending by activing the column (click or return when focused).\n\t\t\t\t * Note that the column header is prefixed to this string.\n\t\t\t\t *  @type string\n\t\t\t\t *  @default : activate to sort column ascending\n\t\t\t\t *\n\t\t\t\t *  @dtopt Language\n\t\t\t\t *  @name DataTable.defaults.language.aria.sortDescending\n\t\t\t\t *\n\t\t\t\t *  @example\n\t\t\t\t *    $(document).ready( function() {\n\t\t\t\t *      $('#example').dataTable( {\n\t\t\t\t *        \"language\": {\n\t\t\t\t *          \"aria\": {\n\t\t\t\t *            \"sortDescending\": \" - click/return to sort descending\"\n\t\t\t\t *          }\n\t\t\t\t *        }\n\t\t\t\t *      } );\n\t\t\t\t *    } );\n\t\t\t\t */\n\t\t\t\t\"sSortDescending\": \": activate to sort column descending\"\n\t\t\t},\n\t\n\t\t\t/**\n\t\t\t * Pagination string used by DataTables for the built-in pagination\n\t\t\t * control types.\n\t\t\t *  @namespace\n\t\t\t *  @name DataTable.defaults.language.paginate\n\t\t\t */\n\t\t\t\"oPaginate\": {\n\t\t\t\t/**\n\t\t\t\t * Text to use when using the 'full_numbers' type of pagination for the\n\t\t\t\t * button to take the user to the first page.\n\t\t\t\t *  @type string\n\t\t\t\t *  @default First\n\t\t\t\t *\n\t\t\t\t *  @dtopt Language\n\t\t\t\t *  @name DataTable.defaults.language.paginate.first\n\t\t\t\t *\n\t\t\t\t *  @example\n\t\t\t\t *    $(document).ready( function() {\n\t\t\t\t *      $('#example').dataTable( {\n\t\t\t\t *        \"language\": {\n\t\t\t\t *          \"paginate\": {\n\t\t\t\t *            \"first\": \"First page\"\n\t\t\t\t *          
}\n\t\t\t\t *        }\n\t\t\t\t *      } );\n\t\t\t\t *    } );\n\t\t\t\t */\n\t\t\t\t\"sFirst\": \"First\",\n\t\n\t\n\t\t\t\t/**\n\t\t\t\t * Text to use when using the 'full_numbers' type of pagination for the\n\t\t\t\t * button to take the user to the last page.\n\t\t\t\t *  @type string\n\t\t\t\t *  @default Last\n\t\t\t\t *\n\t\t\t\t *  @dtopt Language\n\t\t\t\t *  @name DataTable.defaults.language.paginate.last\n\t\t\t\t *\n\t\t\t\t *  @example\n\t\t\t\t *    $(document).ready( function() {\n\t\t\t\t *      $('#example').dataTable( {\n\t\t\t\t *        \"language\": {\n\t\t\t\t *          \"paginate\": {\n\t\t\t\t *            \"last\": \"Last page\"\n\t\t\t\t *          }\n\t\t\t\t *        }\n\t\t\t\t *      } );\n\t\t\t\t *    } );\n\t\t\t\t */\n\t\t\t\t\"sLast\": \"Last\",\n\t\n\t\n\t\t\t\t/**\n\t\t\t\t * Text to use for the 'next' pagination button (to take the user to the\n\t\t\t\t * next page).\n\t\t\t\t *  @type string\n\t\t\t\t *  @default Next\n\t\t\t\t *\n\t\t\t\t *  @dtopt Language\n\t\t\t\t *  @name DataTable.defaults.language.paginate.next\n\t\t\t\t *\n\t\t\t\t *  @example\n\t\t\t\t *    $(document).ready( function() {\n\t\t\t\t *      $('#example').dataTable( {\n\t\t\t\t *        \"language\": {\n\t\t\t\t *          \"paginate\": {\n\t\t\t\t *            \"next\": \"Next page\"\n\t\t\t\t *          }\n\t\t\t\t *        }\n\t\t\t\t *      } );\n\t\t\t\t *    } );\n\t\t\t\t */\n\t\t\t\t\"sNext\": \"Next\",\n\t\n\t\n\t\t\t\t/**\n\t\t\t\t * Text to use for the 'previous' pagination button (to take the user to\n\t\t\t\t * the previous page).\n\t\t\t\t *  @type string\n\t\t\t\t *  @default Previous\n\t\t\t\t *\n\t\t\t\t *  @dtopt Language\n\t\t\t\t *  @name DataTable.defaults.language.paginate.previous\n\t\t\t\t *\n\t\t\t\t *  @example\n\t\t\t\t *    $(document).ready( function() {\n\t\t\t\t *      $('#example').dataTable( {\n\t\t\t\t *        \"language\": {\n\t\t\t\t *          \"paginate\": {\n\t\t\t\t *            \"previous\": \"Previous 
page\"\n\t\t\t\t *          }\n\t\t\t\t *        }\n\t\t\t\t *      } );\n\t\t\t\t *    } );\n\t\t\t\t */\n\t\t\t\t\"sPrevious\": \"Previous\"\n\t\t\t},\n\t\n\t\t\t/**\n\t\t\t * This string is shown in preference to `zeroRecords` when the table is\n\t\t\t * empty of data (regardless of filtering). Note that this is an optional\n\t\t\t * parameter - if it is not given, the value of `zeroRecords` will be used\n\t\t\t * instead (either the default or given value).\n\t\t\t *  @type string\n\t\t\t *  @default No data available in table\n\t\t\t *\n\t\t\t *  @dtopt Language\n\t\t\t *  @name DataTable.defaults.language.emptyTable\n\t\t\t *\n\t\t\t *  @example\n\t\t\t *    $(document).ready( function() {\n\t\t\t *      $('#example').dataTable( {\n\t\t\t *        \"language\": {\n\t\t\t *          \"emptyTable\": \"No data available in table\"\n\t\t\t *        }\n\t\t\t *      } );\n\t\t\t *    } );\n\t\t\t */\n\t\t\t\"sEmptyTable\": \"No data available in table\",\n\t\n\t\n\t\t\t/**\n\t\t\t * This string gives information to the end user about the information\n\t\t\t * that is current on display on the page. The following tokens can be\n\t\t\t * used in the string and will be dynamically replaced as the table\n\t\t\t * display updates. 
This tokens can be placed anywhere in the string, or\n\t\t\t * removed as needed by the language requires:\n\t\t\t *\n\t\t\t * * `\\_START\\_` - Display index of the first record on the current page\n\t\t\t * * `\\_END\\_` - Display index of the last record on the current page\n\t\t\t * * `\\_TOTAL\\_` - Number of records in the table after filtering\n\t\t\t * * `\\_MAX\\_` - Number of records in the table without filtering\n\t\t\t * * `\\_PAGE\\_` - Current page number\n\t\t\t * * `\\_PAGES\\_` - Total number of pages of data in the table\n\t\t\t *\n\t\t\t *  @type string\n\t\t\t *  @default Showing _START_ to _END_ of _TOTAL_ entries\n\t\t\t *\n\t\t\t *  @dtopt Language\n\t\t\t *  @name DataTable.defaults.language.info\n\t\t\t *\n\t\t\t *  @example\n\t\t\t *    $(document).ready( function() {\n\t\t\t *      $('#example').dataTable( {\n\t\t\t *        \"language\": {\n\t\t\t *          \"info\": \"Showing page _PAGE_ of _PAGES_\"\n\t\t\t *        }\n\t\t\t *      } );\n\t\t\t *    } );\n\t\t\t */\n\t\t\t\"sInfo\": \"Showing _START_ to _END_ of _TOTAL_ entries\",\n\t\n\t\n\t\t\t/**\n\t\t\t * Display information string for when the table is empty. Typically the\n\t\t\t * format of this string should match `info`.\n\t\t\t *  @type string\n\t\t\t *  @default Showing 0 to 0 of 0 entries\n\t\t\t *\n\t\t\t *  @dtopt Language\n\t\t\t *  @name DataTable.defaults.language.infoEmpty\n\t\t\t *\n\t\t\t *  @example\n\t\t\t *    $(document).ready( function() {\n\t\t\t *      $('#example').dataTable( {\n\t\t\t *        \"language\": {\n\t\t\t *          \"infoEmpty\": \"No entries to show\"\n\t\t\t *        }\n\t\t\t *      } );\n\t\t\t *    } );\n\t\t\t */\n\t\t\t\"sInfoEmpty\": \"Showing 0 to 0 of 0 entries\",\n\t\n\t\n\t\t\t/**\n\t\t\t * When a user filters the information in a table, this string is appended\n\t\t\t * to the information (`info`) to give an idea of how strong the filtering\n\t\t\t * is. 
The variable _MAX_ is dynamically updated.\n\t\t\t *  @type string\n\t\t\t *  @default (filtered from _MAX_ total entries)\n\t\t\t *\n\t\t\t *  @dtopt Language\n\t\t\t *  @name DataTable.defaults.language.infoFiltered\n\t\t\t *\n\t\t\t *  @example\n\t\t\t *    $(document).ready( function() {\n\t\t\t *      $('#example').dataTable( {\n\t\t\t *        \"language\": {\n\t\t\t *          \"infoFiltered\": \" - filtering from _MAX_ records\"\n\t\t\t *        }\n\t\t\t *      } );\n\t\t\t *    } );\n\t\t\t */\n\t\t\t\"sInfoFiltered\": \"(filtered from _MAX_ total entries)\",\n\t\n\t\n\t\t\t/**\n\t\t\t * If can be useful to append extra information to the info string at times,\n\t\t\t * and this variable does exactly that. This information will be appended to\n\t\t\t * the `info` (`infoEmpty` and `infoFiltered` in whatever combination they are\n\t\t\t * being used) at all times.\n\t\t\t *  @type string\n\t\t\t *  @default <i>Empty string</i>\n\t\t\t *\n\t\t\t *  @dtopt Language\n\t\t\t *  @name DataTable.defaults.language.infoPostFix\n\t\t\t *\n\t\t\t *  @example\n\t\t\t *    $(document).ready( function() {\n\t\t\t *      $('#example').dataTable( {\n\t\t\t *        \"language\": {\n\t\t\t *          \"infoPostFix\": \"All records shown are derived from real information.\"\n\t\t\t *        }\n\t\t\t *      } );\n\t\t\t *    } );\n\t\t\t */\n\t\t\t\"sInfoPostFix\": \"\",\n\t\n\t\n\t\t\t/**\n\t\t\t * This decimal place operator is a little different from the other\n\t\t\t * language options since DataTables doesn't output floating point\n\t\t\t * numbers, so it won't ever use this for display of a number. 
Rather,\n\t\t\t * what this parameter does is modify the sort methods of the table so\n\t\t\t * that numbers which are in a format which has a character other than\n\t\t\t * a period (`.`) as a decimal place will be sorted numerically.\n\t\t\t *\n\t\t\t * Note that numbers with different decimal places cannot be shown in\n\t\t\t * the same table and still be sortable, the table must be consistent.\n\t\t\t * However, multiple different tables on the page can use different\n\t\t\t * decimal place characters.\n\t\t\t *  @type string\n\t\t\t *  @default \n\t\t\t *\n\t\t\t *  @dtopt Language\n\t\t\t *  @name DataTable.defaults.language.decimal\n\t\t\t *\n\t\t\t *  @example\n\t\t\t *    $(document).ready( function() {\n\t\t\t *      $('#example').dataTable( {\n\t\t\t *        \"language\": {\n\t\t\t *          \"decimal\": \",\"\n\t\t\t *          \"thousands\": \".\"\n\t\t\t *        }\n\t\t\t *      } );\n\t\t\t *    } );\n\t\t\t */\n\t\t\t\"sDecimal\": \"\",\n\t\n\t\n\t\t\t/**\n\t\t\t * DataTables has a build in number formatter (`formatNumber`) which is\n\t\t\t * used to format large numbers that are used in the table information.\n\t\t\t * By default a comma is used, but this can be trivially changed to any\n\t\t\t * character you wish with this parameter.\n\t\t\t *  @type string\n\t\t\t *  @default ,\n\t\t\t *\n\t\t\t *  @dtopt Language\n\t\t\t *  @name DataTable.defaults.language.thousands\n\t\t\t *\n\t\t\t *  @example\n\t\t\t *    $(document).ready( function() {\n\t\t\t *      $('#example').dataTable( {\n\t\t\t *        \"language\": {\n\t\t\t *          \"thousands\": \"'\"\n\t\t\t *        }\n\t\t\t *      } );\n\t\t\t *    } );\n\t\t\t */\n\t\t\t\"sThousands\": \",\",\n\t\n\t\n\t\t\t/**\n\t\t\t * Detail the action that will be taken when the drop down menu for the\n\t\t\t * pagination length option is changed. 
The '_MENU_' variable is replaced\n\t\t\t * with a default select list of 10, 25, 50 and 100, and can be replaced\n\t\t\t * with a custom select box if required.\n\t\t\t *  @type string\n\t\t\t *  @default Show _MENU_ entries\n\t\t\t *\n\t\t\t *  @dtopt Language\n\t\t\t *  @name DataTable.defaults.language.lengthMenu\n\t\t\t *\n\t\t\t *  @example\n\t\t\t *    // Language change only\n\t\t\t *    $(document).ready( function() {\n\t\t\t *      $('#example').dataTable( {\n\t\t\t *        \"language\": {\n\t\t\t *          \"lengthMenu\": \"Display _MENU_ records\"\n\t\t\t *        }\n\t\t\t *      } );\n\t\t\t *    } );\n\t\t\t *\n\t\t\t *  @example\n\t\t\t *    // Language and options change\n\t\t\t *    $(document).ready( function() {\n\t\t\t *      $('#example').dataTable( {\n\t\t\t *        \"language\": {\n\t\t\t *          \"lengthMenu\": 'Display <select>'+\n\t\t\t *            '<option value=\"10\">10</option>'+\n\t\t\t *            '<option value=\"20\">20</option>'+\n\t\t\t *            '<option value=\"30\">30</option>'+\n\t\t\t *            '<option value=\"40\">40</option>'+\n\t\t\t *            '<option value=\"50\">50</option>'+\n\t\t\t *            '<option value=\"-1\">All</option>'+\n\t\t\t *            '</select> records'\n\t\t\t *        }\n\t\t\t *      } );\n\t\t\t *    } );\n\t\t\t */\n\t\t\t\"sLengthMenu\": \"Show _MENU_ entries\",\n\t\n\t\n\t\t\t/**\n\t\t\t * When using Ajax sourced data and during the first draw when DataTables is\n\t\t\t * gathering the data, this message is shown in an empty row in the table to\n\t\t\t * indicate to the end user the the data is being loaded. 
Note that this\n\t\t\t * parameter is not used when loading data by server-side processing, just\n\t\t\t * Ajax sourced data with client-side processing.\n\t\t\t *  @type string\n\t\t\t *  @default Loading...\n\t\t\t *\n\t\t\t *  @dtopt Language\n\t\t\t *  @name DataTable.defaults.language.loadingRecords\n\t\t\t *\n\t\t\t *  @example\n\t\t\t *    $(document).ready( function() {\n\t\t\t *      $('#example').dataTable( {\n\t\t\t *        \"language\": {\n\t\t\t *          \"loadingRecords\": \"Please wait - loading...\"\n\t\t\t *        }\n\t\t\t *      } );\n\t\t\t *    } );\n\t\t\t */\n\t\t\t\"sLoadingRecords\": \"Loading...\",\n\t\n\t\n\t\t\t/**\n\t\t\t * Text which is displayed when the table is processing a user action\n\t\t\t * (usually a sort command or similar).\n\t\t\t *  @type string\n\t\t\t *  @default Processing...\n\t\t\t *\n\t\t\t *  @dtopt Language\n\t\t\t *  @name DataTable.defaults.language.processing\n\t\t\t *\n\t\t\t *  @example\n\t\t\t *    $(document).ready( function() {\n\t\t\t *      $('#example').dataTable( {\n\t\t\t *        \"language\": {\n\t\t\t *          \"processing\": \"DataTables is currently busy\"\n\t\t\t *        }\n\t\t\t *      } );\n\t\t\t *    } );\n\t\t\t */\n\t\t\t\"sProcessing\": \"Processing...\",\n\t\n\t\n\t\t\t/**\n\t\t\t * Details the actions that will be taken when the user types into the\n\t\t\t * filtering input text box. The variable \"_INPUT_\", if used in the string,\n\t\t\t * is replaced with the HTML text box for the filtering input allowing\n\t\t\t * control over where it appears in the string. 
If \"_INPUT_\" is not given\n\t\t\t * then the input box is appended to the string automatically.\n\t\t\t *  @type string\n\t\t\t *  @default Search:\n\t\t\t *\n\t\t\t *  @dtopt Language\n\t\t\t *  @name DataTable.defaults.language.search\n\t\t\t *\n\t\t\t *  @example\n\t\t\t *    // Input text box will be appended at the end automatically\n\t\t\t *    $(document).ready( function() {\n\t\t\t *      $('#example').dataTable( {\n\t\t\t *        \"language\": {\n\t\t\t *          \"search\": \"Filter records:\"\n\t\t\t *        }\n\t\t\t *      } );\n\t\t\t *    } );\n\t\t\t *\n\t\t\t *  @example\n\t\t\t *    // Specify where the filter should appear\n\t\t\t *    $(document).ready( function() {\n\t\t\t *      $('#example').dataTable( {\n\t\t\t *        \"language\": {\n\t\t\t *          \"search\": \"Apply filter _INPUT_ to table\"\n\t\t\t *        }\n\t\t\t *      } );\n\t\t\t *    } );\n\t\t\t */\n\t\t\t\"sSearch\": \"Search:\",\n\t\n\t\n\t\t\t/**\n\t\t\t * Assign a `placeholder` attribute to the search `input` element\n\t\t\t *  @type string\n\t\t\t *  @default \n\t\t\t *\n\t\t\t *  @dtopt Language\n\t\t\t *  @name DataTable.defaults.language.searchPlaceholder\n\t\t\t */\n\t\t\t\"sSearchPlaceholder\": \"\",\n\t\n\t\n\t\t\t/**\n\t\t\t * All of the language information can be stored in a file on the\n\t\t\t * server-side, which DataTables will look up if this parameter is passed.\n\t\t\t * It must store the URL of the language file, which is in a JSON format,\n\t\t\t * and the object has the same properties as the oLanguage object in the\n\t\t\t * initialiser object (i.e. the above parameters). Please refer to one of\n\t\t\t * the example language files to see how this works in action.\n\t\t\t *  @type string\n\t\t\t *  @default <i>Empty string - i.e. 
disabled</i>\n\t\t\t *\n\t\t\t *  @dtopt Language\n\t\t\t *  @name DataTable.defaults.language.url\n\t\t\t *\n\t\t\t *  @example\n\t\t\t *    $(document).ready( function() {\n\t\t\t *      $('#example').dataTable( {\n\t\t\t *        \"language\": {\n\t\t\t *          \"url\": \"http://www.sprymedia.co.uk/dataTables/lang.txt\"\n\t\t\t *        }\n\t\t\t *      } );\n\t\t\t *    } );\n\t\t\t */\n\t\t\t\"sUrl\": \"\",\n\t\n\t\n\t\t\t/**\n\t\t\t * Text shown inside the table records when the is no information to be\n\t\t\t * displayed after filtering. `emptyTable` is shown when there is simply no\n\t\t\t * information in the table at all (regardless of filtering).\n\t\t\t *  @type string\n\t\t\t *  @default No matching records found\n\t\t\t *\n\t\t\t *  @dtopt Language\n\t\t\t *  @name DataTable.defaults.language.zeroRecords\n\t\t\t *\n\t\t\t *  @example\n\t\t\t *    $(document).ready( function() {\n\t\t\t *      $('#example').dataTable( {\n\t\t\t *        \"language\": {\n\t\t\t *          \"zeroRecords\": \"No records to display\"\n\t\t\t *        }\n\t\t\t *      } );\n\t\t\t *    } );\n\t\t\t */\n\t\t\t\"sZeroRecords\": \"No matching records found\"\n\t\t},\n\t\n\t\n\t\t/**\n\t\t * This parameter allows you to have define the global filtering state at\n\t\t * initialisation time. As an object the `search` parameter must be\n\t\t * defined, but all other parameters are optional. When `regex` is true,\n\t\t * the search string will be treated as a regular expression, when false\n\t\t * (default) it will be treated as a straight string. 
When `smart`\n\t\t * DataTables will use it's smart filtering methods (to word match at\n\t\t * any point in the data), when false this will not be done.\n\t\t *  @namespace\n\t\t *  @extends DataTable.models.oSearch\n\t\t *\n\t\t *  @dtopt Options\n\t\t *  @name DataTable.defaults.search\n\t\t *\n\t\t *  @example\n\t\t *    $(document).ready( function() {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"search\": {\"search\": \"Initial search\"}\n\t\t *      } );\n\t\t *    } )\n\t\t */\n\t\t\"oSearch\": $.extend( {}, DataTable.models.oSearch ),\n\t\n\t\n\t\t/**\n\t\t * __Deprecated__ The functionality provided by this parameter has now been\n\t\t * superseded by that provided through `ajax`, which should be used instead.\n\t\t *\n\t\t * By default DataTables will look for the property `data` (or `aaData` for\n\t\t * compatibility with DataTables 1.9-) when obtaining data from an Ajax\n\t\t * source or for server-side processing - this parameter allows that\n\t\t * property to be changed. You can use Javascript dotted object notation to\n\t\t * get a data source for multiple levels of nesting.\n\t\t *  @type string\n\t\t *  @default data\n\t\t *\n\t\t *  @dtopt Options\n\t\t *  @dtopt Server-side\n\t\t *  @name DataTable.defaults.ajaxDataProp\n\t\t *\n\t\t *  @deprecated 1.10. Please use `ajax` for this functionality now.\n\t\t */\n\t\t\"sAjaxDataProp\": \"data\",\n\t\n\t\n\t\t/**\n\t\t * __Deprecated__ The functionality provided by this parameter has now been\n\t\t * superseded by that provided through `ajax`, which should be used instead.\n\t\t *\n\t\t * You can instruct DataTables to load data from an external\n\t\t * source using this parameter (use aData if you want to pass data in you\n\t\t * already have). Simply provide a url a JSON object can be obtained from.\n\t\t *  @type string\n\t\t *  @default null\n\t\t *\n\t\t *  @dtopt Options\n\t\t *  @dtopt Server-side\n\t\t *  @name DataTable.defaults.ajaxSource\n\t\t *\n\t\t *  @deprecated 1.10. 
Please use `ajax` for this functionality now.\n\t\t */\n\t\t\"sAjaxSource\": null,\n\t\n\t\n\t\t/**\n\t\t * This initialisation variable allows you to specify exactly where in the\n\t\t * DOM you want DataTables to inject the various controls it adds to the page\n\t\t * (for example you might want the pagination controls at the top of the\n\t\t * table). DIV elements (with or without a custom class) can also be added to\n\t\t * aid styling. The follow syntax is used:\n\t\t *   <ul>\n\t\t *     <li>The following options are allowed:\n\t\t *       <ul>\n\t\t *         <li>'l' - Length changing</li>\n\t\t *         <li>'f' - Filtering input</li>\n\t\t *         <li>'t' - The table!</li>\n\t\t *         <li>'i' - Information</li>\n\t\t *         <li>'p' - Pagination</li>\n\t\t *         <li>'r' - pRocessing</li>\n\t\t *       </ul>\n\t\t *     </li>\n\t\t *     <li>The following constants are allowed:\n\t\t *       <ul>\n\t\t *         <li>'H' - jQueryUI theme \"header\" classes ('fg-toolbar ui-widget-header ui-corner-tl ui-corner-tr ui-helper-clearfix')</li>\n\t\t *         <li>'F' - jQueryUI theme \"footer\" classes ('fg-toolbar ui-widget-header ui-corner-bl ui-corner-br ui-helper-clearfix')</li>\n\t\t *       </ul>\n\t\t *     </li>\n\t\t *     <li>The following syntax is expected:\n\t\t *       <ul>\n\t\t *         <li>'&lt;' and '&gt;' - div elements</li>\n\t\t *         <li>'&lt;\"class\" and '&gt;' - div with a class</li>\n\t\t *         <li>'&lt;\"#id\" and '&gt;' - div with an ID</li>\n\t\t *       </ul>\n\t\t *     </li>\n\t\t *     <li>Examples:\n\t\t *       <ul>\n\t\t *         <li>'&lt;\"wrapper\"flipt&gt;'</li>\n\t\t *         <li>'&lt;lf&lt;t&gt;ip&gt;'</li>\n\t\t *       </ul>\n\t\t *     </li>\n\t\t *   </ul>\n\t\t *  @type string\n\t\t *  @default lfrtip <i>(when `jQueryUI` is false)</i> <b>or</b>\n\t\t *    <\"H\"lfr>t<\"F\"ip> <i>(when `jQueryUI` is true)</i>\n\t\t *\n\t\t *  @dtopt Options\n\t\t *  @name DataTable.defaults.dom\n\t\t *\n\t\t *  
@example\n\t\t *    $(document).ready( function() {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"dom\": '&lt;\"top\"i&gt;rt&lt;\"bottom\"flp&gt;&lt;\"clear\"&gt;'\n\t\t *      } );\n\t\t *    } );\n\t\t */\n\t\t\"sDom\": \"lfrtip\",\n\t\n\t\n\t\t/**\n\t\t * Search delay option. This will throttle full table searches that use the\n\t\t * DataTables provided search input element (it does not effect calls to\n\t\t * `dt-api search()`, providing a delay before the search is made.\n\t\t *  @type integer\n\t\t *  @default 0\n\t\t *\n\t\t *  @dtopt Options\n\t\t *  @name DataTable.defaults.searchDelay\n\t\t *\n\t\t *  @example\n\t\t *    $(document).ready( function() {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"searchDelay\": 200\n\t\t *      } );\n\t\t *    } )\n\t\t */\n\t\t\"searchDelay\": null,\n\t\n\t\n\t\t/**\n\t\t * DataTables features four different built-in options for the buttons to\n\t\t * display for pagination control:\n\t\t *\n\t\t * * `simple` - 'Previous' and 'Next' buttons only\n\t\t * * 'simple_numbers` - 'Previous' and 'Next' buttons, plus page numbers\n\t\t * * `full` - 'First', 'Previous', 'Next' and 'Last' buttons\n\t\t * * `full_numbers` - 'First', 'Previous', 'Next' and 'Last' buttons, plus\n\t\t *   page numbers\n\t\t *  \n\t\t * Further methods can be added using {@link DataTable.ext.oPagination}.\n\t\t *  @type string\n\t\t *  @default simple_numbers\n\t\t *\n\t\t *  @dtopt Options\n\t\t *  @name DataTable.defaults.pagingType\n\t\t *\n\t\t *  @example\n\t\t *    $(document).ready( function() {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"pagingType\": \"full_numbers\"\n\t\t *      } );\n\t\t *    } )\n\t\t */\n\t\t\"sPaginationType\": \"simple_numbers\",\n\t\n\t\n\t\t/**\n\t\t * Enable horizontal scrolling. 
When a table is too wide to fit into a\n\t\t * certain layout, or you have a large number of columns in the table, you\n\t\t * can enable x-scrolling to show the table in a viewport, which can be\n\t\t * scrolled. This property can be `true` which will allow the table to\n\t\t * scroll horizontally when needed, or any CSS unit, or a number (in which\n\t\t * case it will be treated as a pixel measurement). Setting as simply `true`\n\t\t * is recommended.\n\t\t *  @type boolean|string\n\t\t *  @default <i>blank string - i.e. disabled</i>\n\t\t *\n\t\t *  @dtopt Features\n\t\t *  @name DataTable.defaults.scrollX\n\t\t *\n\t\t *  @example\n\t\t *    $(document).ready( function() {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"scrollX\": true,\n\t\t *        \"scrollCollapse\": true\n\t\t *      } );\n\t\t *    } );\n\t\t */\n\t\t\"sScrollX\": \"\",\n\t\n\t\n\t\t/**\n\t\t * This property can be used to force a DataTable to use more width than it\n\t\t * might otherwise do when x-scrolling is enabled. For example if you have a\n\t\t * table which requires to be well spaced, this parameter is useful for\n\t\t * \"over-sizing\" the table, and thus forcing scrolling. This property can by\n\t\t * any CSS unit, or a number (in which case it will be treated as a pixel\n\t\t * measurement).\n\t\t *  @type string\n\t\t *  @default <i>blank string - i.e. disabled</i>\n\t\t *\n\t\t *  @dtopt Options\n\t\t *  @name DataTable.defaults.scrollXInner\n\t\t *\n\t\t *  @example\n\t\t *    $(document).ready( function() {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"scrollX\": \"100%\",\n\t\t *        \"scrollXInner\": \"110%\"\n\t\t *      } );\n\t\t *    } );\n\t\t */\n\t\t\"sScrollXInner\": \"\",\n\t\n\t\n\t\t/**\n\t\t * Enable vertical scrolling. Vertical scrolling will constrain the DataTable\n\t\t * to the given height, and enable scrolling for any data which overflows the\n\t\t * current viewport. 
This can be used as an alternative to paging to display\n\t\t * a lot of data in a small area (although paging and scrolling can both be\n\t\t * enabled at the same time). This property can be any CSS unit, or a number\n\t\t * (in which case it will be treated as a pixel measurement).\n\t\t *  @type string\n\t\t *  @default <i>blank string - i.e. disabled</i>\n\t\t *\n\t\t *  @dtopt Features\n\t\t *  @name DataTable.defaults.scrollY\n\t\t *\n\t\t *  @example\n\t\t *    $(document).ready( function() {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"scrollY\": \"200px\",\n\t\t *        \"paginate\": false\n\t\t *      } );\n\t\t *    } );\n\t\t */\n\t\t\"sScrollY\": \"\",\n\t\n\t\n\t\t/**\n\t\t * __Deprecated__ The functionality provided by this parameter has now been\n\t\t * superseded by that provided through `ajax`, which should be used instead.\n\t\t *\n\t\t * Set the HTTP method that is used to make the Ajax call for server-side\n\t\t * processing or Ajax sourced data.\n\t\t *  @type string\n\t\t *  @default GET\n\t\t *\n\t\t *  @dtopt Options\n\t\t *  @dtopt Server-side\n\t\t *  @name DataTable.defaults.serverMethod\n\t\t *\n\t\t *  @deprecated 1.10. Please use `ajax` for this functionality now.\n\t\t */\n\t\t\"sServerMethod\": \"GET\",\n\t\n\t\n\t\t/**\n\t\t * DataTables makes use of renderers when displaying HTML elements for\n\t\t * a table. These renderers can be added or modified by plug-ins to\n\t\t * generate suitable mark-up for a site. 
For example the Bootstrap\n\t\t * integration plug-in for DataTables uses a paging button renderer to\n\t\t * display pagination buttons in the mark-up required by Bootstrap.\n\t\t *\n\t\t * For further information about the renderers available see\n\t\t * DataTable.ext.renderer\n\t\t *  @type string|object\n\t\t *  @default null\n\t\t *\n\t\t *  @name DataTable.defaults.renderer\n\t\t *\n\t\t */\n\t\t\"renderer\": null,\n\t\n\t\n\t\t/**\n\t\t * Set the data property name that DataTables should use to get a row's id\n\t\t * to set as the `id` property in the node.\n\t\t *  @type string\n\t\t *  @default DT_RowId\n\t\t *\n\t\t *  @name DataTable.defaults.rowId\n\t\t */\n\t\t\"rowId\": \"DT_RowId\"\n\t};\n\t\n\t_fnHungarianMap( DataTable.defaults );\n\t\n\t\n\t\n\t/*\n\t * Developer note - See note in model.defaults.js about the use of Hungarian\n\t * notation and camel case.\n\t */\n\t\n\t/**\n\t * Column options that can be given to DataTables at initialisation time.\n\t *  @namespace\n\t */\n\tDataTable.defaults.column = {\n\t\t/**\n\t\t * Define which column(s) an order will occur on for this column. This\n\t\t * allows a column's ordering to take multiple columns into account when\n\t\t * doing a sort or use the data from a different column. 
For example first\n\t\t * name / last name columns make sense to do a multi-column sort over the\n\t\t * two columns.\n\t\t *  @type array|int\n\t\t *  @default null <i>Takes the value of the column index automatically</i>\n\t\t *\n\t\t *  @name DataTable.defaults.column.orderData\n\t\t *  @dtopt Columns\n\t\t *\n\t\t *  @example\n\t\t *    // Using `columnDefs`\n\t\t *    $(document).ready( function() {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"columnDefs\": [\n\t\t *          { \"orderData\": [ 0, 1 ], \"targets\": [ 0 ] },\n\t\t *          { \"orderData\": [ 1, 0 ], \"targets\": [ 1 ] },\n\t\t *          { \"orderData\": 2, \"targets\": [ 2 ] }\n\t\t *        ]\n\t\t *      } );\n\t\t *    } );\n\t\t *\n\t\t *  @example\n\t\t *    // Using `columns`\n\t\t *    $(document).ready( function() {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"columns\": [\n\t\t *          { \"orderData\": [ 0, 1 ] },\n\t\t *          { \"orderData\": [ 1, 0 ] },\n\t\t *          { \"orderData\": 2 },\n\t\t *          null,\n\t\t *          null\n\t\t *        ]\n\t\t *      } );\n\t\t *    } );\n\t\t */\n\t\t\"aDataSort\": null,\n\t\t\"iDataSort\": -1,\n\t\n\t\n\t\t/**\n\t\t * You can control the default ordering direction, and even alter the\n\t\t * behaviour of the sort handler (i.e. 
only allow ascending ordering etc)\n\t\t * using this parameter.\n\t\t *  @type array\n\t\t *  @default [ 'asc', 'desc' ]\n\t\t *\n\t\t *  @name DataTable.defaults.column.orderSequence\n\t\t *  @dtopt Columns\n\t\t *\n\t\t *  @example\n\t\t *    // Using `columnDefs`\n\t\t *    $(document).ready( function() {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"columnDefs\": [\n\t\t *          { \"orderSequence\": [ \"asc\" ], \"targets\": [ 1 ] },\n\t\t *          { \"orderSequence\": [ \"desc\", \"asc\", \"asc\" ], \"targets\": [ 2 ] },\n\t\t *          { \"orderSequence\": [ \"desc\" ], \"targets\": [ 3 ] }\n\t\t *        ]\n\t\t *      } );\n\t\t *    } );\n\t\t *\n\t\t *  @example\n\t\t *    // Using `columns`\n\t\t *    $(document).ready( function() {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"columns\": [\n\t\t *          null,\n\t\t *          { \"orderSequence\": [ \"asc\" ] },\n\t\t *          { \"orderSequence\": [ \"desc\", \"asc\", \"asc\" ] },\n\t\t *          { \"orderSequence\": [ \"desc\" ] },\n\t\t *          null\n\t\t *        ]\n\t\t *      } );\n\t\t *    } );\n\t\t */\n\t\t\"asSorting\": [ 'asc', 'desc' ],\n\t\n\t\n\t\t/**\n\t\t * Enable or disable filtering on the data in this column.\n\t\t *  @type boolean\n\t\t *  @default true\n\t\t *\n\t\t *  @name DataTable.defaults.column.searchable\n\t\t *  @dtopt Columns\n\t\t *\n\t\t *  @example\n\t\t *    // Using `columnDefs`\n\t\t *    $(document).ready( function() {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"columnDefs\": [\n\t\t *          { \"searchable\": false, \"targets\": [ 0 ] }\n\t\t *        ] } );\n\t\t *    } );\n\t\t *\n\t\t *  @example\n\t\t *    // Using `columns`\n\t\t *    $(document).ready( function() {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"columns\": [\n\t\t *          { \"searchable\": false },\n\t\t *          null,\n\t\t *          null,\n\t\t *          null,\n\t\t *          null\n\t\t *        ] } );\n\t\t *    } );\n\t\t 
*/\n\t\t\"bSearchable\": true,\n\t\n\t\n\t\t/**\n\t\t * Enable or disable ordering on this column.\n\t\t *  @type boolean\n\t\t *  @default true\n\t\t *\n\t\t *  @name DataTable.defaults.column.orderable\n\t\t *  @dtopt Columns\n\t\t *\n\t\t *  @example\n\t\t *    // Using `columnDefs`\n\t\t *    $(document).ready( function() {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"columnDefs\": [\n\t\t *          { \"orderable\": false, \"targets\": [ 0 ] }\n\t\t *        ] } );\n\t\t *    } );\n\t\t *\n\t\t *  @example\n\t\t *    // Using `columns`\n\t\t *    $(document).ready( function() {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"columns\": [\n\t\t *          { \"orderable\": false },\n\t\t *          null,\n\t\t *          null,\n\t\t *          null,\n\t\t *          null\n\t\t *        ] } );\n\t\t *    } );\n\t\t */\n\t\t\"bSortable\": true,\n\t\n\t\n\t\t/**\n\t\t * Enable or disable the display of this column.\n\t\t *  @type boolean\n\t\t *  @default true\n\t\t *\n\t\t *  @name DataTable.defaults.column.visible\n\t\t *  @dtopt Columns\n\t\t *\n\t\t *  @example\n\t\t *    // Using `columnDefs`\n\t\t *    $(document).ready( function() {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"columnDefs\": [\n\t\t *          { \"visible\": false, \"targets\": [ 0 ] }\n\t\t *        ] } );\n\t\t *    } );\n\t\t *\n\t\t *  @example\n\t\t *    // Using `columns`\n\t\t *    $(document).ready( function() {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"columns\": [\n\t\t *          { \"visible\": false },\n\t\t *          null,\n\t\t *          null,\n\t\t *          null,\n\t\t *          null\n\t\t *        ] } );\n\t\t *    } );\n\t\t */\n\t\t\"bVisible\": true,\n\t\n\t\n\t\t/**\n\t\t * Developer definable function that is called whenever a cell is created (Ajax source,\n\t\t * etc) or processed for input (DOM source). 
This can be used as a compliment to mRender\n\t\t * allowing you to modify the DOM element (add background colour for example) when the\n\t\t * element is available.\n\t\t *  @type function\n\t\t *  @param {element} td The TD node that has been created\n\t\t *  @param {*} cellData The Data for the cell\n\t\t *  @param {array|object} rowData The data for the whole row\n\t\t *  @param {int} row The row index for the aoData data store\n\t\t *  @param {int} col The column index for aoColumns\n\t\t *\n\t\t *  @name DataTable.defaults.column.createdCell\n\t\t *  @dtopt Columns\n\t\t *\n\t\t *  @example\n\t\t *    $(document).ready( function() {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"columnDefs\": [ {\n\t\t *          \"targets\": [3],\n\t\t *          \"createdCell\": function (td, cellData, rowData, row, col) {\n\t\t *            if ( cellData == \"1.7\" ) {\n\t\t *              $(td).css('color', 'blue')\n\t\t *            }\n\t\t *          }\n\t\t *        } ]\n\t\t *      });\n\t\t *    } );\n\t\t */\n\t\t\"fnCreatedCell\": null,\n\t\n\t\n\t\t/**\n\t\t * This parameter has been replaced by `data` in DataTables to ensure naming\n\t\t * consistency. `dataProp` can still be used, as there is backwards\n\t\t * compatibility in DataTables for this option, but it is strongly\n\t\t * recommended that you use `data` in preference to `dataProp`.\n\t\t *  @name DataTable.defaults.column.dataProp\n\t\t */\n\t\n\t\n\t\t/**\n\t\t * This property can be used to read data from any data source property,\n\t\t * including deeply nested objects / properties. `data` can be given in a\n\t\t * number of different ways which effect its behaviour:\n\t\t *\n\t\t * * `integer` - treated as an array index for the data source. This is the\n\t\t *   default that DataTables uses (incrementally increased for each column).\n\t\t * * `string` - read an object property from the data source. 
There are\n\t\t *   three 'special' options that can be used in the string to alter how\n\t\t *   DataTables reads the data from the source object:\n\t\t *    * `.` - Dotted Javascript notation. Just as you use a `.` in\n\t\t *      Javascript to read from nested objects, so to can the options\n\t\t *      specified in `data`. For example: `browser.version` or\n\t\t *      `browser.name`. If your object parameter name contains a period, use\n\t\t *      `\\\\` to escape it - i.e. `first\\\\.name`.\n\t\t *    * `[]` - Array notation. DataTables can automatically combine data\n\t\t *      from and array source, joining the data with the characters provided\n\t\t *      between the two brackets. For example: `name[, ]` would provide a\n\t\t *      comma-space separated list from the source array. If no characters\n\t\t *      are provided between the brackets, the original array source is\n\t\t *      returned.\n\t\t *    * `()` - Function notation. Adding `()` to the end of a parameter will\n\t\t *      execute a function of the name given. For example: `browser()` for a\n\t\t *      simple function on the data source, `browser.version()` for a\n\t\t *      function in a nested property or even `browser().version` to get an\n\t\t *      object property if the function called returns an object. Note that\n\t\t *      function notation is recommended for use in `render` rather than\n\t\t *      `data` as it is much simpler to use as a renderer.\n\t\t * * `null` - use the original data source for the row rather than plucking\n\t\t *   data directly from it. 
This action has effects on two other\n\t\t *   initialisation options:\n\t\t *    * `defaultContent` - When null is given as the `data` option and\n\t\t *      `defaultContent` is specified for the column, the value defined by\n\t\t *      `defaultContent` will be used for the cell.\n\t\t *    * `render` - When null is used for the `data` option and the `render`\n\t\t *      option is specified for the column, the whole data source for the\n\t\t *      row is used for the renderer.\n\t\t * * `function` - the function given will be executed whenever DataTables\n\t\t *   needs to set or get the data for a cell in the column. The function\n\t\t *   takes three parameters:\n\t\t *    * Parameters:\n\t\t *      * `{array|object}` The data source for the row\n\t\t *      * `{string}` The type call data requested - this will be 'set' when\n\t\t *        setting data or 'filter', 'display', 'type', 'sort' or undefined\n\t\t *        when gathering data. Note that when `undefined` is given for the\n\t\t *        type DataTables expects to get the raw data for the object back<\n\t\t *      * `{*}` Data to set when the second parameter is 'set'.\n\t\t *    * Return:\n\t\t *      * The return value from the function is not required when 'set' is\n\t\t *        the type of call, but otherwise the return is what will be used\n\t\t *        for the data requested.\n\t\t *\n\t\t * Note that `data` is a getter and setter option. If you just require\n\t\t * formatting of data for output, you will likely want to use `render` which\n\t\t * is simply a getter and thus simpler to use.\n\t\t *\n\t\t * Note that prior to DataTables 1.9.2 `data` was called `mDataProp`. The\n\t\t * name change reflects the flexibility of this property and is consistent\n\t\t * with the naming of mRender. 
If 'mDataProp' is given, then it will still\n\t\t * be used by DataTables, as it automatically maps the old name to the new\n\t\t * if required.\n\t\t *\n\t\t *  @type string|int|function|null\n\t\t *  @default null <i>Use automatically calculated column index</i>\n\t\t *\n\t\t *  @name DataTable.defaults.column.data\n\t\t *  @dtopt Columns\n\t\t *\n\t\t *  @example\n\t\t *    // Read table data from objects\n\t\t *    // JSON structure for each row:\n\t\t *    //   {\n\t\t *    //      \"engine\": {value},\n\t\t *    //      \"browser\": {value},\n\t\t *    //      \"platform\": {value},\n\t\t *    //      \"version\": {value},\n\t\t *    //      \"grade\": {value}\n\t\t *    //   }\n\t\t *    $(document).ready( function() {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"ajaxSource\": \"sources/objects.txt\",\n\t\t *        \"columns\": [\n\t\t *          { \"data\": \"engine\" },\n\t\t *          { \"data\": \"browser\" },\n\t\t *          { \"data\": \"platform\" },\n\t\t *          { \"data\": \"version\" },\n\t\t *          { \"data\": \"grade\" }\n\t\t *        ]\n\t\t *      } );\n\t\t *    } );\n\t\t *\n\t\t *  @example\n\t\t *    // Read information from deeply nested objects\n\t\t *    // JSON structure for each row:\n\t\t *    //   {\n\t\t *    //      \"engine\": {value},\n\t\t *    //      \"browser\": {value},\n\t\t *    //      \"platform\": {\n\t\t *    //         \"inner\": {value}\n\t\t *    //      },\n\t\t *    //      \"details\": [\n\t\t *    //         {value}, {value}\n\t\t *    //      ]\n\t\t *    //   }\n\t\t *    $(document).ready( function() {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"ajaxSource\": \"sources/deep.txt\",\n\t\t *        \"columns\": [\n\t\t *          { \"data\": \"engine\" },\n\t\t *          { \"data\": \"browser\" },\n\t\t *          { \"data\": \"platform.inner\" },\n\t\t *          { \"data\": \"platform.details.0\" },\n\t\t *          { \"data\": \"platform.details.1\" }\n\t\t *        
]\n\t\t *      } );\n\t\t *    } );\n\t\t *\n\t\t *  @example\n\t\t *    // Using `data` as a function to provide different information for\n\t\t *    // sorting, filtering and display. In this case, currency (price)\n\t\t *    $(document).ready( function() {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"columnDefs\": [ {\n\t\t *          \"targets\": [ 0 ],\n\t\t *          \"data\": function ( source, type, val ) {\n\t\t *            if (type === 'set') {\n\t\t *              source.price = val;\n\t\t *              // Store the computed dislay and filter values for efficiency\n\t\t *              source.price_display = val==\"\" ? \"\" : \"$\"+numberFormat(val);\n\t\t *              source.price_filter  = val==\"\" ? \"\" : \"$\"+numberFormat(val)+\" \"+val;\n\t\t *              return;\n\t\t *            }\n\t\t *            else if (type === 'display') {\n\t\t *              return source.price_display;\n\t\t *            }\n\t\t *            else if (type === 'filter') {\n\t\t *              return source.price_filter;\n\t\t *            }\n\t\t *            // 'sort', 'type' and undefined all just use the integer\n\t\t *            return source.price;\n\t\t *          }\n\t\t *        } ]\n\t\t *      } );\n\t\t *    } );\n\t\t *\n\t\t *  @example\n\t\t *    // Using default content\n\t\t *    $(document).ready( function() {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"columnDefs\": [ {\n\t\t *          \"targets\": [ 0 ],\n\t\t *          \"data\": null,\n\t\t *          \"defaultContent\": \"Click to edit\"\n\t\t *        } ]\n\t\t *      } );\n\t\t *    } );\n\t\t *\n\t\t *  @example\n\t\t *    // Using array notation - outputting a list from an array\n\t\t *    $(document).ready( function() {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"columnDefs\": [ {\n\t\t *          \"targets\": [ 0 ],\n\t\t *          \"data\": \"name[, ]\"\n\t\t *        } ]\n\t\t *      } );\n\t\t *    } );\n\t\t *\n\t\t */\n\t\t\"mData\": 
null,\n\t\n\t\n\t\t/**\n\t\t * This property is the rendering partner to `data` and it is suggested that\n\t\t * when you want to manipulate data for display (including filtering,\n\t\t * sorting etc) without altering the underlying data for the table, use this\n\t\t * property. `render` can be considered to be the the read only companion to\n\t\t * `data` which is read / write (then as such more complex). Like `data`\n\t\t * this option can be given in a number of different ways to effect its\n\t\t * behaviour:\n\t\t *\n\t\t * * `integer` - treated as an array index for the data source. This is the\n\t\t *   default that DataTables uses (incrementally increased for each column).\n\t\t * * `string` - read an object property from the data source. There are\n\t\t *   three 'special' options that can be used in the string to alter how\n\t\t *   DataTables reads the data from the source object:\n\t\t *    * `.` - Dotted Javascript notation. Just as you use a `.` in\n\t\t *      Javascript to read from nested objects, so to can the options\n\t\t *      specified in `data`. For example: `browser.version` or\n\t\t *      `browser.name`. If your object parameter name contains a period, use\n\t\t *      `\\\\` to escape it - i.e. `first\\\\.name`.\n\t\t *    * `[]` - Array notation. DataTables can automatically combine data\n\t\t *      from and array source, joining the data with the characters provided\n\t\t *      between the two brackets. For example: `name[, ]` would provide a\n\t\t *      comma-space separated list from the source array. If no characters\n\t\t *      are provided between the brackets, the original array source is\n\t\t *      returned.\n\t\t *    * `()` - Function notation. Adding `()` to the end of a parameter will\n\t\t *      execute a function of the name given. 
For example: `browser()` for a\n\t\t *      simple function on the data source, `browser.version()` for a\n\t\t *      function in a nested property or even `browser().version` to get an\n\t\t *      object property if the function called returns an object.\n\t\t * * `object` - use different data for the different data types requested by\n\t\t *   DataTables ('filter', 'display', 'type' or 'sort'). The property names\n\t\t *   of the object is the data type the property refers to and the value can\n\t\t *   defined using an integer, string or function using the same rules as\n\t\t *   `render` normally does. Note that an `_` option _must_ be specified.\n\t\t *   This is the default value to use if you haven't specified a value for\n\t\t *   the data type requested by DataTables.\n\t\t * * `function` - the function given will be executed whenever DataTables\n\t\t *   needs to set or get the data for a cell in the column. The function\n\t\t *   takes three parameters:\n\t\t *    * Parameters:\n\t\t *      * {array|object} The data source for the row (based on `data`)\n\t\t *      * {string} The type call data requested - this will be 'filter',\n\t\t *        'display', 'type' or 'sort'.\n\t\t *      * {array|object} The full data source for the row (not based on\n\t\t *        `data`)\n\t\t *    * Return:\n\t\t *      * The return value from the function is what will be used for the\n\t\t *        data requested.\n\t\t *\n\t\t *  @type string|int|function|object|null\n\t\t *  @default null Use the data source value.\n\t\t *\n\t\t *  @name DataTable.defaults.column.render\n\t\t *  @dtopt Columns\n\t\t *\n\t\t *  @example\n\t\t *    // Create a comma separated list from an array of objects\n\t\t *    $(document).ready( function() {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"ajaxSource\": \"sources/deep.txt\",\n\t\t *        \"columns\": [\n\t\t *          { \"data\": \"engine\" },\n\t\t *          { \"data\": \"browser\" },\n\t\t *          {\n\t\t *      
      \"data\": \"platform\",\n\t\t *            \"render\": \"[, ].name\"\n\t\t *          }\n\t\t *        ]\n\t\t *      } );\n\t\t *    } );\n\t\t *\n\t\t *  @example\n\t\t *    // Execute a function to obtain data\n\t\t *    $(document).ready( function() {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"columnDefs\": [ {\n\t\t *          \"targets\": [ 0 ],\n\t\t *          \"data\": null, // Use the full data source object for the renderer's source\n\t\t *          \"render\": \"browserName()\"\n\t\t *        } ]\n\t\t *      } );\n\t\t *    } );\n\t\t *\n\t\t *  @example\n\t\t *    // As an object, extracting different data for the different types\n\t\t *    // This would be used with a data source such as:\n\t\t *    //   { \"phone\": 5552368, \"phone_filter\": \"5552368 555-2368\", \"phone_display\": \"555-2368\" }\n\t\t *    // Here the `phone` integer is used for sorting and type detection, while `phone_filter`\n\t\t *    // (which has both forms) is used for filtering for if a user inputs either format, while\n\t\t *    // the formatted phone number is the one that is shown in the table.\n\t\t *    $(document).ready( function() {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"columnDefs\": [ {\n\t\t *          \"targets\": [ 0 ],\n\t\t *          \"data\": null, // Use the full data source object for the renderer's source\n\t\t *          \"render\": {\n\t\t *            \"_\": \"phone\",\n\t\t *            \"filter\": \"phone_filter\",\n\t\t *            \"display\": \"phone_display\"\n\t\t *          }\n\t\t *        } ]\n\t\t *      } );\n\t\t *    } );\n\t\t *\n\t\t *  @example\n\t\t *    // Use as a function to create a link from the data source\n\t\t *    $(document).ready( function() {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"columnDefs\": [ {\n\t\t *          \"targets\": [ 0 ],\n\t\t *          \"data\": \"download_link\",\n\t\t *          \"render\": function ( data, type, full ) {\n\t\t *            return 
'<a href=\"'+data+'\">Download</a>';\n\t\t *          }\n\t\t *        } ]\n\t\t *      } );\n\t\t *    } );\n\t\t */\n\t\t\"mRender\": null,\n\t\n\t\n\t\t/**\n\t\t * Change the cell type created for the column - either TD cells or TH cells. This\n\t\t * can be useful as TH cells have semantic meaning in the table body, allowing them\n\t\t * to act as a header for a row (you may wish to add scope='row' to the TH elements).\n\t\t *  @type string\n\t\t *  @default td\n\t\t *\n\t\t *  @name DataTable.defaults.column.cellType\n\t\t *  @dtopt Columns\n\t\t *\n\t\t *  @example\n\t\t *    // Make the first column use TH cells\n\t\t *    $(document).ready( function() {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"columnDefs\": [ {\n\t\t *          \"targets\": [ 0 ],\n\t\t *          \"cellType\": \"th\"\n\t\t *        } ]\n\t\t *      } );\n\t\t *    } );\n\t\t */\n\t\t\"sCellType\": \"td\",\n\t\n\t\n\t\t/**\n\t\t * Class to give to each cell in this column.\n\t\t *  @type string\n\t\t *  @default <i>Empty string</i>\n\t\t *\n\t\t *  @name DataTable.defaults.column.class\n\t\t *  @dtopt Columns\n\t\t *\n\t\t *  @example\n\t\t *    // Using `columnDefs`\n\t\t *    $(document).ready( function() {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"columnDefs\": [\n\t\t *          { \"class\": \"my_class\", \"targets\": [ 0 ] }\n\t\t *        ]\n\t\t *      } );\n\t\t *    } );\n\t\t *\n\t\t *  @example\n\t\t *    // Using `columns`\n\t\t *    $(document).ready( function() {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"columns\": [\n\t\t *          { \"class\": \"my_class\" },\n\t\t *          null,\n\t\t *          null,\n\t\t *          null,\n\t\t *          null\n\t\t *        ]\n\t\t *      } );\n\t\t *    } );\n\t\t */\n\t\t\"sClass\": \"\",\n\t\n\t\t/**\n\t\t * When DataTables calculates the column widths to assign to each column,\n\t\t * it finds the longest string in each column and then constructs a\n\t\t * temporary table and reads the 
widths from that. The problem with this\n\t\t * is that \"mmm\" is much wider then \"iiii\", but the latter is a longer\n\t\t * string - thus the calculation can go wrong (doing it properly and putting\n\t\t * it into an DOM object and measuring that is horribly(!) slow). Thus as\n\t\t * a \"work around\" we provide this option. It will append its value to the\n\t\t * text that is found to be the longest string for the column - i.e. padding.\n\t\t * Generally you shouldn't need this!\n\t\t *  @type string\n\t\t *  @default <i>Empty string<i>\n\t\t *\n\t\t *  @name DataTable.defaults.column.contentPadding\n\t\t *  @dtopt Columns\n\t\t *\n\t\t *  @example\n\t\t *    // Using `columns`\n\t\t *    $(document).ready( function() {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"columns\": [\n\t\t *          null,\n\t\t *          null,\n\t\t *          null,\n\t\t *          {\n\t\t *            \"contentPadding\": \"mmm\"\n\t\t *          }\n\t\t *        ]\n\t\t *      } );\n\t\t *    } );\n\t\t */\n\t\t\"sContentPadding\": \"\",\n\t\n\t\n\t\t/**\n\t\t * Allows a default value to be given for a column's data, and will be used\n\t\t * whenever a null data source is encountered (this can be because `data`\n\t\t * is set to null, or because the data source itself is null).\n\t\t *  @type string\n\t\t *  @default null\n\t\t *\n\t\t *  @name DataTable.defaults.column.defaultContent\n\t\t *  @dtopt Columns\n\t\t *\n\t\t *  @example\n\t\t *    // Using `columnDefs`\n\t\t *    $(document).ready( function() {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"columnDefs\": [\n\t\t *          {\n\t\t *            \"data\": null,\n\t\t *            \"defaultContent\": \"Edit\",\n\t\t *            \"targets\": [ -1 ]\n\t\t *          }\n\t\t *        ]\n\t\t *      } );\n\t\t *    } );\n\t\t *\n\t\t *  @example\n\t\t *    // Using `columns`\n\t\t *    $(document).ready( function() {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"columns\": [\n\t\t *        
  null,\n\t\t *          null,\n\t\t *          null,\n\t\t *          {\n\t\t *            \"data\": null,\n\t\t *            \"defaultContent\": \"Edit\"\n\t\t *          }\n\t\t *        ]\n\t\t *      } );\n\t\t *    } );\n\t\t */\n\t\t\"sDefaultContent\": null,\n\t\n\t\n\t\t/**\n\t\t * This parameter is only used in DataTables' server-side processing. It can\n\t\t * be exceptionally useful to know what columns are being displayed on the\n\t\t * client side, and to map these to database fields. When defined, the names\n\t\t * also allow DataTables to reorder information from the server if it comes\n\t\t * back in an unexpected order (i.e. if you switch your columns around on the\n\t\t * client-side, your server-side code does not also need updating).\n\t\t *  @type string\n\t\t *  @default <i>Empty string</i>\n\t\t *\n\t\t *  @name DataTable.defaults.column.name\n\t\t *  @dtopt Columns\n\t\t *\n\t\t *  @example\n\t\t *    // Using `columnDefs`\n\t\t *    $(document).ready( function() {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"columnDefs\": [\n\t\t *          { \"name\": \"engine\", \"targets\": [ 0 ] },\n\t\t *          { \"name\": \"browser\", \"targets\": [ 1 ] },\n\t\t *          { \"name\": \"platform\", \"targets\": [ 2 ] },\n\t\t *          { \"name\": \"version\", \"targets\": [ 3 ] },\n\t\t *          { \"name\": \"grade\", \"targets\": [ 4 ] }\n\t\t *        ]\n\t\t *      } );\n\t\t *    } );\n\t\t *\n\t\t *  @example\n\t\t *    // Using `columns`\n\t\t *    $(document).ready( function() {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"columns\": [\n\t\t *          { \"name\": \"engine\" },\n\t\t *          { \"name\": \"browser\" },\n\t\t *          { \"name\": \"platform\" },\n\t\t *          { \"name\": \"version\" },\n\t\t *          { \"name\": \"grade\" }\n\t\t *        ]\n\t\t *      } );\n\t\t *    } );\n\t\t */\n\t\t\"sName\": \"\",\n\t\n\t\n\t\t/**\n\t\t * Defines a data source type for the ordering which can be 
used to read\n\t\t * real-time information from the table (updating the internally cached\n\t\t * version) prior to ordering. This allows ordering to occur on user\n\t\t * editable elements such as form inputs.\n\t\t *  @type string\n\t\t *  @default std\n\t\t *\n\t\t *  @name DataTable.defaults.column.orderDataType\n\t\t *  @dtopt Columns\n\t\t *\n\t\t *  @example\n\t\t *    // Using `columnDefs`\n\t\t *    $(document).ready( function() {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"columnDefs\": [\n\t\t *          { \"orderDataType\": \"dom-text\", \"targets\": [ 2, 3 ] },\n\t\t *          { \"type\": \"numeric\", \"targets\": [ 3 ] },\n\t\t *          { \"orderDataType\": \"dom-select\", \"targets\": [ 4 ] },\n\t\t *          { \"orderDataType\": \"dom-checkbox\", \"targets\": [ 5 ] }\n\t\t *        ]\n\t\t *      } );\n\t\t *    } );\n\t\t *\n\t\t *  @example\n\t\t *    // Using `columns`\n\t\t *    $(document).ready( function() {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"columns\": [\n\t\t *          null,\n\t\t *          null,\n\t\t *          { \"orderDataType\": \"dom-text\" },\n\t\t *          { \"orderDataType\": \"dom-text\", \"type\": \"numeric\" },\n\t\t *          { \"orderDataType\": \"dom-select\" },\n\t\t *          { \"orderDataType\": \"dom-checkbox\" }\n\t\t *        ]\n\t\t *      } );\n\t\t *    } );\n\t\t */\n\t\t\"sSortDataType\": \"std\",\n\t\n\t\n\t\t/**\n\t\t * The title of this column.\n\t\t *  @type string\n\t\t *  @default null <i>Derived from the 'TH' value for this column in the\n\t\t *    original HTML table.</i>\n\t\t *\n\t\t *  @name DataTable.defaults.column.title\n\t\t *  @dtopt Columns\n\t\t *\n\t\t *  @example\n\t\t *    // Using `columnDefs`\n\t\t *    $(document).ready( function() {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"columnDefs\": [\n\t\t *          { \"title\": \"My column title\", \"targets\": [ 0 ] }\n\t\t *        ]\n\t\t *      } );\n\t\t *    } );\n\t\t *\n\t\t *  
@example\n\t\t *    // Using `columns`\n\t\t *    $(document).ready( function() {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"columns\": [\n\t\t *          { \"title\": \"My column title\" },\n\t\t *          null,\n\t\t *          null,\n\t\t *          null,\n\t\t *          null\n\t\t *        ]\n\t\t *      } );\n\t\t *    } );\n\t\t */\n\t\t\"sTitle\": null,\n\t\n\t\n\t\t/**\n\t\t * The type allows you to specify how the data for this column will be\n\t\t * ordered. Four types (string, numeric, date and html (which will strip\n\t\t * HTML tags before ordering)) are currently available. Note that only date\n\t\t * formats understood by Javascript's Date() object will be accepted as type\n\t\t * date. For example: \"Mar 26, 2008 5:03 PM\". May take the values: 'string',\n\t\t * 'numeric', 'date' or 'html' (by default). Further types can be adding\n\t\t * through plug-ins.\n\t\t *  @type string\n\t\t *  @default null <i>Auto-detected from raw data</i>\n\t\t *\n\t\t *  @name DataTable.defaults.column.type\n\t\t *  @dtopt Columns\n\t\t *\n\t\t *  @example\n\t\t *    // Using `columnDefs`\n\t\t *    $(document).ready( function() {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"columnDefs\": [\n\t\t *          { \"type\": \"html\", \"targets\": [ 0 ] }\n\t\t *        ]\n\t\t *      } );\n\t\t *    } );\n\t\t *\n\t\t *  @example\n\t\t *    // Using `columns`\n\t\t *    $(document).ready( function() {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"columns\": [\n\t\t *          { \"type\": \"html\" },\n\t\t *          null,\n\t\t *          null,\n\t\t *          null,\n\t\t *          null\n\t\t *        ]\n\t\t *      } );\n\t\t *    } );\n\t\t */\n\t\t\"sType\": null,\n\t\n\t\n\t\t/**\n\t\t * Defining the width of the column, this parameter may take any CSS value\n\t\t * (3em, 20px etc). 
DataTables applies 'smart' widths to columns which have not\n\t\t * been given a specific width through this interface ensuring that the table\n\t\t * remains readable.\n\t\t *  @type string\n\t\t *  @default null <i>Automatic</i>\n\t\t *\n\t\t *  @name DataTable.defaults.column.width\n\t\t *  @dtopt Columns\n\t\t *\n\t\t *  @example\n\t\t *    // Using `columnDefs`\n\t\t *    $(document).ready( function() {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"columnDefs\": [\n\t\t *          { \"width\": \"20%\", \"targets\": [ 0 ] }\n\t\t *        ]\n\t\t *      } );\n\t\t *    } );\n\t\t *\n\t\t *  @example\n\t\t *    // Using `columns`\n\t\t *    $(document).ready( function() {\n\t\t *      $('#example').dataTable( {\n\t\t *        \"columns\": [\n\t\t *          { \"width\": \"20%\" },\n\t\t *          null,\n\t\t *          null,\n\t\t *          null,\n\t\t *          null\n\t\t *        ]\n\t\t *      } );\n\t\t *    } );\n\t\t */\n\t\t\"sWidth\": null\n\t};\n\t\n\t_fnHungarianMap( DataTable.defaults.column );\n\t\n\t\n\t\n\t/**\n\t * DataTables settings object - this holds all the information needed for a\n\t * given table, including configuration, data and current application of the\n\t * table options. DataTables does not have a single instance for each DataTable\n\t * with the settings attached to that instance, but rather instances of the\n\t * DataTable \"class\" are created on-the-fly as needed (typically by a\n\t * $().dataTable() call) and the settings object is then applied to that\n\t * instance.\n\t *\n\t * Note that this object is related to {@link DataTable.defaults} but this\n\t * one is the internal data store for DataTables's cache of columns. It should\n\t * NOT be manipulated outside of DataTables. 
Any configuration should be done\n\t * through the initialisation options.\n\t *  @namespace\n\t *  @todo Really should attach the settings object to individual instances so we\n\t *    don't need to create new instances on each $().dataTable() call (if the\n\t *    table already exists). It would also save passing oSettings around and\n\t *    into every single function. However, this is a very significant\n\t *    architecture change for DataTables and will almost certainly break\n\t *    backwards compatibility with older installations. This is something that\n\t *    will be done in 2.0.\n\t */\n\tDataTable.models.oSettings = {\n\t\t/**\n\t\t * Primary features of DataTables and their enablement state.\n\t\t *  @namespace\n\t\t */\n\t\t\"oFeatures\": {\n\t\n\t\t\t/**\n\t\t\t * Flag to say if DataTables should automatically try to calculate the\n\t\t\t * optimum table and columns widths (true) or not (false).\n\t\t\t * Note that this parameter will be set by the initialisation routine. To\n\t\t\t * set a default use {@link DataTable.defaults}.\n\t\t\t *  @type boolean\n\t\t\t */\n\t\t\t\"bAutoWidth\": null,\n\t\n\t\t\t/**\n\t\t\t * Delay the creation of TR and TD elements until they are actually\n\t\t\t * needed by a driven page draw. This can give a significant speed\n\t\t\t * increase for Ajax source and Javascript source data, but makes no\n\t\t\t * difference at all fro DOM and server-side processing tables.\n\t\t\t * Note that this parameter will be set by the initialisation routine. To\n\t\t\t * set a default use {@link DataTable.defaults}.\n\t\t\t *  @type boolean\n\t\t\t */\n\t\t\t\"bDeferRender\": null,\n\t\n\t\t\t/**\n\t\t\t * Enable filtering on the table or not. Note that if this is disabled\n\t\t\t * then there is no filtering at all on the table, including fnFilter.\n\t\t\t * To just remove the filtering input use sDom and remove the 'f' option.\n\t\t\t * Note that this parameter will be set by the initialisation routine. 
To\n\t\t\t * set a default use {@link DataTable.defaults}.\n\t\t\t *  @type boolean\n\t\t\t */\n\t\t\t\"bFilter\": null,\n\t\n\t\t\t/**\n\t\t\t * Table information element (the 'Showing x of y records' div) enable\n\t\t\t * flag.\n\t\t\t * Note that this parameter will be set by the initialisation routine. To\n\t\t\t * set a default use {@link DataTable.defaults}.\n\t\t\t *  @type boolean\n\t\t\t */\n\t\t\t\"bInfo\": null,\n\t\n\t\t\t/**\n\t\t\t * Present a user control allowing the end user to change the page size\n\t\t\t * when pagination is enabled.\n\t\t\t * Note that this parameter will be set by the initialisation routine. To\n\t\t\t * set a default use {@link DataTable.defaults}.\n\t\t\t *  @type boolean\n\t\t\t */\n\t\t\t\"bLengthChange\": null,\n\t\n\t\t\t/**\n\t\t\t * Pagination enabled or not. Note that if this is disabled then length\n\t\t\t * changing must also be disabled.\n\t\t\t * Note that this parameter will be set by the initialisation routine. To\n\t\t\t * set a default use {@link DataTable.defaults}.\n\t\t\t *  @type boolean\n\t\t\t */\n\t\t\t\"bPaginate\": null,\n\t\n\t\t\t/**\n\t\t\t * Processing indicator enable flag whenever DataTables is enacting a\n\t\t\t * user request - typically an Ajax request for server-side processing.\n\t\t\t * Note that this parameter will be set by the initialisation routine. To\n\t\t\t * set a default use {@link DataTable.defaults}.\n\t\t\t *  @type boolean\n\t\t\t */\n\t\t\t\"bProcessing\": null,\n\t\n\t\t\t/**\n\t\t\t * Server-side processing enabled flag - when enabled DataTables will\n\t\t\t * get all data from the server for every draw - there is no filtering,\n\t\t\t * sorting or paging done on the client-side.\n\t\t\t * Note that this parameter will be set by the initialisation routine. 
To\n\t\t\t * set a default use {@link DataTable.defaults}.\n\t\t\t *  @type boolean\n\t\t\t */\n\t\t\t\"bServerSide\": null,\n\t\n\t\t\t/**\n\t\t\t * Sorting enablement flag.\n\t\t\t * Note that this parameter will be set by the initialisation routine. To\n\t\t\t * set a default use {@link DataTable.defaults}.\n\t\t\t *  @type boolean\n\t\t\t */\n\t\t\t\"bSort\": null,\n\t\n\t\t\t/**\n\t\t\t * Multi-column sorting\n\t\t\t * Note that this parameter will be set by the initialisation routine. To\n\t\t\t * set a default use {@link DataTable.defaults}.\n\t\t\t *  @type boolean\n\t\t\t */\n\t\t\t\"bSortMulti\": null,\n\t\n\t\t\t/**\n\t\t\t * Apply a class to the columns which are being sorted to provide a\n\t\t\t * visual highlight or not. This can slow things down when enabled since\n\t\t\t * there is a lot of DOM interaction.\n\t\t\t * Note that this parameter will be set by the initialisation routine. To\n\t\t\t * set a default use {@link DataTable.defaults}.\n\t\t\t *  @type boolean\n\t\t\t */\n\t\t\t\"bSortClasses\": null,\n\t\n\t\t\t/**\n\t\t\t * State saving enablement flag.\n\t\t\t * Note that this parameter will be set by the initialisation routine. To\n\t\t\t * set a default use {@link DataTable.defaults}.\n\t\t\t *  @type boolean\n\t\t\t */\n\t\t\t\"bStateSave\": null\n\t\t},\n\t\n\t\n\t\t/**\n\t\t * Scrolling settings for a table.\n\t\t *  @namespace\n\t\t */\n\t\t\"oScroll\": {\n\t\t\t/**\n\t\t\t * When the table is shorter in height than sScrollY, collapse the\n\t\t\t * table container down to the height of the table (when true).\n\t\t\t * Note that this parameter will be set by the initialisation routine. To\n\t\t\t * set a default use {@link DataTable.defaults}.\n\t\t\t *  @type boolean\n\t\t\t */\n\t\t\t\"bCollapse\": null,\n\t\n\t\t\t/**\n\t\t\t * Width of the scrollbar for the web-browser's platform. 
Calculated\n\t\t\t * during table initialisation.\n\t\t\t *  @type int\n\t\t\t *  @default 0\n\t\t\t */\n\t\t\t\"iBarWidth\": 0,\n\t\n\t\t\t/**\n\t\t\t * Viewport width for horizontal scrolling. Horizontal scrolling is\n\t\t\t * disabled if an empty string.\n\t\t\t * Note that this parameter will be set by the initialisation routine. To\n\t\t\t * set a default use {@link DataTable.defaults}.\n\t\t\t *  @type string\n\t\t\t */\n\t\t\t\"sX\": null,\n\t\n\t\t\t/**\n\t\t\t * Width to expand the table to when using x-scrolling. Typically you\n\t\t\t * should not need to use this.\n\t\t\t * Note that this parameter will be set by the initialisation routine. To\n\t\t\t * set a default use {@link DataTable.defaults}.\n\t\t\t *  @type string\n\t\t\t *  @deprecated\n\t\t\t */\n\t\t\t\"sXInner\": null,\n\t\n\t\t\t/**\n\t\t\t * Viewport height for vertical scrolling. Vertical scrolling is disabled\n\t\t\t * if an empty string.\n\t\t\t * Note that this parameter will be set by the initialisation routine. To\n\t\t\t * set a default use {@link DataTable.defaults}.\n\t\t\t *  @type string\n\t\t\t */\n\t\t\t\"sY\": null\n\t\t},\n\t\n\t\t/**\n\t\t * Language information for the table.\n\t\t *  @namespace\n\t\t *  @extends DataTable.defaults.oLanguage\n\t\t */\n\t\t\"oLanguage\": {\n\t\t\t/**\n\t\t\t * Information callback function. 
See\n\t\t\t * {@link DataTable.defaults.fnInfoCallback}\n\t\t\t *  @type function\n\t\t\t *  @default null\n\t\t\t */\n\t\t\t\"fnInfoCallback\": null\n\t\t},\n\t\n\t\t/**\n\t\t * Browser support parameters\n\t\t *  @namespace\n\t\t */\n\t\t\"oBrowser\": {\n\t\t\t/**\n\t\t\t * Indicate if the browser incorrectly calculates width:100% inside a\n\t\t\t * scrolling element (IE6/7)\n\t\t\t *  @type boolean\n\t\t\t *  @default false\n\t\t\t */\n\t\t\t\"bScrollOversize\": false,\n\t\n\t\t\t/**\n\t\t\t * Determine if the vertical scrollbar is on the right or left of the\n\t\t\t * scrolling container - needed for rtl language layout, although not\n\t\t\t * all browsers move the scrollbar (Safari).\n\t\t\t *  @type boolean\n\t\t\t *  @default false\n\t\t\t */\n\t\t\t\"bScrollbarLeft\": false,\n\t\n\t\t\t/**\n\t\t\t * Flag for if `getBoundingClientRect` is fully supported or not\n\t\t\t *  @type boolean\n\t\t\t *  @default false\n\t\t\t */\n\t\t\t\"bBounding\": false,\n\t\n\t\t\t/**\n\t\t\t * Browser scrollbar width\n\t\t\t *  @type integer\n\t\t\t *  @default 0\n\t\t\t */\n\t\t\t\"barWidth\": 0\n\t\t},\n\t\n\t\n\t\t\"ajax\": null,\n\t\n\t\n\t\t/**\n\t\t * Array referencing the nodes which are used for the features. 
The\n\t\t * parameters of this object match what is allowed by sDom - i.e.\n\t\t *   <ul>\n\t\t *     <li>'l' - Length changing</li>\n\t\t *     <li>'f' - Filtering input</li>\n\t\t *     <li>'t' - The table!</li>\n\t\t *     <li>'i' - Information</li>\n\t\t *     <li>'p' - Pagination</li>\n\t\t *     <li>'r' - pRocessing</li>\n\t\t *   </ul>\n\t\t *  @type array\n\t\t *  @default []\n\t\t */\n\t\t\"aanFeatures\": [],\n\t\n\t\t/**\n\t\t * Store data information - see {@link DataTable.models.oRow} for detailed\n\t\t * information.\n\t\t *  @type array\n\t\t *  @default []\n\t\t */\n\t\t\"aoData\": [],\n\t\n\t\t/**\n\t\t * Array of indexes which are in the current display (after filtering etc)\n\t\t *  @type array\n\t\t *  @default []\n\t\t */\n\t\t\"aiDisplay\": [],\n\t\n\t\t/**\n\t\t * Array of indexes for display - no filtering\n\t\t *  @type array\n\t\t *  @default []\n\t\t */\n\t\t\"aiDisplayMaster\": [],\n\t\n\t\t/**\n\t\t * Map of row ids to data indexes\n\t\t *  @type object\n\t\t *  @default {}\n\t\t */\n\t\t\"aIds\": {},\n\t\n\t\t/**\n\t\t * Store information about each column that is in use\n\t\t *  @type array\n\t\t *  @default []\n\t\t */\n\t\t\"aoColumns\": [],\n\t\n\t\t/**\n\t\t * Store information about the table's header\n\t\t *  @type array\n\t\t *  @default []\n\t\t */\n\t\t\"aoHeader\": [],\n\t\n\t\t/**\n\t\t * Store information about the table's footer\n\t\t *  @type array\n\t\t *  @default []\n\t\t */\n\t\t\"aoFooter\": [],\n\t\n\t\t/**\n\t\t * Store the applied global search information in case we want to force a\n\t\t * research or compare the old search to a new one.\n\t\t * Note that this parameter will be set by the initialisation routine. 
To\n\t\t * set a default use {@link DataTable.defaults}.\n\t\t *  @namespace\n\t\t *  @extends DataTable.models.oSearch\n\t\t */\n\t\t\"oPreviousSearch\": {},\n\t\n\t\t/**\n\t\t * Store the applied search for each column - see\n\t\t * {@link DataTable.models.oSearch} for the format that is used for the\n\t\t * filtering information for each column.\n\t\t *  @type array\n\t\t *  @default []\n\t\t */\n\t\t\"aoPreSearchCols\": [],\n\t\n\t\t/**\n\t\t * Sorting that is applied to the table. Note that the inner arrays are\n\t\t * used in the following manner:\n\t\t * <ul>\n\t\t *   <li>Index 0 - column number</li>\n\t\t *   <li>Index 1 - current sorting direction</li>\n\t\t * </ul>\n\t\t * Note that this parameter will be set by the initialisation routine. To\n\t\t * set a default use {@link DataTable.defaults}.\n\t\t *  @type array\n\t\t *  @todo These inner arrays should really be objects\n\t\t */\n\t\t\"aaSorting\": null,\n\t\n\t\t/**\n\t\t * Sorting that is always applied to the table (i.e. prefixed in front of\n\t\t * aaSorting).\n\t\t * Note that this parameter will be set by the initialisation routine. To\n\t\t * set a default use {@link DataTable.defaults}.\n\t\t *  @type array\n\t\t *  @default []\n\t\t */\n\t\t\"aaSortingFixed\": [],\n\t\n\t\t/**\n\t\t * Classes to use for the striping of a table.\n\t\t * Note that this parameter will be set by the initialisation routine. To\n\t\t * set a default use {@link DataTable.defaults}.\n\t\t *  @type array\n\t\t *  @default []\n\t\t */\n\t\t\"asStripeClasses\": null,\n\t\n\t\t/**\n\t\t * If restoring a table - we should restore its striping classes as well\n\t\t *  @type array\n\t\t *  @default []\n\t\t */\n\t\t\"asDestroyStripes\": [],\n\t\n\t\t/**\n\t\t * If restoring a table - we should restore its width\n\t\t *  @type int\n\t\t *  @default 0\n\t\t */\n\t\t\"sDestroyWidth\": 0,\n\t\n\t\t/**\n\t\t * Callback functions array for every time a row is inserted (i.e. 
on a draw).\n\t\t *  @type array\n\t\t *  @default []\n\t\t */\n\t\t\"aoRowCallback\": [],\n\t\n\t\t/**\n\t\t * Callback functions for the header on each draw.\n\t\t *  @type array\n\t\t *  @default []\n\t\t */\n\t\t\"aoHeaderCallback\": [],\n\t\n\t\t/**\n\t\t * Callback function for the footer on each draw.\n\t\t *  @type array\n\t\t *  @default []\n\t\t */\n\t\t\"aoFooterCallback\": [],\n\t\n\t\t/**\n\t\t * Array of callback functions for draw callback functions\n\t\t *  @type array\n\t\t *  @default []\n\t\t */\n\t\t\"aoDrawCallback\": [],\n\t\n\t\t/**\n\t\t * Array of callback functions for row created function\n\t\t *  @type array\n\t\t *  @default []\n\t\t */\n\t\t\"aoRowCreatedCallback\": [],\n\t\n\t\t/**\n\t\t * Callback functions for just before the table is redrawn. A return of\n\t\t * false will be used to cancel the draw.\n\t\t *  @type array\n\t\t *  @default []\n\t\t */\n\t\t\"aoPreDrawCallback\": [],\n\t\n\t\t/**\n\t\t * Callback functions for when the table has been initialised.\n\t\t *  @type array\n\t\t *  @default []\n\t\t */\n\t\t\"aoInitComplete\": [],\n\t\n\t\n\t\t/**\n\t\t * Callbacks for modifying the settings to be stored for state saving, prior to\n\t\t * saving state.\n\t\t *  @type array\n\t\t *  @default []\n\t\t */\n\t\t\"aoStateSaveParams\": [],\n\t\n\t\t/**\n\t\t * Callbacks for modifying the settings that have been stored for state saving\n\t\t * prior to using the stored values to restore the state.\n\t\t *  @type array\n\t\t *  @default []\n\t\t */\n\t\t\"aoStateLoadParams\": [],\n\t\n\t\t/**\n\t\t * Callbacks for operating on the settings object once the saved state has been\n\t\t * loaded\n\t\t *  @type array\n\t\t *  @default []\n\t\t */\n\t\t\"aoStateLoaded\": [],\n\t\n\t\t/**\n\t\t * Cache the table ID for quick access\n\t\t *  @type string\n\t\t *  @default <i>Empty string</i>\n\t\t */\n\t\t\"sTableId\": \"\",\n\t\n\t\t/**\n\t\t * The TABLE node for the main table\n\t\t *  @type node\n\t\t *  @default null\n\t\t 
*/\n\t\t\"nTable\": null,\n\t\n\t\t/**\n\t\t * Permanent ref to the thead element\n\t\t *  @type node\n\t\t *  @default null\n\t\t */\n\t\t\"nTHead\": null,\n\t\n\t\t/**\n\t\t * Permanent ref to the tfoot element - if it exists\n\t\t *  @type node\n\t\t *  @default null\n\t\t */\n\t\t\"nTFoot\": null,\n\t\n\t\t/**\n\t\t * Permanent ref to the tbody element\n\t\t *  @type node\n\t\t *  @default null\n\t\t */\n\t\t\"nTBody\": null,\n\t\n\t\t/**\n\t\t * Cache the wrapper node (contains all DataTables controlled elements)\n\t\t *  @type node\n\t\t *  @default null\n\t\t */\n\t\t\"nTableWrapper\": null,\n\t\n\t\t/**\n\t\t * Indicate if when using server-side processing the loading of data\n\t\t * should be deferred until the second draw.\n\t\t * Note that this parameter will be set by the initialisation routine. To\n\t\t * set a default use {@link DataTable.defaults}.\n\t\t *  @type boolean\n\t\t *  @default false\n\t\t */\n\t\t\"bDeferLoading\": false,\n\t\n\t\t/**\n\t\t * Indicate if all required information has been read in\n\t\t *  @type boolean\n\t\t *  @default false\n\t\t */\n\t\t\"bInitialised\": false,\n\t\n\t\t/**\n\t\t * Information about open rows. Each object in the array has the parameters\n\t\t * 'nTr' and 'nParent'\n\t\t *  @type array\n\t\t *  @default []\n\t\t */\n\t\t\"aoOpenRows\": [],\n\t\n\t\t/**\n\t\t * Dictate the positioning of DataTables' control elements - see\n\t\t * {@link DataTable.model.oInit.sDom}.\n\t\t * Note that this parameter will be set by the initialisation routine. To\n\t\t * set a default use {@link DataTable.defaults}.\n\t\t *  @type string\n\t\t *  @default null\n\t\t */\n\t\t\"sDom\": null,\n\t\n\t\t/**\n\t\t * Search delay (in mS)\n\t\t *  @type integer\n\t\t *  @default null\n\t\t */\n\t\t\"searchDelay\": null,\n\t\n\t\t/**\n\t\t * Which type of pagination should be used.\n\t\t * Note that this parameter will be set by the initialisation routine. 
To\n\t\t * set a default use {@link DataTable.defaults}.\n\t\t *  @type string\n\t\t *  @default two_button\n\t\t */\n\t\t\"sPaginationType\": \"two_button\",\n\t\n\t\t/**\n\t\t * The state duration (for `stateSave`) in seconds.\n\t\t * Note that this parameter will be set by the initialisation routine. To\n\t\t * set a default use {@link DataTable.defaults}.\n\t\t *  @type int\n\t\t *  @default 0\n\t\t */\n\t\t\"iStateDuration\": 0,\n\t\n\t\t/**\n\t\t * Array of callback functions for state saving. Each array element is an\n\t\t * object with the following parameters:\n\t\t *   <ul>\n\t\t *     <li>function:fn - function to call. Takes two parameters, oSettings\n\t\t *       and the JSON string to save that has been thus far created. Returns\n\t\t *       a JSON string to be inserted into a json object\n\t\t *       (i.e. '\"param\": [ 0, 1, 2]')</li>\n\t\t *     <li>string:sName - name of callback</li>\n\t\t *   </ul>\n\t\t *  @type array\n\t\t *  @default []\n\t\t */\n\t\t\"aoStateSave\": [],\n\t\n\t\t/**\n\t\t * Array of callback functions for state loading. Each array element is an\n\t\t * object with the following parameters:\n\t\t *   <ul>\n\t\t *     <li>function:fn - function to call. Takes two parameters, oSettings\n\t\t *       and the object stored. May return false to cancel state loading</li>\n\t\t *     <li>string:sName - name of callback</li>\n\t\t *   </ul>\n\t\t *  @type array\n\t\t *  @default []\n\t\t */\n\t\t\"aoStateLoad\": [],\n\t\n\t\t/**\n\t\t * State that was saved. Useful for back reference\n\t\t *  @type object\n\t\t *  @default null\n\t\t */\n\t\t\"oSavedState\": null,\n\t\n\t\t/**\n\t\t * State that was loaded. Useful for back reference\n\t\t *  @type object\n\t\t *  @default null\n\t\t */\n\t\t\"oLoadedState\": null,\n\t\n\t\t/**\n\t\t * Source url for AJAX data for the table.\n\t\t * Note that this parameter will be set by the initialisation routine. 
To\n\t\t * set a default use {@link DataTable.defaults}.\n\t\t *  @type string\n\t\t *  @default null\n\t\t */\n\t\t\"sAjaxSource\": null,\n\t\n\t\t/**\n\t\t * Property from a given object from which to read the table data from. This\n\t\t * can be an empty string (when not server-side processing), in which case\n\t\t * it is  assumed an an array is given directly.\n\t\t * Note that this parameter will be set by the initialisation routine. To\n\t\t * set a default use {@link DataTable.defaults}.\n\t\t *  @type string\n\t\t */\n\t\t\"sAjaxDataProp\": null,\n\t\n\t\t/**\n\t\t * Note if draw should be blocked while getting data\n\t\t *  @type boolean\n\t\t *  @default true\n\t\t */\n\t\t\"bAjaxDataGet\": true,\n\t\n\t\t/**\n\t\t * The last jQuery XHR object that was used for server-side data gathering.\n\t\t * This can be used for working with the XHR information in one of the\n\t\t * callbacks\n\t\t *  @type object\n\t\t *  @default null\n\t\t */\n\t\t\"jqXHR\": null,\n\t\n\t\t/**\n\t\t * JSON returned from the server in the last Ajax request\n\t\t *  @type object\n\t\t *  @default undefined\n\t\t */\n\t\t\"json\": undefined,\n\t\n\t\t/**\n\t\t * Data submitted as part of the last Ajax request\n\t\t *  @type object\n\t\t *  @default undefined\n\t\t */\n\t\t\"oAjaxData\": undefined,\n\t\n\t\t/**\n\t\t * Function to get the server-side data.\n\t\t * Note that this parameter will be set by the initialisation routine. To\n\t\t * set a default use {@link DataTable.defaults}.\n\t\t *  @type function\n\t\t */\n\t\t\"fnServerData\": null,\n\t\n\t\t/**\n\t\t * Functions which are called prior to sending an Ajax request so extra\n\t\t * parameters can easily be sent to the server\n\t\t *  @type array\n\t\t *  @default []\n\t\t */\n\t\t\"aoServerParams\": [],\n\t\n\t\t/**\n\t\t * Send the XHR HTTP method - GET or POST (could be PUT or DELETE if\n\t\t * required).\n\t\t * Note that this parameter will be set by the initialisation routine. 
To\n\t\t * set a default use {@link DataTable.defaults}.\n\t\t *  @type string\n\t\t */\n\t\t\"sServerMethod\": null,\n\t\n\t\t/**\n\t\t * Format numbers for display.\n\t\t * Note that this parameter will be set by the initialisation routine. To\n\t\t * set a default use {@link DataTable.defaults}.\n\t\t *  @type function\n\t\t */\n\t\t\"fnFormatNumber\": null,\n\t\n\t\t/**\n\t\t * List of options that can be used for the user selectable length menu.\n\t\t * Note that this parameter will be set by the initialisation routine. To\n\t\t * set a default use {@link DataTable.defaults}.\n\t\t *  @type array\n\t\t *  @default []\n\t\t */\n\t\t\"aLengthMenu\": null,\n\t\n\t\t/**\n\t\t * Counter for the draws that the table does. Also used as a tracker for\n\t\t * server-side processing\n\t\t *  @type int\n\t\t *  @default 0\n\t\t */\n\t\t\"iDraw\": 0,\n\t\n\t\t/**\n\t\t * Indicate if a redraw is being done - useful for Ajax\n\t\t *  @type boolean\n\t\t *  @default false\n\t\t */\n\t\t\"bDrawing\": false,\n\t\n\t\t/**\n\t\t * Draw index (iDraw) of the last error when parsing the returned data\n\t\t *  @type int\n\t\t *  @default -1\n\t\t */\n\t\t\"iDrawError\": -1,\n\t\n\t\t/**\n\t\t * Paging display length\n\t\t *  @type int\n\t\t *  @default 10\n\t\t */\n\t\t\"_iDisplayLength\": 10,\n\t\n\t\t/**\n\t\t * Paging start point - aiDisplay index\n\t\t *  @type int\n\t\t *  @default 0\n\t\t */\n\t\t\"_iDisplayStart\": 0,\n\t\n\t\t/**\n\t\t * Server-side processing - number of records in the result set\n\t\t * (i.e. before filtering), Use fnRecordsTotal rather than\n\t\t * this property to get the value of the number of records, regardless of\n\t\t * the server-side processing setting.\n\t\t *  @type int\n\t\t *  @default 0\n\t\t *  @private\n\t\t */\n\t\t\"_iRecordsTotal\": 0,\n\t\n\t\t/**\n\t\t * Server-side processing - number of records in the current display set\n\t\t * (i.e. after filtering). 
Use fnRecordsDisplay rather than\n\t\t * this property to get the value of the number of records, regardless of\n\t\t * the server-side processing setting.\n\t\t *  @type boolean\n\t\t *  @default 0\n\t\t *  @private\n\t\t */\n\t\t\"_iRecordsDisplay\": 0,\n\t\n\t\t/**\n\t\t * Flag to indicate if jQuery UI marking and classes should be used.\n\t\t * Note that this parameter will be set by the initialisation routine. To\n\t\t * set a default use {@link DataTable.defaults}.\n\t\t *  @type boolean\n\t\t */\n\t\t\"bJUI\": null,\n\t\n\t\t/**\n\t\t * The classes to use for the table\n\t\t *  @type object\n\t\t *  @default {}\n\t\t */\n\t\t\"oClasses\": {},\n\t\n\t\t/**\n\t\t * Flag attached to the settings object so you can check in the draw\n\t\t * callback if filtering has been done in the draw. Deprecated in favour of\n\t\t * events.\n\t\t *  @type boolean\n\t\t *  @default false\n\t\t *  @deprecated\n\t\t */\n\t\t\"bFiltered\": false,\n\t\n\t\t/**\n\t\t * Flag attached to the settings object so you can check in the draw\n\t\t * callback if sorting has been done in the draw. Deprecated in favour of\n\t\t * events.\n\t\t *  @type boolean\n\t\t *  @default false\n\t\t *  @deprecated\n\t\t */\n\t\t\"bSorted\": false,\n\t\n\t\t/**\n\t\t * Indicate that if multiple rows are in the header and there is more than\n\t\t * one unique cell per column, if the top one (true) or bottom one (false)\n\t\t * should be used for sorting / title by DataTables.\n\t\t * Note that this parameter will be set by the initialisation routine. 
To\n\t\t * set a default use {@link DataTable.defaults}.\n\t\t *  @type boolean\n\t\t */\n\t\t\"bSortCellsTop\": null,\n\t\n\t\t/**\n\t\t * Initialisation object that is used for the table\n\t\t *  @type object\n\t\t *  @default null\n\t\t */\n\t\t\"oInit\": null,\n\t\n\t\t/**\n\t\t * Destroy callback functions - for plug-ins to attach themselves to the\n\t\t * destroy so they can clean up markup and events.\n\t\t *  @type array\n\t\t *  @default []\n\t\t */\n\t\t\"aoDestroyCallback\": [],\n\t\n\t\n\t\t/**\n\t\t * Get the number of records in the current record set, before filtering\n\t\t *  @type function\n\t\t */\n\t\t\"fnRecordsTotal\": function ()\n\t\t{\n\t\t\treturn _fnDataSource( this ) == 'ssp' ?\n\t\t\t\tthis._iRecordsTotal * 1 :\n\t\t\t\tthis.aiDisplayMaster.length;\n\t\t},\n\t\n\t\t/**\n\t\t * Get the number of records in the current record set, after filtering\n\t\t *  @type function\n\t\t */\n\t\t\"fnRecordsDisplay\": function ()\n\t\t{\n\t\t\treturn _fnDataSource( this ) == 'ssp' ?\n\t\t\t\tthis._iRecordsDisplay * 1 :\n\t\t\t\tthis.aiDisplay.length;\n\t\t},\n\t\n\t\t/**\n\t\t * Get the display end point - aiDisplay index\n\t\t *  @type function\n\t\t */\n\t\t\"fnDisplayEnd\": function ()\n\t\t{\n\t\t\tvar\n\t\t\t\tlen      = this._iDisplayLength,\n\t\t\t\tstart    = this._iDisplayStart,\n\t\t\t\tcalc     = start + len,\n\t\t\t\trecords  = this.aiDisplay.length,\n\t\t\t\tfeatures = this.oFeatures,\n\t\t\t\tpaginate = features.bPaginate;\n\t\n\t\t\tif ( features.bServerSide ) {\n\t\t\t\treturn paginate === false || len === -1 ?\n\t\t\t\t\tstart + records :\n\t\t\t\t\tMath.min( start+len, this._iRecordsDisplay );\n\t\t\t}\n\t\t\telse {\n\t\t\t\treturn ! 
paginate || calc>records || len===-1 ?\n\t\t\t\t\trecords :\n\t\t\t\t\tcalc;\n\t\t\t}\n\t\t},\n\t\n\t\t/**\n\t\t * The DataTables object for this table\n\t\t *  @type object\n\t\t *  @default null\n\t\t */\n\t\t\"oInstance\": null,\n\t\n\t\t/**\n\t\t * Unique identifier for each instance of the DataTables object. If there\n\t\t * is an ID on the table node, then it takes that value, otherwise an\n\t\t * incrementing internal counter is used.\n\t\t *  @type string\n\t\t *  @default null\n\t\t */\n\t\t\"sInstance\": null,\n\t\n\t\t/**\n\t\t * tabindex attribute value that is added to DataTables control elements, allowing\n\t\t * keyboard navigation of the table and its controls.\n\t\t */\n\t\t\"iTabIndex\": 0,\n\t\n\t\t/**\n\t\t * DIV container for the footer scrolling table if scrolling\n\t\t */\n\t\t\"nScrollHead\": null,\n\t\n\t\t/**\n\t\t * DIV container for the footer scrolling table if scrolling\n\t\t */\n\t\t\"nScrollFoot\": null,\n\t\n\t\t/**\n\t\t * Last applied sort\n\t\t *  @type array\n\t\t *  @default []\n\t\t */\n\t\t\"aLastSort\": [],\n\t\n\t\t/**\n\t\t * Stored plug-in instances\n\t\t *  @type object\n\t\t *  @default {}\n\t\t */\n\t\t\"oPlugins\": {},\n\t\n\t\t/**\n\t\t * Function used to get a row's id from the row's data\n\t\t *  @type function\n\t\t *  @default null\n\t\t */\n\t\t\"rowIdFn\": null,\n\t\n\t\t/**\n\t\t * Data location where to store a row's id\n\t\t *  @type string\n\t\t *  @default null\n\t\t */\n\t\t\"rowId\": null\n\t};\n\n\t/**\n\t * Extension object for DataTables that is used to provide all extension\n\t * options.\n\t *\n\t * Note that the `DataTable.ext` object is available through\n\t * `jQuery.fn.dataTable.ext` where it may be accessed and manipulated. 
It is\n\t * also aliased to `jQuery.fn.dataTableExt` for historic reasons.\n\t *  @namespace\n\t *  @extends DataTable.models.ext\n\t */\n\t\n\t\n\t/**\n\t * DataTables extensions\n\t * \n\t * This namespace acts as a collection area for plug-ins that can be used to\n\t * extend DataTables capabilities. Indeed many of the build in methods\n\t * use this method to provide their own capabilities (sorting methods for\n\t * example).\n\t *\n\t * Note that this namespace is aliased to `jQuery.fn.dataTableExt` for legacy\n\t * reasons\n\t *\n\t *  @namespace\n\t */\n\tDataTable.ext = _ext = {\n\t\t/**\n\t\t * Buttons. For use with the Buttons extension for DataTables. This is\n\t\t * defined here so other extensions can define buttons regardless of load\n\t\t * order. It is _not_ used by DataTables core.\n\t\t *\n\t\t *  @type object\n\t\t *  @default {}\n\t\t */\n\t\tbuttons: {},\n\t\n\t\n\t\t/**\n\t\t * Element class names\n\t\t *\n\t\t *  @type object\n\t\t *  @default {}\n\t\t */\n\t\tclasses: {},\n\t\n\t\n\t\t/**\n\t\t * DataTables build type (expanded by the download builder)\n\t\t *\n\t\t *  @type string\n\t\t */\n\t\tbuilder: \"-source-\",\n\t\n\t\n\t\t/**\n\t\t * Error reporting.\n\t\t * \n\t\t * How should DataTables report an error. Can take the value 'alert',\n\t\t * 'throw', 'none' or a function.\n\t\t *\n\t\t *  @type string|function\n\t\t *  @default alert\n\t\t */\n\t\terrMode: \"alert\",\n\t\n\t\n\t\t/**\n\t\t * Feature plug-ins.\n\t\t * \n\t\t * This is an array of objects which describe the feature plug-ins that are\n\t\t * available to DataTables. These feature plug-ins are then available for\n\t\t * use through the `dom` initialisation option.\n\t\t * \n\t\t * Each feature plug-in is described by an object which must have the\n\t\t * following properties:\n\t\t * \n\t\t * * `fnInit` - function that is used to initialise the plug-in,\n\t\t * * `cFeature` - a character so the feature can be enabled by the `dom`\n\t\t *   instillation option. 
This is case sensitive.\n\t\t *\n\t\t * The `fnInit` function has the following input parameters:\n\t\t *\n\t\t * 1. `{object}` DataTables settings object: see\n\t\t *    {@link DataTable.models.oSettings}\n\t\t *\n\t\t * And the following return is expected:\n\t\t * \n\t\t * * {node|null} The element which contains your feature. Note that the\n\t\t *   return may also be void if your plug-in does not require to inject any\n\t\t *   DOM elements into DataTables control (`dom`) - for example this might\n\t\t *   be useful when developing a plug-in which allows table control via\n\t\t *   keyboard entry\n\t\t *\n\t\t *  @type array\n\t\t *\n\t\t *  @example\n\t\t *    $.fn.dataTable.ext.features.push( {\n\t\t *      \"fnInit\": function( oSettings ) {\n\t\t *        return new TableTools( { \"oDTSettings\": oSettings } );\n\t\t *      },\n\t\t *      \"cFeature\": \"T\"\n\t\t *    } );\n\t\t */\n\t\tfeature: [],\n\t\n\t\n\t\t/**\n\t\t * Row searching.\n\t\t * \n\t\t * This method of searching is complimentary to the default type based\n\t\t * searching, and a lot more comprehensive as it allows you complete control\n\t\t * over the searching logic. Each element in this array is a function\n\t\t * (parameters described below) that is called for every row in the table,\n\t\t * and your logic decides if it should be included in the searching data set\n\t\t * or not.\n\t\t *\n\t\t * Searching functions have the following input parameters:\n\t\t *\n\t\t * 1. `{object}` DataTables settings object: see\n\t\t *    {@link DataTable.models.oSettings}\n\t\t * 2. `{array|object}` Data for the row to be processed (same as the\n\t\t *    original format that was passed in as the data source, or an array\n\t\t *    from a DOM data source\n\t\t * 3. 
`{int}` Row index ({@link DataTable.models.oSettings.aoData}), which\n\t\t *    can be useful to retrieve the `TR` element if you need DOM interaction.\n\t\t *\n\t\t * And the following return is expected:\n\t\t *\n\t\t * * {boolean} Include the row in the searched result set (true) or not\n\t\t *   (false)\n\t\t *\n\t\t * Note that as with the main search ability in DataTables, technically this\n\t\t * is \"filtering\", since it is subtractive. However, for consistency in\n\t\t * naming we call it searching here.\n\t\t *\n\t\t *  @type array\n\t\t *  @default []\n\t\t *\n\t\t *  @example\n\t\t *    // The following example shows custom search being applied to the\n\t\t *    // fourth column (i.e. the data[3] index) based on two input values\n\t\t *    // from the end-user, matching the data in a certain range.\n\t\t *    $.fn.dataTable.ext.search.push(\n\t\t *      function( settings, data, dataIndex ) {\n\t\t *        var min = document.getElementById('min').value * 1;\n\t\t *        var max = document.getElementById('max').value * 1;\n\t\t *        var version = data[3] == \"-\" ? 0 : data[3]*1;\n\t\t *\n\t\t *        if ( min == \"\" && max == \"\" ) {\n\t\t *          return true;\n\t\t *        }\n\t\t *        else if ( min == \"\" && version < max ) {\n\t\t *          return true;\n\t\t *        }\n\t\t *        else if ( min < version && \"\" == max ) {\n\t\t *          return true;\n\t\t *        }\n\t\t *        else if ( min < version && version < max ) {\n\t\t *          return true;\n\t\t *        }\n\t\t *        return false;\n\t\t *      }\n\t\t *    );\n\t\t */\n\t\tsearch: [],\n\t\n\t\n\t\t/**\n\t\t * Selector extensions\n\t\t *\n\t\t * The `selector` option can be used to extend the options available for the\n\t\t * selector modifier options (`selector-modifier` object data type) that\n\t\t * each of the three built in selector types offer (row, column and cell +\n\t\t * their plural counterparts). 
For example the Select extension uses this\n\t\t * mechanism to provide an option to select only rows, columns and cells\n\t\t * that have been marked as selected by the end user (`{selected: true}`),\n\t\t * which can be used in conjunction with the existing built in selector\n\t\t * options.\n\t\t *\n\t\t * Each property is an array to which functions can be pushed. The functions\n\t\t * take three attributes:\n\t\t *\n\t\t * * Settings object for the host table\n\t\t * * Options object (`selector-modifier` object type)\n\t\t * * Array of selected item indexes\n\t\t *\n\t\t * The return is an array of the resulting item indexes after the custom\n\t\t * selector has been applied.\n\t\t *\n\t\t *  @type object\n\t\t */\n\t\tselector: {\n\t\t\tcell: [],\n\t\t\tcolumn: [],\n\t\t\trow: []\n\t\t},\n\t\n\t\n\t\t/**\n\t\t * Internal functions, exposed for used in plug-ins.\n\t\t * \n\t\t * Please note that you should not need to use the internal methods for\n\t\t * anything other than a plug-in (and even then, try to avoid if possible).\n\t\t * The internal function may change between releases.\n\t\t *\n\t\t *  @type object\n\t\t *  @default {}\n\t\t */\n\t\tinternal: {},\n\t\n\t\n\t\t/**\n\t\t * Legacy configuration options. Enable and disable legacy options that\n\t\t * are available in DataTables.\n\t\t *\n\t\t *  @type object\n\t\t */\n\t\tlegacy: {\n\t\t\t/**\n\t\t\t * Enable / disable DataTables 1.9 compatible server-side processing\n\t\t\t * requests\n\t\t\t *\n\t\t\t *  @type boolean\n\t\t\t *  @default null\n\t\t\t */\n\t\t\tajax: null\n\t\t},\n\t\n\t\n\t\t/**\n\t\t * Pagination plug-in methods.\n\t\t * \n\t\t * Each entry in this object is a function and defines which buttons should\n\t\t * be shown by the pagination rendering method that is used for the table:\n\t\t * {@link DataTable.ext.renderer.pageButton}. The renderer addresses how the\n\t\t * buttons are displayed in the document, while the functions here tell it\n\t\t * what buttons to display. 
This is done by returning an array of button\n\t\t * descriptions (what each button will do).\n\t\t *\n\t\t * Pagination types (the four built in options and any additional plug-in\n\t\t * options defined here) can be used through the `paginationType`\n\t\t * initialisation parameter.\n\t\t *\n\t\t * The functions defined take two parameters:\n\t\t *\n\t\t * 1. `{int} page` The current page index\n\t\t * 2. `{int} pages` The number of pages in the table\n\t\t *\n\t\t * Each function is expected to return an array where each element of the\n\t\t * array can be one of:\n\t\t *\n\t\t * * `first` - Jump to first page when activated\n\t\t * * `last` - Jump to last page when activated\n\t\t * * `previous` - Show previous page when activated\n\t\t * * `next` - Show next page when activated\n\t\t * * `{int}` - Show page of the index given\n\t\t * * `{array}` - A nested array containing the above elements to add a\n\t\t *   containing 'DIV' element (might be useful for styling).\n\t\t *\n\t\t * Note that DataTables v1.9- used this object slightly differently whereby\n\t\t * an object with two functions would be defined for each plug-in. That\n\t\t * ability is still supported by DataTables 1.10+ to provide backwards\n\t\t * compatibility, but this option of use is now decremented and no longer\n\t\t * documented in DataTables 1.10+.\n\t\t *\n\t\t *  @type object\n\t\t *  @default {}\n\t\t *\n\t\t *  @example\n\t\t *    // Show previous, next and current page buttons only\n\t\t *    $.fn.dataTableExt.oPagination.current = function ( page, pages ) {\n\t\t *      return [ 'previous', page, 'next' ];\n\t\t *    };\n\t\t */\n\t\tpager: {},\n\t\n\t\n\t\trenderer: {\n\t\t\tpageButton: {},\n\t\t\theader: {}\n\t\t},\n\t\n\t\n\t\t/**\n\t\t * Ordering plug-ins - custom data source\n\t\t * \n\t\t * The extension options for ordering of data available here is complimentary\n\t\t * to the default type based ordering that DataTables typically uses. 
It\n\t\t * allows much greater control over the the data that is being used to\n\t\t * order a column, but is necessarily therefore more complex.\n\t\t * \n\t\t * This type of ordering is useful if you want to do ordering based on data\n\t\t * live from the DOM (for example the contents of an 'input' element) rather\n\t\t * than just the static string that DataTables knows of.\n\t\t * \n\t\t * The way these plug-ins work is that you create an array of the values you\n\t\t * wish to be ordering for the column in question and then return that\n\t\t * array. The data in the array much be in the index order of the rows in\n\t\t * the table (not the currently ordering order!). Which order data gathering\n\t\t * function is run here depends on the `dt-init columns.orderDataType`\n\t\t * parameter that is used for the column (if any).\n\t\t *\n\t\t * The functions defined take two parameters:\n\t\t *\n\t\t * 1. `{object}` DataTables settings object: see\n\t\t *    {@link DataTable.models.oSettings}\n\t\t * 2. 
`{int}` Target column index\n\t\t *\n\t\t * Each function is expected to return an array:\n\t\t *\n\t\t * * `{array}` Data for the column to be ordering upon\n\t\t *\n\t\t *  @type array\n\t\t *\n\t\t *  @example\n\t\t *    // Ordering using `input` node values\n\t\t *    $.fn.dataTable.ext.order['dom-text'] = function  ( settings, col )\n\t\t *    {\n\t\t *      return this.api().column( col, {order:'index'} ).nodes().map( function ( td, i ) {\n\t\t *        return $('input', td).val();\n\t\t *      } );\n\t\t *    }\n\t\t */\n\t\torder: {},\n\t\n\t\n\t\t/**\n\t\t * Type based plug-ins.\n\t\t *\n\t\t * Each column in DataTables has a type assigned to it, either by automatic\n\t\t * detection or by direct assignment using the `type` option for the column.\n\t\t * The type of a column will effect how it is ordering and search (plug-ins\n\t\t * can also make use of the column type if required).\n\t\t *\n\t\t * @namespace\n\t\t */\n\t\ttype: {\n\t\t\t/**\n\t\t\t * Type detection functions.\n\t\t\t *\n\t\t\t * The functions defined in this object are used to automatically detect\n\t\t\t * a column's type, making initialisation of DataTables super easy, even\n\t\t\t * when complex data is in the table.\n\t\t\t *\n\t\t\t * The functions defined take two parameters:\n\t\t\t *\n\t\t     *  1. `{*}` Data from the column cell to be analysed\n\t\t     *  2. `{settings}` DataTables settings object. This can be used to\n\t\t     *     perform context specific type detection - for example detection\n\t\t     *     based on language settings such as using a comma for a decimal\n\t\t     *     place. 
Generally speaking the options from the settings will not\n\t\t     *     be required\n\t\t\t *\n\t\t\t * Each function is expected to return:\n\t\t\t *\n\t\t\t * * `{string|null}` Data type detected, or null if unknown (and thus\n\t\t\t *   pass it on to the other type detection functions.\n\t\t\t *\n\t\t\t *  @type array\n\t\t\t *\n\t\t\t *  @example\n\t\t\t *    // Currency type detection plug-in:\n\t\t\t *    $.fn.dataTable.ext.type.detect.push(\n\t\t\t *      function ( data, settings ) {\n\t\t\t *        // Check the numeric part\n\t\t\t *        if ( ! $.isNumeric( data.substring(1) ) ) {\n\t\t\t *          return null;\n\t\t\t *        }\n\t\t\t *\n\t\t\t *        // Check prefixed by currency\n\t\t\t *        if ( data.charAt(0) == '$' || data.charAt(0) == '&pound;' ) {\n\t\t\t *          return 'currency';\n\t\t\t *        }\n\t\t\t *        return null;\n\t\t\t *      }\n\t\t\t *    );\n\t\t\t */\n\t\t\tdetect: [],\n\t\n\t\n\t\t\t/**\n\t\t\t * Type based search formatting.\n\t\t\t *\n\t\t\t * The type based searching functions can be used to pre-format the\n\t\t\t * data to be search on. For example, it can be used to strip HTML\n\t\t\t * tags or to de-format telephone numbers for numeric only searching.\n\t\t\t *\n\t\t\t * Note that is a search is not defined for a column of a given type,\n\t\t\t * no search formatting will be performed.\n\t\t\t * \n\t\t\t * Pre-processing of searching data plug-ins - When you assign the sType\n\t\t\t * for a column (or have it automatically detected for you by DataTables\n\t\t\t * or a type detection plug-in), you will typically be using this for\n\t\t\t * custom sorting, but it can also be used to provide custom searching\n\t\t\t * by allowing you to pre-processing the data and returning the data in\n\t\t\t * the format that should be searched upon. This is done by adding\n\t\t\t * functions this object with a parameter name which matches the sType\n\t\t\t * for that target column. 
This is the corollary of <i>afnSortData</i>\n\t\t\t * for searching data.\n\t\t\t *\n\t\t\t * The functions defined take a single parameter:\n\t\t\t *\n\t\t     *  1. `{*}` Data from the column cell to be prepared for searching\n\t\t\t *\n\t\t\t * Each function is expected to return:\n\t\t\t *\n\t\t\t * * `{string|null}` Formatted string that will be used for the searching.\n\t\t\t *\n\t\t\t *  @type object\n\t\t\t *  @default {}\n\t\t\t *\n\t\t\t *  @example\n\t\t\t *    $.fn.dataTable.ext.type.search['title-numeric'] = function ( d ) {\n\t\t\t *      return d.replace(/\\n/g,\" \").replace( /<.*?>/g, \"\" );\n\t\t\t *    }\n\t\t\t */\n\t\t\tsearch: {},\n\t\n\t\n\t\t\t/**\n\t\t\t * Type based ordering.\n\t\t\t *\n\t\t\t * The column type tells DataTables what ordering to apply to the table\n\t\t\t * when a column is sorted upon. The order for each type that is defined,\n\t\t\t * is defined by the functions available in this object.\n\t\t\t *\n\t\t\t * Each ordering option can be described by three properties added to\n\t\t\t * this object:\n\t\t\t *\n\t\t\t * * `{type}-pre` - Pre-formatting function\n\t\t\t * * `{type}-asc` - Ascending order function\n\t\t\t * * `{type}-desc` - Descending order function\n\t\t\t *\n\t\t\t * All three can be used together, only `{type}-pre` or only\n\t\t\t * `{type}-asc` and `{type}-desc` together. It is generally recommended\n\t\t\t * that only `{type}-pre` is used, as this provides the optimal\n\t\t\t * implementation in terms of speed, although the others are provided\n\t\t\t * for compatibility with existing Javascript sort functions.\n\t\t\t *\n\t\t\t * `{type}-pre`: Functions defined take a single parameter:\n\t\t\t *\n\t\t     *  1. 
`{*}` Data from the column cell to be prepared for ordering\n\t\t\t *\n\t\t\t * And return:\n\t\t\t *\n\t\t\t * * `{*}` Data to be sorted upon\n\t\t\t *\n\t\t\t * `{type}-asc` and `{type}-desc`: Functions are typical Javascript sort\n\t\t\t * functions, taking two parameters:\n\t\t\t *\n\t\t     *  1. `{*}` Data to compare to the second parameter\n\t\t     *  2. `{*}` Data to compare to the first parameter\n\t\t\t *\n\t\t\t * And returning:\n\t\t\t *\n\t\t\t * * `{*}` Ordering match: <0 if first parameter should be sorted lower\n\t\t\t *   than the second parameter, ===0 if the two parameters are equal and\n\t\t\t *   >0 if the first parameter should be sorted height than the second\n\t\t\t *   parameter.\n\t\t\t * \n\t\t\t *  @type object\n\t\t\t *  @default {}\n\t\t\t *\n\t\t\t *  @example\n\t\t\t *    // Numeric ordering of formatted numbers with a pre-formatter\n\t\t\t *    $.extend( $.fn.dataTable.ext.type.order, {\n\t\t\t *      \"string-pre\": function(x) {\n\t\t\t *        a = (a === \"-\" || a === \"\") ? 0 : a.replace( /[^\\d\\-\\.]/g, \"\" );\n\t\t\t *        return parseFloat( a );\n\t\t\t *      }\n\t\t\t *    } );\n\t\t\t *\n\t\t\t *  @example\n\t\t\t *    // Case-sensitive string ordering, with no pre-formatting method\n\t\t\t *    $.extend( $.fn.dataTable.ext.order, {\n\t\t\t *      \"string-case-asc\": function(x,y) {\n\t\t\t *        return ((x < y) ? -1 : ((x > y) ? 1 : 0));\n\t\t\t *      },\n\t\t\t *      \"string-case-desc\": function(x,y) {\n\t\t\t *        return ((x < y) ? 1 : ((x > y) ? 
-1 : 0));\n\t\t\t *      }\n\t\t\t *    } );\n\t\t\t */\n\t\t\torder: {}\n\t\t},\n\t\n\t\t/**\n\t\t * Unique DataTables instance counter\n\t\t *\n\t\t * @type int\n\t\t * @private\n\t\t */\n\t\t_unique: 0,\n\t\n\t\n\t\t//\n\t\t// Depreciated\n\t\t// The following properties are retained for backwards compatiblity only.\n\t\t// The should not be used in new projects and will be removed in a future\n\t\t// version\n\t\t//\n\t\n\t\t/**\n\t\t * Version check function.\n\t\t *  @type function\n\t\t *  @depreciated Since 1.10\n\t\t */\n\t\tfnVersionCheck: DataTable.fnVersionCheck,\n\t\n\t\n\t\t/**\n\t\t * Index for what 'this' index API functions should use\n\t\t *  @type int\n\t\t *  @deprecated Since v1.10\n\t\t */\n\t\tiApiIndex: 0,\n\t\n\t\n\t\t/**\n\t\t * jQuery UI class container\n\t\t *  @type object\n\t\t *  @deprecated Since v1.10\n\t\t */\n\t\toJUIClasses: {},\n\t\n\t\n\t\t/**\n\t\t * Software version\n\t\t *  @type string\n\t\t *  @deprecated Since v1.10\n\t\t */\n\t\tsVersion: DataTable.version\n\t};\n\t\n\t\n\t//\n\t// Backwards compatibility. 
Alias to pre 1.10 Hungarian notation counter parts\n\t//\n\t$.extend( _ext, {\n\t\tafnFiltering: _ext.search,\n\t\taTypes:       _ext.type.detect,\n\t\tofnSearch:    _ext.type.search,\n\t\toSort:        _ext.type.order,\n\t\tafnSortData:  _ext.order,\n\t\taoFeatures:   _ext.feature,\n\t\toApi:         _ext.internal,\n\t\toStdClasses:  _ext.classes,\n\t\toPagination:  _ext.pager\n\t} );\n\t\n\t\n\t$.extend( DataTable.ext.classes, {\n\t\t\"sTable\": \"dataTable\",\n\t\t\"sNoFooter\": \"no-footer\",\n\t\n\t\t/* Paging buttons */\n\t\t\"sPageButton\": \"paginate_button\",\n\t\t\"sPageButtonActive\": \"current\",\n\t\t\"sPageButtonDisabled\": \"disabled\",\n\t\n\t\t/* Striping classes */\n\t\t\"sStripeOdd\": \"odd\",\n\t\t\"sStripeEven\": \"even\",\n\t\n\t\t/* Empty row */\n\t\t\"sRowEmpty\": \"dataTables_empty\",\n\t\n\t\t/* Features */\n\t\t\"sWrapper\": \"dataTables_wrapper\",\n\t\t\"sFilter\": \"dataTables_filter\",\n\t\t\"sInfo\": \"dataTables_info\",\n\t\t\"sPaging\": \"dataTables_paginate paging_\", /* Note that the type is postfixed */\n\t\t\"sLength\": \"dataTables_length\",\n\t\t\"sProcessing\": \"dataTables_processing\",\n\t\n\t\t/* Sorting */\n\t\t\"sSortAsc\": \"sorting_asc\",\n\t\t\"sSortDesc\": \"sorting_desc\",\n\t\t\"sSortable\": \"sorting\", /* Sortable in both directions */\n\t\t\"sSortableAsc\": \"sorting_asc_disabled\",\n\t\t\"sSortableDesc\": \"sorting_desc_disabled\",\n\t\t\"sSortableNone\": \"sorting_disabled\",\n\t\t\"sSortColumn\": \"sorting_\", /* Note that an int is postfixed for the sorting order */\n\t\n\t\t/* Filtering */\n\t\t\"sFilterInput\": \"\",\n\t\n\t\t/* Page length */\n\t\t\"sLengthSelect\": \"\",\n\t\n\t\t/* Scrolling */\n\t\t\"sScrollWrapper\": \"dataTables_scroll\",\n\t\t\"sScrollHead\": \"dataTables_scrollHead\",\n\t\t\"sScrollHeadInner\": \"dataTables_scrollHeadInner\",\n\t\t\"sScrollBody\": \"dataTables_scrollBody\",\n\t\t\"sScrollFoot\": \"dataTables_scrollFoot\",\n\t\t\"sScrollFootInner\": 
\"dataTables_scrollFootInner\",\n\t\n\t\t/* Misc */\n\t\t\"sHeaderTH\": \"\",\n\t\t\"sFooterTH\": \"\",\n\t\n\t\t// Deprecated\n\t\t\"sSortJUIAsc\": \"\",\n\t\t\"sSortJUIDesc\": \"\",\n\t\t\"sSortJUI\": \"\",\n\t\t\"sSortJUIAscAllowed\": \"\",\n\t\t\"sSortJUIDescAllowed\": \"\",\n\t\t\"sSortJUIWrapper\": \"\",\n\t\t\"sSortIcon\": \"\",\n\t\t\"sJUIHeader\": \"\",\n\t\t\"sJUIFooter\": \"\"\n\t} );\n\t\n\t\n\t(function() {\n\t\n\t// Reused strings for better compression. Closure compiler appears to have a\n\t// weird edge case where it is trying to expand strings rather than use the\n\t// variable version. This results in about 200 bytes being added, for very\n\t// little preference benefit since it this run on script load only.\n\tvar _empty = '';\n\t_empty = '';\n\t\n\tvar _stateDefault = _empty + 'ui-state-default';\n\tvar _sortIcon     = _empty + 'css_right ui-icon ui-icon-';\n\tvar _headerFooter = _empty + 'fg-toolbar ui-toolbar ui-widget-header ui-helper-clearfix';\n\t\n\t$.extend( DataTable.ext.oJUIClasses, DataTable.ext.classes, {\n\t\t/* Full numbers paging buttons */\n\t\t\"sPageButton\":         \"fg-button ui-button \"+_stateDefault,\n\t\t\"sPageButtonActive\":   \"ui-state-disabled\",\n\t\t\"sPageButtonDisabled\": \"ui-state-disabled\",\n\t\n\t\t/* Features */\n\t\t\"sPaging\": \"dataTables_paginate fg-buttonset ui-buttonset fg-buttonset-multi \"+\n\t\t\t\"ui-buttonset-multi paging_\", /* Note that the type is postfixed */\n\t\n\t\t/* Sorting */\n\t\t\"sSortAsc\":            _stateDefault+\" sorting_asc\",\n\t\t\"sSortDesc\":           _stateDefault+\" sorting_desc\",\n\t\t\"sSortable\":           _stateDefault+\" sorting\",\n\t\t\"sSortableAsc\":        _stateDefault+\" sorting_asc_disabled\",\n\t\t\"sSortableDesc\":       _stateDefault+\" sorting_desc_disabled\",\n\t\t\"sSortableNone\":       _stateDefault+\" sorting_disabled\",\n\t\t\"sSortJUIAsc\":         _sortIcon+\"triangle-1-n\",\n\t\t\"sSortJUIDesc\":        
_sortIcon+\"triangle-1-s\",\n\t\t\"sSortJUI\":            _sortIcon+\"carat-2-n-s\",\n\t\t\"sSortJUIAscAllowed\":  _sortIcon+\"carat-1-n\",\n\t\t\"sSortJUIDescAllowed\": _sortIcon+\"carat-1-s\",\n\t\t\"sSortJUIWrapper\":     \"DataTables_sort_wrapper\",\n\t\t\"sSortIcon\":           \"DataTables_sort_icon\",\n\t\n\t\t/* Scrolling */\n\t\t\"sScrollHead\": \"dataTables_scrollHead \"+_stateDefault,\n\t\t\"sScrollFoot\": \"dataTables_scrollFoot \"+_stateDefault,\n\t\n\t\t/* Misc */\n\t\t\"sHeaderTH\":  _stateDefault,\n\t\t\"sFooterTH\":  _stateDefault,\n\t\t\"sJUIHeader\": _headerFooter+\" ui-corner-tl ui-corner-tr\",\n\t\t\"sJUIFooter\": _headerFooter+\" ui-corner-bl ui-corner-br\"\n\t} );\n\t\n\t}());\n\t\n\t\n\t\n\tvar extPagination = DataTable.ext.pager;\n\t\n\tfunction _numbers ( page, pages ) {\n\t\tvar\n\t\t\tnumbers = [],\n\t\t\tbuttons = extPagination.numbers_length,\n\t\t\thalf = Math.floor( buttons / 2 ),\n\t\t\ti = 1;\n\t\n\t\tif ( pages <= buttons ) {\n\t\t\tnumbers = _range( 0, pages );\n\t\t}\n\t\telse if ( page <= half ) {\n\t\t\tnumbers = _range( 0, buttons-2 );\n\t\t\tnumbers.push( 'ellipsis' );\n\t\t\tnumbers.push( pages-1 );\n\t\t}\n\t\telse if ( page >= pages - 1 - half ) {\n\t\t\tnumbers = _range( pages-(buttons-2), pages );\n\t\t\tnumbers.splice( 0, 0, 'ellipsis' ); // no unshift in ie6\n\t\t\tnumbers.splice( 0, 0, 0 );\n\t\t}\n\t\telse {\n\t\t\tnumbers = _range( page-half+2, page+half-1 );\n\t\t\tnumbers.push( 'ellipsis' );\n\t\t\tnumbers.push( pages-1 );\n\t\t\tnumbers.splice( 0, 0, 'ellipsis' );\n\t\t\tnumbers.splice( 0, 0, 0 );\n\t\t}\n\t\n\t\tnumbers.DT_el = 'span';\n\t\treturn numbers;\n\t}\n\t\n\t\n\t$.extend( extPagination, {\n\t\tsimple: function ( page, pages ) {\n\t\t\treturn [ 'previous', 'next' ];\n\t\t},\n\t\n\t\tfull: function ( page, pages ) {\n\t\t\treturn [  'first', 'previous', 'next', 'last' ];\n\t\t},\n\t\n\t\tnumbers: function ( page, pages ) {\n\t\t\treturn [ _numbers(page, pages) ];\n\t\t},\n\t\n\t\tsimple_numbers: 
function ( page, pages ) {\n\t\t\treturn [ 'previous', _numbers(page, pages), 'next' ];\n\t\t},\n\t\n\t\tfull_numbers: function ( page, pages ) {\n\t\t\treturn [ 'first', 'previous', _numbers(page, pages), 'next', 'last' ];\n\t\t},\n\t\n\t\t// For testing and plug-ins to use\n\t\t_numbers: _numbers,\n\t\n\t\t// Number of number buttons (including ellipsis) to show. _Must be odd!_\n\t\tnumbers_length: 7\n\t} );\n\t\n\t\n\t$.extend( true, DataTable.ext.renderer, {\n\t\tpageButton: {\n\t\t\t_: function ( settings, host, idx, buttons, page, pages ) {\n\t\t\t\tvar classes = settings.oClasses;\n\t\t\t\tvar lang = settings.oLanguage.oPaginate;\n\t\t\t\tvar aria = settings.oLanguage.oAria.paginate || {};\n\t\t\t\tvar btnDisplay, btnClass, counter=0;\n\t\n\t\t\t\tvar attach = function( container, buttons ) {\n\t\t\t\t\tvar i, ien, node, button;\n\t\t\t\t\tvar clickHandler = function ( e ) {\n\t\t\t\t\t\t_fnPageChange( settings, e.data.action, true );\n\t\t\t\t\t};\n\t\n\t\t\t\t\tfor ( i=0, ien=buttons.length ; i<ien ; i++ ) {\n\t\t\t\t\t\tbutton = buttons[i];\n\t\n\t\t\t\t\t\tif ( $.isArray( button ) ) {\n\t\t\t\t\t\t\tvar inner = $( '<'+(button.DT_el || 'div')+'/>' )\n\t\t\t\t\t\t\t\t.appendTo( container );\n\t\t\t\t\t\t\tattach( inner, button );\n\t\t\t\t\t\t}\n\t\t\t\t\t\telse {\n\t\t\t\t\t\t\tbtnDisplay = null;\n\t\t\t\t\t\t\tbtnClass = '';\n\t\n\t\t\t\t\t\t\tswitch ( button ) {\n\t\t\t\t\t\t\t\tcase 'ellipsis':\n\t\t\t\t\t\t\t\t\tcontainer.append('<span class=\"ellipsis\">&#x2026;</span>');\n\t\t\t\t\t\t\t\t\tbreak;\n\t\n\t\t\t\t\t\t\t\tcase 'first':\n\t\t\t\t\t\t\t\t\tbtnDisplay = lang.sFirst;\n\t\t\t\t\t\t\t\t\tbtnClass = button + (page > 0 ?\n\t\t\t\t\t\t\t\t\t\t'' : ' '+classes.sPageButtonDisabled);\n\t\t\t\t\t\t\t\t\tbreak;\n\t\n\t\t\t\t\t\t\t\tcase 'previous':\n\t\t\t\t\t\t\t\t\tbtnDisplay = lang.sPrevious;\n\t\t\t\t\t\t\t\t\tbtnClass = button + (page > 0 ?\n\t\t\t\t\t\t\t\t\t\t'' : ' 
'+classes.sPageButtonDisabled);\n\t\t\t\t\t\t\t\t\tbreak;\n\t\n\t\t\t\t\t\t\t\tcase 'next':\n\t\t\t\t\t\t\t\t\tbtnDisplay = lang.sNext;\n\t\t\t\t\t\t\t\t\tbtnClass = button + (page < pages-1 ?\n\t\t\t\t\t\t\t\t\t\t'' : ' '+classes.sPageButtonDisabled);\n\t\t\t\t\t\t\t\t\tbreak;\n\t\n\t\t\t\t\t\t\t\tcase 'last':\n\t\t\t\t\t\t\t\t\tbtnDisplay = lang.sLast;\n\t\t\t\t\t\t\t\t\tbtnClass = button + (page < pages-1 ?\n\t\t\t\t\t\t\t\t\t\t'' : ' '+classes.sPageButtonDisabled);\n\t\t\t\t\t\t\t\t\tbreak;\n\t\n\t\t\t\t\t\t\t\tdefault:\n\t\t\t\t\t\t\t\t\tbtnDisplay = button + 1;\n\t\t\t\t\t\t\t\t\tbtnClass = page === button ?\n\t\t\t\t\t\t\t\t\t\tclasses.sPageButtonActive : '';\n\t\t\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t\t}\n\t\n\t\t\t\t\t\t\tif ( btnDisplay !== null ) {\n\t\t\t\t\t\t\t\tnode = $('<a>', {\n\t\t\t\t\t\t\t\t\t\t'class': classes.sPageButton+' '+btnClass,\n\t\t\t\t\t\t\t\t\t\t'aria-controls': settings.sTableId,\n\t\t\t\t\t\t\t\t\t\t'aria-label': aria[ button ],\n\t\t\t\t\t\t\t\t\t\t'data-dt-idx': counter,\n\t\t\t\t\t\t\t\t\t\t'tabindex': settings.iTabIndex,\n\t\t\t\t\t\t\t\t\t\t'id': idx === 0 && typeof button === 'string' ?\n\t\t\t\t\t\t\t\t\t\t\tsettings.sTableId +'_'+ button :\n\t\t\t\t\t\t\t\t\t\t\tnull\n\t\t\t\t\t\t\t\t\t} )\n\t\t\t\t\t\t\t\t\t.html( btnDisplay )\n\t\t\t\t\t\t\t\t\t.appendTo( container );\n\t\n\t\t\t\t\t\t\t\t_fnBindAction(\n\t\t\t\t\t\t\t\t\tnode, {action: button}, clickHandler\n\t\t\t\t\t\t\t\t);\n\t\n\t\t\t\t\t\t\t\tcounter++;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t};\n\t\n\t\t\t\t// IE9 throws an 'unknown error' if document.activeElement is used\n\t\t\t\t// inside an iframe or frame. Try / catch the error. Not good for\n\t\t\t\t// accessibility, but neither are frames.\n\t\t\t\tvar activeEl;\n\t\n\t\t\t\ttry {\n\t\t\t\t\t// Because this approach is destroying and recreating the paging\n\t\t\t\t\t// elements, focus is lost on the select button which is bad for\n\t\t\t\t\t// accessibility. 
So we want to restore focus once the draw has\n\t\t\t\t\t// completed\n\t\t\t\t\tactiveEl = $(host).find(document.activeElement).data('dt-idx');\n\t\t\t\t}\n\t\t\t\tcatch (e) {}\n\t\n\t\t\t\tattach( $(host).empty(), buttons );\n\t\n\t\t\t\tif ( activeEl ) {\n\t\t\t\t\t$(host).find( '[data-dt-idx='+activeEl+']' ).focus();\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t} );\n\t\n\t\n\t\n\t// Built in type detection. See model.ext.aTypes for information about\n\t// what is required from this methods.\n\t$.extend( DataTable.ext.type.detect, [\n\t\t// Plain numbers - first since V8 detects some plain numbers as dates\n\t\t// e.g. Date.parse('55') (but not all, e.g. Date.parse('22')...).\n\t\tfunction ( d, settings )\n\t\t{\n\t\t\tvar decimal = settings.oLanguage.sDecimal;\n\t\t\treturn _isNumber( d, decimal ) ? 'num'+decimal : null;\n\t\t},\n\t\n\t\t// Dates (only those recognised by the browser's Date.parse)\n\t\tfunction ( d, settings )\n\t\t{\n\t\t\t// V8 will remove any unknown characters at the start and end of the\n\t\t\t// expression, leading to false matches such as `$245.12` or `10%` being\n\t\t\t// a valid date. See forum thread 18941 for detail.\n\t\t\tif ( d && !(d instanceof Date) && ( ! _re_date_start.test(d) || ! _re_date_end.test(d) ) ) {\n\t\t\t\treturn null;\n\t\t\t}\n\t\t\tvar parsed = Date.parse(d);\n\t\t\treturn (parsed !== null && !isNaN(parsed)) || _empty(d) ? 'date' : null;\n\t\t},\n\t\n\t\t// Formatted numbers\n\t\tfunction ( d, settings )\n\t\t{\n\t\t\tvar decimal = settings.oLanguage.sDecimal;\n\t\t\treturn _isNumber( d, decimal, true ) ? 'num-fmt'+decimal : null;\n\t\t},\n\t\n\t\t// HTML numeric\n\t\tfunction ( d, settings )\n\t\t{\n\t\t\tvar decimal = settings.oLanguage.sDecimal;\n\t\t\treturn _htmlNumeric( d, decimal ) ? 'html-num'+decimal : null;\n\t\t},\n\t\n\t\t// HTML numeric, formatted\n\t\tfunction ( d, settings )\n\t\t{\n\t\t\tvar decimal = settings.oLanguage.sDecimal;\n\t\t\treturn _htmlNumeric( d, decimal, true ) ? 
'html-num-fmt'+decimal : null;\n\t\t},\n\t\n\t\t// HTML (this is strict checking - there must be html)\n\t\tfunction ( d, settings )\n\t\t{\n\t\t\treturn _empty( d ) || (typeof d === 'string' && d.indexOf('<') !== -1) ?\n\t\t\t\t'html' : null;\n\t\t}\n\t] );\n\t\n\t\n\t\n\t// Filter formatting functions. See model.ext.ofnSearch for information about\n\t// what is required from these methods.\n\t// \n\t// Note that additional search methods are added for the html numbers and\n\t// html formatted numbers by `_addNumericSort()` when we know what the decimal\n\t// place is\n\t\n\t\n\t$.extend( DataTable.ext.type.search, {\n\t\thtml: function ( data ) {\n\t\t\treturn _empty(data) ?\n\t\t\t\tdata :\n\t\t\t\ttypeof data === 'string' ?\n\t\t\t\t\tdata\n\t\t\t\t\t\t.replace( _re_new_lines, \" \" )\n\t\t\t\t\t\t.replace( _re_html, \"\" ) :\n\t\t\t\t\t'';\n\t\t},\n\t\n\t\tstring: function ( data ) {\n\t\t\treturn _empty(data) ?\n\t\t\t\tdata :\n\t\t\t\ttypeof data === 'string' ?\n\t\t\t\t\tdata.replace( _re_new_lines, \" \" ) :\n\t\t\t\t\tdata;\n\t\t}\n\t} );\n\t\n\t\n\t\n\tvar __numericReplace = function ( d, decimalPlace, re1, re2 ) {\n\t\tif ( d !== 0 && (!d || d === '-') ) {\n\t\t\treturn -Infinity;\n\t\t}\n\t\n\t\t// If a decimal place other than `.` is used, it needs to be given to the\n\t\t// function so we can detect it and replace with a `.` which is the only\n\t\t// decimal place Javascript recognises - it is not locale aware.\n\t\tif ( decimalPlace ) {\n\t\t\td = _numToDecimal( d, decimalPlace );\n\t\t}\n\t\n\t\tif ( d.replace ) {\n\t\t\tif ( re1 ) {\n\t\t\t\td = d.replace( re1, '' );\n\t\t\t}\n\t\n\t\t\tif ( re2 ) {\n\t\t\t\td = d.replace( re2, '' );\n\t\t\t}\n\t\t}\n\t\n\t\treturn d * 1;\n\t};\n\t\n\t\n\t// Add the numeric 'deformatting' functions for sorting and search. 
This is done\n\t// in a function to provide an easy ability for the language options to add\n\t// additional methods if a non-period decimal place is used.\n\tfunction _addNumericSort ( decimalPlace ) {\n\t\t$.each(\n\t\t\t{\n\t\t\t\t// Plain numbers\n\t\t\t\t\"num\": function ( d ) {\n\t\t\t\t\treturn __numericReplace( d, decimalPlace );\n\t\t\t\t},\n\t\n\t\t\t\t// Formatted numbers\n\t\t\t\t\"num-fmt\": function ( d ) {\n\t\t\t\t\treturn __numericReplace( d, decimalPlace, _re_formatted_numeric );\n\t\t\t\t},\n\t\n\t\t\t\t// HTML numeric\n\t\t\t\t\"html-num\": function ( d ) {\n\t\t\t\t\treturn __numericReplace( d, decimalPlace, _re_html );\n\t\t\t\t},\n\t\n\t\t\t\t// HTML numeric, formatted\n\t\t\t\t\"html-num-fmt\": function ( d ) {\n\t\t\t\t\treturn __numericReplace( d, decimalPlace, _re_html, _re_formatted_numeric );\n\t\t\t\t}\n\t\t\t},\n\t\t\tfunction ( key, fn ) {\n\t\t\t\t// Add the ordering method\n\t\t\t\t_ext.type.order[ key+decimalPlace+'-pre' ] = fn;\n\t\n\t\t\t\t// For HTML types add a search formatter that will strip the HTML\n\t\t\t\tif ( key.match(/^html\\-/) ) {\n\t\t\t\t\t_ext.type.search[ key+decimalPlace ] = _ext.type.search.html;\n\t\t\t\t}\n\t\t\t}\n\t\t);\n\t}\n\t\n\t\n\t// Default sort methods\n\t$.extend( _ext.type.order, {\n\t\t// Dates\n\t\t\"date-pre\": function ( d ) {\n\t\t\treturn Date.parse( d ) || 0;\n\t\t},\n\t\n\t\t// html\n\t\t\"html-pre\": function ( a ) {\n\t\t\treturn _empty(a) ?\n\t\t\t\t'' :\n\t\t\t\ta.replace ?\n\t\t\t\t\ta.replace( /<.*?>/g, \"\" ).toLowerCase() :\n\t\t\t\t\ta+'';\n\t\t},\n\t\n\t\t// string\n\t\t\"string-pre\": function ( a ) {\n\t\t\t// This is a little complex, but faster than always calling toString,\n\t\t\t// http://jsperf.com/tostring-v-check\n\t\t\treturn _empty(a) ?\n\t\t\t\t'' :\n\t\t\t\ttypeof a === 'string' ?\n\t\t\t\t\ta.toLowerCase() :\n\t\t\t\t\t! 
a.toString ?\n\t\t\t\t\t\t'' :\n\t\t\t\t\t\ta.toString();\n\t\t},\n\t\n\t\t// string-asc and -desc are retained only for compatibility with the old\n\t\t// sort methods\n\t\t\"string-asc\": function ( x, y ) {\n\t\t\treturn ((x < y) ? -1 : ((x > y) ? 1 : 0));\n\t\t},\n\t\n\t\t\"string-desc\": function ( x, y ) {\n\t\t\treturn ((x < y) ? 1 : ((x > y) ? -1 : 0));\n\t\t}\n\t} );\n\t\n\t\n\t// Numeric sorting types - order doesn't matter here\n\t_addNumericSort( '' );\n\t\n\t\n\t$.extend( true, DataTable.ext.renderer, {\n\t\theader: {\n\t\t\t_: function ( settings, cell, column, classes ) {\n\t\t\t\t// No additional mark-up required\n\t\t\t\t// Attach a sort listener to update on sort - note that using the\n\t\t\t\t// `DT` namespace will allow the event to be removed automatically\n\t\t\t\t// on destroy, while the `dt` namespaced event is the one we are\n\t\t\t\t// listening for\n\t\t\t\t$(settings.nTable).on( 'order.dt.DT', function ( e, ctx, sorting, columns ) {\n\t\t\t\t\tif ( settings !== ctx ) { // need to check this this is the host\n\t\t\t\t\t\treturn;               // table, not a nested one\n\t\t\t\t\t}\n\t\n\t\t\t\t\tvar colIdx = column.idx;\n\t\n\t\t\t\t\tcell\n\t\t\t\t\t\t.removeClass(\n\t\t\t\t\t\t\tcolumn.sSortingClass +' '+\n\t\t\t\t\t\t\tclasses.sSortAsc +' '+\n\t\t\t\t\t\t\tclasses.sSortDesc\n\t\t\t\t\t\t)\n\t\t\t\t\t\t.addClass( columns[ colIdx ] == 'asc' ?\n\t\t\t\t\t\t\tclasses.sSortAsc : columns[ colIdx ] == 'desc' ?\n\t\t\t\t\t\t\t\tclasses.sSortDesc :\n\t\t\t\t\t\t\t\tcolumn.sSortingClass\n\t\t\t\t\t\t);\n\t\t\t\t} );\n\t\t\t},\n\t\n\t\t\tjqueryui: function ( settings, cell, column, classes ) {\n\t\t\t\t$('<div/>')\n\t\t\t\t\t.addClass( classes.sSortJUIWrapper )\n\t\t\t\t\t.append( cell.contents() )\n\t\t\t\t\t.append( $('<span/>')\n\t\t\t\t\t\t.addClass( classes.sSortIcon+' '+column.sSortingClassJUI )\n\t\t\t\t\t)\n\t\t\t\t\t.appendTo( cell );\n\t\n\t\t\t\t// Attach a sort listener to update on sort\n\t\t\t\t$(settings.nTable).on( 'order.dt.DT', 
function ( e, ctx, sorting, columns ) {\n\t\t\t\t\tif ( settings !== ctx ) {\n\t\t\t\t\t\treturn;\n\t\t\t\t\t}\n\t\n\t\t\t\t\tvar colIdx = column.idx;\n\t\n\t\t\t\t\tcell\n\t\t\t\t\t\t.removeClass( classes.sSortAsc +\" \"+classes.sSortDesc )\n\t\t\t\t\t\t.addClass( columns[ colIdx ] == 'asc' ?\n\t\t\t\t\t\t\tclasses.sSortAsc : columns[ colIdx ] == 'desc' ?\n\t\t\t\t\t\t\t\tclasses.sSortDesc :\n\t\t\t\t\t\t\t\tcolumn.sSortingClass\n\t\t\t\t\t\t);\n\t\n\t\t\t\t\tcell\n\t\t\t\t\t\t.find( 'span.'+classes.sSortIcon )\n\t\t\t\t\t\t.removeClass(\n\t\t\t\t\t\t\tclasses.sSortJUIAsc +\" \"+\n\t\t\t\t\t\t\tclasses.sSortJUIDesc +\" \"+\n\t\t\t\t\t\t\tclasses.sSortJUI +\" \"+\n\t\t\t\t\t\t\tclasses.sSortJUIAscAllowed +\" \"+\n\t\t\t\t\t\t\tclasses.sSortJUIDescAllowed\n\t\t\t\t\t\t)\n\t\t\t\t\t\t.addClass( columns[ colIdx ] == 'asc' ?\n\t\t\t\t\t\t\tclasses.sSortJUIAsc : columns[ colIdx ] == 'desc' ?\n\t\t\t\t\t\t\t\tclasses.sSortJUIDesc :\n\t\t\t\t\t\t\t\tcolumn.sSortingClassJUI\n\t\t\t\t\t\t);\n\t\t\t\t} );\n\t\t\t}\n\t\t}\n\t} );\n\t\n\t/*\n\t * Public helper functions. These aren't used internally by DataTables, or\n\t * called by any of the options passed into DataTables, but they can be used\n\t * externally by developers working with DataTables. They are helper functions\n\t * to make working with DataTables a little bit easier.\n\t */\n\t\n\tvar __htmlEscapeEntities = function ( d ) {\n\t\treturn typeof d === 'string' ?\n\t\t\td.replace(/</g, '&lt;').replace(/>/g, '&gt;').replace(/\"/g, '&quot;') :\n\t\t\td;\n\t};\n\t\n\t/**\n\t * Helpers for `columns.render`.\n\t *\n\t * The options defined here can be used with the `columns.render` initialisation\n\t * option to provide a display renderer. 
The following functions are defined:\n\t *\n\t * * `number` - Will format numeric data (defined by `columns.data`) for\n\t *   display, retaining the original unformatted data for sorting and filtering.\n\t *   It takes 5 parameters:\n\t *   * `string` - Thousands grouping separator\n\t *   * `string` - Decimal point indicator\n\t *   * `integer` - Number of decimal points to show\n\t *   * `string` (optional) - Prefix.\n\t *   * `string` (optional) - Postfix (/suffix).\n\t * * `text` - Escape HTML to help prevent XSS attacks. It has no optional\n\t *   parameters.\n\t *\n\t * @example\n\t *   // Column definition using the number renderer\n\t *   {\n\t *     data: \"salary\",\n\t *     render: $.fn.dataTable.render.number( '\\'', '.', 0, '$' )\n\t *   }\n\t *\n\t * @namespace\n\t */\n\tDataTable.render = {\n\t\tnumber: function ( thousands, decimal, precision, prefix, postfix ) {\n\t\t\treturn {\n\t\t\t\tdisplay: function ( d ) {\n\t\t\t\t\tif ( typeof d !== 'number' && typeof d !== 'string' ) {\n\t\t\t\t\t\treturn d;\n\t\t\t\t\t}\n\t\n\t\t\t\t\tvar negative = d < 0 ? 
'-' : '';\n\t\t\t\t\tvar flo = parseFloat( d );\n\t\n\t\t\t\t\t// If NaN then there isn't much formatting that we can do - just\n\t\t\t\t\t// return immediately, escaping any HTML (this was supposed to\n\t\t\t\t\t// be a number after all)\n\t\t\t\t\tif ( isNaN( flo ) ) {\n\t\t\t\t\t\treturn __htmlEscapeEntities( d );\n\t\t\t\t\t}\n\t\n\t\t\t\t\td = Math.abs( flo );\n\t\n\t\t\t\t\tvar intPart = parseInt( d, 10 );\n\t\t\t\t\tvar floatPart = precision ?\n\t\t\t\t\t\tdecimal+(d - intPart).toFixed( precision ).substring( 2 ):\n\t\t\t\t\t\t'';\n\t\n\t\t\t\t\treturn negative + (prefix||'') +\n\t\t\t\t\t\tintPart.toString().replace(\n\t\t\t\t\t\t\t/\\B(?=(\\d{3})+(?!\\d))/g, thousands\n\t\t\t\t\t\t) +\n\t\t\t\t\t\tfloatPart +\n\t\t\t\t\t\t(postfix||'');\n\t\t\t\t}\n\t\t\t};\n\t\t},\n\t\n\t\ttext: function () {\n\t\t\treturn {\n\t\t\t\tdisplay: __htmlEscapeEntities\n\t\t\t};\n\t\t}\n\t};\n\t\n\t\n\t/*\n\t * This is really a good bit rubbish this method of exposing the internal methods\n\t * publicly... - To be fixed in 2.0 using methods on the prototype\n\t */\n\t\n\t\n\t/**\n\t * Create a wrapper function for exporting an internal functions to an external API.\n\t *  @param {string} fn API function name\n\t *  @returns {function} wrapped function\n\t *  @memberof DataTable#internal\n\t */\n\tfunction _fnExternApiFunc (fn)\n\t{\n\t\treturn function() {\n\t\t\tvar args = [_fnSettingsFromNode( this[DataTable.ext.iApiIndex] )].concat(\n\t\t\t\tArray.prototype.slice.call(arguments)\n\t\t\t);\n\t\t\treturn DataTable.ext.internal[fn].apply( this, args );\n\t\t};\n\t}\n\t\n\t\n\t/**\n\t * Reference to internal functions for use by plug-in developers. Note that\n\t * these methods are references to internal functions and are considered to be\n\t * private. 
If you use these methods, be aware that they are liable to change\n\t * between versions.\n\t *  @namespace\n\t */\n\t$.extend( DataTable.ext.internal, {\n\t\t_fnExternApiFunc: _fnExternApiFunc,\n\t\t_fnBuildAjax: _fnBuildAjax,\n\t\t_fnAjaxUpdate: _fnAjaxUpdate,\n\t\t_fnAjaxParameters: _fnAjaxParameters,\n\t\t_fnAjaxUpdateDraw: _fnAjaxUpdateDraw,\n\t\t_fnAjaxDataSrc: _fnAjaxDataSrc,\n\t\t_fnAddColumn: _fnAddColumn,\n\t\t_fnColumnOptions: _fnColumnOptions,\n\t\t_fnAdjustColumnSizing: _fnAdjustColumnSizing,\n\t\t_fnVisibleToColumnIndex: _fnVisibleToColumnIndex,\n\t\t_fnColumnIndexToVisible: _fnColumnIndexToVisible,\n\t\t_fnVisbleColumns: _fnVisbleColumns,\n\t\t_fnGetColumns: _fnGetColumns,\n\t\t_fnColumnTypes: _fnColumnTypes,\n\t\t_fnApplyColumnDefs: _fnApplyColumnDefs,\n\t\t_fnHungarianMap: _fnHungarianMap,\n\t\t_fnCamelToHungarian: _fnCamelToHungarian,\n\t\t_fnLanguageCompat: _fnLanguageCompat,\n\t\t_fnBrowserDetect: _fnBrowserDetect,\n\t\t_fnAddData: _fnAddData,\n\t\t_fnAddTr: _fnAddTr,\n\t\t_fnNodeToDataIndex: _fnNodeToDataIndex,\n\t\t_fnNodeToColumnIndex: _fnNodeToColumnIndex,\n\t\t_fnGetCellData: _fnGetCellData,\n\t\t_fnSetCellData: _fnSetCellData,\n\t\t_fnSplitObjNotation: _fnSplitObjNotation,\n\t\t_fnGetObjectDataFn: _fnGetObjectDataFn,\n\t\t_fnSetObjectDataFn: _fnSetObjectDataFn,\n\t\t_fnGetDataMaster: _fnGetDataMaster,\n\t\t_fnClearTable: _fnClearTable,\n\t\t_fnDeleteIndex: _fnDeleteIndex,\n\t\t_fnInvalidate: _fnInvalidate,\n\t\t_fnGetRowElements: _fnGetRowElements,\n\t\t_fnCreateTr: _fnCreateTr,\n\t\t_fnBuildHead: _fnBuildHead,\n\t\t_fnDrawHead: _fnDrawHead,\n\t\t_fnDraw: _fnDraw,\n\t\t_fnReDraw: _fnReDraw,\n\t\t_fnAddOptionsHtml: _fnAddOptionsHtml,\n\t\t_fnDetectHeader: _fnDetectHeader,\n\t\t_fnGetUniqueThs: _fnGetUniqueThs,\n\t\t_fnFeatureHtmlFilter: _fnFeatureHtmlFilter,\n\t\t_fnFilterComplete: _fnFilterComplete,\n\t\t_fnFilterCustom: _fnFilterCustom,\n\t\t_fnFilterColumn: _fnFilterColumn,\n\t\t_fnFilter: _fnFilter,\n\t\t_fnFilterCreateSearch: 
_fnFilterCreateSearch,\n\t\t_fnEscapeRegex: _fnEscapeRegex,\n\t\t_fnFilterData: _fnFilterData,\n\t\t_fnFeatureHtmlInfo: _fnFeatureHtmlInfo,\n\t\t_fnUpdateInfo: _fnUpdateInfo,\n\t\t_fnInfoMacros: _fnInfoMacros,\n\t\t_fnInitialise: _fnInitialise,\n\t\t_fnInitComplete: _fnInitComplete,\n\t\t_fnLengthChange: _fnLengthChange,\n\t\t_fnFeatureHtmlLength: _fnFeatureHtmlLength,\n\t\t_fnFeatureHtmlPaginate: _fnFeatureHtmlPaginate,\n\t\t_fnPageChange: _fnPageChange,\n\t\t_fnFeatureHtmlProcessing: _fnFeatureHtmlProcessing,\n\t\t_fnProcessingDisplay: _fnProcessingDisplay,\n\t\t_fnFeatureHtmlTable: _fnFeatureHtmlTable,\n\t\t_fnScrollDraw: _fnScrollDraw,\n\t\t_fnApplyToChildren: _fnApplyToChildren,\n\t\t_fnCalculateColumnWidths: _fnCalculateColumnWidths,\n\t\t_fnThrottle: _fnThrottle,\n\t\t_fnConvertToWidth: _fnConvertToWidth,\n\t\t_fnGetWidestNode: _fnGetWidestNode,\n\t\t_fnGetMaxLenString: _fnGetMaxLenString,\n\t\t_fnStringToCss: _fnStringToCss,\n\t\t_fnSortFlatten: _fnSortFlatten,\n\t\t_fnSort: _fnSort,\n\t\t_fnSortAria: _fnSortAria,\n\t\t_fnSortListener: _fnSortListener,\n\t\t_fnSortAttachListener: _fnSortAttachListener,\n\t\t_fnSortingClasses: _fnSortingClasses,\n\t\t_fnSortData: _fnSortData,\n\t\t_fnSaveState: _fnSaveState,\n\t\t_fnLoadState: _fnLoadState,\n\t\t_fnSettingsFromNode: _fnSettingsFromNode,\n\t\t_fnLog: _fnLog,\n\t\t_fnMap: _fnMap,\n\t\t_fnBindAction: _fnBindAction,\n\t\t_fnCallbackReg: _fnCallbackReg,\n\t\t_fnCallbackFire: _fnCallbackFire,\n\t\t_fnLengthOverflow: _fnLengthOverflow,\n\t\t_fnRenderer: _fnRenderer,\n\t\t_fnDataSource: _fnDataSource,\n\t\t_fnRowAttributes: _fnRowAttributes,\n\t\t_fnCalculateEnd: function () {} // Used by a lot of plug-ins, but redundant\n\t\t                                // in 1.10, so this dead-end function is\n\t\t                                // added to prevent errors\n\t} );\n\t\n\n\t// jQuery access\n\t$.fn.dataTable = DataTable;\n\n\t// Provide access to the host jQuery object (circular reference)\n\tDataTable.$ = 
$;\n\n\t// Legacy aliases\n\t$.fn.dataTableSettings = DataTable.settings;\n\t$.fn.dataTableExt = DataTable.ext;\n\n\t// With a capital `D` we return a DataTables API instance rather than a\n\t// jQuery object\n\t$.fn.DataTable = function ( opts ) {\n\t\treturn $(this).dataTable( opts ).api();\n\t};\n\n\t// All properties that are available to $.fn.dataTable should also be\n\t// available on $.fn.DataTable\n\t$.each( DataTable, function ( prop, val ) {\n\t\t$.fn.DataTable[ prop ] = val;\n\t} );\n\n\n\t// Information about events fired by DataTables - for documentation.\n\t/**\n\t * Draw event, fired whenever the table is redrawn on the page, at the same\n\t * point as fnDrawCallback. This may be useful for binding events or\n\t * performing calculations when the table is altered at all.\n\t *  @name DataTable#draw.dt\n\t *  @event\n\t *  @param {event} e jQuery event object\n\t *  @param {object} o DataTables settings object {@link DataTable.models.oSettings}\n\t */\n\n\t/**\n\t * Search event, fired when the searching applied to the table (using the\n\t * built-in global search, or column filters) is altered.\n\t *  @name DataTable#search.dt\n\t *  @event\n\t *  @param {event} e jQuery event object\n\t *  @param {object} o DataTables settings object {@link DataTable.models.oSettings}\n\t */\n\n\t/**\n\t * Page change event, fired when the paging of the table is altered.\n\t *  @name DataTable#page.dt\n\t *  @event\n\t *  @param {event} e jQuery event object\n\t *  @param {object} o DataTables settings object {@link DataTable.models.oSettings}\n\t */\n\n\t/**\n\t * Order event, fired when the ordering applied to the table is altered.\n\t *  @name DataTable#order.dt\n\t *  @event\n\t *  @param {event} e jQuery event object\n\t *  @param {object} o DataTables settings object {@link DataTable.models.oSettings}\n\t */\n\n\t/**\n\t * DataTables initialisation complete event, fired when the table is fully\n\t * drawn, including Ajax data loaded, if Ajax data is 
required.\n\t *  @name DataTable#init.dt\n\t *  @event\n\t *  @param {event} e jQuery event object\n\t *  @param {object} oSettings DataTables settings object\n\t *  @param {object} json The JSON object request from the server - only\n\t *    present if client-side Ajax sourced data is used</li></ol>\n\t */\n\n\t/**\n\t * State save event, fired when the table has changed state a new state save\n\t * is required. This event allows modification of the state saving object\n\t * prior to actually doing the save, including addition or other state\n\t * properties (for plug-ins) or modification of a DataTables core property.\n\t *  @name DataTable#stateSaveParams.dt\n\t *  @event\n\t *  @param {event} e jQuery event object\n\t *  @param {object} oSettings DataTables settings object\n\t *  @param {object} json The state information to be saved\n\t */\n\n\t/**\n\t * State load event, fired when the table is loading state from the stored\n\t * data, but prior to the settings object being modified by the saved state\n\t * - allowing modification of the saved state is required or loading of\n\t * state for a plug-in.\n\t *  @name DataTable#stateLoadParams.dt\n\t *  @event\n\t *  @param {event} e jQuery event object\n\t *  @param {object} oSettings DataTables settings object\n\t *  @param {object} json The saved state information\n\t */\n\n\t/**\n\t * State loaded event, fired when state has been loaded from stored data and\n\t * the settings object has been modified by the loaded data.\n\t *  @name DataTable#stateLoaded.dt\n\t *  @event\n\t *  @param {event} e jQuery event object\n\t *  @param {object} oSettings DataTables settings object\n\t *  @param {object} json The saved state information\n\t */\n\n\t/**\n\t * Processing event, fired when DataTables is doing some kind of processing\n\t * (be it, order, searcg or anything else). 
It can be used to indicate to\n\t * the end user that there is something happening, or that something has\n\t * finished.\n\t *  @name DataTable#processing.dt\n\t *  @event\n\t *  @param {event} e jQuery event object\n\t *  @param {object} oSettings DataTables settings object\n\t *  @param {boolean} bShow Flag for if DataTables is doing processing or not\n\t */\n\n\t/**\n\t * Ajax (XHR) event, fired whenever an Ajax request is completed from a\n\t * request to made to the server for new data. This event is called before\n\t * DataTables processed the returned data, so it can also be used to pre-\n\t * process the data returned from the server, if needed.\n\t *\n\t * Note that this trigger is called in `fnServerData`, if you override\n\t * `fnServerData` and which to use this event, you need to trigger it in you\n\t * success function.\n\t *  @name DataTable#xhr.dt\n\t *  @event\n\t *  @param {event} e jQuery event object\n\t *  @param {object} o DataTables settings object {@link DataTable.models.oSettings}\n\t *  @param {object} json JSON returned from the server\n\t *\n\t *  @example\n\t *     // Use a custom property returned from the server in another DOM element\n\t *     $('#table').dataTable().on('xhr.dt', function (e, settings, json) {\n\t *       $('#status').html( json.status );\n\t *     } );\n\t *\n\t *  @example\n\t *     // Pre-process the data returned from the server\n\t *     $('#table').dataTable().on('xhr.dt', function (e, settings, json) {\n\t *       for ( var i=0, ien=json.aaData.length ; i<ien ; i++ ) {\n\t *         json.aaData[i].sum = json.aaData[i].one + json.aaData[i].two;\n\t *       }\n\t *       // Note no return - manipulate the data directly in the JSON object.\n\t *     } );\n\t */\n\n\t/**\n\t * Destroy event, fired when the DataTable is destroyed by calling fnDestroy\n\t * or passing the bDestroy:true parameter in the initialisation object. 
This\n\t * can be used to remove bound events, added DOM nodes, etc.\n\t *  @name DataTable#destroy.dt\n\t *  @event\n\t *  @param {event} e jQuery event object\n\t *  @param {object} o DataTables settings object {@link DataTable.models.oSettings}\n\t */\n\n\t/**\n\t * Page length change event, fired when number of records to show on each\n\t * page (the length) is changed.\n\t *  @name DataTable#length.dt\n\t *  @event\n\t *  @param {event} e jQuery event object\n\t *  @param {object} o DataTables settings object {@link DataTable.models.oSettings}\n\t *  @param {integer} len New length\n\t */\n\n\t/**\n\t * Column sizing has changed.\n\t *  @name DataTable#column-sizing.dt\n\t *  @event\n\t *  @param {event} e jQuery event object\n\t *  @param {object} o DataTables settings object {@link DataTable.models.oSettings}\n\t */\n\n\t/**\n\t * Column visibility has changed.\n\t *  @name DataTable#column-visibility.dt\n\t *  @event\n\t *  @param {event} e jQuery event object\n\t *  @param {object} o DataTables settings object {@link DataTable.models.oSettings}\n\t *  @param {int} column Column index\n\t *  @param {bool} vis `false` if column now hidden, or `true` if visible\n\t */\n\n\treturn $.fn.dataTable;\n}));\n"
  },
  {
    "path": "web_gui/gui_v3/js/jquery.js",
    "content": "/*! jQuery v1.12.0 | (c) jQuery Foundation | jquery.org/license */\n!function(a,b){\"object\"==typeof module&&\"object\"==typeof module.exports?module.exports=a.document?b(a,!0):function(a){if(!a.document)throw new Error(\"jQuery requires a window with a document\");return b(a)}:b(a)}(\"undefined\"!=typeof window?window:this,function(a,b){var c=[],d=a.document,e=c.slice,f=c.concat,g=c.push,h=c.indexOf,i={},j=i.toString,k=i.hasOwnProperty,l={},m=\"1.12.0\",n=function(a,b){return new n.fn.init(a,b)},o=/^[\\s\\uFEFF\\xA0]+|[\\s\\uFEFF\\xA0]+$/g,p=/^-ms-/,q=/-([\\da-z])/gi,r=function(a,b){return b.toUpperCase()};n.fn=n.prototype={jquery:m,constructor:n,selector:\"\",length:0,toArray:function(){return e.call(this)},get:function(a){return null!=a?0>a?this[a+this.length]:this[a]:e.call(this)},pushStack:function(a){var b=n.merge(this.constructor(),a);return b.prevObject=this,b.context=this.context,b},each:function(a){return n.each(this,a)},map:function(a){return this.pushStack(n.map(this,function(b,c){return a.call(b,c,b)}))},slice:function(){return this.pushStack(e.apply(this,arguments))},first:function(){return this.eq(0)},last:function(){return this.eq(-1)},eq:function(a){var b=this.length,c=+a+(0>a?b:0);return this.pushStack(c>=0&&b>c?[this[c]]:[])},end:function(){return this.prevObject||this.constructor()},push:g,sort:c.sort,splice:c.splice},n.extend=n.fn.extend=function(){var a,b,c,d,e,f,g=arguments[0]||{},h=1,i=arguments.length,j=!1;for(\"boolean\"==typeof g&&(j=g,g=arguments[h]||{},h++),\"object\"==typeof g||n.isFunction(g)||(g={}),h===i&&(g=this,h--);i>h;h++)if(null!=(e=arguments[h]))for(d in e)a=g[d],c=e[d],g!==c&&(j&&c&&(n.isPlainObject(c)||(b=n.isArray(c)))?(b?(b=!1,f=a&&n.isArray(a)?a:[]):f=a&&n.isPlainObject(a)?a:{},g[d]=n.extend(j,f,c)):void 0!==c&&(g[d]=c));return g},n.extend({expando:\"jQuery\"+(m+Math.random()).replace(/\\D/g,\"\"),isReady:!0,error:function(a){throw new 
Error(a)},noop:function(){},isFunction:function(a){return\"function\"===n.type(a)},isArray:Array.isArray||function(a){return\"array\"===n.type(a)},isWindow:function(a){return null!=a&&a==a.window},isNumeric:function(a){var b=a&&a.toString();return!n.isArray(a)&&b-parseFloat(b)+1>=0},isEmptyObject:function(a){var b;for(b in a)return!1;return!0},isPlainObject:function(a){var b;if(!a||\"object\"!==n.type(a)||a.nodeType||n.isWindow(a))return!1;try{if(a.constructor&&!k.call(a,\"constructor\")&&!k.call(a.constructor.prototype,\"isPrototypeOf\"))return!1}catch(c){return!1}if(!l.ownFirst)for(b in a)return k.call(a,b);for(b in a);return void 0===b||k.call(a,b)},type:function(a){return null==a?a+\"\":\"object\"==typeof a||\"function\"==typeof a?i[j.call(a)]||\"object\":typeof a},globalEval:function(b){b&&n.trim(b)&&(a.execScript||function(b){a.eval.call(a,b)})(b)},camelCase:function(a){return a.replace(p,\"ms-\").replace(q,r)},nodeName:function(a,b){return a.nodeName&&a.nodeName.toLowerCase()===b.toLowerCase()},each:function(a,b){var c,d=0;if(s(a)){for(c=a.length;c>d;d++)if(b.call(a[d],d,a[d])===!1)break}else for(d in a)if(b.call(a[d],d,a[d])===!1)break;return a},trim:function(a){return null==a?\"\":(a+\"\").replace(o,\"\")},makeArray:function(a,b){var c=b||[];return null!=a&&(s(Object(a))?n.merge(c,\"string\"==typeof a?[a]:a):g.call(c,a)),c},inArray:function(a,b,c){var d;if(b){if(h)return h.call(b,a,c);for(d=b.length,c=c?0>c?Math.max(0,d+c):c:0;d>c;c++)if(c in b&&b[c]===a)return c}return-1},merge:function(a,b){var c=+b.length,d=0,e=a.length;while(c>d)a[e++]=b[d++];if(c!==c)while(void 0!==b[d])a[e++]=b[d++];return a.length=e,a},grep:function(a,b,c){for(var d,e=[],f=0,g=a.length,h=!c;g>f;f++)d=!b(a[f],f),d!==h&&e.push(a[f]);return e},map:function(a,b,c){var d,e,g=0,h=[];if(s(a))for(d=a.length;d>g;g++)e=b(a[g],g,c),null!=e&&h.push(e);else for(g in a)e=b(a[g],g,c),null!=e&&h.push(e);return f.apply([],h)},guid:1,proxy:function(a,b){var c,d,f;return\"string\"==typeof 
b&&(f=a[b],b=a,a=f),n.isFunction(a)?(c=e.call(arguments,2),d=function(){return a.apply(b||this,c.concat(e.call(arguments)))},d.guid=a.guid=a.guid||n.guid++,d):void 0},now:function(){return+new Date},support:l}),\"function\"==typeof Symbol&&(n.fn[Symbol.iterator]=c[Symbol.iterator]),n.each(\"Boolean Number String Function Array Date RegExp Object Error Symbol\".split(\" \"),function(a,b){i[\"[object \"+b+\"]\"]=b.toLowerCase()});function s(a){var b=!!a&&\"length\"in a&&a.length,c=n.type(a);return\"function\"===c||n.isWindow(a)?!1:\"array\"===c||0===b||\"number\"==typeof b&&b>0&&b-1 in a}var t=function(a){var b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u=\"sizzle\"+1*new Date,v=a.document,w=0,x=0,y=ga(),z=ga(),A=ga(),B=function(a,b){return a===b&&(l=!0),0},C=1<<31,D={}.hasOwnProperty,E=[],F=E.pop,G=E.push,H=E.push,I=E.slice,J=function(a,b){for(var c=0,d=a.length;d>c;c++)if(a[c]===b)return c;return-1},K=\"checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|ismap|loop|multiple|open|readonly|required|scoped\",L=\"[\\\\x20\\\\t\\\\r\\\\n\\\\f]\",M=\"(?:\\\\\\\\.|[\\\\w-]|[^\\\\x00-\\\\xa0])+\",N=\"\\\\[\"+L+\"*(\"+M+\")(?:\"+L+\"*([*^$|!~]?=)\"+L+\"*(?:'((?:\\\\\\\\.|[^\\\\\\\\'])*)'|\\\"((?:\\\\\\\\.|[^\\\\\\\\\\\"])*)\\\"|(\"+M+\"))|)\"+L+\"*\\\\]\",O=\":(\"+M+\")(?:\\\\((('((?:\\\\\\\\.|[^\\\\\\\\'])*)'|\\\"((?:\\\\\\\\.|[^\\\\\\\\\\\"])*)\\\")|((?:\\\\\\\\.|[^\\\\\\\\()[\\\\]]|\"+N+\")*)|.*)\\\\)|)\",P=new RegExp(L+\"+\",\"g\"),Q=new RegExp(\"^\"+L+\"+|((?:^|[^\\\\\\\\])(?:\\\\\\\\.)*)\"+L+\"+$\",\"g\"),R=new RegExp(\"^\"+L+\"*,\"+L+\"*\"),S=new RegExp(\"^\"+L+\"*([>+~]|\"+L+\")\"+L+\"*\"),T=new RegExp(\"=\"+L+\"*([^\\\\]'\\\"]*?)\"+L+\"*\\\\]\",\"g\"),U=new RegExp(O),V=new RegExp(\"^\"+M+\"$\"),W={ID:new RegExp(\"^#(\"+M+\")\"),CLASS:new RegExp(\"^\\\\.(\"+M+\")\"),TAG:new RegExp(\"^(\"+M+\"|[*])\"),ATTR:new RegExp(\"^\"+N),PSEUDO:new RegExp(\"^\"+O),CHILD:new 
RegExp(\"^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\\\(\"+L+\"*(even|odd|(([+-]|)(\\\\d*)n|)\"+L+\"*(?:([+-]|)\"+L+\"*(\\\\d+)|))\"+L+\"*\\\\)|)\",\"i\"),bool:new RegExp(\"^(?:\"+K+\")$\",\"i\"),needsContext:new RegExp(\"^\"+L+\"*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\\\(\"+L+\"*((?:-\\\\d)?\\\\d*)\"+L+\"*\\\\)|)(?=[^-]|$)\",\"i\")},X=/^(?:input|select|textarea|button)$/i,Y=/^h\\d$/i,Z=/^[^{]+\\{\\s*\\[native \\w/,$=/^(?:#([\\w-]+)|(\\w+)|\\.([\\w-]+))$/,_=/[+~]/,aa=/'|\\\\/g,ba=new RegExp(\"\\\\\\\\([\\\\da-f]{1,6}\"+L+\"?|(\"+L+\")|.)\",\"ig\"),ca=function(a,b,c){var d=\"0x\"+b-65536;return d!==d||c?b:0>d?String.fromCharCode(d+65536):String.fromCharCode(d>>10|55296,1023&d|56320)},da=function(){m()};try{H.apply(E=I.call(v.childNodes),v.childNodes),E[v.childNodes.length].nodeType}catch(ea){H={apply:E.length?function(a,b){G.apply(a,I.call(b))}:function(a,b){var c=a.length,d=0;while(a[c++]=b[d++]);a.length=c-1}}}function fa(a,b,d,e){var f,h,j,k,l,o,r,s,w=b&&b.ownerDocument,x=b?b.nodeType:9;if(d=d||[],\"string\"!=typeof a||!a||1!==x&&9!==x&&11!==x)return d;if(!e&&((b?b.ownerDocument||b:v)!==n&&m(b),b=b||n,p)){if(11!==x&&(o=$.exec(a)))if(f=o[1]){if(9===x){if(!(j=b.getElementById(f)))return d;if(j.id===f)return d.push(j),d}else if(w&&(j=w.getElementById(f))&&t(b,j)&&j.id===f)return d.push(j),d}else{if(o[2])return H.apply(d,b.getElementsByTagName(a)),d;if((f=o[3])&&c.getElementsByClassName&&b.getElementsByClassName)return H.apply(d,b.getElementsByClassName(f)),d}if(c.qsa&&!A[a+\" \"]&&(!q||!q.test(a))){if(1!==x)w=b,s=a;else if(\"object\"!==b.nodeName.toLowerCase()){(k=b.getAttribute(\"id\"))?k=k.replace(aa,\"\\\\$&\"):b.setAttribute(\"id\",k=u),r=g(a),h=r.length,l=V.test(k)?\"#\"+k:\"[id='\"+k+\"']\";while(h--)r[h]=l+\" \"+qa(r[h]);s=r.join(\",\"),w=_.test(a)&&oa(b.parentNode)||b}if(s)try{return H.apply(d,w.querySelectorAll(s)),d}catch(y){}finally{k===u&&b.removeAttribute(\"id\")}}}return i(a.replace(Q,\"$1\"),b,d,e)}function ga(){var a=[];function 
b(c,e){return a.push(c+\" \")>d.cacheLength&&delete b[a.shift()],b[c+\" \"]=e}return b}function ha(a){return a[u]=!0,a}function ia(a){var b=n.createElement(\"div\");try{return!!a(b)}catch(c){return!1}finally{b.parentNode&&b.parentNode.removeChild(b),b=null}}function ja(a,b){var c=a.split(\"|\"),e=c.length;while(e--)d.attrHandle[c[e]]=b}function ka(a,b){var c=b&&a,d=c&&1===a.nodeType&&1===b.nodeType&&(~b.sourceIndex||C)-(~a.sourceIndex||C);if(d)return d;if(c)while(c=c.nextSibling)if(c===b)return-1;return a?1:-1}function la(a){return function(b){var c=b.nodeName.toLowerCase();return\"input\"===c&&b.type===a}}function ma(a){return function(b){var c=b.nodeName.toLowerCase();return(\"input\"===c||\"button\"===c)&&b.type===a}}function na(a){return ha(function(b){return b=+b,ha(function(c,d){var e,f=a([],c.length,b),g=f.length;while(g--)c[e=f[g]]&&(c[e]=!(d[e]=c[e]))})})}function oa(a){return a&&\"undefined\"!=typeof a.getElementsByTagName&&a}c=fa.support={},f=fa.isXML=function(a){var b=a&&(a.ownerDocument||a).documentElement;return b?\"HTML\"!==b.nodeName:!1},m=fa.setDocument=function(a){var b,e,g=a?a.ownerDocument||a:v;return g!==n&&9===g.nodeType&&g.documentElement?(n=g,o=n.documentElement,p=!f(n),(e=n.defaultView)&&e.top!==e&&(e.addEventListener?e.addEventListener(\"unload\",da,!1):e.attachEvent&&e.attachEvent(\"onunload\",da)),c.attributes=ia(function(a){return a.className=\"i\",!a.getAttribute(\"className\")}),c.getElementsByTagName=ia(function(a){return a.appendChild(n.createComment(\"\")),!a.getElementsByTagName(\"*\").length}),c.getElementsByClassName=Z.test(n.getElementsByClassName),c.getById=ia(function(a){return o.appendChild(a).id=u,!n.getElementsByName||!n.getElementsByName(u).length}),c.getById?(d.find.ID=function(a,b){if(\"undefined\"!=typeof b.getElementById&&p){var c=b.getElementById(a);return c?[c]:[]}},d.filter.ID=function(a){var b=a.replace(ba,ca);return function(a){return a.getAttribute(\"id\")===b}}):(delete d.find.ID,d.filter.ID=function(a){var 
b=a.replace(ba,ca);return function(a){var c=\"undefined\"!=typeof a.getAttributeNode&&a.getAttributeNode(\"id\");return c&&c.value===b}}),d.find.TAG=c.getElementsByTagName?function(a,b){return\"undefined\"!=typeof b.getElementsByTagName?b.getElementsByTagName(a):c.qsa?b.querySelectorAll(a):void 0}:function(a,b){var c,d=[],e=0,f=b.getElementsByTagName(a);if(\"*\"===a){while(c=f[e++])1===c.nodeType&&d.push(c);return d}return f},d.find.CLASS=c.getElementsByClassName&&function(a,b){return\"undefined\"!=typeof b.getElementsByClassName&&p?b.getElementsByClassName(a):void 0},r=[],q=[],(c.qsa=Z.test(n.querySelectorAll))&&(ia(function(a){o.appendChild(a).innerHTML=\"<a id='\"+u+\"'></a><select id='\"+u+\"-\\r\\\\' msallowcapture=''><option selected=''></option></select>\",a.querySelectorAll(\"[msallowcapture^='']\").length&&q.push(\"[*^$]=\"+L+\"*(?:''|\\\"\\\")\"),a.querySelectorAll(\"[selected]\").length||q.push(\"\\\\[\"+L+\"*(?:value|\"+K+\")\"),a.querySelectorAll(\"[id~=\"+u+\"-]\").length||q.push(\"~=\"),a.querySelectorAll(\":checked\").length||q.push(\":checked\"),a.querySelectorAll(\"a#\"+u+\"+*\").length||q.push(\".#.+[+~]\")}),ia(function(a){var b=n.createElement(\"input\");b.setAttribute(\"type\",\"hidden\"),a.appendChild(b).setAttribute(\"name\",\"D\"),a.querySelectorAll(\"[name=d]\").length&&q.push(\"name\"+L+\"*[*^$|!~]?=\"),a.querySelectorAll(\":enabled\").length||q.push(\":enabled\",\":disabled\"),a.querySelectorAll(\"*,:x\"),q.push(\",.*:\")})),(c.matchesSelector=Z.test(s=o.matches||o.webkitMatchesSelector||o.mozMatchesSelector||o.oMatchesSelector||o.msMatchesSelector))&&ia(function(a){c.disconnectedMatch=s.call(a,\"div\"),s.call(a,\"[s!='']:x\"),r.push(\"!=\",O)}),q=q.length&&new RegExp(q.join(\"|\")),r=r.length&&new RegExp(r.join(\"|\")),b=Z.test(o.compareDocumentPosition),t=b||Z.test(o.contains)?function(a,b){var c=9===a.nodeType?a.documentElement:a,d=b&&b.parentNode;return 
a===d||!(!d||1!==d.nodeType||!(c.contains?c.contains(d):a.compareDocumentPosition&&16&a.compareDocumentPosition(d)))}:function(a,b){if(b)while(b=b.parentNode)if(b===a)return!0;return!1},B=b?function(a,b){if(a===b)return l=!0,0;var d=!a.compareDocumentPosition-!b.compareDocumentPosition;return d?d:(d=(a.ownerDocument||a)===(b.ownerDocument||b)?a.compareDocumentPosition(b):1,1&d||!c.sortDetached&&b.compareDocumentPosition(a)===d?a===n||a.ownerDocument===v&&t(v,a)?-1:b===n||b.ownerDocument===v&&t(v,b)?1:k?J(k,a)-J(k,b):0:4&d?-1:1)}:function(a,b){if(a===b)return l=!0,0;var c,d=0,e=a.parentNode,f=b.parentNode,g=[a],h=[b];if(!e||!f)return a===n?-1:b===n?1:e?-1:f?1:k?J(k,a)-J(k,b):0;if(e===f)return ka(a,b);c=a;while(c=c.parentNode)g.unshift(c);c=b;while(c=c.parentNode)h.unshift(c);while(g[d]===h[d])d++;return d?ka(g[d],h[d]):g[d]===v?-1:h[d]===v?1:0},n):n},fa.matches=function(a,b){return fa(a,null,null,b)},fa.matchesSelector=function(a,b){if((a.ownerDocument||a)!==n&&m(a),b=b.replace(T,\"='$1']\"),c.matchesSelector&&p&&!A[b+\" \"]&&(!r||!r.test(b))&&(!q||!q.test(b)))try{var d=s.call(a,b);if(d||c.disconnectedMatch||a.document&&11!==a.document.nodeType)return d}catch(e){}return fa(b,n,null,[a]).length>0},fa.contains=function(a,b){return(a.ownerDocument||a)!==n&&m(a),t(a,b)},fa.attr=function(a,b){(a.ownerDocument||a)!==n&&m(a);var e=d.attrHandle[b.toLowerCase()],f=e&&D.call(d.attrHandle,b.toLowerCase())?e(a,b,!p):void 0;return void 0!==f?f:c.attributes||!p?a.getAttribute(b):(f=a.getAttributeNode(b))&&f.specified?f.value:null},fa.error=function(a){throw new Error(\"Syntax error, unrecognized expression: \"+a)},fa.uniqueSort=function(a){var b,d=[],e=0,f=0;if(l=!c.detectDuplicates,k=!c.sortStable&&a.slice(0),a.sort(B),l){while(b=a[f++])b===a[f]&&(e=d.push(f));while(e--)a.splice(d[e],1)}return k=null,a},e=fa.getText=function(a){var b,c=\"\",d=0,f=a.nodeType;if(f){if(1===f||9===f||11===f){if(\"string\"==typeof a.textContent)return 
a.textContent;for(a=a.firstChild;a;a=a.nextSibling)c+=e(a)}else if(3===f||4===f)return a.nodeValue}else while(b=a[d++])c+=e(b);return c},d=fa.selectors={cacheLength:50,createPseudo:ha,match:W,attrHandle:{},find:{},relative:{\">\":{dir:\"parentNode\",first:!0},\" \":{dir:\"parentNode\"},\"+\":{dir:\"previousSibling\",first:!0},\"~\":{dir:\"previousSibling\"}},preFilter:{ATTR:function(a){return a[1]=a[1].replace(ba,ca),a[3]=(a[3]||a[4]||a[5]||\"\").replace(ba,ca),\"~=\"===a[2]&&(a[3]=\" \"+a[3]+\" \"),a.slice(0,4)},CHILD:function(a){return a[1]=a[1].toLowerCase(),\"nth\"===a[1].slice(0,3)?(a[3]||fa.error(a[0]),a[4]=+(a[4]?a[5]+(a[6]||1):2*(\"even\"===a[3]||\"odd\"===a[3])),a[5]=+(a[7]+a[8]||\"odd\"===a[3])):a[3]&&fa.error(a[0]),a},PSEUDO:function(a){var b,c=!a[6]&&a[2];return W.CHILD.test(a[0])?null:(a[3]?a[2]=a[4]||a[5]||\"\":c&&U.test(c)&&(b=g(c,!0))&&(b=c.indexOf(\")\",c.length-b)-c.length)&&(a[0]=a[0].slice(0,b),a[2]=c.slice(0,b)),a.slice(0,3))}},filter:{TAG:function(a){var b=a.replace(ba,ca).toLowerCase();return\"*\"===a?function(){return!0}:function(a){return a.nodeName&&a.nodeName.toLowerCase()===b}},CLASS:function(a){var b=y[a+\" \"];return b||(b=new RegExp(\"(^|\"+L+\")\"+a+\"(\"+L+\"|$)\"))&&y(a,function(a){return b.test(\"string\"==typeof a.className&&a.className||\"undefined\"!=typeof a.getAttribute&&a.getAttribute(\"class\")||\"\")})},ATTR:function(a,b,c){return function(d){var e=fa.attr(d,a);return null==e?\"!=\"===b:b?(e+=\"\",\"=\"===b?e===c:\"!=\"===b?e!==c:\"^=\"===b?c&&0===e.indexOf(c):\"*=\"===b?c&&e.indexOf(c)>-1:\"$=\"===b?c&&e.slice(-c.length)===c:\"~=\"===b?(\" \"+e.replace(P,\" \")+\" \").indexOf(c)>-1:\"|=\"===b?e===c||e.slice(0,c.length+1)===c+\"-\":!1):!0}},CHILD:function(a,b,c,d,e){var f=\"nth\"!==a.slice(0,3),g=\"last\"!==a.slice(-4),h=\"of-type\"===b;return 1===d&&0===e?function(a){return!!a.parentNode}:function(b,c,i){var 
j,k,l,m,n,o,p=f!==g?\"nextSibling\":\"previousSibling\",q=b.parentNode,r=h&&b.nodeName.toLowerCase(),s=!i&&!h,t=!1;if(q){if(f){while(p){m=b;while(m=m[p])if(h?m.nodeName.toLowerCase()===r:1===m.nodeType)return!1;o=p=\"only\"===a&&!o&&\"nextSibling\"}return!0}if(o=[g?q.firstChild:q.lastChild],g&&s){m=q,l=m[u]||(m[u]={}),k=l[m.uniqueID]||(l[m.uniqueID]={}),j=k[a]||[],n=j[0]===w&&j[1],t=n&&j[2],m=n&&q.childNodes[n];while(m=++n&&m&&m[p]||(t=n=0)||o.pop())if(1===m.nodeType&&++t&&m===b){k[a]=[w,n,t];break}}else if(s&&(m=b,l=m[u]||(m[u]={}),k=l[m.uniqueID]||(l[m.uniqueID]={}),j=k[a]||[],n=j[0]===w&&j[1],t=n),t===!1)while(m=++n&&m&&m[p]||(t=n=0)||o.pop())if((h?m.nodeName.toLowerCase()===r:1===m.nodeType)&&++t&&(s&&(l=m[u]||(m[u]={}),k=l[m.uniqueID]||(l[m.uniqueID]={}),k[a]=[w,t]),m===b))break;return t-=e,t===d||t%d===0&&t/d>=0}}},PSEUDO:function(a,b){var c,e=d.pseudos[a]||d.setFilters[a.toLowerCase()]||fa.error(\"unsupported pseudo: \"+a);return e[u]?e(b):e.length>1?(c=[a,a,\"\",b],d.setFilters.hasOwnProperty(a.toLowerCase())?ha(function(a,c){var d,f=e(a,b),g=f.length;while(g--)d=J(a,f[g]),a[d]=!(c[d]=f[g])}):function(a){return e(a,0,c)}):e}},pseudos:{not:ha(function(a){var b=[],c=[],d=h(a.replace(Q,\"$1\"));return d[u]?ha(function(a,b,c,e){var f,g=d(a,null,e,[]),h=a.length;while(h--)(f=g[h])&&(a[h]=!(b[h]=f))}):function(a,e,f){return b[0]=a,d(b,null,f,c),b[0]=null,!c.pop()}}),has:ha(function(a){return function(b){return fa(a,b).length>0}}),contains:ha(function(a){return a=a.replace(ba,ca),function(b){return(b.textContent||b.innerText||e(b)).indexOf(a)>-1}}),lang:ha(function(a){return V.test(a||\"\")||fa.error(\"unsupported lang: \"+a),a=a.replace(ba,ca).toLowerCase(),function(b){var c;do if(c=p?b.lang:b.getAttribute(\"xml:lang\")||b.getAttribute(\"lang\"))return c=c.toLowerCase(),c===a||0===c.indexOf(a+\"-\");while((b=b.parentNode)&&1===b.nodeType);return!1}}),target:function(b){var c=a.location&&a.location.hash;return c&&c.slice(1)===b.id},root:function(a){return 
a===o},focus:function(a){return a===n.activeElement&&(!n.hasFocus||n.hasFocus())&&!!(a.type||a.href||~a.tabIndex)},enabled:function(a){return a.disabled===!1},disabled:function(a){return a.disabled===!0},checked:function(a){var b=a.nodeName.toLowerCase();return\"input\"===b&&!!a.checked||\"option\"===b&&!!a.selected},selected:function(a){return a.parentNode&&a.parentNode.selectedIndex,a.selected===!0},empty:function(a){for(a=a.firstChild;a;a=a.nextSibling)if(a.nodeType<6)return!1;return!0},parent:function(a){return!d.pseudos.empty(a)},header:function(a){return Y.test(a.nodeName)},input:function(a){return X.test(a.nodeName)},button:function(a){var b=a.nodeName.toLowerCase();return\"input\"===b&&\"button\"===a.type||\"button\"===b},text:function(a){var b;return\"input\"===a.nodeName.toLowerCase()&&\"text\"===a.type&&(null==(b=a.getAttribute(\"type\"))||\"text\"===b.toLowerCase())},first:na(function(){return[0]}),last:na(function(a,b){return[b-1]}),eq:na(function(a,b,c){return[0>c?c+b:c]}),even:na(function(a,b){for(var c=0;b>c;c+=2)a.push(c);return a}),odd:na(function(a,b){for(var c=1;b>c;c+=2)a.push(c);return a}),lt:na(function(a,b,c){for(var d=0>c?c+b:c;--d>=0;)a.push(d);return a}),gt:na(function(a,b,c){for(var d=0>c?c+b:c;++d<b;)a.push(d);return a})}},d.pseudos.nth=d.pseudos.eq;for(b in{radio:!0,checkbox:!0,file:!0,password:!0,image:!0})d.pseudos[b]=la(b);for(b in{submit:!0,reset:!0})d.pseudos[b]=ma(b);function pa(){}pa.prototype=d.filters=d.pseudos,d.setFilters=new pa,g=fa.tokenize=function(a,b){var c,e,f,g,h,i,j,k=z[a+\" \"];if(k)return b?0:k.slice(0);h=a,i=[],j=d.preFilter;while(h){(!c||(e=R.exec(h)))&&(e&&(h=h.slice(e[0].length)||h),i.push(f=[])),c=!1,(e=S.exec(h))&&(c=e.shift(),f.push({value:c,type:e[0].replace(Q,\" \")}),h=h.slice(c.length));for(g in d.filter)!(e=W[g].exec(h))||j[g]&&!(e=j[g](e))||(c=e.shift(),f.push({value:c,type:g,matches:e}),h=h.slice(c.length));if(!c)break}return b?h.length:h?fa.error(a):z(a,i).slice(0)};function qa(a){for(var 
b=0,c=a.length,d=\"\";c>b;b++)d+=a[b].value;return d}function ra(a,b,c){var d=b.dir,e=c&&\"parentNode\"===d,f=x++;return b.first?function(b,c,f){while(b=b[d])if(1===b.nodeType||e)return a(b,c,f)}:function(b,c,g){var h,i,j,k=[w,f];if(g){while(b=b[d])if((1===b.nodeType||e)&&a(b,c,g))return!0}else while(b=b[d])if(1===b.nodeType||e){if(j=b[u]||(b[u]={}),i=j[b.uniqueID]||(j[b.uniqueID]={}),(h=i[d])&&h[0]===w&&h[1]===f)return k[2]=h[2];if(i[d]=k,k[2]=a(b,c,g))return!0}}}function sa(a){return a.length>1?function(b,c,d){var e=a.length;while(e--)if(!a[e](b,c,d))return!1;return!0}:a[0]}function ta(a,b,c){for(var d=0,e=b.length;e>d;d++)fa(a,b[d],c);return c}function ua(a,b,c,d,e){for(var f,g=[],h=0,i=a.length,j=null!=b;i>h;h++)(f=a[h])&&(!c||c(f,d,e))&&(g.push(f),j&&b.push(h));return g}function va(a,b,c,d,e,f){return d&&!d[u]&&(d=va(d)),e&&!e[u]&&(e=va(e,f)),ha(function(f,g,h,i){var j,k,l,m=[],n=[],o=g.length,p=f||ta(b||\"*\",h.nodeType?[h]:h,[]),q=!a||!f&&b?p:ua(p,m,a,h,i),r=c?e||(f?a:o||d)?[]:g:q;if(c&&c(q,r,h,i),d){j=ua(r,n),d(j,[],h,i),k=j.length;while(k--)(l=j[k])&&(r[n[k]]=!(q[n[k]]=l))}if(f){if(e||a){if(e){j=[],k=r.length;while(k--)(l=r[k])&&j.push(q[k]=l);e(null,r=[],j,i)}k=r.length;while(k--)(l=r[k])&&(j=e?J(f,l):m[k])>-1&&(f[j]=!(g[j]=l))}}else r=ua(r===g?r.splice(o,r.length):r),e?e(null,g,r,i):H.apply(g,r)})}function wa(a){for(var b,c,e,f=a.length,g=d.relative[a[0].type],h=g||d.relative[\" \"],i=g?1:0,k=ra(function(a){return a===b},h,!0),l=ra(function(a){return J(b,a)>-1},h,!0),m=[function(a,c,d){var e=!g&&(d||c!==j)||((b=c).nodeType?k(a,c,d):l(a,c,d));return b=null,e}];f>i;i++)if(c=d.relative[a[i].type])m=[ra(sa(m),c)];else{if(c=d.filter[a[i].type].apply(null,a[i].matches),c[u]){for(e=++i;f>e;e++)if(d.relative[a[e].type])break;return va(i>1&&sa(m),i>1&&qa(a.slice(0,i-1).concat({value:\" \"===a[i-2].type?\"*\":\"\"})).replace(Q,\"$1\"),c,e>i&&wa(a.slice(i,e)),f>e&&wa(a=a.slice(e)),f>e&&qa(a))}m.push(c)}return sa(m)}function xa(a,b){var 
c=b.length>0,e=a.length>0,f=function(f,g,h,i,k){var l,o,q,r=0,s=\"0\",t=f&&[],u=[],v=j,x=f||e&&d.find.TAG(\"*\",k),y=w+=null==v?1:Math.random()||.1,z=x.length;for(k&&(j=g===n||g||k);s!==z&&null!=(l=x[s]);s++){if(e&&l){o=0,g||l.ownerDocument===n||(m(l),h=!p);while(q=a[o++])if(q(l,g||n,h)){i.push(l);break}k&&(w=y)}c&&((l=!q&&l)&&r--,f&&t.push(l))}if(r+=s,c&&s!==r){o=0;while(q=b[o++])q(t,u,g,h);if(f){if(r>0)while(s--)t[s]||u[s]||(u[s]=F.call(i));u=ua(u)}H.apply(i,u),k&&!f&&u.length>0&&r+b.length>1&&fa.uniqueSort(i)}return k&&(w=y,j=v),t};return c?ha(f):f}return h=fa.compile=function(a,b){var c,d=[],e=[],f=A[a+\" \"];if(!f){b||(b=g(a)),c=b.length;while(c--)f=wa(b[c]),f[u]?d.push(f):e.push(f);f=A(a,xa(e,d)),f.selector=a}return f},i=fa.select=function(a,b,e,f){var i,j,k,l,m,n=\"function\"==typeof a&&a,o=!f&&g(a=n.selector||a);if(e=e||[],1===o.length){if(j=o[0]=o[0].slice(0),j.length>2&&\"ID\"===(k=j[0]).type&&c.getById&&9===b.nodeType&&p&&d.relative[j[1].type]){if(b=(d.find.ID(k.matches[0].replace(ba,ca),b)||[])[0],!b)return e;n&&(b=b.parentNode),a=a.slice(j.shift().value.length)}i=W.needsContext.test(a)?0:j.length;while(i--){if(k=j[i],d.relative[l=k.type])break;if((m=d.find[l])&&(f=m(k.matches[0].replace(ba,ca),_.test(j[0].type)&&oa(b.parentNode)||b))){if(j.splice(i,1),a=f.length&&qa(j),!a)return H.apply(e,f),e;break}}}return(n||h(a,o))(f,b,!p,e,!b||_.test(a)&&oa(b.parentNode)||b),e},c.sortStable=u.split(\"\").sort(B).join(\"\")===u,c.detectDuplicates=!!l,m(),c.sortDetached=ia(function(a){return 1&a.compareDocumentPosition(n.createElement(\"div\"))}),ia(function(a){return a.innerHTML=\"<a href='#'></a>\",\"#\"===a.firstChild.getAttribute(\"href\")})||ja(\"type|href|height|width\",function(a,b,c){return c?void 0:a.getAttribute(b,\"type\"===b.toLowerCase()?1:2)}),c.attributes&&ia(function(a){return a.innerHTML=\"<input/>\",a.firstChild.setAttribute(\"value\",\"\"),\"\"===a.firstChild.getAttribute(\"value\")})||ja(\"value\",function(a,b,c){return 
c||\"input\"!==a.nodeName.toLowerCase()?void 0:a.defaultValue}),ia(function(a){return null==a.getAttribute(\"disabled\")})||ja(K,function(a,b,c){var d;return c?void 0:a[b]===!0?b.toLowerCase():(d=a.getAttributeNode(b))&&d.specified?d.value:null}),fa}(a);n.find=t,n.expr=t.selectors,n.expr[\":\"]=n.expr.pseudos,n.uniqueSort=n.unique=t.uniqueSort,n.text=t.getText,n.isXMLDoc=t.isXML,n.contains=t.contains;var u=function(a,b,c){var d=[],e=void 0!==c;while((a=a[b])&&9!==a.nodeType)if(1===a.nodeType){if(e&&n(a).is(c))break;d.push(a)}return d},v=function(a,b){for(var c=[];a;a=a.nextSibling)1===a.nodeType&&a!==b&&c.push(a);return c},w=n.expr.match.needsContext,x=/^<([\\w-]+)\\s*\\/?>(?:<\\/\\1>|)$/,y=/^.[^:#\\[\\.,]*$/;function z(a,b,c){if(n.isFunction(b))return n.grep(a,function(a,d){return!!b.call(a,d,a)!==c});if(b.nodeType)return n.grep(a,function(a){return a===b!==c});if(\"string\"==typeof b){if(y.test(b))return n.filter(b,a,c);b=n.filter(b,a)}return n.grep(a,function(a){return n.inArray(a,b)>-1!==c})}n.filter=function(a,b,c){var d=b[0];return c&&(a=\":not(\"+a+\")\"),1===b.length&&1===d.nodeType?n.find.matchesSelector(d,a)?[d]:[]:n.find.matches(a,n.grep(b,function(a){return 1===a.nodeType}))},n.fn.extend({find:function(a){var b,c=[],d=this,e=d.length;if(\"string\"!=typeof a)return this.pushStack(n(a).filter(function(){for(b=0;e>b;b++)if(n.contains(d[b],this))return!0}));for(b=0;e>b;b++)n.find(a,d[b],c);return c=this.pushStack(e>1?n.unique(c):c),c.selector=this.selector?this.selector+\" \"+a:a,c},filter:function(a){return this.pushStack(z(this,a||[],!1))},not:function(a){return this.pushStack(z(this,a||[],!0))},is:function(a){return!!z(this,\"string\"==typeof a&&w.test(a)?n(a):a||[],!1).length}});var A,B=/^(?:\\s*(<[\\w\\W]+>)[^>]*|#([\\w-]*))$/,C=n.fn.init=function(a,b,c){var e,f;if(!a)return this;if(c=c||A,\"string\"==typeof 
a){if(e=\"<\"===a.charAt(0)&&\">\"===a.charAt(a.length-1)&&a.length>=3?[null,a,null]:B.exec(a),!e||!e[1]&&b)return!b||b.jquery?(b||c).find(a):this.constructor(b).find(a);if(e[1]){if(b=b instanceof n?b[0]:b,n.merge(this,n.parseHTML(e[1],b&&b.nodeType?b.ownerDocument||b:d,!0)),x.test(e[1])&&n.isPlainObject(b))for(e in b)n.isFunction(this[e])?this[e](b[e]):this.attr(e,b[e]);return this}if(f=d.getElementById(e[2]),f&&f.parentNode){if(f.id!==e[2])return A.find(a);this.length=1,this[0]=f}return this.context=d,this.selector=a,this}return a.nodeType?(this.context=this[0]=a,this.length=1,this):n.isFunction(a)?\"undefined\"!=typeof c.ready?c.ready(a):a(n):(void 0!==a.selector&&(this.selector=a.selector,this.context=a.context),n.makeArray(a,this))};C.prototype=n.fn,A=n(d);var D=/^(?:parents|prev(?:Until|All))/,E={children:!0,contents:!0,next:!0,prev:!0};n.fn.extend({has:function(a){var b,c=n(a,this),d=c.length;return this.filter(function(){for(b=0;d>b;b++)if(n.contains(this,c[b]))return!0})},closest:function(a,b){for(var c,d=0,e=this.length,f=[],g=w.test(a)||\"string\"!=typeof a?n(a,b||this.context):0;e>d;d++)for(c=this[d];c&&c!==b;c=c.parentNode)if(c.nodeType<11&&(g?g.index(c)>-1:1===c.nodeType&&n.find.matchesSelector(c,a))){f.push(c);break}return this.pushStack(f.length>1?n.uniqueSort(f):f)},index:function(a){return a?\"string\"==typeof a?n.inArray(this[0],n(a)):n.inArray(a.jquery?a[0]:a,this):this[0]&&this[0].parentNode?this.first().prevAll().length:-1},add:function(a,b){return this.pushStack(n.uniqueSort(n.merge(this.get(),n(a,b))))},addBack:function(a){return this.add(null==a?this.prevObject:this.prevObject.filter(a))}});function F(a,b){do a=a[b];while(a&&1!==a.nodeType);return a}n.each({parent:function(a){var b=a.parentNode;return b&&11!==b.nodeType?b:null},parents:function(a){return u(a,\"parentNode\")},parentsUntil:function(a,b,c){return u(a,\"parentNode\",c)},next:function(a){return F(a,\"nextSibling\")},prev:function(a){return 
F(a,\"previousSibling\")},nextAll:function(a){return u(a,\"nextSibling\")},prevAll:function(a){return u(a,\"previousSibling\")},nextUntil:function(a,b,c){return u(a,\"nextSibling\",c)},prevUntil:function(a,b,c){return u(a,\"previousSibling\",c)},siblings:function(a){return v((a.parentNode||{}).firstChild,a)},children:function(a){return v(a.firstChild)},contents:function(a){return n.nodeName(a,\"iframe\")?a.contentDocument||a.contentWindow.document:n.merge([],a.childNodes)}},function(a,b){n.fn[a]=function(c,d){var e=n.map(this,b,c);return\"Until\"!==a.slice(-5)&&(d=c),d&&\"string\"==typeof d&&(e=n.filter(d,e)),this.length>1&&(E[a]||(e=n.uniqueSort(e)),D.test(a)&&(e=e.reverse())),this.pushStack(e)}});var G=/\\S+/g;function H(a){var b={};return n.each(a.match(G)||[],function(a,c){b[c]=!0}),b}n.Callbacks=function(a){a=\"string\"==typeof a?H(a):n.extend({},a);var b,c,d,e,f=[],g=[],h=-1,i=function(){for(e=a.once,d=b=!0;g.length;h=-1){c=g.shift();while(++h<f.length)f[h].apply(c[0],c[1])===!1&&a.stopOnFalse&&(h=f.length,c=!1)}a.memory||(c=!1),b=!1,e&&(f=c?[]:\"\")},j={add:function(){return f&&(c&&!b&&(h=f.length-1,g.push(c)),function d(b){n.each(b,function(b,c){n.isFunction(c)?a.unique&&j.has(c)||f.push(c):c&&c.length&&\"string\"!==n.type(c)&&d(c)})}(arguments),c&&!b&&i()),this},remove:function(){return n.each(arguments,function(a,b){var c;while((c=n.inArray(b,f,c))>-1)f.splice(c,1),h>=c&&h--}),this},has:function(a){return a?n.inArray(a,f)>-1:f.length>0},empty:function(){return f&&(f=[]),this},disable:function(){return e=g=[],f=c=\"\",this},disabled:function(){return!f},lock:function(){return e=!0,c||j.disable(),this},locked:function(){return!!e},fireWith:function(a,c){return e||(c=c||[],c=[a,c.slice?c.slice():c],g.push(c),b||i()),this},fire:function(){return j.fireWith(this,arguments),this},fired:function(){return!!d}};return j},n.extend({Deferred:function(a){var b=[[\"resolve\",\"done\",n.Callbacks(\"once memory\"),\"resolved\"],[\"reject\",\"fail\",n.Callbacks(\"once 
memory\"),\"rejected\"],[\"notify\",\"progress\",n.Callbacks(\"memory\")]],c=\"pending\",d={state:function(){return c},always:function(){return e.done(arguments).fail(arguments),this},then:function(){var a=arguments;return n.Deferred(function(c){n.each(b,function(b,f){var g=n.isFunction(a[b])&&a[b];e[f[1]](function(){var a=g&&g.apply(this,arguments);a&&n.isFunction(a.promise)?a.promise().progress(c.notify).done(c.resolve).fail(c.reject):c[f[0]+\"With\"](this===d?c.promise():this,g?[a]:arguments)})}),a=null}).promise()},promise:function(a){return null!=a?n.extend(a,d):d}},e={};return d.pipe=d.then,n.each(b,function(a,f){var g=f[2],h=f[3];d[f[1]]=g.add,h&&g.add(function(){c=h},b[1^a][2].disable,b[2][2].lock),e[f[0]]=function(){return e[f[0]+\"With\"](this===e?d:this,arguments),this},e[f[0]+\"With\"]=g.fireWith}),d.promise(e),a&&a.call(e,e),e},when:function(a){var b=0,c=e.call(arguments),d=c.length,f=1!==d||a&&n.isFunction(a.promise)?d:0,g=1===f?a:n.Deferred(),h=function(a,b,c){return function(d){b[a]=this,c[a]=arguments.length>1?e.call(arguments):d,c===i?g.notifyWith(b,c):--f||g.resolveWith(b,c)}},i,j,k;if(d>1)for(i=new Array(d),j=new Array(d),k=new Array(d);d>b;b++)c[b]&&n.isFunction(c[b].promise)?c[b].promise().progress(h(b,j,i)).done(h(b,k,c)).fail(g.reject):--f;return f||g.resolveWith(k,c),g.promise()}});var I;n.fn.ready=function(a){return n.ready.promise().done(a),this},n.extend({isReady:!1,readyWait:1,holdReady:function(a){a?n.readyWait++:n.ready(!0)},ready:function(a){(a===!0?--n.readyWait:n.isReady)||(n.isReady=!0,a!==!0&&--n.readyWait>0||(I.resolveWith(d,[n]),n.fn.triggerHandler&&(n(d).triggerHandler(\"ready\"),n(d).off(\"ready\"))))}});function J(){d.addEventListener?(d.removeEventListener(\"DOMContentLoaded\",K),a.removeEventListener(\"load\",K)):(d.detachEvent(\"onreadystatechange\",K),a.detachEvent(\"onload\",K))}function 
K(){(d.addEventListener||\"load\"===a.event.type||\"complete\"===d.readyState)&&(J(),n.ready())}n.ready.promise=function(b){if(!I)if(I=n.Deferred(),\"complete\"===d.readyState)a.setTimeout(n.ready);else if(d.addEventListener)d.addEventListener(\"DOMContentLoaded\",K),a.addEventListener(\"load\",K);else{d.attachEvent(\"onreadystatechange\",K),a.attachEvent(\"onload\",K);var c=!1;try{c=null==a.frameElement&&d.documentElement}catch(e){}c&&c.doScroll&&!function f(){if(!n.isReady){try{c.doScroll(\"left\")}catch(b){return a.setTimeout(f,50)}J(),n.ready()}}()}return I.promise(b)},n.ready.promise();var L;for(L in n(l))break;l.ownFirst=\"0\"===L,l.inlineBlockNeedsLayout=!1,n(function(){var a,b,c,e;c=d.getElementsByTagName(\"body\")[0],c&&c.style&&(b=d.createElement(\"div\"),e=d.createElement(\"div\"),e.style.cssText=\"position:absolute;border:0;width:0;height:0;top:0;left:-9999px\",c.appendChild(e).appendChild(b),\"undefined\"!=typeof b.style.zoom&&(b.style.cssText=\"display:inline;margin:0;border:0;padding:1px;width:1px;zoom:1\",l.inlineBlockNeedsLayout=a=3===b.offsetWidth,a&&(c.style.zoom=1)),c.removeChild(e))}),function(){var a=d.createElement(\"div\");l.deleteExpando=!0;try{delete a.test}catch(b){l.deleteExpando=!1}a=null}();var M=function(a){var b=n.noData[(a.nodeName+\" \").toLowerCase()],c=+a.nodeType||1;return 1!==c&&9!==c?!1:!b||b!==!0&&a.getAttribute(\"classid\")===b},N=/^(?:\\{[\\w\\W]*\\}|\\[[\\w\\W]*\\])$/,O=/([A-Z])/g;function P(a,b,c){if(void 0===c&&1===a.nodeType){var d=\"data-\"+b.replace(O,\"-$1\").toLowerCase();if(c=a.getAttribute(d),\"string\"==typeof c){try{c=\"true\"===c?!0:\"false\"===c?!1:\"null\"===c?null:+c+\"\"===c?+c:N.test(c)?n.parseJSON(c):c}catch(e){}n.data(a,b,c)}else c=void 0}return c}function Q(a){var b;for(b in a)if((\"data\"!==b||!n.isEmptyObject(a[b]))&&\"toJSON\"!==b)return!1;\nreturn!0}function R(a,b,d,e){if(M(a)){var f,g,h=n.expando,i=a.nodeType,j=i?n.cache:a,k=i?a[h]:a[h]&&h;if(k&&j[k]&&(e||j[k].data)||void 0!==d||\"string\"!=typeof 
b)return k||(k=i?a[h]=c.pop()||n.guid++:h),j[k]||(j[k]=i?{}:{toJSON:n.noop}),(\"object\"==typeof b||\"function\"==typeof b)&&(e?j[k]=n.extend(j[k],b):j[k].data=n.extend(j[k].data,b)),g=j[k],e||(g.data||(g.data={}),g=g.data),void 0!==d&&(g[n.camelCase(b)]=d),\"string\"==typeof b?(f=g[b],null==f&&(f=g[n.camelCase(b)])):f=g,f}}function S(a,b,c){if(M(a)){var d,e,f=a.nodeType,g=f?n.cache:a,h=f?a[n.expando]:n.expando;if(g[h]){if(b&&(d=c?g[h]:g[h].data)){n.isArray(b)?b=b.concat(n.map(b,n.camelCase)):b in d?b=[b]:(b=n.camelCase(b),b=b in d?[b]:b.split(\" \")),e=b.length;while(e--)delete d[b[e]];if(c?!Q(d):!n.isEmptyObject(d))return}(c||(delete g[h].data,Q(g[h])))&&(f?n.cleanData([a],!0):l.deleteExpando||g!=g.window?delete g[h]:g[h]=void 0)}}}n.extend({cache:{},noData:{\"applet \":!0,\"embed \":!0,\"object \":\"clsid:D27CDB6E-AE6D-11cf-96B8-444553540000\"},hasData:function(a){return a=a.nodeType?n.cache[a[n.expando]]:a[n.expando],!!a&&!Q(a)},data:function(a,b,c){return R(a,b,c)},removeData:function(a,b){return S(a,b)},_data:function(a,b,c){return R(a,b,c,!0)},_removeData:function(a,b){return S(a,b,!0)}}),n.fn.extend({data:function(a,b){var c,d,e,f=this[0],g=f&&f.attributes;if(void 0===a){if(this.length&&(e=n.data(f),1===f.nodeType&&!n._data(f,\"parsedAttrs\"))){c=g.length;while(c--)g[c]&&(d=g[c].name,0===d.indexOf(\"data-\")&&(d=n.camelCase(d.slice(5)),P(f,d,e[d])));n._data(f,\"parsedAttrs\",!0)}return e}return\"object\"==typeof a?this.each(function(){n.data(this,a)}):arguments.length>1?this.each(function(){n.data(this,a,b)}):f?P(f,a,n.data(f,a)):void 0},removeData:function(a){return this.each(function(){n.removeData(this,a)})}}),n.extend({queue:function(a,b,c){var d;return a?(b=(b||\"fx\")+\"queue\",d=n._data(a,b),c&&(!d||n.isArray(c)?d=n._data(a,b,n.makeArray(c)):d.push(c)),d||[]):void 0},dequeue:function(a,b){b=b||\"fx\";var 
c=n.queue(a,b),d=c.length,e=c.shift(),f=n._queueHooks(a,b),g=function(){n.dequeue(a,b)};\"inprogress\"===e&&(e=c.shift(),d--),e&&(\"fx\"===b&&c.unshift(\"inprogress\"),delete f.stop,e.call(a,g,f)),!d&&f&&f.empty.fire()},_queueHooks:function(a,b){var c=b+\"queueHooks\";return n._data(a,c)||n._data(a,c,{empty:n.Callbacks(\"once memory\").add(function(){n._removeData(a,b+\"queue\"),n._removeData(a,c)})})}}),n.fn.extend({queue:function(a,b){var c=2;return\"string\"!=typeof a&&(b=a,a=\"fx\",c--),arguments.length<c?n.queue(this[0],a):void 0===b?this:this.each(function(){var c=n.queue(this,a,b);n._queueHooks(this,a),\"fx\"===a&&\"inprogress\"!==c[0]&&n.dequeue(this,a)})},dequeue:function(a){return this.each(function(){n.dequeue(this,a)})},clearQueue:function(a){return this.queue(a||\"fx\",[])},promise:function(a,b){var c,d=1,e=n.Deferred(),f=this,g=this.length,h=function(){--d||e.resolveWith(f,[f])};\"string\"!=typeof a&&(b=a,a=void 0),a=a||\"fx\";while(g--)c=n._data(f[g],a+\"queueHooks\"),c&&c.empty&&(d++,c.empty.add(h));return h(),e.promise(b)}}),function(){var a;l.shrinkWrapBlocks=function(){if(null!=a)return a;a=!1;var b,c,e;return c=d.getElementsByTagName(\"body\")[0],c&&c.style?(b=d.createElement(\"div\"),e=d.createElement(\"div\"),e.style.cssText=\"position:absolute;border:0;width:0;height:0;top:0;left:-9999px\",c.appendChild(e).appendChild(b),\"undefined\"!=typeof b.style.zoom&&(b.style.cssText=\"-webkit-box-sizing:content-box;-moz-box-sizing:content-box;box-sizing:content-box;display:block;margin:0;border:0;padding:1px;width:1px;zoom:1\",b.appendChild(d.createElement(\"div\")).style.width=\"5px\",a=3!==b.offsetWidth),c.removeChild(e),a):void 0}}();var T=/[+-]?(?:\\d*\\.|)\\d+(?:[eE][+-]?\\d+|)/.source,U=new RegExp(\"^(?:([+-])=|)(\"+T+\")([a-z%]*)$\",\"i\"),V=[\"Top\",\"Right\",\"Bottom\",\"Left\"],W=function(a,b){return a=b||a,\"none\"===n.css(a,\"display\")||!n.contains(a.ownerDocument,a)};function X(a,b,c,d){var e,f=1,g=20,h=d?function(){return 
d.cur()}:function(){return n.css(a,b,\"\")},i=h(),j=c&&c[3]||(n.cssNumber[b]?\"\":\"px\"),k=(n.cssNumber[b]||\"px\"!==j&&+i)&&U.exec(n.css(a,b));if(k&&k[3]!==j){j=j||k[3],c=c||[],k=+i||1;do f=f||\".5\",k/=f,n.style(a,b,k+j);while(f!==(f=h()/i)&&1!==f&&--g)}return c&&(k=+k||+i||0,e=c[1]?k+(c[1]+1)*c[2]:+c[2],d&&(d.unit=j,d.start=k,d.end=e)),e}var Y=function(a,b,c,d,e,f,g){var h=0,i=a.length,j=null==c;if(\"object\"===n.type(c)){e=!0;for(h in c)Y(a,b,h,c[h],!0,f,g)}else if(void 0!==d&&(e=!0,n.isFunction(d)||(g=!0),j&&(g?(b.call(a,d),b=null):(j=b,b=function(a,b,c){return j.call(n(a),c)})),b))for(;i>h;h++)b(a[h],c,g?d:d.call(a[h],h,b(a[h],c)));return e?a:j?b.call(a):i?b(a[0],c):f},Z=/^(?:checkbox|radio)$/i,$=/<([\\w:-]+)/,_=/^$|\\/(?:java|ecma)script/i,aa=/^\\s+/,ba=\"abbr|article|aside|audio|bdi|canvas|data|datalist|details|dialog|figcaption|figure|footer|header|hgroup|main|mark|meter|nav|output|picture|progress|section|summary|template|time|video\";function ca(a){var b=ba.split(\"|\"),c=a.createDocumentFragment();if(c.createElement)while(b.length)c.createElement(b.pop());return c}!function(){var a=d.createElement(\"div\"),b=d.createDocumentFragment(),c=d.createElement(\"input\");a.innerHTML=\"  <link/><table></table><a href='/a'>a</a><input 
type='checkbox'/>\",l.leadingWhitespace=3===a.firstChild.nodeType,l.tbody=!a.getElementsByTagName(\"tbody\").length,l.htmlSerialize=!!a.getElementsByTagName(\"link\").length,l.html5Clone=\"<:nav></:nav>\"!==d.createElement(\"nav\").cloneNode(!0).outerHTML,c.type=\"checkbox\",c.checked=!0,b.appendChild(c),l.appendChecked=c.checked,a.innerHTML=\"<textarea>x</textarea>\",l.noCloneChecked=!!a.cloneNode(!0).lastChild.defaultValue,b.appendChild(a),c=d.createElement(\"input\"),c.setAttribute(\"type\",\"radio\"),c.setAttribute(\"checked\",\"checked\"),c.setAttribute(\"name\",\"t\"),a.appendChild(c),l.checkClone=a.cloneNode(!0).cloneNode(!0).lastChild.checked,l.noCloneEvent=!!a.addEventListener,a[n.expando]=1,l.attributes=!a.getAttribute(n.expando)}();var da={option:[1,\"<select multiple='multiple'>\",\"</select>\"],legend:[1,\"<fieldset>\",\"</fieldset>\"],area:[1,\"<map>\",\"</map>\"],param:[1,\"<object>\",\"</object>\"],thead:[1,\"<table>\",\"</table>\"],tr:[2,\"<table><tbody>\",\"</tbody></table>\"],col:[2,\"<table><tbody></tbody><colgroup>\",\"</colgroup></table>\"],td:[3,\"<table><tbody><tr>\",\"</tr></tbody></table>\"],_default:l.htmlSerialize?[0,\"\",\"\"]:[1,\"X<div>\",\"</div>\"]};da.optgroup=da.option,da.tbody=da.tfoot=da.colgroup=da.caption=da.thead,da.th=da.td;function ea(a,b){var c,d,e=0,f=\"undefined\"!=typeof a.getElementsByTagName?a.getElementsByTagName(b||\"*\"):\"undefined\"!=typeof a.querySelectorAll?a.querySelectorAll(b||\"*\"):void 0;if(!f)for(f=[],c=a.childNodes||a;null!=(d=c[e]);e++)!b||n.nodeName(d,b)?f.push(d):n.merge(f,ea(d,b));return void 0===b||b&&n.nodeName(a,b)?n.merge([a],f):f}function fa(a,b){for(var c,d=0;null!=(c=a[d]);d++)n._data(c,\"globalEval\",!b||n._data(b[d],\"globalEval\"))}var ga=/<|&#?\\w+;/,ha=/<tbody/i;function ia(a){Z.test(a.type)&&(a.defaultChecked=a.checked)}function ja(a,b,c,d,e){for(var f,g,h,i,j,k,m,o=a.length,p=ca(b),q=[],r=0;o>r;r++)if(g=a[r],g||0===g)if(\"object\"===n.type(g))n.merge(q,g.nodeType?[g]:g);else 
if(ga.test(g)){i=i||p.appendChild(b.createElement(\"div\")),j=($.exec(g)||[\"\",\"\"])[1].toLowerCase(),m=da[j]||da._default,i.innerHTML=m[1]+n.htmlPrefilter(g)+m[2],f=m[0];while(f--)i=i.lastChild;if(!l.leadingWhitespace&&aa.test(g)&&q.push(b.createTextNode(aa.exec(g)[0])),!l.tbody){g=\"table\"!==j||ha.test(g)?\"<table>\"!==m[1]||ha.test(g)?0:i:i.firstChild,f=g&&g.childNodes.length;while(f--)n.nodeName(k=g.childNodes[f],\"tbody\")&&!k.childNodes.length&&g.removeChild(k)}n.merge(q,i.childNodes),i.textContent=\"\";while(i.firstChild)i.removeChild(i.firstChild);i=p.lastChild}else q.push(b.createTextNode(g));i&&p.removeChild(i),l.appendChecked||n.grep(ea(q,\"input\"),ia),r=0;while(g=q[r++])if(d&&n.inArray(g,d)>-1)e&&e.push(g);else if(h=n.contains(g.ownerDocument,g),i=ea(p.appendChild(g),\"script\"),h&&fa(i),c){f=0;while(g=i[f++])_.test(g.type||\"\")&&c.push(g)}return i=null,p}!function(){var b,c,e=d.createElement(\"div\");for(b in{submit:!0,change:!0,focusin:!0})c=\"on\"+b,(l[b]=c in a)||(e.setAttribute(c,\"t\"),l[b]=e.attributes[c].expando===!1);e=null}();var ka=/^(?:input|select|textarea)$/i,la=/^key/,ma=/^(?:mouse|pointer|contextmenu|drag|drop)|click/,na=/^(?:focusinfocus|focusoutblur)$/,oa=/^([^.]*)(?:\\.(.+)|)/;function pa(){return!0}function qa(){return!1}function ra(){try{return d.activeElement}catch(a){}}function sa(a,b,c,d,e,f){var g,h;if(\"object\"==typeof b){\"string\"!=typeof c&&(d=d||c,c=void 0);for(h in b)sa(a,h,c,d,b[h],f);return a}if(null==d&&null==e?(e=c,d=c=void 0):null==e&&(\"string\"==typeof c?(e=d,d=void 0):(e=d,d=c,c=void 0)),e===!1)e=qa;else if(!e)return a;return 1===f&&(g=e,e=function(a){return n().off(a),g.apply(this,arguments)},e.guid=g.guid||(g.guid=n.guid++)),a.each(function(){n.event.add(this,b,e,d,c)})}n.event={global:{},add:function(a,b,c,d,e){var 
f,g,h,i,j,k,l,m,o,p,q,r=n._data(a);if(r){c.handler&&(i=c,c=i.handler,e=i.selector),c.guid||(c.guid=n.guid++),(g=r.events)||(g=r.events={}),(k=r.handle)||(k=r.handle=function(a){return\"undefined\"==typeof n||a&&n.event.triggered===a.type?void 0:n.event.dispatch.apply(k.elem,arguments)},k.elem=a),b=(b||\"\").match(G)||[\"\"],h=b.length;while(h--)f=oa.exec(b[h])||[],o=q=f[1],p=(f[2]||\"\").split(\".\").sort(),o&&(j=n.event.special[o]||{},o=(e?j.delegateType:j.bindType)||o,j=n.event.special[o]||{},l=n.extend({type:o,origType:q,data:d,handler:c,guid:c.guid,selector:e,needsContext:e&&n.expr.match.needsContext.test(e),namespace:p.join(\".\")},i),(m=g[o])||(m=g[o]=[],m.delegateCount=0,j.setup&&j.setup.call(a,d,p,k)!==!1||(a.addEventListener?a.addEventListener(o,k,!1):a.attachEvent&&a.attachEvent(\"on\"+o,k))),j.add&&(j.add.call(a,l),l.handler.guid||(l.handler.guid=c.guid)),e?m.splice(m.delegateCount++,0,l):m.push(l),n.event.global[o]=!0);a=null}},remove:function(a,b,c,d,e){var f,g,h,i,j,k,l,m,o,p,q,r=n.hasData(a)&&n._data(a);if(r&&(k=r.events)){b=(b||\"\").match(G)||[\"\"],j=b.length;while(j--)if(h=oa.exec(b[j])||[],o=q=h[1],p=(h[2]||\"\").split(\".\").sort(),o){l=n.event.special[o]||{},o=(d?l.delegateType:l.bindType)||o,m=k[o]||[],h=h[2]&&new RegExp(\"(^|\\\\.)\"+p.join(\"\\\\.(?:.*\\\\.|)\")+\"(\\\\.|$)\"),i=f=m.length;while(f--)g=m[f],!e&&q!==g.origType||c&&c.guid!==g.guid||h&&!h.test(g.namespace)||d&&d!==g.selector&&(\"**\"!==d||!g.selector)||(m.splice(f,1),g.selector&&m.delegateCount--,l.remove&&l.remove.call(a,g));i&&!m.length&&(l.teardown&&l.teardown.call(a,p,r.handle)!==!1||n.removeEvent(a,o,r.handle),delete k[o])}else for(o in k)n.event.remove(a,o+b[j],c,d,!0);n.isEmptyObject(k)&&(delete r.handle,n._removeData(a,\"events\"))}},trigger:function(b,c,e,f){var 
g,h,i,j,l,m,o,p=[e||d],q=k.call(b,\"type\")?b.type:b,r=k.call(b,\"namespace\")?b.namespace.split(\".\"):[];if(i=m=e=e||d,3!==e.nodeType&&8!==e.nodeType&&!na.test(q+n.event.triggered)&&(q.indexOf(\".\")>-1&&(r=q.split(\".\"),q=r.shift(),r.sort()),h=q.indexOf(\":\")<0&&\"on\"+q,b=b[n.expando]?b:new n.Event(q,\"object\"==typeof b&&b),b.isTrigger=f?2:3,b.namespace=r.join(\".\"),b.rnamespace=b.namespace?new RegExp(\"(^|\\\\.)\"+r.join(\"\\\\.(?:.*\\\\.|)\")+\"(\\\\.|$)\"):null,b.result=void 0,b.target||(b.target=e),c=null==c?[b]:n.makeArray(c,[b]),l=n.event.special[q]||{},f||!l.trigger||l.trigger.apply(e,c)!==!1)){if(!f&&!l.noBubble&&!n.isWindow(e)){for(j=l.delegateType||q,na.test(j+q)||(i=i.parentNode);i;i=i.parentNode)p.push(i),m=i;m===(e.ownerDocument||d)&&p.push(m.defaultView||m.parentWindow||a)}o=0;while((i=p[o++])&&!b.isPropagationStopped())b.type=o>1?j:l.bindType||q,g=(n._data(i,\"events\")||{})[b.type]&&n._data(i,\"handle\"),g&&g.apply(i,c),g=h&&i[h],g&&g.apply&&M(i)&&(b.result=g.apply(i,c),b.result===!1&&b.preventDefault());if(b.type=q,!f&&!b.isDefaultPrevented()&&(!l._default||l._default.apply(p.pop(),c)===!1)&&M(e)&&h&&e[q]&&!n.isWindow(e)){m=e[h],m&&(e[h]=null),n.event.triggered=q;try{e[q]()}catch(s){}n.event.triggered=void 0,m&&(e[h]=m)}return b.result}},dispatch:function(a){a=n.event.fix(a);var b,c,d,f,g,h=[],i=e.call(arguments),j=(n._data(this,\"events\")||{})[a.type]||[],k=n.event.special[a.type]||{};if(i[0]=a,a.delegateTarget=this,!k.preDispatch||k.preDispatch.call(this,a)!==!1){h=n.event.handlers.call(this,a,j),b=0;while((f=h[b++])&&!a.isPropagationStopped()){a.currentTarget=f.elem,c=0;while((g=f.handlers[c++])&&!a.isImmediatePropagationStopped())(!a.rnamespace||a.rnamespace.test(g.namespace))&&(a.handleObj=g,a.data=g.data,d=((n.event.special[g.origType]||{}).handle||g.handler).apply(f.elem,i),void 0!==d&&(a.result=d)===!1&&(a.preventDefault(),a.stopPropagation()))}return k.postDispatch&&k.postDispatch.call(this,a),a.result}},handlers:function(a,b){var 
c,d,e,f,g=[],h=b.delegateCount,i=a.target;if(h&&i.nodeType&&(\"click\"!==a.type||isNaN(a.button)||a.button<1))for(;i!=this;i=i.parentNode||this)if(1===i.nodeType&&(i.disabled!==!0||\"click\"!==a.type)){for(d=[],c=0;h>c;c++)f=b[c],e=f.selector+\" \",void 0===d[e]&&(d[e]=f.needsContext?n(e,this).index(i)>-1:n.find(e,this,null,[i]).length),d[e]&&d.push(f);d.length&&g.push({elem:i,handlers:d})}return h<b.length&&g.push({elem:this,handlers:b.slice(h)}),g},fix:function(a){if(a[n.expando])return a;var b,c,e,f=a.type,g=a,h=this.fixHooks[f];h||(this.fixHooks[f]=h=ma.test(f)?this.mouseHooks:la.test(f)?this.keyHooks:{}),e=h.props?this.props.concat(h.props):this.props,a=new n.Event(g),b=e.length;while(b--)c=e[b],a[c]=g[c];return a.target||(a.target=g.srcElement||d),3===a.target.nodeType&&(a.target=a.target.parentNode),a.metaKey=!!a.metaKey,h.filter?h.filter(a,g):a},props:\"altKey bubbles cancelable ctrlKey currentTarget detail eventPhase metaKey relatedTarget shiftKey target timeStamp view which\".split(\" \"),fixHooks:{},keyHooks:{props:\"char charCode key keyCode\".split(\" \"),filter:function(a,b){return null==a.which&&(a.which=null!=b.charCode?b.charCode:b.keyCode),a}},mouseHooks:{props:\"button buttons clientX clientY fromElement offsetX offsetY pageX pageY screenX screenY toElement\".split(\" \"),filter:function(a,b){var c,e,f,g=b.button,h=b.fromElement;return null==a.pageX&&null!=b.clientX&&(e=a.target.ownerDocument||d,f=e.documentElement,c=e.body,a.pageX=b.clientX+(f&&f.scrollLeft||c&&c.scrollLeft||0)-(f&&f.clientLeft||c&&c.clientLeft||0),a.pageY=b.clientY+(f&&f.scrollTop||c&&c.scrollTop||0)-(f&&f.clientTop||c&&c.clientTop||0)),!a.relatedTarget&&h&&(a.relatedTarget=h===a.target?b.toElement:h),a.which||void 0===g||(a.which=1&g?1:2&g?3:4&g?2:0),a}},special:{load:{noBubble:!0},focus:{trigger:function(){if(this!==ra()&&this.focus)try{return this.focus(),!1}catch(a){}},delegateType:\"focusin\"},blur:{trigger:function(){return this===ra()&&this.blur?(this.blur(),!1):void 
0},delegateType:\"focusout\"},click:{trigger:function(){return n.nodeName(this,\"input\")&&\"checkbox\"===this.type&&this.click?(this.click(),!1):void 0},_default:function(a){return n.nodeName(a.target,\"a\")}},beforeunload:{postDispatch:function(a){void 0!==a.result&&a.originalEvent&&(a.originalEvent.returnValue=a.result)}}},simulate:function(a,b,c){var d=n.extend(new n.Event,c,{type:a,isSimulated:!0});n.event.trigger(d,null,b),d.isDefaultPrevented()&&c.preventDefault()}},n.removeEvent=d.removeEventListener?function(a,b,c){a.removeEventListener&&a.removeEventListener(b,c)}:function(a,b,c){var d=\"on\"+b;a.detachEvent&&(\"undefined\"==typeof a[d]&&(a[d]=null),a.detachEvent(d,c))},n.Event=function(a,b){return this instanceof n.Event?(a&&a.type?(this.originalEvent=a,this.type=a.type,this.isDefaultPrevented=a.defaultPrevented||void 0===a.defaultPrevented&&a.returnValue===!1?pa:qa):this.type=a,b&&n.extend(this,b),this.timeStamp=a&&a.timeStamp||n.now(),void(this[n.expando]=!0)):new n.Event(a,b)},n.Event.prototype={constructor:n.Event,isDefaultPrevented:qa,isPropagationStopped:qa,isImmediatePropagationStopped:qa,preventDefault:function(){var a=this.originalEvent;this.isDefaultPrevented=pa,a&&(a.preventDefault?a.preventDefault():a.returnValue=!1)},stopPropagation:function(){var a=this.originalEvent;this.isPropagationStopped=pa,a&&!this.isSimulated&&(a.stopPropagation&&a.stopPropagation(),a.cancelBubble=!0)},stopImmediatePropagation:function(){var a=this.originalEvent;this.isImmediatePropagationStopped=pa,a&&a.stopImmediatePropagation&&a.stopImmediatePropagation(),this.stopPropagation()}},n.each({mouseenter:\"mouseover\",mouseleave:\"mouseout\",pointerenter:\"pointerover\",pointerleave:\"pointerout\"},function(a,b){n.event.special[a]={delegateType:b,bindType:b,handle:function(a){var 
c,d=this,e=a.relatedTarget,f=a.handleObj;return(!e||e!==d&&!n.contains(d,e))&&(a.type=f.origType,c=f.handler.apply(this,arguments),a.type=b),c}}}),l.submit||(n.event.special.submit={setup:function(){return n.nodeName(this,\"form\")?!1:void n.event.add(this,\"click._submit keypress._submit\",function(a){var b=a.target,c=n.nodeName(b,\"input\")||n.nodeName(b,\"button\")?n.prop(b,\"form\"):void 0;c&&!n._data(c,\"submit\")&&(n.event.add(c,\"submit._submit\",function(a){a._submitBubble=!0}),n._data(c,\"submit\",!0))})},postDispatch:function(a){a._submitBubble&&(delete a._submitBubble,this.parentNode&&!a.isTrigger&&n.event.simulate(\"submit\",this.parentNode,a))},teardown:function(){return n.nodeName(this,\"form\")?!1:void n.event.remove(this,\"._submit\")}}),l.change||(n.event.special.change={setup:function(){return ka.test(this.nodeName)?((\"checkbox\"===this.type||\"radio\"===this.type)&&(n.event.add(this,\"propertychange._change\",function(a){\"checked\"===a.originalEvent.propertyName&&(this._justChanged=!0)}),n.event.add(this,\"click._change\",function(a){this._justChanged&&!a.isTrigger&&(this._justChanged=!1),n.event.simulate(\"change\",this,a)})),!1):void n.event.add(this,\"beforeactivate._change\",function(a){var b=a.target;ka.test(b.nodeName)&&!n._data(b,\"change\")&&(n.event.add(b,\"change._change\",function(a){!this.parentNode||a.isSimulated||a.isTrigger||n.event.simulate(\"change\",this.parentNode,a)}),n._data(b,\"change\",!0))})},handle:function(a){var b=a.target;return this!==b||a.isSimulated||a.isTrigger||\"radio\"!==b.type&&\"checkbox\"!==b.type?a.handleObj.handler.apply(this,arguments):void 0},teardown:function(){return n.event.remove(this,\"._change\"),!ka.test(this.nodeName)}}),l.focusin||n.each({focus:\"focusin\",blur:\"focusout\"},function(a,b){var c=function(a){n.event.simulate(b,a.target,n.event.fix(a))};n.event.special[b]={setup:function(){var 
d=this.ownerDocument||this,e=n._data(d,b);e||d.addEventListener(a,c,!0),n._data(d,b,(e||0)+1)},teardown:function(){var d=this.ownerDocument||this,e=n._data(d,b)-1;e?n._data(d,b,e):(d.removeEventListener(a,c,!0),n._removeData(d,b))}}}),n.fn.extend({on:function(a,b,c,d){return sa(this,a,b,c,d)},one:function(a,b,c,d){return sa(this,a,b,c,d,1)},off:function(a,b,c){var d,e;if(a&&a.preventDefault&&a.handleObj)return d=a.handleObj,n(a.delegateTarget).off(d.namespace?d.origType+\".\"+d.namespace:d.origType,d.selector,d.handler),this;if(\"object\"==typeof a){for(e in a)this.off(e,b,a[e]);return this}return(b===!1||\"function\"==typeof b)&&(c=b,b=void 0),c===!1&&(c=qa),this.each(function(){n.event.remove(this,a,c,b)})},trigger:function(a,b){return this.each(function(){n.event.trigger(a,b,this)})},triggerHandler:function(a,b){var c=this[0];return c?n.event.trigger(a,b,c,!0):void 0}});var ta=/ jQuery\\d+=\"(?:null|\\d+)\"/g,ua=new RegExp(\"<(?:\"+ba+\")[\\\\s/>]\",\"i\"),va=/<(?!area|br|col|embed|hr|img|input|link|meta|param)(([\\w:-]+)[^>]*)\\/>/gi,wa=/<script|<style|<link/i,xa=/checked\\s*(?:[^=]|=\\s*.checked.)/i,ya=/^true\\/(.*)/,za=/^\\s*<!(?:\\[CDATA\\[|--)|(?:\\]\\]|--)>\\s*$/g,Aa=ca(d),Ba=Aa.appendChild(d.createElement(\"div\"));function Ca(a,b){return n.nodeName(a,\"table\")&&n.nodeName(11!==b.nodeType?b:b.firstChild,\"tr\")?a.getElementsByTagName(\"tbody\")[0]||a.appendChild(a.ownerDocument.createElement(\"tbody\")):a}function Da(a){return a.type=(null!==n.find.attr(a,\"type\"))+\"/\"+a.type,a}function Ea(a){var b=ya.exec(a.type);return b?a.type=b[1]:a.removeAttribute(\"type\"),a}function Fa(a,b){if(1===b.nodeType&&n.hasData(a)){var c,d,e,f=n._data(a),g=n._data(b,f),h=f.events;if(h){delete g.handle,g.events={};for(c in h)for(d=0,e=h[c].length;e>d;d++)n.event.add(b,c,h[c][d])}g.data&&(g.data=n.extend({},g.data))}}function Ga(a,b){var c,d,e;if(1===b.nodeType){if(c=b.nodeName.toLowerCase(),!l.noCloneEvent&&b[n.expando]){e=n._data(b);for(d in 
e.events)n.removeEvent(b,d,e.handle);b.removeAttribute(n.expando)}\"script\"===c&&b.text!==a.text?(Da(b).text=a.text,Ea(b)):\"object\"===c?(b.parentNode&&(b.outerHTML=a.outerHTML),l.html5Clone&&a.innerHTML&&!n.trim(b.innerHTML)&&(b.innerHTML=a.innerHTML)):\"input\"===c&&Z.test(a.type)?(b.defaultChecked=b.checked=a.checked,b.value!==a.value&&(b.value=a.value)):\"option\"===c?b.defaultSelected=b.selected=a.defaultSelected:(\"input\"===c||\"textarea\"===c)&&(b.defaultValue=a.defaultValue)}}function Ha(a,b,c,d){b=f.apply([],b);var e,g,h,i,j,k,m=0,o=a.length,p=o-1,q=b[0],r=n.isFunction(q);if(r||o>1&&\"string\"==typeof q&&!l.checkClone&&xa.test(q))return a.each(function(e){var f=a.eq(e);r&&(b[0]=q.call(this,e,f.html())),Ha(f,b,c,d)});if(o&&(k=ja(b,a[0].ownerDocument,!1,a,d),e=k.firstChild,1===k.childNodes.length&&(k=e),e||d)){for(i=n.map(ea(k,\"script\"),Da),h=i.length;o>m;m++)g=k,m!==p&&(g=n.clone(g,!0,!0),h&&n.merge(i,ea(g,\"script\"))),c.call(a[m],g,m);if(h)for(j=i[i.length-1].ownerDocument,n.map(i,Ea),m=0;h>m;m++)g=i[m],_.test(g.type||\"\")&&!n._data(g,\"globalEval\")&&n.contains(j,g)&&(g.src?n._evalUrl&&n._evalUrl(g.src):n.globalEval((g.text||g.textContent||g.innerHTML||\"\").replace(za,\"\")));k=e=null}return a}function Ia(a,b,c){for(var d,e=b?n.filter(b,a):a,f=0;null!=(d=e[f]);f++)c||1!==d.nodeType||n.cleanData(ea(d)),d.parentNode&&(c&&n.contains(d.ownerDocument,d)&&fa(ea(d,\"script\")),d.parentNode.removeChild(d));return a}n.extend({htmlPrefilter:function(a){return a.replace(va,\"<$1></$2>\")},clone:function(a,b,c){var d,e,f,g,h,i=n.contains(a.ownerDocument,a);if(l.html5Clone||n.isXMLDoc(a)||!ua.test(\"<\"+a.nodeName+\">\")?f=a.cloneNode(!0):(Ba.innerHTML=a.outerHTML,Ba.removeChild(f=Ba.firstChild)),!(l.noCloneEvent&&l.noCloneChecked||1!==a.nodeType&&11!==a.nodeType||n.isXMLDoc(a)))for(d=ea(f),h=ea(a),g=0;null!=(e=h[g]);++g)d[g]&&Ga(e,d[g]);if(b)if(c)for(h=h||ea(a),d=d||ea(f),g=0;null!=(e=h[g]);g++)Fa(e,d[g]);else Fa(a,f);return 
d=ea(f,\"script\"),d.length>0&&fa(d,!i&&ea(a,\"script\")),d=h=e=null,f},cleanData:function(a,b){for(var d,e,f,g,h=0,i=n.expando,j=n.cache,k=l.attributes,m=n.event.special;null!=(d=a[h]);h++)if((b||M(d))&&(f=d[i],g=f&&j[f])){if(g.events)for(e in g.events)m[e]?n.event.remove(d,e):n.removeEvent(d,e,g.handle);j[f]&&(delete j[f],k||\"undefined\"==typeof d.removeAttribute?d[i]=void 0:d.removeAttribute(i),c.push(f))}}}),n.fn.extend({domManip:Ha,detach:function(a){return Ia(this,a,!0)},remove:function(a){return Ia(this,a)},text:function(a){return Y(this,function(a){return void 0===a?n.text(this):this.empty().append((this[0]&&this[0].ownerDocument||d).createTextNode(a))},null,a,arguments.length)},append:function(){return Ha(this,arguments,function(a){if(1===this.nodeType||11===this.nodeType||9===this.nodeType){var b=Ca(this,a);b.appendChild(a)}})},prepend:function(){return Ha(this,arguments,function(a){if(1===this.nodeType||11===this.nodeType||9===this.nodeType){var b=Ca(this,a);b.insertBefore(a,b.firstChild)}})},before:function(){return Ha(this,arguments,function(a){this.parentNode&&this.parentNode.insertBefore(a,this)})},after:function(){return Ha(this,arguments,function(a){this.parentNode&&this.parentNode.insertBefore(a,this.nextSibling)})},empty:function(){for(var a,b=0;null!=(a=this[b]);b++){1===a.nodeType&&n.cleanData(ea(a,!1));while(a.firstChild)a.removeChild(a.firstChild);a.options&&n.nodeName(a,\"select\")&&(a.options.length=0)}return this},clone:function(a,b){return a=null==a?!1:a,b=null==b?a:b,this.map(function(){return n.clone(this,a,b)})},html:function(a){return Y(this,function(a){var b=this[0]||{},c=0,d=this.length;if(void 0===a)return 1===b.nodeType?b.innerHTML.replace(ta,\"\"):void 0;if(\"string\"==typeof 
a&&!wa.test(a)&&(l.htmlSerialize||!ua.test(a))&&(l.leadingWhitespace||!aa.test(a))&&!da[($.exec(a)||[\"\",\"\"])[1].toLowerCase()]){a=n.htmlPrefilter(a);try{for(;d>c;c++)b=this[c]||{},1===b.nodeType&&(n.cleanData(ea(b,!1)),b.innerHTML=a);b=0}catch(e){}}b&&this.empty().append(a)},null,a,arguments.length)},replaceWith:function(){var a=[];return Ha(this,arguments,function(b){var c=this.parentNode;n.inArray(this,a)<0&&(n.cleanData(ea(this)),c&&c.replaceChild(b,this))},a)}}),n.each({appendTo:\"append\",prependTo:\"prepend\",insertBefore:\"before\",insertAfter:\"after\",replaceAll:\"replaceWith\"},function(a,b){n.fn[a]=function(a){for(var c,d=0,e=[],f=n(a),h=f.length-1;h>=d;d++)c=d===h?this:this.clone(!0),n(f[d])[b](c),g.apply(e,c.get());return this.pushStack(e)}});var Ja,Ka={HTML:\"block\",BODY:\"block\"};function La(a,b){var c=n(b.createElement(a)).appendTo(b.body),d=n.css(c[0],\"display\");return c.detach(),d}function Ma(a){var b=d,c=Ka[a];return c||(c=La(a,b),\"none\"!==c&&c||(Ja=(Ja||n(\"<iframe frameborder='0' width='0' height='0'/>\")).appendTo(b.documentElement),b=(Ja[0].contentWindow||Ja[0].contentDocument).document,b.write(),b.close(),c=La(a,b),Ja.detach()),Ka[a]=c),c}var Na=/^margin/,Oa=new RegExp(\"^(\"+T+\")(?!px)[a-z%]+$\",\"i\"),Pa=function(a,b,c,d){var e,f,g={};for(f in b)g[f]=a.style[f],a.style[f]=b[f];e=c.apply(a,d||[]);for(f in b)a.style[f]=g[f];return e},Qa=d.documentElement;!function(){var 
b,c,e,f,g,h,i=d.createElement(\"div\"),j=d.createElement(\"div\");if(j.style){j.style.cssText=\"float:left;opacity:.5\",l.opacity=\"0.5\"===j.style.opacity,l.cssFloat=!!j.style.cssFloat,j.style.backgroundClip=\"content-box\",j.cloneNode(!0).style.backgroundClip=\"\",l.clearCloneStyle=\"content-box\"===j.style.backgroundClip,i=d.createElement(\"div\"),i.style.cssText=\"border:0;width:8px;height:0;top:0;left:-9999px;padding:0;margin-top:1px;position:absolute\",j.innerHTML=\"\",i.appendChild(j),l.boxSizing=\"\"===j.style.boxSizing||\"\"===j.style.MozBoxSizing||\"\"===j.style.WebkitBoxSizing,n.extend(l,{reliableHiddenOffsets:function(){return null==b&&k(),f},boxSizingReliable:function(){return null==b&&k(),e},pixelMarginRight:function(){return null==b&&k(),c},pixelPosition:function(){return null==b&&k(),b},reliableMarginRight:function(){return null==b&&k(),g},reliableMarginLeft:function(){return null==b&&k(),h}});function k(){var k,l,m=d.documentElement;m.appendChild(i),j.style.cssText=\"-webkit-box-sizing:border-box;box-sizing:border-box;position:relative;display:block;margin:auto;border:1px;padding:1px;top:1%;width:50%\",b=e=h=!1,c=g=!0,a.getComputedStyle&&(l=a.getComputedStyle(j),b=\"1%\"!==(l||{}).top,h=\"2px\"===(l||{}).marginLeft,e=\"4px\"===(l||{width:\"4px\"}).width,j.style.marginRight=\"50%\",c=\"4px\"===(l||{marginRight:\"4px\"}).marginRight,k=j.appendChild(d.createElement(\"div\")),k.style.cssText=j.style.cssText=\"-webkit-box-sizing:content-box;-moz-box-sizing:content-box;box-sizing:content-box;display:block;margin:0;border:0;padding:0\",k.style.marginRight=k.style.width=\"0\",j.style.width=\"1px\",g=!parseFloat((a.getComputedStyle(k)||{}).marginRight),j.removeChild(k)),j.style.display=\"none\",f=0===j.getClientRects().length,f&&(j.style.display=\"\",j.innerHTML=\"<table><tr><td></td><td>t</td></tr></table>\",k=j.getElementsByTagName(\"td\"),k[0].style.cssText=\"margin:0;border:0;padding:0;display:none\",f=0===k[0].offsetHeight,f&&(k[0].style.display=\"\",k[
1].style.display=\"none\",f=0===k[0].offsetHeight)),m.removeChild(i)}}}();var Ra,Sa,Ta=/^(top|right|bottom|left)$/;a.getComputedStyle?(Ra=function(b){var c=b.ownerDocument.defaultView;return c.opener||(c=a),c.getComputedStyle(b)},Sa=function(a,b,c){var d,e,f,g,h=a.style;return c=c||Ra(a),g=c?c.getPropertyValue(b)||c[b]:void 0,c&&(\"\"!==g||n.contains(a.ownerDocument,a)||(g=n.style(a,b)),!l.pixelMarginRight()&&Oa.test(g)&&Na.test(b)&&(d=h.width,e=h.minWidth,f=h.maxWidth,h.minWidth=h.maxWidth=h.width=g,g=c.width,h.width=d,h.minWidth=e,h.maxWidth=f)),void 0===g?g:g+\"\"}):Qa.currentStyle&&(Ra=function(a){return a.currentStyle},Sa=function(a,b,c){var d,e,f,g,h=a.style;return c=c||Ra(a),g=c?c[b]:void 0,null==g&&h&&h[b]&&(g=h[b]),Oa.test(g)&&!Ta.test(b)&&(d=h.left,e=a.runtimeStyle,f=e&&e.left,f&&(e.left=a.currentStyle.left),h.left=\"fontSize\"===b?\"1em\":g,g=h.pixelLeft+\"px\",h.left=d,f&&(e.left=f)),void 0===g?g:g+\"\"||\"auto\"});function Ua(a,b){return{get:function(){return a()?void delete this.get:(this.get=b).apply(this,arguments)}}}var Va=/alpha\\([^)]*\\)/i,Wa=/opacity\\s*=\\s*([^)]*)/i,Xa=/^(none|table(?!-c[ea]).+)/,Ya=new RegExp(\"^(\"+T+\")(.*)$\",\"i\"),Za={position:\"absolute\",visibility:\"hidden\",display:\"block\"},$a={letterSpacing:\"0\",fontWeight:\"400\"},_a=[\"Webkit\",\"O\",\"Moz\",\"ms\"],ab=d.createElement(\"div\").style;function bb(a){if(a in ab)return a;var b=a.charAt(0).toUpperCase()+a.slice(1),c=_a.length;while(c--)if(a=_a[c]+b,a in ab)return a}function cb(a,b){for(var c,d,e,f=[],g=0,h=a.length;h>g;g++)d=a[g],d.style&&(f[g]=n._data(d,\"olddisplay\"),c=d.style.display,b?(f[g]||\"none\"!==c||(d.style.display=\"\"),\"\"===d.style.display&&W(d)&&(f[g]=n._data(d,\"olddisplay\",Ma(d.nodeName)))):(e=W(d),(c&&\"none\"!==c||!e)&&n._data(d,\"olddisplay\",e?c:n.css(d,\"display\"))));for(g=0;h>g;g++)d=a[g],d.style&&(b&&\"none\"!==d.style.display&&\"\"!==d.style.display||(d.style.display=b?f[g]||\"\":\"none\"));return a}function db(a,b,c){var 
d=Ya.exec(b);return d?Math.max(0,d[1]-(c||0))+(d[2]||\"px\"):b}function eb(a,b,c,d,e){for(var f=c===(d?\"border\":\"content\")?4:\"width\"===b?1:0,g=0;4>f;f+=2)\"margin\"===c&&(g+=n.css(a,c+V[f],!0,e)),d?(\"content\"===c&&(g-=n.css(a,\"padding\"+V[f],!0,e)),\"margin\"!==c&&(g-=n.css(a,\"border\"+V[f]+\"Width\",!0,e))):(g+=n.css(a,\"padding\"+V[f],!0,e),\"padding\"!==c&&(g+=n.css(a,\"border\"+V[f]+\"Width\",!0,e)));return g}function fb(b,c,e){var f=!0,g=\"width\"===c?b.offsetWidth:b.offsetHeight,h=Ra(b),i=l.boxSizing&&\"border-box\"===n.css(b,\"boxSizing\",!1,h);if(d.msFullscreenElement&&a.top!==a&&b.getClientRects().length&&(g=Math.round(100*b.getBoundingClientRect()[c])),0>=g||null==g){if(g=Sa(b,c,h),(0>g||null==g)&&(g=b.style[c]),Oa.test(g))return g;f=i&&(l.boxSizingReliable()||g===b.style[c]),g=parseFloat(g)||0}return g+eb(b,c,e||(i?\"border\":\"content\"),f,h)+\"px\"}n.extend({cssHooks:{opacity:{get:function(a,b){if(b){var c=Sa(a,\"opacity\");return\"\"===c?\"1\":c}}}},cssNumber:{animationIterationCount:!0,columnCount:!0,fillOpacity:!0,flexGrow:!0,flexShrink:!0,fontWeight:!0,lineHeight:!0,opacity:!0,order:!0,orphans:!0,widows:!0,zIndex:!0,zoom:!0},cssProps:{\"float\":l.cssFloat?\"cssFloat\":\"styleFloat\"},style:function(a,b,c,d){if(a&&3!==a.nodeType&&8!==a.nodeType&&a.style){var e,f,g,h=n.camelCase(b),i=a.style;if(b=n.cssProps[h]||(n.cssProps[h]=bb(h)||h),g=n.cssHooks[b]||n.cssHooks[h],void 0===c)return g&&\"get\"in g&&void 0!==(e=g.get(a,!1,d))?e:i[b];if(f=typeof c,\"string\"===f&&(e=U.exec(c))&&e[1]&&(c=X(a,b,e),f=\"number\"),null!=c&&c===c&&(\"number\"===f&&(c+=e&&e[3]||(n.cssNumber[h]?\"\":\"px\")),l.clearCloneStyle||\"\"!==c||0!==b.indexOf(\"background\")||(i[b]=\"inherit\"),!(g&&\"set\"in g&&void 0===(c=g.set(a,c,d)))))try{i[b]=c}catch(j){}}},css:function(a,b,c,d){var e,f,g,h=n.camelCase(b);return b=n.cssProps[h]||(n.cssProps[h]=bb(h)||h),g=n.cssHooks[b]||n.cssHooks[h],g&&\"get\"in g&&(f=g.get(a,!0,c)),void 0===f&&(f=Sa(a,b,d)),\"normal\"===f&&b in 
$a&&(f=$a[b]),\"\"===c||c?(e=parseFloat(f),c===!0||isFinite(e)?e||0:f):f}}),n.each([\"height\",\"width\"],function(a,b){n.cssHooks[b]={get:function(a,c,d){return c?Xa.test(n.css(a,\"display\"))&&0===a.offsetWidth?Pa(a,Za,function(){return fb(a,b,d)}):fb(a,b,d):void 0},set:function(a,c,d){var e=d&&Ra(a);return db(a,c,d?eb(a,b,d,l.boxSizing&&\"border-box\"===n.css(a,\"boxSizing\",!1,e),e):0)}}}),l.opacity||(n.cssHooks.opacity={get:function(a,b){return Wa.test((b&&a.currentStyle?a.currentStyle.filter:a.style.filter)||\"\")?.01*parseFloat(RegExp.$1)+\"\":b?\"1\":\"\"},set:function(a,b){var c=a.style,d=a.currentStyle,e=n.isNumeric(b)?\"alpha(opacity=\"+100*b+\")\":\"\",f=d&&d.filter||c.filter||\"\";c.zoom=1,(b>=1||\"\"===b)&&\"\"===n.trim(f.replace(Va,\"\"))&&c.removeAttribute&&(c.removeAttribute(\"filter\"),\"\"===b||d&&!d.filter)||(c.filter=Va.test(f)?f.replace(Va,e):f+\" \"+e)}}),n.cssHooks.marginRight=Ua(l.reliableMarginRight,function(a,b){return b?Pa(a,{display:\"inline-block\"},Sa,[a,\"marginRight\"]):void 0}),n.cssHooks.marginLeft=Ua(l.reliableMarginLeft,function(a,b){return b?(parseFloat(Sa(a,\"marginLeft\"))||(n.contains(a.ownerDocument,a)?a.getBoundingClientRect().left-Pa(a,{\nmarginLeft:0},function(){return a.getBoundingClientRect().left}):0))+\"px\":void 0}),n.each({margin:\"\",padding:\"\",border:\"Width\"},function(a,b){n.cssHooks[a+b]={expand:function(c){for(var d=0,e={},f=\"string\"==typeof c?c.split(\" \"):[c];4>d;d++)e[a+V[d]+b]=f[d]||f[d-2]||f[0];return e}},Na.test(a)||(n.cssHooks[a+b].set=db)}),n.fn.extend({css:function(a,b){return Y(this,function(a,b,c){var d,e,f={},g=0;if(n.isArray(b)){for(d=Ra(a),e=b.length;e>g;g++)f[b[g]]=n.css(a,b[g],!1,d);return f}return void 0!==c?n.style(a,b,c):n.css(a,b)},a,b,arguments.length>1)},show:function(){return cb(this,!0)},hide:function(){return cb(this)},toggle:function(a){return\"boolean\"==typeof a?a?this.show():this.hide():this.each(function(){W(this)?n(this).show():n(this).hide()})}});function 
gb(a,b,c,d,e){return new gb.prototype.init(a,b,c,d,e)}n.Tween=gb,gb.prototype={constructor:gb,init:function(a,b,c,d,e,f){this.elem=a,this.prop=c,this.easing=e||n.easing._default,this.options=b,this.start=this.now=this.cur(),this.end=d,this.unit=f||(n.cssNumber[c]?\"\":\"px\")},cur:function(){var a=gb.propHooks[this.prop];return a&&a.get?a.get(this):gb.propHooks._default.get(this)},run:function(a){var b,c=gb.propHooks[this.prop];return this.options.duration?this.pos=b=n.easing[this.easing](a,this.options.duration*a,0,1,this.options.duration):this.pos=b=a,this.now=(this.end-this.start)*b+this.start,this.options.step&&this.options.step.call(this.elem,this.now,this),c&&c.set?c.set(this):gb.propHooks._default.set(this),this}},gb.prototype.init.prototype=gb.prototype,gb.propHooks={_default:{get:function(a){var b;return 1!==a.elem.nodeType||null!=a.elem[a.prop]&&null==a.elem.style[a.prop]?a.elem[a.prop]:(b=n.css(a.elem,a.prop,\"\"),b&&\"auto\"!==b?b:0)},set:function(a){n.fx.step[a.prop]?n.fx.step[a.prop](a):1!==a.elem.nodeType||null==a.elem.style[n.cssProps[a.prop]]&&!n.cssHooks[a.prop]?a.elem[a.prop]=a.now:n.style(a.elem,a.prop,a.now+a.unit)}}},gb.propHooks.scrollTop=gb.propHooks.scrollLeft={set:function(a){a.elem.nodeType&&a.elem.parentNode&&(a.elem[a.prop]=a.now)}},n.easing={linear:function(a){return a},swing:function(a){return.5-Math.cos(a*Math.PI)/2},_default:\"swing\"},n.fx=gb.prototype.init,n.fx.step={};var hb,ib,jb=/^(?:toggle|show|hide)$/,kb=/queueHooks$/;function lb(){return a.setTimeout(function(){hb=void 0}),hb=n.now()}function mb(a,b){var c,d={height:a},e=0;for(b=b?1:0;4>e;e+=2-b)c=V[e],d[\"margin\"+c]=d[\"padding\"+c]=a;return b&&(d.opacity=d.width=a),d}function nb(a,b,c){for(var d,e=(qb.tweeners[b]||[]).concat(qb.tweeners[\"*\"]),f=0,g=e.length;g>f;f++)if(d=e[f].call(c,b,a))return d}function ob(a,b,c){var 
d,e,f,g,h,i,j,k,m=this,o={},p=a.style,q=a.nodeType&&W(a),r=n._data(a,\"fxshow\");c.queue||(h=n._queueHooks(a,\"fx\"),null==h.unqueued&&(h.unqueued=0,i=h.empty.fire,h.empty.fire=function(){h.unqueued||i()}),h.unqueued++,m.always(function(){m.always(function(){h.unqueued--,n.queue(a,\"fx\").length||h.empty.fire()})})),1===a.nodeType&&(\"height\"in b||\"width\"in b)&&(c.overflow=[p.overflow,p.overflowX,p.overflowY],j=n.css(a,\"display\"),k=\"none\"===j?n._data(a,\"olddisplay\")||Ma(a.nodeName):j,\"inline\"===k&&\"none\"===n.css(a,\"float\")&&(l.inlineBlockNeedsLayout&&\"inline\"!==Ma(a.nodeName)?p.zoom=1:p.display=\"inline-block\")),c.overflow&&(p.overflow=\"hidden\",l.shrinkWrapBlocks()||m.always(function(){p.overflow=c.overflow[0],p.overflowX=c.overflow[1],p.overflowY=c.overflow[2]}));for(d in b)if(e=b[d],jb.exec(e)){if(delete b[d],f=f||\"toggle\"===e,e===(q?\"hide\":\"show\")){if(\"show\"!==e||!r||void 0===r[d])continue;q=!0}o[d]=r&&r[d]||n.style(a,d)}else j=void 0;if(n.isEmptyObject(o))\"inline\"===(\"none\"===j?Ma(a.nodeName):j)&&(p.display=j);else{r?\"hidden\"in r&&(q=r.hidden):r=n._data(a,\"fxshow\",{}),f&&(r.hidden=!q),q?n(a).show():m.done(function(){n(a).hide()}),m.done(function(){var b;n._removeData(a,\"fxshow\");for(b in o)n.style(a,b,o[b])});for(d in o)g=nb(q?r[d]:0,d,m),d in r||(r[d]=g.start,q&&(g.end=g.start,g.start=\"width\"===d||\"height\"===d?1:0))}}function pb(a,b){var c,d,e,f,g;for(c in a)if(d=n.camelCase(c),e=b[d],f=a[c],n.isArray(f)&&(e=f[1],f=a[c]=f[0]),c!==d&&(a[d]=f,delete a[c]),g=n.cssHooks[d],g&&\"expand\"in g){f=g.expand(f),delete a[d];for(c in f)c in a||(a[c]=f[c],b[c]=e)}else b[d]=e}function qb(a,b,c){var d,e,f=0,g=qb.prefilters.length,h=n.Deferred().always(function(){delete i.elem}),i=function(){if(e)return!1;for(var b=hb||lb(),c=Math.max(0,j.startTime+j.duration-b),d=c/j.duration||0,f=1-d,g=0,i=j.tweens.length;i>g;g++)j.tweens[g].run(f);return 
h.notifyWith(a,[j,f,c]),1>f&&i?c:(h.resolveWith(a,[j]),!1)},j=h.promise({elem:a,props:n.extend({},b),opts:n.extend(!0,{specialEasing:{},easing:n.easing._default},c),originalProperties:b,originalOptions:c,startTime:hb||lb(),duration:c.duration,tweens:[],createTween:function(b,c){var d=n.Tween(a,j.opts,b,c,j.opts.specialEasing[b]||j.opts.easing);return j.tweens.push(d),d},stop:function(b){var c=0,d=b?j.tweens.length:0;if(e)return this;for(e=!0;d>c;c++)j.tweens[c].run(1);return b?(h.notifyWith(a,[j,1,0]),h.resolveWith(a,[j,b])):h.rejectWith(a,[j,b]),this}}),k=j.props;for(pb(k,j.opts.specialEasing);g>f;f++)if(d=qb.prefilters[f].call(j,a,k,j.opts))return n.isFunction(d.stop)&&(n._queueHooks(j.elem,j.opts.queue).stop=n.proxy(d.stop,d)),d;return n.map(k,nb,j),n.isFunction(j.opts.start)&&j.opts.start.call(a,j),n.fx.timer(n.extend(i,{elem:a,anim:j,queue:j.opts.queue})),j.progress(j.opts.progress).done(j.opts.done,j.opts.complete).fail(j.opts.fail).always(j.opts.always)}n.Animation=n.extend(qb,{tweeners:{\"*\":[function(a,b){var c=this.createTween(a,b);return X(c.elem,a,U.exec(b),c),c}]},tweener:function(a,b){n.isFunction(a)?(b=a,a=[\"*\"]):a=a.match(G);for(var c,d=0,e=a.length;e>d;d++)c=a[d],qb.tweeners[c]=qb.tweeners[c]||[],qb.tweeners[c].unshift(b)},prefilters:[ob],prefilter:function(a,b){b?qb.prefilters.unshift(a):qb.prefilters.push(a)}}),n.speed=function(a,b,c){var d=a&&\"object\"==typeof a?n.extend({},a):{complete:c||!c&&b||n.isFunction(a)&&a,duration:a,easing:c&&b||b&&!n.isFunction(b)&&b};return d.duration=n.fx.off?0:\"number\"==typeof d.duration?d.duration:d.duration in n.fx.speeds?n.fx.speeds[d.duration]:n.fx.speeds._default,(null==d.queue||d.queue===!0)&&(d.queue=\"fx\"),d.old=d.complete,d.complete=function(){n.isFunction(d.old)&&d.old.call(this),d.queue&&n.dequeue(this,d.queue)},d},n.fn.extend({fadeTo:function(a,b,c,d){return this.filter(W).css(\"opacity\",0).show().end().animate({opacity:b},a,c,d)},animate:function(a,b,c,d){var 
e=n.isEmptyObject(a),f=n.speed(b,c,d),g=function(){var b=qb(this,n.extend({},a),f);(e||n._data(this,\"finish\"))&&b.stop(!0)};return g.finish=g,e||f.queue===!1?this.each(g):this.queue(f.queue,g)},stop:function(a,b,c){var d=function(a){var b=a.stop;delete a.stop,b(c)};return\"string\"!=typeof a&&(c=b,b=a,a=void 0),b&&a!==!1&&this.queue(a||\"fx\",[]),this.each(function(){var b=!0,e=null!=a&&a+\"queueHooks\",f=n.timers,g=n._data(this);if(e)g[e]&&g[e].stop&&d(g[e]);else for(e in g)g[e]&&g[e].stop&&kb.test(e)&&d(g[e]);for(e=f.length;e--;)f[e].elem!==this||null!=a&&f[e].queue!==a||(f[e].anim.stop(c),b=!1,f.splice(e,1));(b||!c)&&n.dequeue(this,a)})},finish:function(a){return a!==!1&&(a=a||\"fx\"),this.each(function(){var b,c=n._data(this),d=c[a+\"queue\"],e=c[a+\"queueHooks\"],f=n.timers,g=d?d.length:0;for(c.finish=!0,n.queue(this,a,[]),e&&e.stop&&e.stop.call(this,!0),b=f.length;b--;)f[b].elem===this&&f[b].queue===a&&(f[b].anim.stop(!0),f.splice(b,1));for(b=0;g>b;b++)d[b]&&d[b].finish&&d[b].finish.call(this);delete c.finish})}}),n.each([\"toggle\",\"show\",\"hide\"],function(a,b){var c=n.fn[b];n.fn[b]=function(a,d,e){return null==a||\"boolean\"==typeof a?c.apply(this,arguments):this.animate(mb(b,!0),a,d,e)}}),n.each({slideDown:mb(\"show\"),slideUp:mb(\"hide\"),slideToggle:mb(\"toggle\"),fadeIn:{opacity:\"show\"},fadeOut:{opacity:\"hide\"},fadeToggle:{opacity:\"toggle\"}},function(a,b){n.fn[a]=function(a,c,d){return this.animate(b,a,c,d)}}),n.timers=[],n.fx.tick=function(){var a,b=n.timers,c=0;for(hb=n.now();c<b.length;c++)a=b[c],a()||b[c]!==a||b.splice(c--,1);b.length||n.fx.stop(),hb=void 0},n.fx.timer=function(a){n.timers.push(a),a()?n.fx.start():n.timers.pop()},n.fx.interval=13,n.fx.start=function(){ib||(ib=a.setInterval(n.fx.tick,n.fx.interval))},n.fx.stop=function(){a.clearInterval(ib),ib=null},n.fx.speeds={slow:600,fast:200,_default:400},n.fn.delay=function(b,c){return b=n.fx?n.fx.speeds[b]||b:b,c=c||\"fx\",this.queue(c,function(c,d){var 
e=a.setTimeout(c,b);d.stop=function(){a.clearTimeout(e)}})},function(){var a,b=d.createElement(\"input\"),c=d.createElement(\"div\"),e=d.createElement(\"select\"),f=e.appendChild(d.createElement(\"option\"));c=d.createElement(\"div\"),c.setAttribute(\"className\",\"t\"),c.innerHTML=\"  <link/><table></table><a href='/a'>a</a><input type='checkbox'/>\",a=c.getElementsByTagName(\"a\")[0],b.setAttribute(\"type\",\"checkbox\"),c.appendChild(b),a=c.getElementsByTagName(\"a\")[0],a.style.cssText=\"top:1px\",l.getSetAttribute=\"t\"!==c.className,l.style=/top/.test(a.getAttribute(\"style\")),l.hrefNormalized=\"/a\"===a.getAttribute(\"href\"),l.checkOn=!!b.value,l.optSelected=f.selected,l.enctype=!!d.createElement(\"form\").enctype,e.disabled=!0,l.optDisabled=!f.disabled,b=d.createElement(\"input\"),b.setAttribute(\"value\",\"\"),l.input=\"\"===b.getAttribute(\"value\"),b.value=\"t\",b.setAttribute(\"type\",\"radio\"),l.radioValue=\"t\"===b.value}();var rb=/\\r/g;n.fn.extend({val:function(a){var b,c,d,e=this[0];{if(arguments.length)return d=n.isFunction(a),this.each(function(c){var e;1===this.nodeType&&(e=d?a.call(this,c,n(this).val()):a,null==e?e=\"\":\"number\"==typeof e?e+=\"\":n.isArray(e)&&(e=n.map(e,function(a){return null==a?\"\":a+\"\"})),b=n.valHooks[this.type]||n.valHooks[this.nodeName.toLowerCase()],b&&\"set\"in b&&void 0!==b.set(this,e,\"value\")||(this.value=e))});if(e)return b=n.valHooks[e.type]||n.valHooks[e.nodeName.toLowerCase()],b&&\"get\"in b&&void 0!==(c=b.get(e,\"value\"))?c:(c=e.value,\"string\"==typeof c?c.replace(rb,\"\"):null==c?\"\":c)}}}),n.extend({valHooks:{option:{get:function(a){var b=n.find.attr(a,\"value\");return null!=b?b:n.trim(n.text(a))}},select:{get:function(a){for(var 
b,c,d=a.options,e=a.selectedIndex,f=\"select-one\"===a.type||0>e,g=f?null:[],h=f?e+1:d.length,i=0>e?h:f?e:0;h>i;i++)if(c=d[i],(c.selected||i===e)&&(l.optDisabled?!c.disabled:null===c.getAttribute(\"disabled\"))&&(!c.parentNode.disabled||!n.nodeName(c.parentNode,\"optgroup\"))){if(b=n(c).val(),f)return b;g.push(b)}return g},set:function(a,b){var c,d,e=a.options,f=n.makeArray(b),g=e.length;while(g--)if(d=e[g],n.inArray(n.valHooks.option.get(d),f)>=0)try{d.selected=c=!0}catch(h){d.scrollHeight}else d.selected=!1;return c||(a.selectedIndex=-1),e}}}}),n.each([\"radio\",\"checkbox\"],function(){n.valHooks[this]={set:function(a,b){return n.isArray(b)?a.checked=n.inArray(n(a).val(),b)>-1:void 0}},l.checkOn||(n.valHooks[this].get=function(a){return null===a.getAttribute(\"value\")?\"on\":a.value})});var sb,tb,ub=n.expr.attrHandle,vb=/^(?:checked|selected)$/i,wb=l.getSetAttribute,xb=l.input;n.fn.extend({attr:function(a,b){return Y(this,n.attr,a,b,arguments.length>1)},removeAttr:function(a){return this.each(function(){n.removeAttr(this,a)})}}),n.extend({attr:function(a,b,c){var d,e,f=a.nodeType;if(3!==f&&8!==f&&2!==f)return\"undefined\"==typeof a.getAttribute?n.prop(a,b,c):(1===f&&n.isXMLDoc(a)||(b=b.toLowerCase(),e=n.attrHooks[b]||(n.expr.match.bool.test(b)?tb:sb)),void 0!==c?null===c?void n.removeAttr(a,b):e&&\"set\"in e&&void 0!==(d=e.set(a,c,b))?d:(a.setAttribute(b,c+\"\"),c):e&&\"get\"in e&&null!==(d=e.get(a,b))?d:(d=n.find.attr(a,b),null==d?void 0:d))},attrHooks:{type:{set:function(a,b){if(!l.radioValue&&\"radio\"===b&&n.nodeName(a,\"input\")){var c=a.value;return a.setAttribute(\"type\",b),c&&(a.value=c),b}}}},removeAttr:function(a,b){var c,d,e=0,f=b&&b.match(G);if(f&&1===a.nodeType)while(c=f[e++])d=n.propFix[c]||c,n.expr.match.bool.test(c)?xb&&wb||!vb.test(c)?a[d]=!1:a[n.camelCase(\"default-\"+c)]=a[d]=!1:n.attr(a,c,\"\"),a.removeAttribute(wb?c:d)}}),tb={set:function(a,b,c){return 
b===!1?n.removeAttr(a,c):xb&&wb||!vb.test(c)?a.setAttribute(!wb&&n.propFix[c]||c,c):a[n.camelCase(\"default-\"+c)]=a[c]=!0,c}},n.each(n.expr.match.bool.source.match(/\\w+/g),function(a,b){var c=ub[b]||n.find.attr;xb&&wb||!vb.test(b)?ub[b]=function(a,b,d){var e,f;return d||(f=ub[b],ub[b]=e,e=null!=c(a,b,d)?b.toLowerCase():null,ub[b]=f),e}:ub[b]=function(a,b,c){return c?void 0:a[n.camelCase(\"default-\"+b)]?b.toLowerCase():null}}),xb&&wb||(n.attrHooks.value={set:function(a,b,c){return n.nodeName(a,\"input\")?void(a.defaultValue=b):sb&&sb.set(a,b,c)}}),wb||(sb={set:function(a,b,c){var d=a.getAttributeNode(c);return d||a.setAttributeNode(d=a.ownerDocument.createAttribute(c)),d.value=b+=\"\",\"value\"===c||b===a.getAttribute(c)?b:void 0}},ub.id=ub.name=ub.coords=function(a,b,c){var d;return c?void 0:(d=a.getAttributeNode(b))&&\"\"!==d.value?d.value:null},n.valHooks.button={get:function(a,b){var c=a.getAttributeNode(b);return c&&c.specified?c.value:void 0},set:sb.set},n.attrHooks.contenteditable={set:function(a,b,c){sb.set(a,\"\"===b?!1:b,c)}},n.each([\"width\",\"height\"],function(a,b){n.attrHooks[b]={set:function(a,c){return\"\"===c?(a.setAttribute(b,\"auto\"),c):void 0}}})),l.style||(n.attrHooks.style={get:function(a){return a.style.cssText||void 0},set:function(a,b){return a.style.cssText=b+\"\"}});var yb=/^(?:input|select|textarea|button|object)$/i,zb=/^(?:a|area)$/i;n.fn.extend({prop:function(a,b){return Y(this,n.prop,a,b,arguments.length>1)},removeProp:function(a){return a=n.propFix[a]||a,this.each(function(){try{this[a]=void 0,delete this[a]}catch(b){}})}}),n.extend({prop:function(a,b,c){var d,e,f=a.nodeType;if(3!==f&&8!==f&&2!==f)return 1===f&&n.isXMLDoc(a)||(b=n.propFix[b]||b,e=n.propHooks[b]),void 0!==c?e&&\"set\"in e&&void 0!==(d=e.set(a,c,b))?d:a[b]=c:e&&\"get\"in e&&null!==(d=e.get(a,b))?d:a[b]},propHooks:{tabIndex:{get:function(a){var b=n.find.attr(a,\"tabindex\");return 
b?parseInt(b,10):yb.test(a.nodeName)||zb.test(a.nodeName)&&a.href?0:-1}}},propFix:{\"for\":\"htmlFor\",\"class\":\"className\"}}),l.hrefNormalized||n.each([\"href\",\"src\"],function(a,b){n.propHooks[b]={get:function(a){return a.getAttribute(b,4)}}}),l.optSelected||(n.propHooks.selected={get:function(a){var b=a.parentNode;return b&&(b.selectedIndex,b.parentNode&&b.parentNode.selectedIndex),null}}),n.each([\"tabIndex\",\"readOnly\",\"maxLength\",\"cellSpacing\",\"cellPadding\",\"rowSpan\",\"colSpan\",\"useMap\",\"frameBorder\",\"contentEditable\"],function(){n.propFix[this.toLowerCase()]=this}),l.enctype||(n.propFix.enctype=\"encoding\");var Ab=/[\\t\\r\\n\\f]/g;function Bb(a){return n.attr(a,\"class\")||\"\"}n.fn.extend({addClass:function(a){var b,c,d,e,f,g,h,i=0;if(n.isFunction(a))return this.each(function(b){n(this).addClass(a.call(this,b,Bb(this)))});if(\"string\"==typeof a&&a){b=a.match(G)||[];while(c=this[i++])if(e=Bb(c),d=1===c.nodeType&&(\" \"+e+\" \").replace(Ab,\" \")){g=0;while(f=b[g++])d.indexOf(\" \"+f+\" \")<0&&(d+=f+\" \");h=n.trim(d),e!==h&&n.attr(c,\"class\",h)}}return this},removeClass:function(a){var b,c,d,e,f,g,h,i=0;if(n.isFunction(a))return this.each(function(b){n(this).removeClass(a.call(this,b,Bb(this)))});if(!arguments.length)return this.attr(\"class\",\"\");if(\"string\"==typeof a&&a){b=a.match(G)||[];while(c=this[i++])if(e=Bb(c),d=1===c.nodeType&&(\" \"+e+\" \").replace(Ab,\" \")){g=0;while(f=b[g++])while(d.indexOf(\" \"+f+\" \")>-1)d=d.replace(\" \"+f+\" \",\" \");h=n.trim(d),e!==h&&n.attr(c,\"class\",h)}}return this},toggleClass:function(a,b){var c=typeof a;return\"boolean\"==typeof b&&\"string\"===c?b?this.addClass(a):this.removeClass(a):n.isFunction(a)?this.each(function(c){n(this).toggleClass(a.call(this,c,Bb(this),b),b)}):this.each(function(){var b,d,e,f;if(\"string\"===c){d=0,e=n(this),f=a.match(G)||[];while(b=f[d++])e.hasClass(b)?e.removeClass(b):e.addClass(b)}else(void 
0===a||\"boolean\"===c)&&(b=Bb(this),b&&n._data(this,\"__className__\",b),n.attr(this,\"class\",b||a===!1?\"\":n._data(this,\"__className__\")||\"\"))})},hasClass:function(a){var b,c,d=0;b=\" \"+a+\" \";while(c=this[d++])if(1===c.nodeType&&(\" \"+Bb(c)+\" \").replace(Ab,\" \").indexOf(b)>-1)return!0;return!1}}),n.each(\"blur focus focusin focusout load resize scroll unload click dblclick mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave change select submit keydown keypress keyup error contextmenu\".split(\" \"),function(a,b){n.fn[b]=function(a,c){return arguments.length>0?this.on(b,null,a,c):this.trigger(b)}}),n.fn.extend({hover:function(a,b){return this.mouseenter(a).mouseleave(b||a)}});var Cb=a.location,Db=n.now(),Eb=/\\?/,Fb=/(,)|(\\[|{)|(}|])|\"(?:[^\"\\\\\\r\\n]|\\\\[\"\\\\\\/bfnrt]|\\\\u[\\da-fA-F]{4})*\"\\s*:?|true|false|null|-?(?!0\\d)\\d+(?:\\.\\d+|)(?:[eE][+-]?\\d+|)/g;n.parseJSON=function(b){if(a.JSON&&a.JSON.parse)return a.JSON.parse(b+\"\");var c,d=null,e=n.trim(b+\"\");return e&&!n.trim(e.replace(Fb,function(a,b,e,f){return c&&b&&(d=0),0===d?a:(c=e||b,d+=!f-!e,\"\")}))?Function(\"return \"+e)():n.error(\"Invalid JSON: \"+b)},n.parseXML=function(b){var c,d;if(!b||\"string\"!=typeof b)return null;try{a.DOMParser?(d=new a.DOMParser,c=d.parseFromString(b,\"text/xml\")):(c=new a.ActiveXObject(\"Microsoft.XMLDOM\"),c.async=\"false\",c.loadXML(b))}catch(e){c=void 0}return c&&c.documentElement&&!c.getElementsByTagName(\"parsererror\").length||n.error(\"Invalid XML: \"+b),c};var Gb=/#.*$/,Hb=/([?&])_=[^&]*/,Ib=/^(.*?):[ \\t]*([^\\r\\n]*)\\r?$/gm,Jb=/^(?:about|app|app-storage|.+-extension|file|res|widget):$/,Kb=/^(?:GET|HEAD)$/,Lb=/^\\/\\//,Mb=/^([\\w.+-]+:)(?:\\/\\/(?:[^\\/?#]*@|)([^\\/?#:]*)(?::(\\d+)|)|)/,Nb={},Ob={},Pb=\"*/\".concat(\"*\"),Qb=Cb.href,Rb=Mb.exec(Qb.toLowerCase())||[];function Sb(a){return function(b,c){\"string\"!=typeof b&&(c=b,b=\"*\");var 
d,e=0,f=b.toLowerCase().match(G)||[];if(n.isFunction(c))while(d=f[e++])\"+\"===d.charAt(0)?(d=d.slice(1)||\"*\",(a[d]=a[d]||[]).unshift(c)):(a[d]=a[d]||[]).push(c)}}function Tb(a,b,c,d){var e={},f=a===Ob;function g(h){var i;return e[h]=!0,n.each(a[h]||[],function(a,h){var j=h(b,c,d);return\"string\"!=typeof j||f||e[j]?f?!(i=j):void 0:(b.dataTypes.unshift(j),g(j),!1)}),i}return g(b.dataTypes[0])||!e[\"*\"]&&g(\"*\")}function Ub(a,b){var c,d,e=n.ajaxSettings.flatOptions||{};for(d in b)void 0!==b[d]&&((e[d]?a:c||(c={}))[d]=b[d]);return c&&n.extend(!0,a,c),a}function Vb(a,b,c){var d,e,f,g,h=a.contents,i=a.dataTypes;while(\"*\"===i[0])i.shift(),void 0===e&&(e=a.mimeType||b.getResponseHeader(\"Content-Type\"));if(e)for(g in h)if(h[g]&&h[g].test(e)){i.unshift(g);break}if(i[0]in c)f=i[0];else{for(g in c){if(!i[0]||a.converters[g+\" \"+i[0]]){f=g;break}d||(d=g)}f=f||d}return f?(f!==i[0]&&i.unshift(f),c[f]):void 0}function Wb(a,b,c,d){var e,f,g,h,i,j={},k=a.dataTypes.slice();if(k[1])for(g in a.converters)j[g.toLowerCase()]=a.converters[g];f=k.shift();while(f)if(a.responseFields[f]&&(c[a.responseFields[f]]=b),!i&&d&&a.dataFilter&&(b=a.dataFilter(b,a.dataType)),i=f,f=k.shift())if(\"*\"===f)f=i;else if(\"*\"!==i&&i!==f){if(g=j[i+\" \"+f]||j[\"* \"+f],!g)for(e in j)if(h=e.split(\" \"),h[1]===f&&(g=j[i+\" \"+h[0]]||j[\"* \"+h[0]])){g===!0?g=j[e]:j[e]!==!0&&(f=h[0],k.unshift(h[1]));break}if(g!==!0)if(g&&a[\"throws\"])b=g(b);else try{b=g(b)}catch(l){return{state:\"parsererror\",error:g?l:\"No conversion from \"+i+\" to \"+f}}}return{state:\"success\",data:b}}n.extend({active:0,lastModified:{},etag:{},ajaxSettings:{url:Qb,type:\"GET\",isLocal:Jb.test(Rb[1]),global:!0,processData:!0,async:!0,contentType:\"application/x-www-form-urlencoded; charset=UTF-8\",accepts:{\"*\":Pb,text:\"text/plain\",html:\"text/html\",xml:\"application/xml, text/xml\",json:\"application/json, 
text/javascript\"},contents:{xml:/\\bxml\\b/,html:/\\bhtml/,json:/\\bjson\\b/},responseFields:{xml:\"responseXML\",text:\"responseText\",json:\"responseJSON\"},converters:{\"* text\":String,\"text html\":!0,\"text json\":n.parseJSON,\"text xml\":n.parseXML},flatOptions:{url:!0,context:!0}},ajaxSetup:function(a,b){return b?Ub(Ub(a,n.ajaxSettings),b):Ub(n.ajaxSettings,a)},ajaxPrefilter:Sb(Nb),ajaxTransport:Sb(Ob),ajax:function(b,c){\"object\"==typeof b&&(c=b,b=void 0),c=c||{};var d,e,f,g,h,i,j,k,l=n.ajaxSetup({},c),m=l.context||l,o=l.context&&(m.nodeType||m.jquery)?n(m):n.event,p=n.Deferred(),q=n.Callbacks(\"once memory\"),r=l.statusCode||{},s={},t={},u=0,v=\"canceled\",w={readyState:0,getResponseHeader:function(a){var b;if(2===u){if(!k){k={};while(b=Ib.exec(g))k[b[1].toLowerCase()]=b[2]}b=k[a.toLowerCase()]}return null==b?null:b},getAllResponseHeaders:function(){return 2===u?g:null},setRequestHeader:function(a,b){var c=a.toLowerCase();return u||(a=t[c]=t[c]||a,s[a]=b),this},overrideMimeType:function(a){return u||(l.mimeType=a),this},statusCode:function(a){var b;if(a)if(2>u)for(b in a)r[b]=[r[b],a[b]];else w.always(a[w.status]);return this},abort:function(a){var b=a||v;return j&&j.abort(b),y(0,b),this}};if(p.promise(w).complete=q.add,w.success=w.done,w.error=w.fail,l.url=((b||l.url||Qb)+\"\").replace(Gb,\"\").replace(Lb,Rb[1]+\"//\"),l.type=c.method||c.type||l.method||l.type,l.dataTypes=n.trim(l.dataType||\"*\").toLowerCase().match(G)||[\"\"],null==l.crossDomain&&(d=Mb.exec(l.url.toLowerCase()),l.crossDomain=!(!d||d[1]===Rb[1]&&d[2]===Rb[2]&&(d[3]||(\"http:\"===d[1]?\"80\":\"443\"))===(Rb[3]||(\"http:\"===Rb[1]?\"80\":\"443\")))),l.data&&l.processData&&\"string\"!=typeof l.data&&(l.data=n.param(l.data,l.traditional)),Tb(Nb,l,c,w),2===u)return w;i=n.event&&l.global,i&&0===n.active++&&n.event.trigger(\"ajaxStart\"),l.type=l.type.toUpperCase(),l.hasContent=!Kb.test(l.type),f=l.url,l.hasContent||(l.data&&(f=l.url+=(Eb.test(f)?\"&\":\"?\")+l.data,delete 
l.data),l.cache===!1&&(l.url=Hb.test(f)?f.replace(Hb,\"$1_=\"+Db++):f+(Eb.test(f)?\"&\":\"?\")+\"_=\"+Db++)),l.ifModified&&(n.lastModified[f]&&w.setRequestHeader(\"If-Modified-Since\",n.lastModified[f]),n.etag[f]&&w.setRequestHeader(\"If-None-Match\",n.etag[f])),(l.data&&l.hasContent&&l.contentType!==!1||c.contentType)&&w.setRequestHeader(\"Content-Type\",l.contentType),w.setRequestHeader(\"Accept\",l.dataTypes[0]&&l.accepts[l.dataTypes[0]]?l.accepts[l.dataTypes[0]]+(\"*\"!==l.dataTypes[0]?\", \"+Pb+\"; q=0.01\":\"\"):l.accepts[\"*\"]);for(e in l.headers)w.setRequestHeader(e,l.headers[e]);if(l.beforeSend&&(l.beforeSend.call(m,w,l)===!1||2===u))return w.abort();v=\"abort\";for(e in{success:1,error:1,complete:1})w[e](l[e]);if(j=Tb(Ob,l,c,w)){if(w.readyState=1,i&&o.trigger(\"ajaxSend\",[w,l]),2===u)return w;l.async&&l.timeout>0&&(h=a.setTimeout(function(){w.abort(\"timeout\")},l.timeout));try{u=1,j.send(s,y)}catch(x){if(!(2>u))throw x;y(-1,x)}}else y(-1,\"No Transport\");function y(b,c,d,e){var k,s,t,v,x,y=c;2!==u&&(u=2,h&&a.clearTimeout(h),j=void 0,g=e||\"\",w.readyState=b>0?4:0,k=b>=200&&300>b||304===b,d&&(v=Vb(l,w,d)),v=Wb(l,v,w,k),k?(l.ifModified&&(x=w.getResponseHeader(\"Last-Modified\"),x&&(n.lastModified[f]=x),x=w.getResponseHeader(\"etag\"),x&&(n.etag[f]=x)),204===b||\"HEAD\"===l.type?y=\"nocontent\":304===b?y=\"notmodified\":(y=v.state,s=v.data,t=v.error,k=!t)):(t=y,(b||!y)&&(y=\"error\",0>b&&(b=0))),w.status=b,w.statusText=(c||y)+\"\",k?p.resolveWith(m,[s,y,w]):p.rejectWith(m,[w,y,t]),w.statusCode(r),r=void 0,i&&o.trigger(k?\"ajaxSuccess\":\"ajaxError\",[w,l,k?s:t]),q.fireWith(m,[w,y]),i&&(o.trigger(\"ajaxComplete\",[w,l]),--n.active||n.event.trigger(\"ajaxStop\")))}return w},getJSON:function(a,b,c){return n.get(a,b,c,\"json\")},getScript:function(a,b){return n.get(a,void 0,b,\"script\")}}),n.each([\"get\",\"post\"],function(a,b){n[b]=function(a,c,d,e){return n.isFunction(c)&&(e=e||d,d=c,c=void 
0),n.ajax(n.extend({url:a,type:b,dataType:e,data:c,success:d},n.isPlainObject(a)&&a))}}),n._evalUrl=function(a){return n.ajax({url:a,type:\"GET\",dataType:\"script\",cache:!0,async:!1,global:!1,\"throws\":!0})},n.fn.extend({wrapAll:function(a){if(n.isFunction(a))return this.each(function(b){n(this).wrapAll(a.call(this,b))});if(this[0]){var b=n(a,this[0].ownerDocument).eq(0).clone(!0);this[0].parentNode&&b.insertBefore(this[0]),b.map(function(){var a=this;while(a.firstChild&&1===a.firstChild.nodeType)a=a.firstChild;return a}).append(this)}return this},wrapInner:function(a){return n.isFunction(a)?this.each(function(b){n(this).wrapInner(a.call(this,b))}):this.each(function(){var b=n(this),c=b.contents();c.length?c.wrapAll(a):b.append(a)})},wrap:function(a){var b=n.isFunction(a);return this.each(function(c){n(this).wrapAll(b?a.call(this,c):a)})},unwrap:function(){return this.parent().each(function(){n.nodeName(this,\"body\")||n(this).replaceWith(this.childNodes)}).end()}});function Xb(a){return a.style&&a.style.display||n.css(a,\"display\")}function Yb(a){while(a&&1===a.nodeType){if(\"none\"===Xb(a)||\"hidden\"===a.type)return!0;a=a.parentNode}return!1}n.expr.filters.hidden=function(a){return l.reliableHiddenOffsets()?a.offsetWidth<=0&&a.offsetHeight<=0&&!a.getClientRects().length:Yb(a)},n.expr.filters.visible=function(a){return!n.expr.filters.hidden(a)};var Zb=/%20/g,$b=/\\[\\]$/,_b=/\\r?\\n/g,ac=/^(?:submit|button|image|reset|file)$/i,bc=/^(?:input|select|textarea|keygen)/i;function cc(a,b,c,d){var e;if(n.isArray(b))n.each(b,function(b,e){c||$b.test(a)?d(a,e):cc(a+\"[\"+(\"object\"==typeof e&&null!=e?b:\"\")+\"]\",e,c,d)});else if(c||\"object\"!==n.type(b))d(a,b);else for(e in b)cc(a+\"[\"+e+\"]\",b[e],c,d)}n.param=function(a,b){var c,d=[],e=function(a,b){b=n.isFunction(b)?b():null==b?\"\":b,d[d.length]=encodeURIComponent(a)+\"=\"+encodeURIComponent(b)};if(void 
0===b&&(b=n.ajaxSettings&&n.ajaxSettings.traditional),n.isArray(a)||a.jquery&&!n.isPlainObject(a))n.each(a,function(){e(this.name,this.value)});else for(c in a)cc(c,a[c],b,e);return d.join(\"&\").replace(Zb,\"+\")},n.fn.extend({serialize:function(){return n.param(this.serializeArray())},serializeArray:function(){return this.map(function(){var a=n.prop(this,\"elements\");return a?n.makeArray(a):this}).filter(function(){var a=this.type;return this.name&&!n(this).is(\":disabled\")&&bc.test(this.nodeName)&&!ac.test(a)&&(this.checked||!Z.test(a))}).map(function(a,b){var c=n(this).val();return null==c?null:n.isArray(c)?n.map(c,function(a){return{name:b.name,value:a.replace(_b,\"\\r\\n\")}}):{name:b.name,value:c.replace(_b,\"\\r\\n\")}}).get()}}),n.ajaxSettings.xhr=void 0!==a.ActiveXObject?function(){return this.isLocal?hc():d.documentMode>8?gc():/^(get|post|head|put|delete|options)$/i.test(this.type)&&gc()||hc()}:gc;var dc=0,ec={},fc=n.ajaxSettings.xhr();a.attachEvent&&a.attachEvent(\"onunload\",function(){for(var a in ec)ec[a](void 0,!0)}),l.cors=!!fc&&\"withCredentials\"in fc,fc=l.ajax=!!fc,fc&&n.ajaxTransport(function(b){if(!b.crossDomain||l.cors){var c;return{send:function(d,e){var f,g=b.xhr(),h=++dc;if(g.open(b.type,b.url,b.async,b.username,b.password),b.xhrFields)for(f in b.xhrFields)g[f]=b.xhrFields[f];b.mimeType&&g.overrideMimeType&&g.overrideMimeType(b.mimeType),b.crossDomain||d[\"X-Requested-With\"]||(d[\"X-Requested-With\"]=\"XMLHttpRequest\");for(f in d)void 0!==d[f]&&g.setRequestHeader(f,d[f]+\"\");g.send(b.hasContent&&b.data||null),c=function(a,d){var f,i,j;if(c&&(d||4===g.readyState))if(delete ec[h],c=void 0,g.onreadystatechange=n.noop,d)4!==g.readyState&&g.abort();else{j={},f=g.status,\"string\"==typeof 
g.responseText&&(j.text=g.responseText);try{i=g.statusText}catch(k){i=\"\"}f||!b.isLocal||b.crossDomain?1223===f&&(f=204):f=j.text?200:404}j&&e(f,i,j,g.getAllResponseHeaders())},b.async?4===g.readyState?a.setTimeout(c):g.onreadystatechange=ec[h]=c:c()},abort:function(){c&&c(void 0,!0)}}}});function gc(){try{return new a.XMLHttpRequest}catch(b){}}function hc(){try{return new a.ActiveXObject(\"Microsoft.XMLHTTP\")}catch(b){}}n.ajaxPrefilter(function(a){a.crossDomain&&(a.contents.script=!1)}),n.ajaxSetup({accepts:{script:\"text/javascript, application/javascript, application/ecmascript, application/x-ecmascript\"},contents:{script:/\\b(?:java|ecma)script\\b/},converters:{\"text script\":function(a){return n.globalEval(a),a}}}),n.ajaxPrefilter(\"script\",function(a){void 0===a.cache&&(a.cache=!1),a.crossDomain&&(a.type=\"GET\",a.global=!1)}),n.ajaxTransport(\"script\",function(a){if(a.crossDomain){var b,c=d.head||n(\"head\")[0]||d.documentElement;return{send:function(e,f){b=d.createElement(\"script\"),b.async=!0,a.scriptCharset&&(b.charset=a.scriptCharset),b.src=a.url,b.onload=b.onreadystatechange=function(a,c){(c||!b.readyState||/loaded|complete/.test(b.readyState))&&(b.onload=b.onreadystatechange=null,b.parentNode&&b.parentNode.removeChild(b),b=null,c||f(200,\"success\"))},c.insertBefore(b,c.firstChild)},abort:function(){b&&b.onload(void 0,!0)}}}});var ic=[],jc=/(=)\\?(?=&|$)|\\?\\?/;n.ajaxSetup({jsonp:\"callback\",jsonpCallback:function(){var a=ic.pop()||n.expando+\"_\"+Db++;return this[a]=!0,a}}),n.ajaxPrefilter(\"json jsonp\",function(b,c,d){var e,f,g,h=b.jsonp!==!1&&(jc.test(b.url)?\"url\":\"string\"==typeof b.data&&0===(b.contentType||\"\").indexOf(\"application/x-www-form-urlencoded\")&&jc.test(b.data)&&\"data\");return h||\"jsonp\"===b.dataTypes[0]?(e=b.jsonpCallback=n.isFunction(b.jsonpCallback)?b.jsonpCallback():b.jsonpCallback,h?b[h]=b[h].replace(jc,\"$1\"+e):b.jsonp!==!1&&(b.url+=(Eb.test(b.url)?\"&\":\"?\")+b.jsonp+\"=\"+e),b.converters[\"script 
json\"]=function(){return g||n.error(e+\" was not called\"),g[0]},b.dataTypes[0]=\"json\",f=a[e],a[e]=function(){g=arguments},d.always(function(){void 0===f?n(a).removeProp(e):a[e]=f,b[e]&&(b.jsonpCallback=c.jsonpCallback,ic.push(e)),g&&n.isFunction(f)&&f(g[0]),g=f=void 0}),\"script\"):void 0}),l.createHTMLDocument=function(){if(!d.implementation.createHTMLDocument)return!1;var a=d.implementation.createHTMLDocument(\"\");return a.body.innerHTML=\"<form></form><form></form>\",2===a.body.childNodes.length}(),n.parseHTML=function(a,b,c){if(!a||\"string\"!=typeof a)return null;\"boolean\"==typeof b&&(c=b,b=!1),b=b||(l.createHTMLDocument?d.implementation.createHTMLDocument(\"\"):d);var e=x.exec(a),f=!c&&[];return e?[b.createElement(e[1])]:(e=ja([a],b,f),f&&f.length&&n(f).remove(),n.merge([],e.childNodes))};var kc=n.fn.load;n.fn.load=function(a,b,c){if(\"string\"!=typeof a&&kc)return kc.apply(this,arguments);var d,e,f,g=this,h=a.indexOf(\" \");return h>-1&&(d=n.trim(a.slice(h,a.length)),a=a.slice(0,h)),n.isFunction(b)?(c=b,b=void 0):b&&\"object\"==typeof b&&(e=\"POST\"),g.length>0&&n.ajax({url:a,type:e||\"GET\",dataType:\"html\",data:b}).done(function(a){f=arguments,g.html(d?n(\"<div>\").append(n.parseHTML(a)).find(d):a)}).always(c&&function(a,b){g.each(function(){c.apply(g,f||[a.responseText,b,a])})}),this},n.each([\"ajaxStart\",\"ajaxStop\",\"ajaxComplete\",\"ajaxError\",\"ajaxSuccess\",\"ajaxSend\"],function(a,b){n.fn[b]=function(a){return this.on(b,a)}}),n.expr.filters.animated=function(a){return n.grep(n.timers,function(b){return a===b.elem}).length};function lc(a){return n.isWindow(a)?a:9===a.nodeType?a.defaultView||a.parentWindow:!1}n.offset={setOffset:function(a,b,c){var 
d,e,f,g,h,i,j,k=n.css(a,\"position\"),l=n(a),m={};\"static\"===k&&(a.style.position=\"relative\"),h=l.offset(),f=n.css(a,\"top\"),i=n.css(a,\"left\"),j=(\"absolute\"===k||\"fixed\"===k)&&n.inArray(\"auto\",[f,i])>-1,j?(d=l.position(),g=d.top,e=d.left):(g=parseFloat(f)||0,e=parseFloat(i)||0),n.isFunction(b)&&(b=b.call(a,c,n.extend({},h))),null!=b.top&&(m.top=b.top-h.top+g),null!=b.left&&(m.left=b.left-h.left+e),\"using\"in b?b.using.call(a,m):l.css(m)}},n.fn.extend({offset:function(a){if(arguments.length)return void 0===a?this:this.each(function(b){n.offset.setOffset(this,a,b)});var b,c,d={top:0,left:0},e=this[0],f=e&&e.ownerDocument;if(f)return b=f.documentElement,n.contains(b,e)?(\"undefined\"!=typeof e.getBoundingClientRect&&(d=e.getBoundingClientRect()),c=lc(f),{top:d.top+(c.pageYOffset||b.scrollTop)-(b.clientTop||0),left:d.left+(c.pageXOffset||b.scrollLeft)-(b.clientLeft||0)}):d},position:function(){if(this[0]){var a,b,c={top:0,left:0},d=this[0];return\"fixed\"===n.css(d,\"position\")?b=d.getBoundingClientRect():(a=this.offsetParent(),b=this.offset(),n.nodeName(a[0],\"html\")||(c=a.offset()),c.top+=n.css(a[0],\"borderTopWidth\",!0)-a.scrollTop(),c.left+=n.css(a[0],\"borderLeftWidth\",!0)-a.scrollLeft()),{top:b.top-c.top-n.css(d,\"marginTop\",!0),left:b.left-c.left-n.css(d,\"marginLeft\",!0)}}},offsetParent:function(){return this.map(function(){var a=this.offsetParent;while(a&&!n.nodeName(a,\"html\")&&\"static\"===n.css(a,\"position\"))a=a.offsetParent;return a||Qa})}}),n.each({scrollLeft:\"pageXOffset\",scrollTop:\"pageYOffset\"},function(a,b){var c=/Y/.test(b);n.fn[a]=function(d){return Y(this,function(a,d,e){var f=lc(a);return void 0===e?f?b in f?f[b]:f.document.documentElement[d]:a[d]:void(f?f.scrollTo(c?n(f).scrollLeft():e,c?e:n(f).scrollTop()):a[d]=e)},a,d,arguments.length,null)}}),n.each([\"top\",\"left\"],function(a,b){\nn.cssHooks[b]=Ua(l.pixelPosition,function(a,c){return c?(c=Sa(a,b),Oa.test(c)?n(a).position()[b]+\"px\":c):void 
0})}),n.each({Height:\"height\",Width:\"width\"},function(a,b){n.each({padding:\"inner\"+a,content:b,\"\":\"outer\"+a},function(c,d){n.fn[d]=function(d,e){var f=arguments.length&&(c||\"boolean\"!=typeof d),g=c||(d===!0||e===!0?\"margin\":\"border\");return Y(this,function(b,c,d){var e;return n.isWindow(b)?b.document.documentElement[\"client\"+a]:9===b.nodeType?(e=b.documentElement,Math.max(b.body[\"scroll\"+a],e[\"scroll\"+a],b.body[\"offset\"+a],e[\"offset\"+a],e[\"client\"+a])):void 0===d?n.css(b,c,g):n.style(b,c,d,g)},b,f?d:void 0,f,null)}})}),n.fn.extend({bind:function(a,b,c){return this.on(a,null,b,c)},unbind:function(a,b){return this.off(a,null,b)},delegate:function(a,b,c,d){return this.on(b,a,c,d)},undelegate:function(a,b,c){return 1===arguments.length?this.off(a,\"**\"):this.off(b,a||\"**\",c)}}),n.fn.size=function(){return this.length},n.fn.andSelf=n.fn.addBack,\"function\"==typeof define&&define.amd&&define(\"jquery\",[],function(){return n});var mc=a.jQuery,nc=a.$;return n.noConflict=function(b){return a.$===n&&(a.$=nc),b&&a.jQuery===n&&(a.jQuery=mc),n},b||(a.jQuery=a.$=n),n});"
  },
  {
    "path": "web_gui/gui_v3/js/moment.js",
    "content": "//! moment.js\n//! version : 2.17.1\n//! authors : Tim Wood, Iskren Chernev, Moment.js contributors\n//! license : MIT\n//! momentjs.com\n\n;(function (global, factory) {\n    typeof exports === 'object' && typeof module !== 'undefined' ? module.exports = factory() :\n    typeof define === 'function' && define.amd ? define(factory) :\n    global.moment = factory()\n}(this, (function () { 'use strict';\n\nvar hookCallback;\n\nfunction hooks () {\n    return hookCallback.apply(null, arguments);\n}\n\n// This is done to register the method called with moment()\n// without creating circular dependencies.\nfunction setHookCallback (callback) {\n    hookCallback = callback;\n}\n\nfunction isArray(input) {\n    return input instanceof Array || Object.prototype.toString.call(input) === '[object Array]';\n}\n\nfunction isObject(input) {\n    // IE8 will treat undefined and null as object if it wasn't for\n    // input != null\n    return input != null && Object.prototype.toString.call(input) === '[object Object]';\n}\n\nfunction isObjectEmpty(obj) {\n    var k;\n    for (k in obj) {\n        // even if its not own property I'd still call it non-empty\n        return false;\n    }\n    return true;\n}\n\nfunction isNumber(input) {\n    return typeof input === 'number' || Object.prototype.toString.call(input) === '[object Number]';\n}\n\nfunction isDate(input) {\n    return input instanceof Date || Object.prototype.toString.call(input) === '[object Date]';\n}\n\nfunction map(arr, fn) {\n    var res = [], i;\n    for (i = 0; i < arr.length; ++i) {\n        res.push(fn(arr[i], i));\n    }\n    return res;\n}\n\nfunction hasOwnProp(a, b) {\n    return Object.prototype.hasOwnProperty.call(a, b);\n}\n\nfunction extend(a, b) {\n    for (var i in b) {\n        if (hasOwnProp(b, i)) {\n            a[i] = b[i];\n        }\n    }\n\n    if (hasOwnProp(b, 'toString')) {\n        a.toString = b.toString;\n    }\n\n    if (hasOwnProp(b, 'valueOf')) {\n        a.valueOf = 
b.valueOf;\n    }\n\n    return a;\n}\n\nfunction createUTC (input, format, locale, strict) {\n    return createLocalOrUTC(input, format, locale, strict, true).utc();\n}\n\nfunction defaultParsingFlags() {\n    // We need to deep clone this object.\n    return {\n        empty           : false,\n        unusedTokens    : [],\n        unusedInput     : [],\n        overflow        : -2,\n        charsLeftOver   : 0,\n        nullInput       : false,\n        invalidMonth    : null,\n        invalidFormat   : false,\n        userInvalidated : false,\n        iso             : false,\n        parsedDateParts : [],\n        meridiem        : null\n    };\n}\n\nfunction getParsingFlags(m) {\n    if (m._pf == null) {\n        m._pf = defaultParsingFlags();\n    }\n    return m._pf;\n}\n\nvar some;\nif (Array.prototype.some) {\n    some = Array.prototype.some;\n} else {\n    some = function (fun) {\n        var t = Object(this);\n        var len = t.length >>> 0;\n\n        for (var i = 0; i < len; i++) {\n            if (i in t && fun.call(this, t[i], i, t)) {\n                return true;\n            }\n        }\n\n        return false;\n    };\n}\n\nvar some$1 = some;\n\nfunction isValid(m) {\n    if (m._isValid == null) {\n        var flags = getParsingFlags(m);\n        var parsedParts = some$1.call(flags.parsedDateParts, function (i) {\n            return i != null;\n        });\n        var isNowValid = !isNaN(m._d.getTime()) &&\n            flags.overflow < 0 &&\n            !flags.empty &&\n            !flags.invalidMonth &&\n            !flags.invalidWeekday &&\n            !flags.nullInput &&\n            !flags.invalidFormat &&\n            !flags.userInvalidated &&\n            (!flags.meridiem || (flags.meridiem && parsedParts));\n\n        if (m._strict) {\n            isNowValid = isNowValid &&\n                flags.charsLeftOver === 0 &&\n                flags.unusedTokens.length === 0 &&\n                flags.bigHour === undefined;\n        }\n\n    
    if (Object.isFrozen == null || !Object.isFrozen(m)) {\n            m._isValid = isNowValid;\n        }\n        else {\n            return isNowValid;\n        }\n    }\n    return m._isValid;\n}\n\nfunction createInvalid (flags) {\n    var m = createUTC(NaN);\n    if (flags != null) {\n        extend(getParsingFlags(m), flags);\n    }\n    else {\n        getParsingFlags(m).userInvalidated = true;\n    }\n\n    return m;\n}\n\nfunction isUndefined(input) {\n    return input === void 0;\n}\n\n// Plugins that add properties should also add the key here (null value),\n// so we can properly clone ourselves.\nvar momentProperties = hooks.momentProperties = [];\n\nfunction copyConfig(to, from) {\n    var i, prop, val;\n\n    if (!isUndefined(from._isAMomentObject)) {\n        to._isAMomentObject = from._isAMomentObject;\n    }\n    if (!isUndefined(from._i)) {\n        to._i = from._i;\n    }\n    if (!isUndefined(from._f)) {\n        to._f = from._f;\n    }\n    if (!isUndefined(from._l)) {\n        to._l = from._l;\n    }\n    if (!isUndefined(from._strict)) {\n        to._strict = from._strict;\n    }\n    if (!isUndefined(from._tzm)) {\n        to._tzm = from._tzm;\n    }\n    if (!isUndefined(from._isUTC)) {\n        to._isUTC = from._isUTC;\n    }\n    if (!isUndefined(from._offset)) {\n        to._offset = from._offset;\n    }\n    if (!isUndefined(from._pf)) {\n        to._pf = getParsingFlags(from);\n    }\n    if (!isUndefined(from._locale)) {\n        to._locale = from._locale;\n    }\n\n    if (momentProperties.length > 0) {\n        for (i in momentProperties) {\n            prop = momentProperties[i];\n            val = from[prop];\n            if (!isUndefined(val)) {\n                to[prop] = val;\n            }\n        }\n    }\n\n    return to;\n}\n\nvar updateInProgress = false;\n\n// Moment prototype object\nfunction Moment(config) {\n    copyConfig(this, config);\n    this._d = new Date(config._d != null ? 
config._d.getTime() : NaN);\n    if (!this.isValid()) {\n        this._d = new Date(NaN);\n    }\n    // Prevent infinite loop in case updateOffset creates new moment\n    // objects.\n    if (updateInProgress === false) {\n        updateInProgress = true;\n        hooks.updateOffset(this);\n        updateInProgress = false;\n    }\n}\n\nfunction isMoment (obj) {\n    return obj instanceof Moment || (obj != null && obj._isAMomentObject != null);\n}\n\nfunction absFloor (number) {\n    if (number < 0) {\n        // -0 -> 0\n        return Math.ceil(number) || 0;\n    } else {\n        return Math.floor(number);\n    }\n}\n\nfunction toInt(argumentForCoercion) {\n    var coercedNumber = +argumentForCoercion,\n        value = 0;\n\n    if (coercedNumber !== 0 && isFinite(coercedNumber)) {\n        value = absFloor(coercedNumber);\n    }\n\n    return value;\n}\n\n// compare two arrays, return the number of differences\nfunction compareArrays(array1, array2, dontConvert) {\n    var len = Math.min(array1.length, array2.length),\n        lengthDiff = Math.abs(array1.length - array2.length),\n        diffs = 0,\n        i;\n    for (i = 0; i < len; i++) {\n        if ((dontConvert && array1[i] !== array2[i]) ||\n            (!dontConvert && toInt(array1[i]) !== toInt(array2[i]))) {\n            diffs++;\n        }\n    }\n    return diffs + lengthDiff;\n}\n\nfunction warn(msg) {\n    if (hooks.suppressDeprecationWarnings === false &&\n            (typeof console !==  'undefined') && console.warn) {\n        console.warn('Deprecation warning: ' + msg);\n    }\n}\n\nfunction deprecate(msg, fn) {\n    var firstTime = true;\n\n    return extend(function () {\n        if (hooks.deprecationHandler != null) {\n            hooks.deprecationHandler(null, msg);\n        }\n        if (firstTime) {\n            var args = [];\n            var arg;\n            for (var i = 0; i < arguments.length; i++) {\n                arg = '';\n                if (typeof arguments[i] === 
'object') {\n                    arg += '\\n[' + i + '] ';\n                    for (var key in arguments[0]) {\n                        arg += key + ': ' + arguments[0][key] + ', ';\n                    }\n                    arg = arg.slice(0, -2); // Remove trailing comma and space\n                } else {\n                    arg = arguments[i];\n                }\n                args.push(arg);\n            }\n            warn(msg + '\\nArguments: ' + Array.prototype.slice.call(args).join('') + '\\n' + (new Error()).stack);\n            firstTime = false;\n        }\n        return fn.apply(this, arguments);\n    }, fn);\n}\n\nvar deprecations = {};\n\nfunction deprecateSimple(name, msg) {\n    if (hooks.deprecationHandler != null) {\n        hooks.deprecationHandler(name, msg);\n    }\n    if (!deprecations[name]) {\n        warn(msg);\n        deprecations[name] = true;\n    }\n}\n\nhooks.suppressDeprecationWarnings = false;\nhooks.deprecationHandler = null;\n\nfunction isFunction(input) {\n    return input instanceof Function || Object.prototype.toString.call(input) === '[object Function]';\n}\n\nfunction set (config) {\n    var prop, i;\n    for (i in config) {\n        prop = config[i];\n        if (isFunction(prop)) {\n            this[i] = prop;\n        } else {\n            this['_' + i] = prop;\n        }\n    }\n    this._config = config;\n    // Lenient ordinal parsing accepts just a number in addition to\n    // number + (possibly) stuff coming from _ordinalParseLenient.\n    this._ordinalParseLenient = new RegExp(this._ordinalParse.source + '|' + (/\\d{1,2}/).source);\n}\n\nfunction mergeConfigs(parentConfig, childConfig) {\n    var res = extend({}, parentConfig), prop;\n    for (prop in childConfig) {\n        if (hasOwnProp(childConfig, prop)) {\n            if (isObject(parentConfig[prop]) && isObject(childConfig[prop])) {\n                res[prop] = {};\n                extend(res[prop], parentConfig[prop]);\n                
extend(res[prop], childConfig[prop]);\n            } else if (childConfig[prop] != null) {\n                res[prop] = childConfig[prop];\n            } else {\n                delete res[prop];\n            }\n        }\n    }\n    for (prop in parentConfig) {\n        if (hasOwnProp(parentConfig, prop) &&\n                !hasOwnProp(childConfig, prop) &&\n                isObject(parentConfig[prop])) {\n            // make sure changes to properties don't modify parent config\n            res[prop] = extend({}, res[prop]);\n        }\n    }\n    return res;\n}\n\nfunction Locale(config) {\n    if (config != null) {\n        this.set(config);\n    }\n}\n\nvar keys;\n\nif (Object.keys) {\n    keys = Object.keys;\n} else {\n    keys = function (obj) {\n        var i, res = [];\n        for (i in obj) {\n            if (hasOwnProp(obj, i)) {\n                res.push(i);\n            }\n        }\n        return res;\n    };\n}\n\nvar keys$1 = keys;\n\nvar defaultCalendar = {\n    sameDay : '[Today at] LT',\n    nextDay : '[Tomorrow at] LT',\n    nextWeek : 'dddd [at] LT',\n    lastDay : '[Yesterday at] LT',\n    lastWeek : '[Last] dddd [at] LT',\n    sameElse : 'L'\n};\n\nfunction calendar (key, mom, now) {\n    var output = this._calendar[key] || this._calendar['sameElse'];\n    return isFunction(output) ? 
output.call(mom, now) : output;\n}\n\nvar defaultLongDateFormat = {\n    LTS  : 'h:mm:ss A',\n    LT   : 'h:mm A',\n    L    : 'MM/DD/YYYY',\n    LL   : 'MMMM D, YYYY',\n    LLL  : 'MMMM D, YYYY h:mm A',\n    LLLL : 'dddd, MMMM D, YYYY h:mm A'\n};\n\nfunction longDateFormat (key) {\n    var format = this._longDateFormat[key],\n        formatUpper = this._longDateFormat[key.toUpperCase()];\n\n    if (format || !formatUpper) {\n        return format;\n    }\n\n    this._longDateFormat[key] = formatUpper.replace(/MMMM|MM|DD|dddd/g, function (val) {\n        return val.slice(1);\n    });\n\n    return this._longDateFormat[key];\n}\n\nvar defaultInvalidDate = 'Invalid date';\n\nfunction invalidDate () {\n    return this._invalidDate;\n}\n\nvar defaultOrdinal = '%d';\nvar defaultOrdinalParse = /\\d{1,2}/;\n\nfunction ordinal (number) {\n    return this._ordinal.replace('%d', number);\n}\n\nvar defaultRelativeTime = {\n    future : 'in %s',\n    past   : '%s ago',\n    s  : 'a few seconds',\n    m  : 'a minute',\n    mm : '%d minutes',\n    h  : 'an hour',\n    hh : '%d hours',\n    d  : 'a day',\n    dd : '%d days',\n    M  : 'a month',\n    MM : '%d months',\n    y  : 'a year',\n    yy : '%d years'\n};\n\nfunction relativeTime (number, withoutSuffix, string, isFuture) {\n    var output = this._relativeTime[string];\n    return (isFunction(output)) ?\n        output(number, withoutSuffix, string, isFuture) :\n        output.replace(/%d/i, number);\n}\n\nfunction pastFuture (diff, output) {\n    var format = this._relativeTime[diff > 0 ? 'future' : 'past'];\n    return isFunction(format) ? format(output) : format.replace(/%s/i, output);\n}\n\nvar aliases = {};\n\nfunction addUnitAlias (unit, shorthand) {\n    var lowerCase = unit.toLowerCase();\n    aliases[lowerCase] = aliases[lowerCase + 's'] = aliases[shorthand] = unit;\n}\n\nfunction normalizeUnits(units) {\n    return typeof units === 'string' ? 
aliases[units] || aliases[units.toLowerCase()] : undefined;\n}\n\nfunction normalizeObjectUnits(inputObject) {\n    var normalizedInput = {},\n        normalizedProp,\n        prop;\n\n    for (prop in inputObject) {\n        if (hasOwnProp(inputObject, prop)) {\n            normalizedProp = normalizeUnits(prop);\n            if (normalizedProp) {\n                normalizedInput[normalizedProp] = inputObject[prop];\n            }\n        }\n    }\n\n    return normalizedInput;\n}\n\nvar priorities = {};\n\nfunction addUnitPriority(unit, priority) {\n    priorities[unit] = priority;\n}\n\nfunction getPrioritizedUnits(unitsObj) {\n    var units = [];\n    for (var u in unitsObj) {\n        units.push({unit: u, priority: priorities[u]});\n    }\n    units.sort(function (a, b) {\n        return a.priority - b.priority;\n    });\n    return units;\n}\n\nfunction makeGetSet (unit, keepTime) {\n    return function (value) {\n        if (value != null) {\n            set$1(this, unit, value);\n            hooks.updateOffset(this, keepTime);\n            return this;\n        } else {\n            return get(this, unit);\n        }\n    };\n}\n\nfunction get (mom, unit) {\n    return mom.isValid() ?\n        mom._d['get' + (mom._isUTC ? 'UTC' : '') + unit]() : NaN;\n}\n\nfunction set$1 (mom, unit, value) {\n    if (mom.isValid()) {\n        mom._d['set' + (mom._isUTC ? 
'UTC' : '') + unit](value);\n    }\n}\n\n// MOMENTS\n\nfunction stringGet (units) {\n    units = normalizeUnits(units);\n    if (isFunction(this[units])) {\n        return this[units]();\n    }\n    return this;\n}\n\n\nfunction stringSet (units, value) {\n    if (typeof units === 'object') {\n        units = normalizeObjectUnits(units);\n        var prioritized = getPrioritizedUnits(units);\n        for (var i = 0; i < prioritized.length; i++) {\n            this[prioritized[i].unit](units[prioritized[i].unit]);\n        }\n    } else {\n        units = normalizeUnits(units);\n        if (isFunction(this[units])) {\n            return this[units](value);\n        }\n    }\n    return this;\n}\n\nfunction zeroFill(number, targetLength, forceSign) {\n    var absNumber = '' + Math.abs(number),\n        zerosToFill = targetLength - absNumber.length,\n        sign = number >= 0;\n    return (sign ? (forceSign ? '+' : '') : '-') +\n        Math.pow(10, Math.max(0, zerosToFill)).toString().substr(1) + absNumber;\n}\n\nvar formattingTokens = /(\\[[^\\[]*\\])|(\\\\)?([Hh]mm(ss)?|Mo|MM?M?M?|Do|DDDo|DD?D?D?|ddd?d?|do?|w[o|w]?|W[o|W]?|Qo?|YYYYYY|YYYYY|YYYY|YY|gg(ggg?)?|GG(GGG?)?|e|E|a|A|hh?|HH?|kk?|mm?|ss?|S{1,9}|x|X|zz?|ZZ?|.)/g;\n\nvar localFormattingTokens = /(\\[[^\\[]*\\])|(\\\\)?(LTS|LT|LL?L?L?|l{1,4})/g;\n\nvar formatFunctions = {};\n\nvar formatTokenFunctions = {};\n\n// token:    'M'\n// padded:   ['MM', 2]\n// ordinal:  'Mo'\n// callback: function () { this.month() + 1 }\nfunction addFormatToken (token, padded, ordinal, callback) {\n    var func = callback;\n    if (typeof callback === 'string') {\n        func = function () {\n            return this[callback]();\n        };\n    }\n    if (token) {\n        formatTokenFunctions[token] = func;\n    }\n    if (padded) {\n        formatTokenFunctions[padded[0]] = function () {\n            return zeroFill(func.apply(this, arguments), padded[1], padded[2]);\n        };\n    }\n    if (ordinal) {\n        
formatTokenFunctions[ordinal] = function () {\n            return this.localeData().ordinal(func.apply(this, arguments), token);\n        };\n    }\n}\n\nfunction removeFormattingTokens(input) {\n    if (input.match(/\\[[\\s\\S]/)) {\n        return input.replace(/^\\[|\\]$/g, '');\n    }\n    return input.replace(/\\\\/g, '');\n}\n\nfunction makeFormatFunction(format) {\n    var array = format.match(formattingTokens), i, length;\n\n    for (i = 0, length = array.length; i < length; i++) {\n        if (formatTokenFunctions[array[i]]) {\n            array[i] = formatTokenFunctions[array[i]];\n        } else {\n            array[i] = removeFormattingTokens(array[i]);\n        }\n    }\n\n    return function (mom) {\n        var output = '', i;\n        for (i = 0; i < length; i++) {\n            output += array[i] instanceof Function ? array[i].call(mom, format) : array[i];\n        }\n        return output;\n    };\n}\n\n// format date using native date object\nfunction formatMoment(m, format) {\n    if (!m.isValid()) {\n        return m.localeData().invalidDate();\n    }\n\n    format = expandFormat(format, m.localeData());\n    formatFunctions[format] = formatFunctions[format] || makeFormatFunction(format);\n\n    return formatFunctions[format](m);\n}\n\nfunction expandFormat(format, locale) {\n    var i = 5;\n\n    function replaceLongDateFormatTokens(input) {\n        return locale.longDateFormat(input) || input;\n    }\n\n    localFormattingTokens.lastIndex = 0;\n    while (i >= 0 && localFormattingTokens.test(format)) {\n        format = format.replace(localFormattingTokens, replaceLongDateFormatTokens);\n        localFormattingTokens.lastIndex = 0;\n        i -= 1;\n    }\n\n    return format;\n}\n\nvar match1         = /\\d/;            //       0 - 9\nvar match2         = /\\d\\d/;          //      00 - 99\nvar match3         = /\\d{3}/;         //     000 - 999\nvar match4         = /\\d{4}/;         //    0000 - 9999\nvar match6         = /[+-]?\\d{6}/;   
 // -999999 - 999999\nvar match1to2      = /\\d\\d?/;         //       0 - 99\nvar match3to4      = /\\d\\d\\d\\d?/;     //     999 - 9999\nvar match5to6      = /\\d\\d\\d\\d\\d\\d?/; //   99999 - 999999\nvar match1to3      = /\\d{1,3}/;       //       0 - 999\nvar match1to4      = /\\d{1,4}/;       //       0 - 9999\nvar match1to6      = /[+-]?\\d{1,6}/;  // -999999 - 999999\n\nvar matchUnsigned  = /\\d+/;           //       0 - inf\nvar matchSigned    = /[+-]?\\d+/;      //    -inf - inf\n\nvar matchOffset    = /Z|[+-]\\d\\d:?\\d\\d/gi; // +00:00 -00:00 +0000 -0000 or Z\nvar matchShortOffset = /Z|[+-]\\d\\d(?::?\\d\\d)?/gi; // +00 -00 +00:00 -00:00 +0000 -0000 or Z\n\nvar matchTimestamp = /[+-]?\\d+(\\.\\d{1,3})?/; // 123456789 123456789.123\n\n// any word (or two) characters or numbers including two/three word month in arabic.\n// includes scottish gaelic two word and hyphenated months\nvar matchWord = /[0-9]*['a-z\\u00A0-\\u05FF\\u0700-\\uD7FF\\uF900-\\uFDCF\\uFDF0-\\uFFEF]+|[\\u0600-\\u06FF\\/]+(\\s*?[\\u0600-\\u06FF]+){1,2}/i;\n\n\nvar regexes = {};\n\nfunction addRegexToken (token, regex, strictRegex) {\n    regexes[token] = isFunction(regex) ? regex : function (isStrict, localeData) {\n        return (isStrict && strictRegex) ? 
strictRegex : regex;\n    };\n}\n\nfunction getParseRegexForToken (token, config) {\n    if (!hasOwnProp(regexes, token)) {\n        return new RegExp(unescapeFormat(token));\n    }\n\n    return regexes[token](config._strict, config._locale);\n}\n\n// Code from http://stackoverflow.com/questions/3561493/is-there-a-regexp-escape-function-in-javascript\nfunction unescapeFormat(s) {\n    return regexEscape(s.replace('\\\\', '').replace(/\\\\(\\[)|\\\\(\\])|\\[([^\\]\\[]*)\\]|\\\\(.)/g, function (matched, p1, p2, p3, p4) {\n        return p1 || p2 || p3 || p4;\n    }));\n}\n\nfunction regexEscape(s) {\n    return s.replace(/[-\\/\\\\^$*+?.()|[\\]{}]/g, '\\\\$&');\n}\n\nvar tokens = {};\n\nfunction addParseToken (token, callback) {\n    var i, func = callback;\n    if (typeof token === 'string') {\n        token = [token];\n    }\n    if (isNumber(callback)) {\n        func = function (input, array) {\n            array[callback] = toInt(input);\n        };\n    }\n    for (i = 0; i < token.length; i++) {\n        tokens[token[i]] = func;\n    }\n}\n\nfunction addWeekParseToken (token, callback) {\n    addParseToken(token, function (input, array, config, token) {\n        config._w = config._w || {};\n        callback(input, config._w, config, token);\n    });\n}\n\nfunction addTimeToArrayFromToken(token, input, config) {\n    if (input != null && hasOwnProp(tokens, token)) {\n        tokens[token](input, config._a, config, token);\n    }\n}\n\nvar YEAR = 0;\nvar MONTH = 1;\nvar DATE = 2;\nvar HOUR = 3;\nvar MINUTE = 4;\nvar SECOND = 5;\nvar MILLISECOND = 6;\nvar WEEK = 7;\nvar WEEKDAY = 8;\n\nvar indexOf;\n\nif (Array.prototype.indexOf) {\n    indexOf = Array.prototype.indexOf;\n} else {\n    indexOf = function (o) {\n        // I know\n        var i;\n        for (i = 0; i < this.length; ++i) {\n            if (this[i] === o) {\n                return i;\n            }\n        }\n        return -1;\n    };\n}\n\nvar indexOf$1 = indexOf;\n\nfunction daysInMonth(year, 
month) {\n    return new Date(Date.UTC(year, month + 1, 0)).getUTCDate();\n}\n\n// FORMATTING\n\naddFormatToken('M', ['MM', 2], 'Mo', function () {\n    return this.month() + 1;\n});\n\naddFormatToken('MMM', 0, 0, function (format) {\n    return this.localeData().monthsShort(this, format);\n});\n\naddFormatToken('MMMM', 0, 0, function (format) {\n    return this.localeData().months(this, format);\n});\n\n// ALIASES\n\naddUnitAlias('month', 'M');\n\n// PRIORITY\n\naddUnitPriority('month', 8);\n\n// PARSING\n\naddRegexToken('M',    match1to2);\naddRegexToken('MM',   match1to2, match2);\naddRegexToken('MMM',  function (isStrict, locale) {\n    return locale.monthsShortRegex(isStrict);\n});\naddRegexToken('MMMM', function (isStrict, locale) {\n    return locale.monthsRegex(isStrict);\n});\n\naddParseToken(['M', 'MM'], function (input, array) {\n    array[MONTH] = toInt(input) - 1;\n});\n\naddParseToken(['MMM', 'MMMM'], function (input, array, config, token) {\n    var month = config._locale.monthsParse(input, token, config._strict);\n    // if we didn't find a month name, mark the date as invalid.\n    if (month != null) {\n        array[MONTH] = month;\n    } else {\n        getParsingFlags(config).invalidMonth = input;\n    }\n});\n\n// LOCALES\n\nvar MONTHS_IN_FORMAT = /D[oD]?(\\[[^\\[\\]]*\\]|\\s)+MMMM?/;\nvar defaultLocaleMonths = 'January_February_March_April_May_June_July_August_September_October_November_December'.split('_');\nfunction localeMonths (m, format) {\n    if (!m) {\n        return this._months;\n    }\n    return isArray(this._months) ? this._months[m.month()] :\n        this._months[(this._months.isFormat || MONTHS_IN_FORMAT).test(format) ? 'format' : 'standalone'][m.month()];\n}\n\nvar defaultLocaleMonthsShort = 'Jan_Feb_Mar_Apr_May_Jun_Jul_Aug_Sep_Oct_Nov_Dec'.split('_');\nfunction localeMonthsShort (m, format) {\n    if (!m) {\n        return this._monthsShort;\n    }\n    return isArray(this._monthsShort) ? 
this._monthsShort[m.month()] :\n        this._monthsShort[MONTHS_IN_FORMAT.test(format) ? 'format' : 'standalone'][m.month()];\n}\n\nfunction handleStrictParse(monthName, format, strict) {\n    var i, ii, mom, llc = monthName.toLocaleLowerCase();\n    if (!this._monthsParse) {\n        // this is not used\n        this._monthsParse = [];\n        this._longMonthsParse = [];\n        this._shortMonthsParse = [];\n        for (i = 0; i < 12; ++i) {\n            mom = createUTC([2000, i]);\n            this._shortMonthsParse[i] = this.monthsShort(mom, '').toLocaleLowerCase();\n            this._longMonthsParse[i] = this.months(mom, '').toLocaleLowerCase();\n        }\n    }\n\n    if (strict) {\n        if (format === 'MMM') {\n            ii = indexOf$1.call(this._shortMonthsParse, llc);\n            return ii !== -1 ? ii : null;\n        } else {\n            ii = indexOf$1.call(this._longMonthsParse, llc);\n            return ii !== -1 ? ii : null;\n        }\n    } else {\n        if (format === 'MMM') {\n            ii = indexOf$1.call(this._shortMonthsParse, llc);\n            if (ii !== -1) {\n                return ii;\n            }\n            ii = indexOf$1.call(this._longMonthsParse, llc);\n            return ii !== -1 ? ii : null;\n        } else {\n            ii = indexOf$1.call(this._longMonthsParse, llc);\n            if (ii !== -1) {\n                return ii;\n            }\n            ii = indexOf$1.call(this._shortMonthsParse, llc);\n            return ii !== -1 ? 
ii : null;\n        }\n    }\n}\n\nfunction localeMonthsParse (monthName, format, strict) {\n    var i, mom, regex;\n\n    if (this._monthsParseExact) {\n        return handleStrictParse.call(this, monthName, format, strict);\n    }\n\n    if (!this._monthsParse) {\n        this._monthsParse = [];\n        this._longMonthsParse = [];\n        this._shortMonthsParse = [];\n    }\n\n    // TODO: add sorting\n    // Sorting makes sure if one month (or abbr) is a prefix of another\n    // see sorting in computeMonthsParse\n    for (i = 0; i < 12; i++) {\n        // make the regex if we don't have it already\n        mom = createUTC([2000, i]);\n        if (strict && !this._longMonthsParse[i]) {\n            this._longMonthsParse[i] = new RegExp('^' + this.months(mom, '').replace('.', '') + '$', 'i');\n            this._shortMonthsParse[i] = new RegExp('^' + this.monthsShort(mom, '').replace('.', '') + '$', 'i');\n        }\n        if (!strict && !this._monthsParse[i]) {\n            regex = '^' + this.months(mom, '') + '|^' + this.monthsShort(mom, '');\n            this._monthsParse[i] = new RegExp(regex.replace('.', ''), 'i');\n        }\n        // test the regex\n        if (strict && format === 'MMMM' && this._longMonthsParse[i].test(monthName)) {\n            return i;\n        } else if (strict && format === 'MMM' && this._shortMonthsParse[i].test(monthName)) {\n            return i;\n        } else if (!strict && this._monthsParse[i].test(monthName)) {\n            return i;\n        }\n    }\n}\n\n// MOMENTS\n\nfunction setMonth (mom, value) {\n    var dayOfMonth;\n\n    if (!mom.isValid()) {\n        // No op\n        return mom;\n    }\n\n    if (typeof value === 'string') {\n        if (/^\\d+$/.test(value)) {\n            value = toInt(value);\n        } else {\n            value = mom.localeData().monthsParse(value);\n            // TODO: Another silent failure?\n            if (!isNumber(value)) {\n                return mom;\n            }\n        }\n  
  }\n\n    dayOfMonth = Math.min(mom.date(), daysInMonth(mom.year(), value));\n    mom._d['set' + (mom._isUTC ? 'UTC' : '') + 'Month'](value, dayOfMonth);\n    return mom;\n}\n\nfunction getSetMonth (value) {\n    if (value != null) {\n        setMonth(this, value);\n        hooks.updateOffset(this, true);\n        return this;\n    } else {\n        return get(this, 'Month');\n    }\n}\n\nfunction getDaysInMonth () {\n    return daysInMonth(this.year(), this.month());\n}\n\nvar defaultMonthsShortRegex = matchWord;\nfunction monthsShortRegex (isStrict) {\n    if (this._monthsParseExact) {\n        if (!hasOwnProp(this, '_monthsRegex')) {\n            computeMonthsParse.call(this);\n        }\n        if (isStrict) {\n            return this._monthsShortStrictRegex;\n        } else {\n            return this._monthsShortRegex;\n        }\n    } else {\n        if (!hasOwnProp(this, '_monthsShortRegex')) {\n            this._monthsShortRegex = defaultMonthsShortRegex;\n        }\n        return this._monthsShortStrictRegex && isStrict ?\n            this._monthsShortStrictRegex : this._monthsShortRegex;\n    }\n}\n\nvar defaultMonthsRegex = matchWord;\nfunction monthsRegex (isStrict) {\n    if (this._monthsParseExact) {\n        if (!hasOwnProp(this, '_monthsRegex')) {\n            computeMonthsParse.call(this);\n        }\n        if (isStrict) {\n            return this._monthsStrictRegex;\n        } else {\n            return this._monthsRegex;\n        }\n    } else {\n        if (!hasOwnProp(this, '_monthsRegex')) {\n            this._monthsRegex = defaultMonthsRegex;\n        }\n        return this._monthsStrictRegex && isStrict ?\n            this._monthsStrictRegex : this._monthsRegex;\n    }\n}\n\nfunction computeMonthsParse () {\n    function cmpLenRev(a, b) {\n        return b.length - a.length;\n    }\n\n    var shortPieces = [], longPieces = [], mixedPieces = [],\n        i, mom;\n    for (i = 0; i < 12; i++) {\n        // make the regex if we don't have 
it already\n        mom = createUTC([2000, i]);\n        shortPieces.push(this.monthsShort(mom, ''));\n        longPieces.push(this.months(mom, ''));\n        mixedPieces.push(this.months(mom, ''));\n        mixedPieces.push(this.monthsShort(mom, ''));\n    }\n    // Sorting makes sure if one month (or abbr) is a prefix of another it\n    // will match the longer piece.\n    shortPieces.sort(cmpLenRev);\n    longPieces.sort(cmpLenRev);\n    mixedPieces.sort(cmpLenRev);\n    for (i = 0; i < 12; i++) {\n        shortPieces[i] = regexEscape(shortPieces[i]);\n        longPieces[i] = regexEscape(longPieces[i]);\n    }\n    for (i = 0; i < 24; i++) {\n        mixedPieces[i] = regexEscape(mixedPieces[i]);\n    }\n\n    this._monthsRegex = new RegExp('^(' + mixedPieces.join('|') + ')', 'i');\n    this._monthsShortRegex = this._monthsRegex;\n    this._monthsStrictRegex = new RegExp('^(' + longPieces.join('|') + ')', 'i');\n    this._monthsShortStrictRegex = new RegExp('^(' + shortPieces.join('|') + ')', 'i');\n}\n\n// FORMATTING\n\naddFormatToken('Y', 0, 0, function () {\n    var y = this.year();\n    return y <= 9999 ? '' + y : '+' + y;\n});\n\naddFormatToken(0, ['YY', 2], 0, function () {\n    return this.year() % 100;\n});\n\naddFormatToken(0, ['YYYY',   4],       0, 'year');\naddFormatToken(0, ['YYYYY',  5],       0, 'year');\naddFormatToken(0, ['YYYYYY', 6, true], 0, 'year');\n\n// ALIASES\n\naddUnitAlias('year', 'y');\n\n// PRIORITIES\n\naddUnitPriority('year', 1);\n\n// PARSING\n\naddRegexToken('Y',      matchSigned);\naddRegexToken('YY',     match1to2, match2);\naddRegexToken('YYYY',   match1to4, match4);\naddRegexToken('YYYYY',  match1to6, match6);\naddRegexToken('YYYYYY', match1to6, match6);\n\naddParseToken(['YYYYY', 'YYYYYY'], YEAR);\naddParseToken('YYYY', function (input, array) {\n    array[YEAR] = input.length === 2 ? 
hooks.parseTwoDigitYear(input) : toInt(input);\n});\naddParseToken('YY', function (input, array) {\n    array[YEAR] = hooks.parseTwoDigitYear(input);\n});\naddParseToken('Y', function (input, array) {\n    array[YEAR] = parseInt(input, 10);\n});\n\n// HELPERS\n\nfunction daysInYear(year) {\n    return isLeapYear(year) ? 366 : 365;\n}\n\nfunction isLeapYear(year) {\n    return (year % 4 === 0 && year % 100 !== 0) || year % 400 === 0;\n}\n\n// HOOKS\n\nhooks.parseTwoDigitYear = function (input) {\n    return toInt(input) + (toInt(input) > 68 ? 1900 : 2000);\n};\n\n// MOMENTS\n\nvar getSetYear = makeGetSet('FullYear', true);\n\nfunction getIsLeapYear () {\n    return isLeapYear(this.year());\n}\n\nfunction createDate (y, m, d, h, M, s, ms) {\n    //can't just apply() to create a date:\n    //http://stackoverflow.com/questions/181348/instantiating-a-javascript-object-by-calling-prototype-constructor-apply\n    var date = new Date(y, m, d, h, M, s, ms);\n\n    //the date constructor remaps years 0-99 to 1900-1999\n    if (y < 100 && y >= 0 && isFinite(date.getFullYear())) {\n        date.setFullYear(y);\n    }\n    return date;\n}\n\nfunction createUTCDate (y) {\n    var date = new Date(Date.UTC.apply(null, arguments));\n\n    //the Date.UTC function remaps years 0-99 to 1900-1999\n    if (y < 100 && y >= 0 && isFinite(date.getUTCFullYear())) {\n        date.setUTCFullYear(y);\n    }\n    return date;\n}\n\n// start-of-first-week - start-of-year\nfunction firstWeekOffset(year, dow, doy) {\n    var // first-week day -- which january is always in the first week (4 for iso, 1 for other)\n        fwd = 7 + dow - doy,\n        // first-week day local weekday -- which local weekday is fwd\n        fwdlw = (7 + createUTCDate(year, 0, fwd).getUTCDay() - dow) % 7;\n\n    return -fwdlw + fwd - 1;\n}\n\n//http://en.wikipedia.org/wiki/ISO_week_date#Calculating_a_date_given_the_year.2C_week_number_and_weekday\nfunction dayOfYearFromWeeks(year, week, weekday, dow, doy) {\n    var 
localWeekday = (7 + weekday - dow) % 7,\n        weekOffset = firstWeekOffset(year, dow, doy),\n        dayOfYear = 1 + 7 * (week - 1) + localWeekday + weekOffset,\n        resYear, resDayOfYear;\n\n    if (dayOfYear <= 0) {\n        resYear = year - 1;\n        resDayOfYear = daysInYear(resYear) + dayOfYear;\n    } else if (dayOfYear > daysInYear(year)) {\n        resYear = year + 1;\n        resDayOfYear = dayOfYear - daysInYear(year);\n    } else {\n        resYear = year;\n        resDayOfYear = dayOfYear;\n    }\n\n    return {\n        year: resYear,\n        dayOfYear: resDayOfYear\n    };\n}\n\nfunction weekOfYear(mom, dow, doy) {\n    var weekOffset = firstWeekOffset(mom.year(), dow, doy),\n        week = Math.floor((mom.dayOfYear() - weekOffset - 1) / 7) + 1,\n        resWeek, resYear;\n\n    if (week < 1) {\n        resYear = mom.year() - 1;\n        resWeek = week + weeksInYear(resYear, dow, doy);\n    } else if (week > weeksInYear(mom.year(), dow, doy)) {\n        resWeek = week - weeksInYear(mom.year(), dow, doy);\n        resYear = mom.year() + 1;\n    } else {\n        resYear = mom.year();\n        resWeek = week;\n    }\n\n    return {\n        week: resWeek,\n        year: resYear\n    };\n}\n\nfunction weeksInYear(year, dow, doy) {\n    var weekOffset = firstWeekOffset(year, dow, doy),\n        weekOffsetNext = firstWeekOffset(year + 1, dow, doy);\n    return (daysInYear(year) - weekOffset + weekOffsetNext) / 7;\n}\n\n// FORMATTING\n\naddFormatToken('w', ['ww', 2], 'wo', 'week');\naddFormatToken('W', ['WW', 2], 'Wo', 'isoWeek');\n\n// ALIASES\n\naddUnitAlias('week', 'w');\naddUnitAlias('isoWeek', 'W');\n\n// PRIORITIES\n\naddUnitPriority('week', 5);\naddUnitPriority('isoWeek', 5);\n\n// PARSING\n\naddRegexToken('w',  match1to2);\naddRegexToken('ww', match1to2, match2);\naddRegexToken('W',  match1to2);\naddRegexToken('WW', match1to2, match2);\n\naddWeekParseToken(['w', 'ww', 'W', 'WW'], function (input, week, config, token) {\n    
week[token.substr(0, 1)] = toInt(input);\n});\n\n// HELPERS\n\n// LOCALES\n\nfunction localeWeek (mom) {\n    return weekOfYear(mom, this._week.dow, this._week.doy).week;\n}\n\nvar defaultLocaleWeek = {\n    dow : 0, // Sunday is the first day of the week.\n    doy : 6  // The week that contains Jan 1st is the first week of the year.\n};\n\nfunction localeFirstDayOfWeek () {\n    return this._week.dow;\n}\n\nfunction localeFirstDayOfYear () {\n    return this._week.doy;\n}\n\n// MOMENTS\n\nfunction getSetWeek (input) {\n    var week = this.localeData().week(this);\n    return input == null ? week : this.add((input - week) * 7, 'd');\n}\n\nfunction getSetISOWeek (input) {\n    var week = weekOfYear(this, 1, 4).week;\n    return input == null ? week : this.add((input - week) * 7, 'd');\n}\n\n// FORMATTING\n\naddFormatToken('d', 0, 'do', 'day');\n\naddFormatToken('dd', 0, 0, function (format) {\n    return this.localeData().weekdaysMin(this, format);\n});\n\naddFormatToken('ddd', 0, 0, function (format) {\n    return this.localeData().weekdaysShort(this, format);\n});\n\naddFormatToken('dddd', 0, 0, function (format) {\n    return this.localeData().weekdays(this, format);\n});\n\naddFormatToken('e', 0, 0, 'weekday');\naddFormatToken('E', 0, 0, 'isoWeekday');\n\n// ALIASES\n\naddUnitAlias('day', 'd');\naddUnitAlias('weekday', 'e');\naddUnitAlias('isoWeekday', 'E');\n\n// PRIORITY\naddUnitPriority('day', 11);\naddUnitPriority('weekday', 11);\naddUnitPriority('isoWeekday', 11);\n\n// PARSING\n\naddRegexToken('d',    match1to2);\naddRegexToken('e',    match1to2);\naddRegexToken('E',    match1to2);\naddRegexToken('dd',   function (isStrict, locale) {\n    return locale.weekdaysMinRegex(isStrict);\n});\naddRegexToken('ddd',   function (isStrict, locale) {\n    return locale.weekdaysShortRegex(isStrict);\n});\naddRegexToken('dddd',   function (isStrict, locale) {\n    return locale.weekdaysRegex(isStrict);\n});\n\naddWeekParseToken(['dd', 'ddd', 'dddd'], function (input, 
week, config, token) {\n    var weekday = config._locale.weekdaysParse(input, token, config._strict);\n    // if we didn't get a weekday name, mark the date as invalid\n    if (weekday != null) {\n        week.d = weekday;\n    } else {\n        getParsingFlags(config).invalidWeekday = input;\n    }\n});\n\naddWeekParseToken(['d', 'e', 'E'], function (input, week, config, token) {\n    week[token] = toInt(input);\n});\n\n// HELPERS\n\nfunction parseWeekday(input, locale) {\n    if (typeof input !== 'string') {\n        return input;\n    }\n\n    if (!isNaN(input)) {\n        return parseInt(input, 10);\n    }\n\n    input = locale.weekdaysParse(input);\n    if (typeof input === 'number') {\n        return input;\n    }\n\n    return null;\n}\n\nfunction parseIsoWeekday(input, locale) {\n    if (typeof input === 'string') {\n        return locale.weekdaysParse(input) % 7 || 7;\n    }\n    return isNaN(input) ? null : input;\n}\n\n// LOCALES\n\nvar defaultLocaleWeekdays = 'Sunday_Monday_Tuesday_Wednesday_Thursday_Friday_Saturday'.split('_');\nfunction localeWeekdays (m, format) {\n    if (!m) {\n        return this._weekdays;\n    }\n    return isArray(this._weekdays) ? this._weekdays[m.day()] :\n        this._weekdays[this._weekdays.isFormat.test(format) ? 'format' : 'standalone'][m.day()];\n}\n\nvar defaultLocaleWeekdaysShort = 'Sun_Mon_Tue_Wed_Thu_Fri_Sat'.split('_');\nfunction localeWeekdaysShort (m) {\n    return (m) ? this._weekdaysShort[m.day()] : this._weekdaysShort;\n}\n\nvar defaultLocaleWeekdaysMin = 'Su_Mo_Tu_We_Th_Fr_Sa'.split('_');\nfunction localeWeekdaysMin (m) {\n    return (m) ? 
this._weekdaysMin[m.day()] : this._weekdaysMin;\n}\n\nfunction handleStrictParse$1(weekdayName, format, strict) {\n    var i, ii, mom, llc = weekdayName.toLocaleLowerCase();\n    if (!this._weekdaysParse) {\n        this._weekdaysParse = [];\n        this._shortWeekdaysParse = [];\n        this._minWeekdaysParse = [];\n\n        for (i = 0; i < 7; ++i) {\n            mom = createUTC([2000, 1]).day(i);\n            this._minWeekdaysParse[i] = this.weekdaysMin(mom, '').toLocaleLowerCase();\n            this._shortWeekdaysParse[i] = this.weekdaysShort(mom, '').toLocaleLowerCase();\n            this._weekdaysParse[i] = this.weekdays(mom, '').toLocaleLowerCase();\n        }\n    }\n\n    if (strict) {\n        if (format === 'dddd') {\n            ii = indexOf$1.call(this._weekdaysParse, llc);\n            return ii !== -1 ? ii : null;\n        } else if (format === 'ddd') {\n            ii = indexOf$1.call(this._shortWeekdaysParse, llc);\n            return ii !== -1 ? ii : null;\n        } else {\n            ii = indexOf$1.call(this._minWeekdaysParse, llc);\n            return ii !== -1 ? ii : null;\n        }\n    } else {\n        if (format === 'dddd') {\n            ii = indexOf$1.call(this._weekdaysParse, llc);\n            if (ii !== -1) {\n                return ii;\n            }\n            ii = indexOf$1.call(this._shortWeekdaysParse, llc);\n            if (ii !== -1) {\n                return ii;\n            }\n            ii = indexOf$1.call(this._minWeekdaysParse, llc);\n            return ii !== -1 ? ii : null;\n        } else if (format === 'ddd') {\n            ii = indexOf$1.call(this._shortWeekdaysParse, llc);\n            if (ii !== -1) {\n                return ii;\n            }\n            ii = indexOf$1.call(this._weekdaysParse, llc);\n            if (ii !== -1) {\n                return ii;\n            }\n            ii = indexOf$1.call(this._minWeekdaysParse, llc);\n            return ii !== -1 ? 
ii : null;\n        } else {\n            ii = indexOf$1.call(this._minWeekdaysParse, llc);\n            if (ii !== -1) {\n                return ii;\n            }\n            ii = indexOf$1.call(this._weekdaysParse, llc);\n            if (ii !== -1) {\n                return ii;\n            }\n            ii = indexOf$1.call(this._shortWeekdaysParse, llc);\n            return ii !== -1 ? ii : null;\n        }\n    }\n}\n\nfunction localeWeekdaysParse (weekdayName, format, strict) {\n    var i, mom, regex;\n\n    if (this._weekdaysParseExact) {\n        return handleStrictParse$1.call(this, weekdayName, format, strict);\n    }\n\n    if (!this._weekdaysParse) {\n        this._weekdaysParse = [];\n        this._minWeekdaysParse = [];\n        this._shortWeekdaysParse = [];\n        this._fullWeekdaysParse = [];\n    }\n\n    for (i = 0; i < 7; i++) {\n        // make the regex if we don't have it already\n\n        mom = createUTC([2000, 1]).day(i);\n        if (strict && !this._fullWeekdaysParse[i]) {\n            this._fullWeekdaysParse[i] = new RegExp('^' + this.weekdays(mom, '').replace('.', '\\.?') + '$', 'i');\n            this._shortWeekdaysParse[i] = new RegExp('^' + this.weekdaysShort(mom, '').replace('.', '\\.?') + '$', 'i');\n            this._minWeekdaysParse[i] = new RegExp('^' + this.weekdaysMin(mom, '').replace('.', '\\.?') + '$', 'i');\n        }\n        if (!this._weekdaysParse[i]) {\n            regex = '^' + this.weekdays(mom, '') + '|^' + this.weekdaysShort(mom, '') + '|^' + this.weekdaysMin(mom, '');\n            this._weekdaysParse[i] = new RegExp(regex.replace('.', ''), 'i');\n        }\n        // test the regex\n        if (strict && format === 'dddd' && this._fullWeekdaysParse[i].test(weekdayName)) {\n            return i;\n        } else if (strict && format === 'ddd' && this._shortWeekdaysParse[i].test(weekdayName)) {\n            return i;\n        } else if (strict && format === 'dd' && this._minWeekdaysParse[i].test(weekdayName)) 
{\n            return i;\n        } else if (!strict && this._weekdaysParse[i].test(weekdayName)) {\n            return i;\n        }\n    }\n}\n\n// MOMENTS\n\nfunction getSetDayOfWeek (input) {\n    if (!this.isValid()) {\n        return input != null ? this : NaN;\n    }\n    var day = this._isUTC ? this._d.getUTCDay() : this._d.getDay();\n    if (input != null) {\n        input = parseWeekday(input, this.localeData());\n        return this.add(input - day, 'd');\n    } else {\n        return day;\n    }\n}\n\nfunction getSetLocaleDayOfWeek (input) {\n    if (!this.isValid()) {\n        return input != null ? this : NaN;\n    }\n    var weekday = (this.day() + 7 - this.localeData()._week.dow) % 7;\n    return input == null ? weekday : this.add(input - weekday, 'd');\n}\n\nfunction getSetISODayOfWeek (input) {\n    if (!this.isValid()) {\n        return input != null ? this : NaN;\n    }\n\n    // behaves the same as moment#day except\n    // as a getter, returns 7 instead of 0 (1-7 range instead of 0-6)\n    // as a setter, sunday should belong to the previous week.\n\n    if (input != null) {\n        var weekday = parseIsoWeekday(input, this.localeData());\n        return this.day(this.day() % 7 ? 
weekday : weekday - 7);\n    } else {\n        return this.day() || 7;\n    }\n}\n\nvar defaultWeekdaysRegex = matchWord;\nfunction weekdaysRegex (isStrict) {\n    if (this._weekdaysParseExact) {\n        if (!hasOwnProp(this, '_weekdaysRegex')) {\n            computeWeekdaysParse.call(this);\n        }\n        if (isStrict) {\n            return this._weekdaysStrictRegex;\n        } else {\n            return this._weekdaysRegex;\n        }\n    } else {\n        if (!hasOwnProp(this, '_weekdaysRegex')) {\n            this._weekdaysRegex = defaultWeekdaysRegex;\n        }\n        return this._weekdaysStrictRegex && isStrict ?\n            this._weekdaysStrictRegex : this._weekdaysRegex;\n    }\n}\n\nvar defaultWeekdaysShortRegex = matchWord;\nfunction weekdaysShortRegex (isStrict) {\n    if (this._weekdaysParseExact) {\n        if (!hasOwnProp(this, '_weekdaysRegex')) {\n            computeWeekdaysParse.call(this);\n        }\n        if (isStrict) {\n            return this._weekdaysShortStrictRegex;\n        } else {\n            return this._weekdaysShortRegex;\n        }\n    } else {\n        if (!hasOwnProp(this, '_weekdaysShortRegex')) {\n            this._weekdaysShortRegex = defaultWeekdaysShortRegex;\n        }\n        return this._weekdaysShortStrictRegex && isStrict ?\n            this._weekdaysShortStrictRegex : this._weekdaysShortRegex;\n    }\n}\n\nvar defaultWeekdaysMinRegex = matchWord;\nfunction weekdaysMinRegex (isStrict) {\n    if (this._weekdaysParseExact) {\n        if (!hasOwnProp(this, '_weekdaysRegex')) {\n            computeWeekdaysParse.call(this);\n        }\n        if (isStrict) {\n            return this._weekdaysMinStrictRegex;\n        } else {\n            return this._weekdaysMinRegex;\n        }\n    } else {\n        if (!hasOwnProp(this, '_weekdaysMinRegex')) {\n            this._weekdaysMinRegex = defaultWeekdaysMinRegex;\n        }\n        return this._weekdaysMinStrictRegex && isStrict ?\n            
this._weekdaysMinStrictRegex : this._weekdaysMinRegex;\n    }\n}\n\n\nfunction computeWeekdaysParse () {\n    function cmpLenRev(a, b) {\n        return b.length - a.length;\n    }\n\n    var minPieces = [], shortPieces = [], longPieces = [], mixedPieces = [],\n        i, mom, minp, shortp, longp;\n    for (i = 0; i < 7; i++) {\n        // make the regex if we don't have it already\n        mom = createUTC([2000, 1]).day(i);\n        minp = this.weekdaysMin(mom, '');\n        shortp = this.weekdaysShort(mom, '');\n        longp = this.weekdays(mom, '');\n        minPieces.push(minp);\n        shortPieces.push(shortp);\n        longPieces.push(longp);\n        mixedPieces.push(minp);\n        mixedPieces.push(shortp);\n        mixedPieces.push(longp);\n    }\n    // Sorting makes sure if one weekday (or abbr) is a prefix of another it\n    // will match the longer piece.\n    minPieces.sort(cmpLenRev);\n    shortPieces.sort(cmpLenRev);\n    longPieces.sort(cmpLenRev);\n    mixedPieces.sort(cmpLenRev);\n    for (i = 0; i < 7; i++) {\n        shortPieces[i] = regexEscape(shortPieces[i]);\n        longPieces[i] = regexEscape(longPieces[i]);\n        mixedPieces[i] = regexEscape(mixedPieces[i]);\n    }\n\n    this._weekdaysRegex = new RegExp('^(' + mixedPieces.join('|') + ')', 'i');\n    this._weekdaysShortRegex = this._weekdaysRegex;\n    this._weekdaysMinRegex = this._weekdaysRegex;\n\n    this._weekdaysStrictRegex = new RegExp('^(' + longPieces.join('|') + ')', 'i');\n    this._weekdaysShortStrictRegex = new RegExp('^(' + shortPieces.join('|') + ')', 'i');\n    this._weekdaysMinStrictRegex = new RegExp('^(' + minPieces.join('|') + ')', 'i');\n}\n\n// FORMATTING\n\nfunction hFormat() {\n    return this.hours() % 12 || 12;\n}\n\nfunction kFormat() {\n    return this.hours() || 24;\n}\n\naddFormatToken('H', ['HH', 2], 0, 'hour');\naddFormatToken('h', ['hh', 2], 0, hFormat);\naddFormatToken('k', ['kk', 2], 0, kFormat);\n\naddFormatToken('hmm', 0, 0, function () {\n    
return '' + hFormat.apply(this) + zeroFill(this.minutes(), 2);\n});\n\naddFormatToken('hmmss', 0, 0, function () {\n    return '' + hFormat.apply(this) + zeroFill(this.minutes(), 2) +\n        zeroFill(this.seconds(), 2);\n});\n\naddFormatToken('Hmm', 0, 0, function () {\n    return '' + this.hours() + zeroFill(this.minutes(), 2);\n});\n\naddFormatToken('Hmmss', 0, 0, function () {\n    return '' + this.hours() + zeroFill(this.minutes(), 2) +\n        zeroFill(this.seconds(), 2);\n});\n\nfunction meridiem (token, lowercase) {\n    addFormatToken(token, 0, 0, function () {\n        return this.localeData().meridiem(this.hours(), this.minutes(), lowercase);\n    });\n}\n\nmeridiem('a', true);\nmeridiem('A', false);\n\n// ALIASES\n\naddUnitAlias('hour', 'h');\n\n// PRIORITY\naddUnitPriority('hour', 13);\n\n// PARSING\n\nfunction matchMeridiem (isStrict, locale) {\n    return locale._meridiemParse;\n}\n\naddRegexToken('a',  matchMeridiem);\naddRegexToken('A',  matchMeridiem);\naddRegexToken('H',  match1to2);\naddRegexToken('h',  match1to2);\naddRegexToken('HH', match1to2, match2);\naddRegexToken('hh', match1to2, match2);\n\naddRegexToken('hmm', match3to4);\naddRegexToken('hmmss', match5to6);\naddRegexToken('Hmm', match3to4);\naddRegexToken('Hmmss', match5to6);\n\naddParseToken(['H', 'HH'], HOUR);\naddParseToken(['a', 'A'], function (input, array, config) {\n    config._isPm = config._locale.isPM(input);\n    config._meridiem = input;\n});\naddParseToken(['h', 'hh'], function (input, array, config) {\n    array[HOUR] = toInt(input);\n    getParsingFlags(config).bigHour = true;\n});\naddParseToken('hmm', function (input, array, config) {\n    var pos = input.length - 2;\n    array[HOUR] = toInt(input.substr(0, pos));\n    array[MINUTE] = toInt(input.substr(pos));\n    getParsingFlags(config).bigHour = true;\n});\naddParseToken('hmmss', function (input, array, config) {\n    var pos1 = input.length - 4;\n    var pos2 = input.length - 2;\n    array[HOUR] = 
toInt(input.substr(0, pos1));\n    array[MINUTE] = toInt(input.substr(pos1, 2));\n    array[SECOND] = toInt(input.substr(pos2));\n    getParsingFlags(config).bigHour = true;\n});\naddParseToken('Hmm', function (input, array, config) {\n    var pos = input.length - 2;\n    array[HOUR] = toInt(input.substr(0, pos));\n    array[MINUTE] = toInt(input.substr(pos));\n});\naddParseToken('Hmmss', function (input, array, config) {\n    var pos1 = input.length - 4;\n    var pos2 = input.length - 2;\n    array[HOUR] = toInt(input.substr(0, pos1));\n    array[MINUTE] = toInt(input.substr(pos1, 2));\n    array[SECOND] = toInt(input.substr(pos2));\n});\n\n// LOCALES\n\nfunction localeIsPM (input) {\n    // IE8 Quirks Mode & IE7 Standards Mode do not allow accessing strings like arrays\n    // Using charAt should be more compatible.\n    return ((input + '').toLowerCase().charAt(0) === 'p');\n}\n\nvar defaultLocaleMeridiemParse = /[ap]\\.?m?\\.?/i;\nfunction localeMeridiem (hours, minutes, isLower) {\n    if (hours > 11) {\n        return isLower ? 'pm' : 'PM';\n    } else {\n        return isLower ? 'am' : 'AM';\n    }\n}\n\n\n// MOMENTS\n\n// Setting the hour should keep the time, because the user explicitly\n// specified which hour he wants. So trying to maintain the same hour (in\n// a new timezone) makes sense. 
Adding/subtracting hours does not follow\n// this rule.\nvar getSetHour = makeGetSet('Hours', true);\n\n// months\n// week\n// weekdays\n// meridiem\nvar baseConfig = {\n    calendar: defaultCalendar,\n    longDateFormat: defaultLongDateFormat,\n    invalidDate: defaultInvalidDate,\n    ordinal: defaultOrdinal,\n    ordinalParse: defaultOrdinalParse,\n    relativeTime: defaultRelativeTime,\n\n    months: defaultLocaleMonths,\n    monthsShort: defaultLocaleMonthsShort,\n\n    week: defaultLocaleWeek,\n\n    weekdays: defaultLocaleWeekdays,\n    weekdaysMin: defaultLocaleWeekdaysMin,\n    weekdaysShort: defaultLocaleWeekdaysShort,\n\n    meridiemParse: defaultLocaleMeridiemParse\n};\n\n// internal storage for locale config files\nvar locales = {};\nvar localeFamilies = {};\nvar globalLocale;\n\nfunction normalizeLocale(key) {\n    return key ? key.toLowerCase().replace('_', '-') : key;\n}\n\n// pick the locale from the array\n// try ['en-au', 'en-gb'] as 'en-au', 'en-gb', 'en', as in move through the list trying each\n// substring from most specific to least, but move to the next array item if it's a more specific variant than the current root\nfunction chooseLocale(names) {\n    var i = 0, j, next, locale, split;\n\n    while (i < names.length) {\n        split = normalizeLocale(names[i]).split('-');\n        j = split.length;\n        next = normalizeLocale(names[i + 1]);\n        next = next ? 
next.split('-') : null;\n        while (j > 0) {\n            locale = loadLocale(split.slice(0, j).join('-'));\n            if (locale) {\n                return locale;\n            }\n            if (next && next.length >= j && compareArrays(split, next, true) >= j - 1) {\n                //the next array item is better than a shallower substring of this one\n                break;\n            }\n            j--;\n        }\n        i++;\n    }\n    return null;\n}\n\nfunction loadLocale(name) {\n    var oldLocale = null;\n    // TODO: Find a better way to register and load all the locales in Node\n    if (!locales[name] && (typeof module !== 'undefined') &&\n            module && module.exports) {\n        try {\n            oldLocale = globalLocale._abbr;\n            require('./locale/' + name);\n            // because defineLocale currently also sets the global locale, we\n            // want to undo that for lazy loaded locales\n            getSetGlobalLocale(oldLocale);\n        } catch (e) { }\n    }\n    return locales[name];\n}\n\n// This function will load locale and then set the global locale.  If\n// no arguments are passed in, it will simply return the current global\n// locale key.\nfunction getSetGlobalLocale (key, values) {\n    var data;\n    if (key) {\n        if (isUndefined(values)) {\n            data = getLocale(key);\n        }\n        else {\n            data = defineLocale(key, values);\n        }\n\n        if (data) {\n            // moment.duration._locale = moment._locale = data;\n            globalLocale = data;\n        }\n    }\n\n    return globalLocale._abbr;\n}\n\nfunction defineLocale (name, config) {\n    if (config !== null) {\n        var parentConfig = baseConfig;\n        config.abbr = name;\n        if (locales[name] != null) {\n            deprecateSimple('defineLocaleOverride',\n                    'use moment.updateLocale(localeName, config) to change ' +\n                    'an existing locale. 
moment.defineLocale(localeName, ' +\n                    'config) should only be used for creating a new locale ' +\n                    'See http://momentjs.com/guides/#/warnings/define-locale/ for more info.');\n            parentConfig = locales[name]._config;\n        } else if (config.parentLocale != null) {\n            if (locales[config.parentLocale] != null) {\n                parentConfig = locales[config.parentLocale]._config;\n            } else {\n                if (!localeFamilies[config.parentLocale]) {\n                    localeFamilies[config.parentLocale] = [];\n                }\n                localeFamilies[config.parentLocale].push({\n                    name: name,\n                    config: config\n                });\n                return null;\n            }\n        }\n        locales[name] = new Locale(mergeConfigs(parentConfig, config));\n\n        if (localeFamilies[name]) {\n            localeFamilies[name].forEach(function (x) {\n                defineLocale(x.name, x.config);\n            });\n        }\n\n        // backwards compat for now: also set the locale\n        // make sure we set the locale AFTER all child locales have been\n        // created, so we won't end up with the child locale set.\n        getSetGlobalLocale(name);\n\n\n        return locales[name];\n    } else {\n        // useful for testing\n        delete locales[name];\n        return null;\n    }\n}\n\nfunction updateLocale(name, config) {\n    if (config != null) {\n        var locale, parentConfig = baseConfig;\n        // MERGE\n        if (locales[name] != null) {\n            parentConfig = locales[name]._config;\n        }\n        config = mergeConfigs(parentConfig, config);\n        locale = new Locale(config);\n        locale.parentLocale = locales[name];\n        locales[name] = locale;\n\n        // backwards compat for now: also set the locale\n        getSetGlobalLocale(name);\n    } else {\n        // pass null for config to unupdate, 
useful for tests\n        if (locales[name] != null) {\n            if (locales[name].parentLocale != null) {\n                locales[name] = locales[name].parentLocale;\n            } else if (locales[name] != null) {\n                delete locales[name];\n            }\n        }\n    }\n    return locales[name];\n}\n\n// returns locale data\nfunction getLocale (key) {\n    var locale;\n\n    if (key && key._locale && key._locale._abbr) {\n        key = key._locale._abbr;\n    }\n\n    if (!key) {\n        return globalLocale;\n    }\n\n    if (!isArray(key)) {\n        //short-circuit everything else\n        locale = loadLocale(key);\n        if (locale) {\n            return locale;\n        }\n        key = [key];\n    }\n\n    return chooseLocale(key);\n}\n\nfunction listLocales() {\n    return keys$1(locales);\n}\n\nfunction checkOverflow (m) {\n    var overflow;\n    var a = m._a;\n\n    if (a && getParsingFlags(m).overflow === -2) {\n        overflow =\n            a[MONTH]       < 0 || a[MONTH]       > 11  ? MONTH :\n            a[DATE]        < 1 || a[DATE]        > daysInMonth(a[YEAR], a[MONTH]) ? DATE :\n            a[HOUR]        < 0 || a[HOUR]        > 24 || (a[HOUR] === 24 && (a[MINUTE] !== 0 || a[SECOND] !== 0 || a[MILLISECOND] !== 0)) ? HOUR :\n            a[MINUTE]      < 0 || a[MINUTE]      > 59  ? MINUTE :\n            a[SECOND]      < 0 || a[SECOND]      > 59  ? SECOND :\n            a[MILLISECOND] < 0 || a[MILLISECOND] > 999 ? 
MILLISECOND :\n            -1;\n\n        if (getParsingFlags(m)._overflowDayOfYear && (overflow < YEAR || overflow > DATE)) {\n            overflow = DATE;\n        }\n        if (getParsingFlags(m)._overflowWeeks && overflow === -1) {\n            overflow = WEEK;\n        }\n        if (getParsingFlags(m)._overflowWeekday && overflow === -1) {\n            overflow = WEEKDAY;\n        }\n\n        getParsingFlags(m).overflow = overflow;\n    }\n\n    return m;\n}\n\n// iso 8601 regex\n// 0000-00-00 0000-W00 or 0000-W00-0 + T + 00 or 00:00 or 00:00:00 or 00:00:00.000 + +00:00 or +0000 or +00)\nvar extendedIsoRegex = /^\\s*((?:[+-]\\d{6}|\\d{4})-(?:\\d\\d-\\d\\d|W\\d\\d-\\d|W\\d\\d|\\d\\d\\d|\\d\\d))(?:(T| )(\\d\\d(?::\\d\\d(?::\\d\\d(?:[.,]\\d+)?)?)?)([\\+\\-]\\d\\d(?::?\\d\\d)?|\\s*Z)?)?$/;\nvar basicIsoRegex = /^\\s*((?:[+-]\\d{6}|\\d{4})(?:\\d\\d\\d\\d|W\\d\\d\\d|W\\d\\d|\\d\\d\\d|\\d\\d))(?:(T| )(\\d\\d(?:\\d\\d(?:\\d\\d(?:[.,]\\d+)?)?)?)([\\+\\-]\\d\\d(?::?\\d\\d)?|\\s*Z)?)?$/;\n\nvar tzRegex = /Z|[+-]\\d\\d(?::?\\d\\d)?/;\n\nvar isoDates = [\n    ['YYYYYY-MM-DD', /[+-]\\d{6}-\\d\\d-\\d\\d/],\n    ['YYYY-MM-DD', /\\d{4}-\\d\\d-\\d\\d/],\n    ['GGGG-[W]WW-E', /\\d{4}-W\\d\\d-\\d/],\n    ['GGGG-[W]WW', /\\d{4}-W\\d\\d/, false],\n    ['YYYY-DDD', /\\d{4}-\\d{3}/],\n    ['YYYY-MM', /\\d{4}-\\d\\d/, false],\n    ['YYYYYYMMDD', /[+-]\\d{10}/],\n    ['YYYYMMDD', /\\d{8}/],\n    // YYYYMM is NOT allowed by the standard\n    ['GGGG[W]WWE', /\\d{4}W\\d{3}/],\n    ['GGGG[W]WW', /\\d{4}W\\d{2}/, false],\n    ['YYYYDDD', /\\d{7}/]\n];\n\n// iso time formats and regexes\nvar isoTimes = [\n    ['HH:mm:ss.SSSS', /\\d\\d:\\d\\d:\\d\\d\\.\\d+/],\n    ['HH:mm:ss,SSSS', /\\d\\d:\\d\\d:\\d\\d,\\d+/],\n    ['HH:mm:ss', /\\d\\d:\\d\\d:\\d\\d/],\n    ['HH:mm', /\\d\\d:\\d\\d/],\n    ['HHmmss.SSSS', /\\d\\d\\d\\d\\d\\d\\.\\d+/],\n    ['HHmmss,SSSS', /\\d\\d\\d\\d\\d\\d,\\d+/],\n    ['HHmmss', /\\d\\d\\d\\d\\d\\d/],\n    ['HHmm', /\\d\\d\\d\\d/],\n    ['HH', /\\d\\d/]\n];\n\nvar 
aspNetJsonRegex = /^\\/?Date\\((\\-?\\d+)/i;\n\n// date from iso format\nfunction configFromISO(config) {\n    var i, l,\n        string = config._i,\n        match = extendedIsoRegex.exec(string) || basicIsoRegex.exec(string),\n        allowTime, dateFormat, timeFormat, tzFormat;\n\n    if (match) {\n        getParsingFlags(config).iso = true;\n\n        for (i = 0, l = isoDates.length; i < l; i++) {\n            if (isoDates[i][1].exec(match[1])) {\n                dateFormat = isoDates[i][0];\n                allowTime = isoDates[i][2] !== false;\n                break;\n            }\n        }\n        if (dateFormat == null) {\n            config._isValid = false;\n            return;\n        }\n        if (match[3]) {\n            for (i = 0, l = isoTimes.length; i < l; i++) {\n                if (isoTimes[i][1].exec(match[3])) {\n                    // match[2] should be 'T' or space\n                    timeFormat = (match[2] || ' ') + isoTimes[i][0];\n                    break;\n                }\n            }\n            if (timeFormat == null) {\n                config._isValid = false;\n                return;\n            }\n        }\n        if (!allowTime && timeFormat != null) {\n            config._isValid = false;\n            return;\n        }\n        if (match[4]) {\n            if (tzRegex.exec(match[4])) {\n                tzFormat = 'Z';\n            } else {\n                config._isValid = false;\n                return;\n            }\n        }\n        config._f = dateFormat + (timeFormat || '') + (tzFormat || '');\n        configFromStringAndFormat(config);\n    } else {\n        config._isValid = false;\n    }\n}\n\n// date from iso format or fallback\nfunction configFromString(config) {\n    var matched = aspNetJsonRegex.exec(config._i);\n\n    if (matched !== null) {\n        config._d = new Date(+matched[1]);\n        return;\n    }\n\n    configFromISO(config);\n    if (config._isValid === false) {\n        delete 
config._isValid;\n        hooks.createFromInputFallback(config);\n    }\n}\n\nhooks.createFromInputFallback = deprecate(\n    'value provided is not in a recognized ISO format. moment construction falls back to js Date(), ' +\n    'which is not reliable across all browsers and versions. Non ISO date formats are ' +\n    'discouraged and will be removed in an upcoming major release. Please refer to ' +\n    'http://momentjs.com/guides/#/warnings/js-date/ for more info.',\n    function (config) {\n        config._d = new Date(config._i + (config._useUTC ? ' UTC' : ''));\n    }\n);\n\n// Pick the first defined of two or three arguments.\nfunction defaults(a, b, c) {\n    if (a != null) {\n        return a;\n    }\n    if (b != null) {\n        return b;\n    }\n    return c;\n}\n\nfunction currentDateArray(config) {\n    // hooks is actually the exported moment object\n    var nowValue = new Date(hooks.now());\n    if (config._useUTC) {\n        return [nowValue.getUTCFullYear(), nowValue.getUTCMonth(), nowValue.getUTCDate()];\n    }\n    return [nowValue.getFullYear(), nowValue.getMonth(), nowValue.getDate()];\n}\n\n// convert an array to a date.\n// the array should mirror the parameters below\n// note: all values past the year are optional and will default to the lowest possible value.\n// [year, month, day , hour, minute, second, millisecond]\nfunction configFromArray (config) {\n    var i, date, input = [], currentDate, yearToUse;\n\n    if (config._d) {\n        return;\n    }\n\n    currentDate = currentDateArray(config);\n\n    //compute day of the year from weeks and weekdays\n    if (config._w && config._a[DATE] == null && config._a[MONTH] == null) {\n        dayOfYearFromWeekInfo(config);\n    }\n\n    //if the day of the year is set, figure out what it is\n    if (config._dayOfYear) {\n        yearToUse = defaults(config._a[YEAR], currentDate[YEAR]);\n\n        if (config._dayOfYear > daysInYear(yearToUse)) {\n            
getParsingFlags(config)._overflowDayOfYear = true;\n        }\n\n        date = createUTCDate(yearToUse, 0, config._dayOfYear);\n        config._a[MONTH] = date.getUTCMonth();\n        config._a[DATE] = date.getUTCDate();\n    }\n\n    // Default to current date.\n    // * if no year, month, day of month are given, default to today\n    // * if day of month is given, default month and year\n    // * if month is given, default only year\n    // * if year is given, don't default anything\n    for (i = 0; i < 3 && config._a[i] == null; ++i) {\n        config._a[i] = input[i] = currentDate[i];\n    }\n\n    // Zero out whatever was not defaulted, including time\n    for (; i < 7; i++) {\n        config._a[i] = input[i] = (config._a[i] == null) ? (i === 2 ? 1 : 0) : config._a[i];\n    }\n\n    // Check for 24:00:00.000\n    if (config._a[HOUR] === 24 &&\n            config._a[MINUTE] === 0 &&\n            config._a[SECOND] === 0 &&\n            config._a[MILLISECOND] === 0) {\n        config._nextDay = true;\n        config._a[HOUR] = 0;\n    }\n\n    config._d = (config._useUTC ? createUTCDate : createDate).apply(null, input);\n    // Apply timezone offset from input. The actual utcOffset can be changed\n    // with parseZone.\n    if (config._tzm != null) {\n        config._d.setUTCMinutes(config._d.getUTCMinutes() - config._tzm);\n    }\n\n    if (config._nextDay) {\n        config._a[HOUR] = 24;\n    }\n}\n\nfunction dayOfYearFromWeekInfo(config) {\n    var w, weekYear, week, weekday, dow, doy, temp, weekdayOverflow;\n\n    w = config._w;\n    if (w.GG != null || w.W != null || w.E != null) {\n        dow = 1;\n        doy = 4;\n\n        // TODO: We need to take the current isoWeekYear, but that depends on\n        // how we interpret now (local, utc, fixed offset). 
So create\n        // a now version of current config (take local/utc/offset flags, and\n        // create now).\n        weekYear = defaults(w.GG, config._a[YEAR], weekOfYear(createLocal(), 1, 4).year);\n        week = defaults(w.W, 1);\n        weekday = defaults(w.E, 1);\n        if (weekday < 1 || weekday > 7) {\n            weekdayOverflow = true;\n        }\n    } else {\n        dow = config._locale._week.dow;\n        doy = config._locale._week.doy;\n\n        var curWeek = weekOfYear(createLocal(), dow, doy);\n\n        weekYear = defaults(w.gg, config._a[YEAR], curWeek.year);\n\n        // Default to current week.\n        week = defaults(w.w, curWeek.week);\n\n        if (w.d != null) {\n            // weekday -- low day numbers are considered next week\n            weekday = w.d;\n            if (weekday < 0 || weekday > 6) {\n                weekdayOverflow = true;\n            }\n        } else if (w.e != null) {\n            // local weekday -- counting starts from begining of week\n            weekday = w.e + dow;\n            if (w.e < 0 || w.e > 6) {\n                weekdayOverflow = true;\n            }\n        } else {\n            // default to begining of week\n            weekday = dow;\n        }\n    }\n    if (week < 1 || week > weeksInYear(weekYear, dow, doy)) {\n        getParsingFlags(config)._overflowWeeks = true;\n    } else if (weekdayOverflow != null) {\n        getParsingFlags(config)._overflowWeekday = true;\n    } else {\n        temp = dayOfYearFromWeeks(weekYear, week, weekday, dow, doy);\n        config._a[YEAR] = temp.year;\n        config._dayOfYear = temp.dayOfYear;\n    }\n}\n\n// constant that refers to the ISO standard\nhooks.ISO_8601 = function () {};\n\n// date from string and format string\nfunction configFromStringAndFormat(config) {\n    // TODO: Move this to another part of the creation flow to prevent circular deps\n    if (config._f === hooks.ISO_8601) {\n        configFromISO(config);\n        return;\n    
}\n\n    config._a = [];\n    getParsingFlags(config).empty = true;\n\n    // This array is used to make a Date, either with `new Date` or `Date.UTC`\n    var string = '' + config._i,\n        i, parsedInput, tokens, token, skipped,\n        stringLength = string.length,\n        totalParsedInputLength = 0;\n\n    tokens = expandFormat(config._f, config._locale).match(formattingTokens) || [];\n\n    for (i = 0; i < tokens.length; i++) {\n        token = tokens[i];\n        parsedInput = (string.match(getParseRegexForToken(token, config)) || [])[0];\n        // console.log('token', token, 'parsedInput', parsedInput,\n        //         'regex', getParseRegexForToken(token, config));\n        if (parsedInput) {\n            skipped = string.substr(0, string.indexOf(parsedInput));\n            if (skipped.length > 0) {\n                getParsingFlags(config).unusedInput.push(skipped);\n            }\n            string = string.slice(string.indexOf(parsedInput) + parsedInput.length);\n            totalParsedInputLength += parsedInput.length;\n        }\n        // don't parse if it's not a known token\n        if (formatTokenFunctions[token]) {\n            if (parsedInput) {\n                getParsingFlags(config).empty = false;\n            }\n            else {\n                getParsingFlags(config).unusedTokens.push(token);\n            }\n            addTimeToArrayFromToken(token, parsedInput, config);\n        }\n        else if (config._strict && !parsedInput) {\n            getParsingFlags(config).unusedTokens.push(token);\n        }\n    }\n\n    // add remaining unparsed input length to the string\n    getParsingFlags(config).charsLeftOver = stringLength - totalParsedInputLength;\n    if (string.length > 0) {\n        getParsingFlags(config).unusedInput.push(string);\n    }\n\n    // clear _12h flag if hour is <= 12\n    if (config._a[HOUR] <= 12 &&\n        getParsingFlags(config).bigHour === true &&\n        config._a[HOUR] > 0) {\n        
getParsingFlags(config).bigHour = undefined;\n    }\n\n    getParsingFlags(config).parsedDateParts = config._a.slice(0);\n    getParsingFlags(config).meridiem = config._meridiem;\n    // handle meridiem\n    config._a[HOUR] = meridiemFixWrap(config._locale, config._a[HOUR], config._meridiem);\n\n    configFromArray(config);\n    checkOverflow(config);\n}\n\n\nfunction meridiemFixWrap (locale, hour, meridiem) {\n    var isPm;\n\n    if (meridiem == null) {\n        // nothing to do\n        return hour;\n    }\n    if (locale.meridiemHour != null) {\n        return locale.meridiemHour(hour, meridiem);\n    } else if (locale.isPM != null) {\n        // Fallback\n        isPm = locale.isPM(meridiem);\n        if (isPm && hour < 12) {\n            hour += 12;\n        }\n        if (!isPm && hour === 12) {\n            hour = 0;\n        }\n        return hour;\n    } else {\n        // this is not supposed to happen\n        return hour;\n    }\n}\n\n// date from string and array of format strings\nfunction configFromStringAndArray(config) {\n    var tempConfig,\n        bestMoment,\n\n        scoreToBeat,\n        i,\n        currentScore;\n\n    if (config._f.length === 0) {\n        getParsingFlags(config).invalidFormat = true;\n        config._d = new Date(NaN);\n        return;\n    }\n\n    for (i = 0; i < config._f.length; i++) {\n        currentScore = 0;\n        tempConfig = copyConfig({}, config);\n        if (config._useUTC != null) {\n            tempConfig._useUTC = config._useUTC;\n        }\n        tempConfig._f = config._f[i];\n        configFromStringAndFormat(tempConfig);\n\n        if (!isValid(tempConfig)) {\n            continue;\n        }\n\n        // if there is any input that was not parsed add a penalty for that format\n        currentScore += getParsingFlags(tempConfig).charsLeftOver;\n\n        //or tokens\n        currentScore += getParsingFlags(tempConfig).unusedTokens.length * 10;\n\n        getParsingFlags(tempConfig).score = 
currentScore;\n\n        if (scoreToBeat == null || currentScore < scoreToBeat) {\n            scoreToBeat = currentScore;\n            bestMoment = tempConfig;\n        }\n    }\n\n    extend(config, bestMoment || tempConfig);\n}\n\nfunction configFromObject(config) {\n    if (config._d) {\n        return;\n    }\n\n    var i = normalizeObjectUnits(config._i);\n    config._a = map([i.year, i.month, i.day || i.date, i.hour, i.minute, i.second, i.millisecond], function (obj) {\n        return obj && parseInt(obj, 10);\n    });\n\n    configFromArray(config);\n}\n\nfunction createFromConfig (config) {\n    var res = new Moment(checkOverflow(prepareConfig(config)));\n    if (res._nextDay) {\n        // Adding is smart enough around DST\n        res.add(1, 'd');\n        res._nextDay = undefined;\n    }\n\n    return res;\n}\n\nfunction prepareConfig (config) {\n    var input = config._i,\n        format = config._f;\n\n    config._locale = config._locale || getLocale(config._l);\n\n    if (input === null || (format === undefined && input === '')) {\n        return createInvalid({nullInput: true});\n    }\n\n    if (typeof input === 'string') {\n        config._i = input = config._locale.preparse(input);\n    }\n\n    if (isMoment(input)) {\n        return new Moment(checkOverflow(input));\n    } else if (isDate(input)) {\n        config._d = input;\n    } else if (isArray(format)) {\n        configFromStringAndArray(config);\n    } else if (format) {\n        configFromStringAndFormat(config);\n    }  else {\n        configFromInput(config);\n    }\n\n    if (!isValid(config)) {\n        config._d = null;\n    }\n\n    return config;\n}\n\nfunction configFromInput(config) {\n    var input = config._i;\n    if (input === undefined) {\n        config._d = new Date(hooks.now());\n    } else if (isDate(input)) {\n        config._d = new Date(input.valueOf());\n    } else if (typeof input === 'string') {\n        configFromString(config);\n    } else if (isArray(input)) 
{\n        config._a = map(input.slice(0), function (obj) {\n            return parseInt(obj, 10);\n        });\n        configFromArray(config);\n    } else if (typeof(input) === 'object') {\n        configFromObject(config);\n    } else if (isNumber(input)) {\n        // from milliseconds\n        config._d = new Date(input);\n    } else {\n        hooks.createFromInputFallback(config);\n    }\n}\n\nfunction createLocalOrUTC (input, format, locale, strict, isUTC) {\n    var c = {};\n\n    if (locale === true || locale === false) {\n        strict = locale;\n        locale = undefined;\n    }\n\n    if ((isObject(input) && isObjectEmpty(input)) ||\n            (isArray(input) && input.length === 0)) {\n        input = undefined;\n    }\n    // object construction must be done this way.\n    // https://github.com/moment/moment/issues/1423\n    c._isAMomentObject = true;\n    c._useUTC = c._isUTC = isUTC;\n    c._l = locale;\n    c._i = input;\n    c._f = format;\n    c._strict = strict;\n\n    return createFromConfig(c);\n}\n\nfunction createLocal (input, format, locale, strict) {\n    return createLocalOrUTC(input, format, locale, strict, false);\n}\n\nvar prototypeMin = deprecate(\n    'moment().min is deprecated, use moment.max instead. http://momentjs.com/guides/#/warnings/min-max/',\n    function () {\n        var other = createLocal.apply(null, arguments);\n        if (this.isValid() && other.isValid()) {\n            return other < this ? this : other;\n        } else {\n            return createInvalid();\n        }\n    }\n);\n\nvar prototypeMax = deprecate(\n    'moment().max is deprecated, use moment.min instead. http://momentjs.com/guides/#/warnings/min-max/',\n    function () {\n        var other = createLocal.apply(null, arguments);\n        if (this.isValid() && other.isValid()) {\n            return other > this ? 
this : other;\n        } else {\n            return createInvalid();\n        }\n    }\n);\n\n// Pick a moment m from moments so that m[fn](other) is true for all\n// other. This relies on the function fn to be transitive.\n//\n// moments should either be an array of moment objects or an array, whose\n// first element is an array of moment objects.\nfunction pickBy(fn, moments) {\n    var res, i;\n    if (moments.length === 1 && isArray(moments[0])) {\n        moments = moments[0];\n    }\n    if (!moments.length) {\n        return createLocal();\n    }\n    res = moments[0];\n    for (i = 1; i < moments.length; ++i) {\n        if (!moments[i].isValid() || moments[i][fn](res)) {\n            res = moments[i];\n        }\n    }\n    return res;\n}\n\n// TODO: Use [].sort instead?\nfunction min () {\n    var args = [].slice.call(arguments, 0);\n\n    return pickBy('isBefore', args);\n}\n\nfunction max () {\n    var args = [].slice.call(arguments, 0);\n\n    return pickBy('isAfter', args);\n}\n\nvar now = function () {\n    return Date.now ? 
Date.now() : +(new Date());\n};\n\nfunction Duration (duration) {\n    var normalizedInput = normalizeObjectUnits(duration),\n        years = normalizedInput.year || 0,\n        quarters = normalizedInput.quarter || 0,\n        months = normalizedInput.month || 0,\n        weeks = normalizedInput.week || 0,\n        days = normalizedInput.day || 0,\n        hours = normalizedInput.hour || 0,\n        minutes = normalizedInput.minute || 0,\n        seconds = normalizedInput.second || 0,\n        milliseconds = normalizedInput.millisecond || 0;\n\n    // representation for dateAddRemove\n    this._milliseconds = +milliseconds +\n        seconds * 1e3 + // 1000\n        minutes * 6e4 + // 1000 * 60\n        hours * 1000 * 60 * 60; //using 1000 * 60 * 60 instead of 36e5 to avoid floating point rounding errors https://github.com/moment/moment/issues/2978\n    // Because of dateAddRemove treats 24 hours as different from a\n    // day when working around DST, we need to store them separately\n    this._days = +days +\n        weeks * 7;\n    // It is impossible translate months into days without knowing\n    // which months you are are talking about, so we have to store\n    // it separately.\n    this._months = +months +\n        quarters * 3 +\n        years * 12;\n\n    this._data = {};\n\n    this._locale = getLocale();\n\n    this._bubble();\n}\n\nfunction isDuration (obj) {\n    return obj instanceof Duration;\n}\n\nfunction absRound (number) {\n    if (number < 0) {\n        return Math.round(-1 * number) * -1;\n    } else {\n        return Math.round(number);\n    }\n}\n\n// FORMATTING\n\nfunction offset (token, separator) {\n    addFormatToken(token, 0, 0, function () {\n        var offset = this.utcOffset();\n        var sign = '+';\n        if (offset < 0) {\n            offset = -offset;\n            sign = '-';\n        }\n        return sign + zeroFill(~~(offset / 60), 2) + separator + zeroFill(~~(offset) % 60, 2);\n    });\n}\n\noffset('Z', 
':');\noffset('ZZ', '');\n\n// PARSING\n\naddRegexToken('Z',  matchShortOffset);\naddRegexToken('ZZ', matchShortOffset);\naddParseToken(['Z', 'ZZ'], function (input, array, config) {\n    config._useUTC = true;\n    config._tzm = offsetFromString(matchShortOffset, input);\n});\n\n// HELPERS\n\n// timezone chunker\n// '+10:00' > ['10',  '00']\n// '-1530'  > ['-15', '30']\nvar chunkOffset = /([\\+\\-]|\\d\\d)/gi;\n\nfunction offsetFromString(matcher, string) {\n    var matches = (string || '').match(matcher);\n\n    if (matches === null) {\n        return null;\n    }\n\n    var chunk   = matches[matches.length - 1] || [];\n    var parts   = (chunk + '').match(chunkOffset) || ['-', 0, 0];\n    var minutes = +(parts[1] * 60) + toInt(parts[2]);\n\n    return minutes === 0 ?\n      0 :\n      parts[0] === '+' ? minutes : -minutes;\n}\n\n// Return a moment from input, that is local/utc/zone equivalent to model.\nfunction cloneWithOffset(input, model) {\n    var res, diff;\n    if (model._isUTC) {\n        res = model.clone();\n        diff = (isMoment(input) || isDate(input) ? input.valueOf() : createLocal(input).valueOf()) - res.valueOf();\n        // Use low-level api, because this fn is low-level api.\n        res._d.setTime(res._d.valueOf() + diff);\n        hooks.updateOffset(res, false);\n        return res;\n    } else {\n        return createLocal(input).local();\n    }\n}\n\nfunction getDateOffset (m) {\n    // On Firefox.24 Date#getTimezoneOffset returns a floating point.\n    // https://github.com/moment/moment/pull/1871\n    return -Math.round(m._d.getTimezoneOffset() / 15) * 15;\n}\n\n// HOOKS\n\n// This function will be called whenever a moment is mutated.\n// It is intended to keep the offset in sync with the timezone.\nhooks.updateOffset = function () {};\n\n// MOMENTS\n\n// keepLocalTime = true means only change the timezone, without\n// affecting the local hour. 
So 5:31:26 +0300 --[utcOffset(2, true)]-->\n// 5:31:26 +0200 It is possible that 5:31:26 doesn't exist with offset\n// +0200, so we adjust the time as needed, to be valid.\n//\n// Keeping the time actually adds/subtracts (one hour)\n// from the actual represented time. That is why we call updateOffset\n// a second time. In case it wants us to change the offset again\n// _changeInProgress == true case, then we have to adjust, because\n// there is no such time in the given timezone.\nfunction getSetOffset (input, keepLocalTime) {\n    var offset = this._offset || 0,\n        localAdjust;\n    if (!this.isValid()) {\n        return input != null ? this : NaN;\n    }\n    if (input != null) {\n        if (typeof input === 'string') {\n            input = offsetFromString(matchShortOffset, input);\n            if (input === null) {\n                return this;\n            }\n        } else if (Math.abs(input) < 16) {\n            input = input * 60;\n        }\n        if (!this._isUTC && keepLocalTime) {\n            localAdjust = getDateOffset(this);\n        }\n        this._offset = input;\n        this._isUTC = true;\n        if (localAdjust != null) {\n            this.add(localAdjust, 'm');\n        }\n        if (offset !== input) {\n            if (!keepLocalTime || this._changeInProgress) {\n                addSubtract(this, createDuration(input - offset, 'm'), 1, false);\n            } else if (!this._changeInProgress) {\n                this._changeInProgress = true;\n                hooks.updateOffset(this, true);\n                this._changeInProgress = null;\n            }\n        }\n        return this;\n    } else {\n        return this._isUTC ? 
offset : getDateOffset(this);\n    }\n}\n\nfunction getSetZone (input, keepLocalTime) {\n    if (input != null) {\n        if (typeof input !== 'string') {\n            input = -input;\n        }\n\n        this.utcOffset(input, keepLocalTime);\n\n        return this;\n    } else {\n        return -this.utcOffset();\n    }\n}\n\nfunction setOffsetToUTC (keepLocalTime) {\n    return this.utcOffset(0, keepLocalTime);\n}\n\nfunction setOffsetToLocal (keepLocalTime) {\n    if (this._isUTC) {\n        this.utcOffset(0, keepLocalTime);\n        this._isUTC = false;\n\n        if (keepLocalTime) {\n            this.subtract(getDateOffset(this), 'm');\n        }\n    }\n    return this;\n}\n\nfunction setOffsetToParsedOffset () {\n    if (this._tzm != null) {\n        this.utcOffset(this._tzm);\n    } else if (typeof this._i === 'string') {\n        var tZone = offsetFromString(matchOffset, this._i);\n        if (tZone != null) {\n            this.utcOffset(tZone);\n        }\n        else {\n            this.utcOffset(0, true);\n        }\n    }\n    return this;\n}\n\nfunction hasAlignedHourOffset (input) {\n    if (!this.isValid()) {\n        return false;\n    }\n    input = input ? createLocal(input).utcOffset() : 0;\n\n    return (this.utcOffset() - input) % 60 === 0;\n}\n\nfunction isDaylightSavingTime () {\n    return (\n        this.utcOffset() > this.clone().month(0).utcOffset() ||\n        this.utcOffset() > this.clone().month(5).utcOffset()\n    );\n}\n\nfunction isDaylightSavingTimeShifted () {\n    if (!isUndefined(this._isDSTShifted)) {\n        return this._isDSTShifted;\n    }\n\n    var c = {};\n\n    copyConfig(c, this);\n    c = prepareConfig(c);\n\n    if (c._a) {\n        var other = c._isUTC ? 
createUTC(c._a) : createLocal(c._a);\n        this._isDSTShifted = this.isValid() &&\n            compareArrays(c._a, other.toArray()) > 0;\n    } else {\n        this._isDSTShifted = false;\n    }\n\n    return this._isDSTShifted;\n}\n\nfunction isLocal () {\n    return this.isValid() ? !this._isUTC : false;\n}\n\nfunction isUtcOffset () {\n    return this.isValid() ? this._isUTC : false;\n}\n\nfunction isUtc () {\n    return this.isValid() ? this._isUTC && this._offset === 0 : false;\n}\n\n// ASP.NET json date format regex\nvar aspNetRegex = /^(\\-)?(?:(\\d*)[. ])?(\\d+)\\:(\\d+)(?:\\:(\\d+)(\\.\\d*)?)?$/;\n\n// from http://docs.closure-library.googlecode.com/git/closure_goog_date_date.js.source.html\n// somewhat more in line with 4.4.3.2 2004 spec, but allows decimal anywhere\n// and further modified to allow for strings containing both week and day\nvar isoRegex = /^(-)?P(?:(-?[0-9,.]*)Y)?(?:(-?[0-9,.]*)M)?(?:(-?[0-9,.]*)W)?(?:(-?[0-9,.]*)D)?(?:T(?:(-?[0-9,.]*)H)?(?:(-?[0-9,.]*)M)?(?:(-?[0-9,.]*)S)?)?$/;\n\nfunction createDuration (input, key) {\n    var duration = input,\n        // matching against regexp is expensive, do it on demand\n        match = null,\n        sign,\n        ret,\n        diffRes;\n\n    if (isDuration(input)) {\n        duration = {\n            ms : input._milliseconds,\n            d  : input._days,\n            M  : input._months\n        };\n    } else if (isNumber(input)) {\n        duration = {};\n        if (key) {\n            duration[key] = input;\n        } else {\n            duration.milliseconds = input;\n        }\n    } else if (!!(match = aspNetRegex.exec(input))) {\n        sign = (match[1] === '-') ? 
-1 : 1;\n        duration = {\n            y  : 0,\n            d  : toInt(match[DATE])                         * sign,\n            h  : toInt(match[HOUR])                         * sign,\n            m  : toInt(match[MINUTE])                       * sign,\n            s  : toInt(match[SECOND])                       * sign,\n            ms : toInt(absRound(match[MILLISECOND] * 1000)) * sign // the millisecond decimal point is included in the match\n        };\n    } else if (!!(match = isoRegex.exec(input))) {\n        sign = (match[1] === '-') ? -1 : 1;\n        duration = {\n            y : parseIso(match[2], sign),\n            M : parseIso(match[3], sign),\n            w : parseIso(match[4], sign),\n            d : parseIso(match[5], sign),\n            h : parseIso(match[6], sign),\n            m : parseIso(match[7], sign),\n            s : parseIso(match[8], sign)\n        };\n    } else if (duration == null) {// checks for null or undefined\n        duration = {};\n    } else if (typeof duration === 'object' && ('from' in duration || 'to' in duration)) {\n        diffRes = momentsDifference(createLocal(duration.from), createLocal(duration.to));\n\n        duration = {};\n        duration.ms = diffRes.milliseconds;\n        duration.M = diffRes.months;\n    }\n\n    ret = new Duration(duration);\n\n    if (isDuration(input) && hasOwnProp(input, '_locale')) {\n        ret._locale = input._locale;\n    }\n\n    return ret;\n}\n\ncreateDuration.fn = Duration.prototype;\n\nfunction parseIso (inp, sign) {\n    // We'd normally use ~~inp for this, but unfortunately it also\n    // converts floats to ints.\n    // inp may be undefined, so careful calling replace on it.\n    var res = inp && parseFloat(inp.replace(',', '.'));\n    // apply sign while we're at it\n    return (isNaN(res) ? 
0 : res) * sign;\n}\n\nfunction positiveMomentsDifference(base, other) {\n    var res = {milliseconds: 0, months: 0};\n\n    res.months = other.month() - base.month() +\n        (other.year() - base.year()) * 12;\n    if (base.clone().add(res.months, 'M').isAfter(other)) {\n        --res.months;\n    }\n\n    res.milliseconds = +other - +(base.clone().add(res.months, 'M'));\n\n    return res;\n}\n\nfunction momentsDifference(base, other) {\n    var res;\n    if (!(base.isValid() && other.isValid())) {\n        return {milliseconds: 0, months: 0};\n    }\n\n    other = cloneWithOffset(other, base);\n    if (base.isBefore(other)) {\n        res = positiveMomentsDifference(base, other);\n    } else {\n        res = positiveMomentsDifference(other, base);\n        res.milliseconds = -res.milliseconds;\n        res.months = -res.months;\n    }\n\n    return res;\n}\n\n// TODO: remove 'name' arg after deprecation is removed\nfunction createAdder(direction, name) {\n    return function (val, period) {\n        var dur, tmp;\n        //invert the arguments, but complain about it\n        if (period !== null && !isNaN(+period)) {\n            deprecateSimple(name, 'moment().' + name  + '(period, number) is deprecated. Please use moment().' + name + '(number, period). ' +\n            'See http://momentjs.com/guides/#/warnings/add-inverted-param/ for more info.');\n            tmp = val; val = period; period = tmp;\n        }\n\n        val = typeof val === 'string' ? +val : val;\n        dur = createDuration(val, period);\n        addSubtract(this, dur, direction);\n        return this;\n    };\n}\n\nfunction addSubtract (mom, duration, isAdding, updateOffset) {\n    var milliseconds = duration._milliseconds,\n        days = absRound(duration._days),\n        months = absRound(duration._months);\n\n    if (!mom.isValid()) {\n        // No op\n        return;\n    }\n\n    updateOffset = updateOffset == null ? 
true : updateOffset;\n\n    if (milliseconds) {\n        mom._d.setTime(mom._d.valueOf() + milliseconds * isAdding);\n    }\n    if (days) {\n        set$1(mom, 'Date', get(mom, 'Date') + days * isAdding);\n    }\n    if (months) {\n        setMonth(mom, get(mom, 'Month') + months * isAdding);\n    }\n    if (updateOffset) {\n        hooks.updateOffset(mom, days || months);\n    }\n}\n\nvar add      = createAdder(1, 'add');\nvar subtract = createAdder(-1, 'subtract');\n\nfunction getCalendarFormat(myMoment, now) {\n    var diff = myMoment.diff(now, 'days', true);\n    return diff < -6 ? 'sameElse' :\n            diff < -1 ? 'lastWeek' :\n            diff < 0 ? 'lastDay' :\n            diff < 1 ? 'sameDay' :\n            diff < 2 ? 'nextDay' :\n            diff < 7 ? 'nextWeek' : 'sameElse';\n}\n\nfunction calendar$1 (time, formats) {\n    // We want to compare the start of today, vs this.\n    // Getting start-of-today depends on whether we're local/utc/offset or not.\n    var now = time || createLocal(),\n        sod = cloneWithOffset(now, this).startOf('day'),\n        format = hooks.calendarFormat(this, sod) || 'sameElse';\n\n    var output = formats && (isFunction(formats[format]) ? formats[format].call(this, now) : formats[format]);\n\n    return this.format(output || this.localeData().calendar(format, this, createLocal(now)));\n}\n\nfunction clone () {\n    return new Moment(this);\n}\n\nfunction isAfter (input, units) {\n    var localInput = isMoment(input) ? input : createLocal(input);\n    if (!(this.isValid() && localInput.isValid())) {\n        return false;\n    }\n    units = normalizeUnits(!isUndefined(units) ? units : 'millisecond');\n    if (units === 'millisecond') {\n        return this.valueOf() > localInput.valueOf();\n    } else {\n        return localInput.valueOf() < this.clone().startOf(units).valueOf();\n    }\n}\n\nfunction isBefore (input, units) {\n    var localInput = isMoment(input) ? 
input : createLocal(input);\n    if (!(this.isValid() && localInput.isValid())) {\n        return false;\n    }\n    units = normalizeUnits(!isUndefined(units) ? units : 'millisecond');\n    if (units === 'millisecond') {\n        return this.valueOf() < localInput.valueOf();\n    } else {\n        return this.clone().endOf(units).valueOf() < localInput.valueOf();\n    }\n}\n\nfunction isBetween (from, to, units, inclusivity) {\n    inclusivity = inclusivity || '()';\n    return (inclusivity[0] === '(' ? this.isAfter(from, units) : !this.isBefore(from, units)) &&\n        (inclusivity[1] === ')' ? this.isBefore(to, units) : !this.isAfter(to, units));\n}\n\nfunction isSame (input, units) {\n    var localInput = isMoment(input) ? input : createLocal(input),\n        inputMs;\n    if (!(this.isValid() && localInput.isValid())) {\n        return false;\n    }\n    units = normalizeUnits(units || 'millisecond');\n    if (units === 'millisecond') {\n        return this.valueOf() === localInput.valueOf();\n    } else {\n        inputMs = localInput.valueOf();\n        return this.clone().startOf(units).valueOf() <= inputMs && inputMs <= this.clone().endOf(units).valueOf();\n    }\n}\n\nfunction isSameOrAfter (input, units) {\n    return this.isSame(input, units) || this.isAfter(input,units);\n}\n\nfunction isSameOrBefore (input, units) {\n    return this.isSame(input, units) || this.isBefore(input,units);\n}\n\nfunction diff (input, units, asFloat) {\n    var that,\n        zoneDelta,\n        delta, output;\n\n    if (!this.isValid()) {\n        return NaN;\n    }\n\n    that = cloneWithOffset(input, this);\n\n    if (!that.isValid()) {\n        return NaN;\n    }\n\n    zoneDelta = (that.utcOffset() - this.utcOffset()) * 6e4;\n\n    units = normalizeUnits(units);\n\n    if (units === 'year' || units === 'month' || units === 'quarter') {\n        output = monthDiff(this, that);\n        if (units === 'quarter') {\n            output = output / 3;\n        } else if 
(units === 'year') {\n            output = output / 12;\n        }\n    } else {\n        delta = this - that;\n        output = units === 'second' ? delta / 1e3 : // 1000\n            units === 'minute' ? delta / 6e4 : // 1000 * 60\n            units === 'hour' ? delta / 36e5 : // 1000 * 60 * 60\n            units === 'day' ? (delta - zoneDelta) / 864e5 : // 1000 * 60 * 60 * 24, negate dst\n            units === 'week' ? (delta - zoneDelta) / 6048e5 : // 1000 * 60 * 60 * 24 * 7, negate dst\n            delta;\n    }\n    return asFloat ? output : absFloor(output);\n}\n\nfunction monthDiff (a, b) {\n    // difference in months\n    var wholeMonthDiff = ((b.year() - a.year()) * 12) + (b.month() - a.month()),\n        // b is in (anchor - 1 month, anchor + 1 month)\n        anchor = a.clone().add(wholeMonthDiff, 'months'),\n        anchor2, adjust;\n\n    if (b - anchor < 0) {\n        anchor2 = a.clone().add(wholeMonthDiff - 1, 'months');\n        // linear across the month\n        adjust = (b - anchor) / (anchor - anchor2);\n    } else {\n        anchor2 = a.clone().add(wholeMonthDiff + 1, 'months');\n        // linear across the month\n        adjust = (b - anchor) / (anchor2 - anchor);\n    }\n\n    //check for negative zero, return zero if negative zero\n    return -(wholeMonthDiff + adjust) || 0;\n}\n\nhooks.defaultFormat = 'YYYY-MM-DDTHH:mm:ssZ';\nhooks.defaultFormatUtc = 'YYYY-MM-DDTHH:mm:ss[Z]';\n\nfunction toString () {\n    return this.clone().locale('en').format('ddd MMM DD YYYY HH:mm:ss [GMT]ZZ');\n}\n\nfunction toISOString () {\n    var m = this.clone().utc();\n    if (0 < m.year() && m.year() <= 9999) {\n        if (isFunction(Date.prototype.toISOString)) {\n            // native implementation is ~50x faster, use it when we can\n            return this.toDate().toISOString();\n        } else {\n            return formatMoment(m, 'YYYY-MM-DD[T]HH:mm:ss.SSS[Z]');\n        }\n    } else {\n        return formatMoment(m, 
'YYYYYY-MM-DD[T]HH:mm:ss.SSS[Z]');\n    }\n}\n\n/**\n * Return a human readable representation of a moment that can\n * also be evaluated to get a new moment which is the same\n *\n * @link https://nodejs.org/dist/latest/docs/api/util.html#util_custom_inspect_function_on_objects\n */\nfunction inspect () {\n    if (!this.isValid()) {\n        return 'moment.invalid(/* ' + this._i + ' */)';\n    }\n    var func = 'moment';\n    var zone = '';\n    if (!this.isLocal()) {\n        func = this.utcOffset() === 0 ? 'moment.utc' : 'moment.parseZone';\n        zone = 'Z';\n    }\n    var prefix = '[' + func + '(\"]';\n    var year = (0 < this.year() && this.year() <= 9999) ? 'YYYY' : 'YYYYYY';\n    var datetime = '-MM-DD[T]HH:mm:ss.SSS';\n    var suffix = zone + '[\")]';\n\n    return this.format(prefix + year + datetime + suffix);\n}\n\nfunction format (inputString) {\n    if (!inputString) {\n        inputString = this.isUtc() ? hooks.defaultFormatUtc : hooks.defaultFormat;\n    }\n    var output = formatMoment(this, inputString);\n    return this.localeData().postformat(output);\n}\n\nfunction from (time, withoutSuffix) {\n    if (this.isValid() &&\n            ((isMoment(time) && time.isValid()) ||\n             createLocal(time).isValid())) {\n        return createDuration({to: this, from: time}).locale(this.locale()).humanize(!withoutSuffix);\n    } else {\n        return this.localeData().invalidDate();\n    }\n}\n\nfunction fromNow (withoutSuffix) {\n    return this.from(createLocal(), withoutSuffix);\n}\n\nfunction to (time, withoutSuffix) {\n    if (this.isValid() &&\n            ((isMoment(time) && time.isValid()) ||\n             createLocal(time).isValid())) {\n        return createDuration({from: this, to: time}).locale(this.locale()).humanize(!withoutSuffix);\n    } else {\n        return this.localeData().invalidDate();\n    }\n}\n\nfunction toNow (withoutSuffix) {\n    return this.to(createLocal(), withoutSuffix);\n}\n\n// If passed a locale key, it will 
set the locale for this\n// instance.  Otherwise, it will return the locale configuration\n// variables for this instance.\nfunction locale (key) {\n    var newLocaleData;\n\n    if (key === undefined) {\n        return this._locale._abbr;\n    } else {\n        newLocaleData = getLocale(key);\n        if (newLocaleData != null) {\n            this._locale = newLocaleData;\n        }\n        return this;\n    }\n}\n\nvar lang = deprecate(\n    'moment().lang() is deprecated. Instead, use moment().localeData() to get the language configuration. Use moment().locale() to change languages.',\n    function (key) {\n        if (key === undefined) {\n            return this.localeData();\n        } else {\n            return this.locale(key);\n        }\n    }\n);\n\nfunction localeData () {\n    return this._locale;\n}\n\nfunction startOf (units) {\n    units = normalizeUnits(units);\n    // the following switch intentionally omits break keywords\n    // to utilize falling through the cases.\n    switch (units) {\n        case 'year':\n            this.month(0);\n            /* falls through */\n        case 'quarter':\n        case 'month':\n            this.date(1);\n            /* falls through */\n        case 'week':\n        case 'isoWeek':\n        case 'day':\n        case 'date':\n            this.hours(0);\n            /* falls through */\n        case 'hour':\n            this.minutes(0);\n            /* falls through */\n        case 'minute':\n            this.seconds(0);\n            /* falls through */\n        case 'second':\n            this.milliseconds(0);\n    }\n\n    // weeks are a special case\n    if (units === 'week') {\n        this.weekday(0);\n    }\n    if (units === 'isoWeek') {\n        this.isoWeekday(1);\n    }\n\n    // quarters are also special\n    if (units === 'quarter') {\n        this.month(Math.floor(this.month() / 3) * 3);\n    }\n\n    return this;\n}\n\nfunction endOf (units) {\n    units = normalizeUnits(units);\n    if 
(units === undefined || units === 'millisecond') {\n        return this;\n    }\n\n    // 'date' is an alias for 'day', so it should be considered as such.\n    if (units === 'date') {\n        units = 'day';\n    }\n\n    return this.startOf(units).add(1, (units === 'isoWeek' ? 'week' : units)).subtract(1, 'ms');\n}\n\nfunction valueOf () {\n    return this._d.valueOf() - ((this._offset || 0) * 60000);\n}\n\nfunction unix () {\n    return Math.floor(this.valueOf() / 1000);\n}\n\nfunction toDate () {\n    return new Date(this.valueOf());\n}\n\nfunction toArray () {\n    var m = this;\n    return [m.year(), m.month(), m.date(), m.hour(), m.minute(), m.second(), m.millisecond()];\n}\n\nfunction toObject () {\n    var m = this;\n    return {\n        years: m.year(),\n        months: m.month(),\n        date: m.date(),\n        hours: m.hours(),\n        minutes: m.minutes(),\n        seconds: m.seconds(),\n        milliseconds: m.milliseconds()\n    };\n}\n\nfunction toJSON () {\n    // new Date(NaN).toJSON() === null\n    return this.isValid() ? 
this.toISOString() : null;\n}\n\nfunction isValid$1 () {\n    return isValid(this);\n}\n\nfunction parsingFlags () {\n    return extend({}, getParsingFlags(this));\n}\n\nfunction invalidAt () {\n    return getParsingFlags(this).overflow;\n}\n\nfunction creationData() {\n    return {\n        input: this._i,\n        format: this._f,\n        locale: this._locale,\n        isUTC: this._isUTC,\n        strict: this._strict\n    };\n}\n\n// FORMATTING\n\naddFormatToken(0, ['gg', 2], 0, function () {\n    return this.weekYear() % 100;\n});\n\naddFormatToken(0, ['GG', 2], 0, function () {\n    return this.isoWeekYear() % 100;\n});\n\nfunction addWeekYearFormatToken (token, getter) {\n    addFormatToken(0, [token, token.length], 0, getter);\n}\n\naddWeekYearFormatToken('gggg',     'weekYear');\naddWeekYearFormatToken('ggggg',    'weekYear');\naddWeekYearFormatToken('GGGG',  'isoWeekYear');\naddWeekYearFormatToken('GGGGG', 'isoWeekYear');\n\n// ALIASES\n\naddUnitAlias('weekYear', 'gg');\naddUnitAlias('isoWeekYear', 'GG');\n\n// PRIORITY\n\naddUnitPriority('weekYear', 1);\naddUnitPriority('isoWeekYear', 1);\n\n\n// PARSING\n\naddRegexToken('G',      matchSigned);\naddRegexToken('g',      matchSigned);\naddRegexToken('GG',     match1to2, match2);\naddRegexToken('gg',     match1to2, match2);\naddRegexToken('GGGG',   match1to4, match4);\naddRegexToken('gggg',   match1to4, match4);\naddRegexToken('GGGGG',  match1to6, match6);\naddRegexToken('ggggg',  match1to6, match6);\n\naddWeekParseToken(['gggg', 'ggggg', 'GGGG', 'GGGGG'], function (input, week, config, token) {\n    week[token.substr(0, 2)] = toInt(input);\n});\n\naddWeekParseToken(['gg', 'GG'], function (input, week, config, token) {\n    week[token] = hooks.parseTwoDigitYear(input);\n});\n\n// MOMENTS\n\nfunction getSetWeekYear (input) {\n    return getSetWeekYearHelper.call(this,\n            input,\n            this.week(),\n            this.weekday(),\n            this.localeData()._week.dow,\n            
this.localeData()._week.doy);\n}\n\nfunction getSetISOWeekYear (input) {\n    return getSetWeekYearHelper.call(this,\n            input, this.isoWeek(), this.isoWeekday(), 1, 4);\n}\n\nfunction getISOWeeksInYear () {\n    return weeksInYear(this.year(), 1, 4);\n}\n\nfunction getWeeksInYear () {\n    var weekInfo = this.localeData()._week;\n    return weeksInYear(this.year(), weekInfo.dow, weekInfo.doy);\n}\n\nfunction getSetWeekYearHelper(input, week, weekday, dow, doy) {\n    var weeksTarget;\n    if (input == null) {\n        return weekOfYear(this, dow, doy).year;\n    } else {\n        weeksTarget = weeksInYear(input, dow, doy);\n        if (week > weeksTarget) {\n            week = weeksTarget;\n        }\n        return setWeekAll.call(this, input, week, weekday, dow, doy);\n    }\n}\n\nfunction setWeekAll(weekYear, week, weekday, dow, doy) {\n    var dayOfYearData = dayOfYearFromWeeks(weekYear, week, weekday, dow, doy),\n        date = createUTCDate(dayOfYearData.year, 0, dayOfYearData.dayOfYear);\n\n    this.year(date.getUTCFullYear());\n    this.month(date.getUTCMonth());\n    this.date(date.getUTCDate());\n    return this;\n}\n\n// FORMATTING\n\naddFormatToken('Q', 0, 'Qo', 'quarter');\n\n// ALIASES\n\naddUnitAlias('quarter', 'Q');\n\n// PRIORITY\n\naddUnitPriority('quarter', 7);\n\n// PARSING\n\naddRegexToken('Q', match1);\naddParseToken('Q', function (input, array) {\n    array[MONTH] = (toInt(input) - 1) * 3;\n});\n\n// MOMENTS\n\nfunction getSetQuarter (input) {\n    return input == null ? Math.ceil((this.month() + 1) / 3) : this.month((input - 1) * 3 + this.month() % 3);\n}\n\n// FORMATTING\n\naddFormatToken('D', ['DD', 2], 'Do', 'date');\n\n// ALIASES\n\naddUnitAlias('date', 'D');\n\n// PRIOROITY\naddUnitPriority('date', 9);\n\n// PARSING\n\naddRegexToken('D',  match1to2);\naddRegexToken('DD', match1to2, match2);\naddRegexToken('Do', function (isStrict, locale) {\n    return isStrict ? 
locale._ordinalParse : locale._ordinalParseLenient;\n});\n\naddParseToken(['D', 'DD'], DATE);\naddParseToken('Do', function (input, array) {\n    array[DATE] = toInt(input.match(match1to2)[0], 10);\n});\n\n// MOMENTS\n\nvar getSetDayOfMonth = makeGetSet('Date', true);\n\n// FORMATTING\n\naddFormatToken('DDD', ['DDDD', 3], 'DDDo', 'dayOfYear');\n\n// ALIASES\n\naddUnitAlias('dayOfYear', 'DDD');\n\n// PRIORITY\naddUnitPriority('dayOfYear', 4);\n\n// PARSING\n\naddRegexToken('DDD',  match1to3);\naddRegexToken('DDDD', match3);\naddParseToken(['DDD', 'DDDD'], function (input, array, config) {\n    config._dayOfYear = toInt(input);\n});\n\n// HELPERS\n\n// MOMENTS\n\nfunction getSetDayOfYear (input) {\n    var dayOfYear = Math.round((this.clone().startOf('day') - this.clone().startOf('year')) / 864e5) + 1;\n    return input == null ? dayOfYear : this.add((input - dayOfYear), 'd');\n}\n\n// FORMATTING\n\naddFormatToken('m', ['mm', 2], 0, 'minute');\n\n// ALIASES\n\naddUnitAlias('minute', 'm');\n\n// PRIORITY\n\naddUnitPriority('minute', 14);\n\n// PARSING\n\naddRegexToken('m',  match1to2);\naddRegexToken('mm', match1to2, match2);\naddParseToken(['m', 'mm'], MINUTE);\n\n// MOMENTS\n\nvar getSetMinute = makeGetSet('Minutes', false);\n\n// FORMATTING\n\naddFormatToken('s', ['ss', 2], 0, 'second');\n\n// ALIASES\n\naddUnitAlias('second', 's');\n\n// PRIORITY\n\naddUnitPriority('second', 15);\n\n// PARSING\n\naddRegexToken('s',  match1to2);\naddRegexToken('ss', match1to2, match2);\naddParseToken(['s', 'ss'], SECOND);\n\n// MOMENTS\n\nvar getSetSecond = makeGetSet('Seconds', false);\n\n// FORMATTING\n\naddFormatToken('S', 0, 0, function () {\n    return ~~(this.millisecond() / 100);\n});\n\naddFormatToken(0, ['SS', 2], 0, function () {\n    return ~~(this.millisecond() / 10);\n});\n\naddFormatToken(0, ['SSS', 3], 0, 'millisecond');\naddFormatToken(0, ['SSSS', 4], 0, function () {\n    return this.millisecond() * 10;\n});\naddFormatToken(0, ['SSSSS', 5], 0, function () {\n    
return this.millisecond() * 100;\n});\naddFormatToken(0, ['SSSSSS', 6], 0, function () {\n    return this.millisecond() * 1000;\n});\naddFormatToken(0, ['SSSSSSS', 7], 0, function () {\n    return this.millisecond() * 10000;\n});\naddFormatToken(0, ['SSSSSSSS', 8], 0, function () {\n    return this.millisecond() * 100000;\n});\naddFormatToken(0, ['SSSSSSSSS', 9], 0, function () {\n    return this.millisecond() * 1000000;\n});\n\n\n// ALIASES\n\naddUnitAlias('millisecond', 'ms');\n\n// PRIORITY\n\naddUnitPriority('millisecond', 16);\n\n// PARSING\n\naddRegexToken('S',    match1to3, match1);\naddRegexToken('SS',   match1to3, match2);\naddRegexToken('SSS',  match1to3, match3);\n\nvar token;\nfor (token = 'SSSS'; token.length <= 9; token += 'S') {\n    addRegexToken(token, matchUnsigned);\n}\n\nfunction parseMs(input, array) {\n    array[MILLISECOND] = toInt(('0.' + input) * 1000);\n}\n\nfor (token = 'S'; token.length <= 9; token += 'S') {\n    addParseToken(token, parseMs);\n}\n// MOMENTS\n\nvar getSetMillisecond = makeGetSet('Milliseconds', false);\n\n// FORMATTING\n\naddFormatToken('z',  0, 0, 'zoneAbbr');\naddFormatToken('zz', 0, 0, 'zoneName');\n\n// MOMENTS\n\nfunction getZoneAbbr () {\n    return this._isUTC ? 'UTC' : '';\n}\n\nfunction getZoneName () {\n    return this._isUTC ? 
'Coordinated Universal Time' : '';\n}\n\nvar proto = Moment.prototype;\n\nproto.add               = add;\nproto.calendar          = calendar$1;\nproto.clone             = clone;\nproto.diff              = diff;\nproto.endOf             = endOf;\nproto.format            = format;\nproto.from              = from;\nproto.fromNow           = fromNow;\nproto.to                = to;\nproto.toNow             = toNow;\nproto.get               = stringGet;\nproto.invalidAt         = invalidAt;\nproto.isAfter           = isAfter;\nproto.isBefore          = isBefore;\nproto.isBetween         = isBetween;\nproto.isSame            = isSame;\nproto.isSameOrAfter     = isSameOrAfter;\nproto.isSameOrBefore    = isSameOrBefore;\nproto.isValid           = isValid$1;\nproto.lang              = lang;\nproto.locale            = locale;\nproto.localeData        = localeData;\nproto.max               = prototypeMax;\nproto.min               = prototypeMin;\nproto.parsingFlags      = parsingFlags;\nproto.set               = stringSet;\nproto.startOf           = startOf;\nproto.subtract          = subtract;\nproto.toArray           = toArray;\nproto.toObject          = toObject;\nproto.toDate            = toDate;\nproto.toISOString       = toISOString;\nproto.inspect           = inspect;\nproto.toJSON            = toJSON;\nproto.toString          = toString;\nproto.unix              = unix;\nproto.valueOf           = valueOf;\nproto.creationData      = creationData;\n\n// Year\nproto.year       = getSetYear;\nproto.isLeapYear = getIsLeapYear;\n\n// Week Year\nproto.weekYear    = getSetWeekYear;\nproto.isoWeekYear = getSetISOWeekYear;\n\n// Quarter\nproto.quarter = proto.quarters = getSetQuarter;\n\n// Month\nproto.month       = getSetMonth;\nproto.daysInMonth = getDaysInMonth;\n\n// Week\nproto.week           = proto.weeks        = getSetWeek;\nproto.isoWeek        = proto.isoWeeks     = getSetISOWeek;\nproto.weeksInYear    = getWeeksInYear;\nproto.isoWeeksInYear = getISOWeeksInYear;\n\n// 
Day\nproto.date       = getSetDayOfMonth;\nproto.day        = proto.days             = getSetDayOfWeek;\nproto.weekday    = getSetLocaleDayOfWeek;\nproto.isoWeekday = getSetISODayOfWeek;\nproto.dayOfYear  = getSetDayOfYear;\n\n// Hour\nproto.hour = proto.hours = getSetHour;\n\n// Minute\nproto.minute = proto.minutes = getSetMinute;\n\n// Second\nproto.second = proto.seconds = getSetSecond;\n\n// Millisecond\nproto.millisecond = proto.milliseconds = getSetMillisecond;\n\n// Offset\nproto.utcOffset            = getSetOffset;\nproto.utc                  = setOffsetToUTC;\nproto.local                = setOffsetToLocal;\nproto.parseZone            = setOffsetToParsedOffset;\nproto.hasAlignedHourOffset = hasAlignedHourOffset;\nproto.isDST                = isDaylightSavingTime;\nproto.isLocal              = isLocal;\nproto.isUtcOffset          = isUtcOffset;\nproto.isUtc                = isUtc;\nproto.isUTC                = isUtc;\n\n// Timezone\nproto.zoneAbbr = getZoneAbbr;\nproto.zoneName = getZoneName;\n\n// Deprecations\nproto.dates  = deprecate('dates accessor is deprecated. Use date instead.', getSetDayOfMonth);\nproto.months = deprecate('months accessor is deprecated. Use month instead', getSetMonth);\nproto.years  = deprecate('years accessor is deprecated. Use year instead', getSetYear);\nproto.zone   = deprecate('moment().zone is deprecated, use moment().utcOffset instead. http://momentjs.com/guides/#/warnings/zone/', getSetZone);\nproto.isDSTShifted = deprecate('isDSTShifted is deprecated. 
See http://momentjs.com/guides/#/warnings/dst-shifted/ for more information', isDaylightSavingTimeShifted);\n\nfunction createUnix (input) {\n    return createLocal(input * 1000);\n}\n\nfunction createInZone () {\n    return createLocal.apply(null, arguments).parseZone();\n}\n\nfunction preParsePostFormat (string) {\n    return string;\n}\n\nvar proto$1 = Locale.prototype;\n\nproto$1.calendar        = calendar;\nproto$1.longDateFormat  = longDateFormat;\nproto$1.invalidDate     = invalidDate;\nproto$1.ordinal         = ordinal;\nproto$1.preparse        = preParsePostFormat;\nproto$1.postformat      = preParsePostFormat;\nproto$1.relativeTime    = relativeTime;\nproto$1.pastFuture      = pastFuture;\nproto$1.set             = set;\n\n// Month\nproto$1.months            =        localeMonths;\nproto$1.monthsShort       =        localeMonthsShort;\nproto$1.monthsParse       =        localeMonthsParse;\nproto$1.monthsRegex       = monthsRegex;\nproto$1.monthsShortRegex  = monthsShortRegex;\n\n// Week\nproto$1.week = localeWeek;\nproto$1.firstDayOfYear = localeFirstDayOfYear;\nproto$1.firstDayOfWeek = localeFirstDayOfWeek;\n\n// Day of Week\nproto$1.weekdays       =        localeWeekdays;\nproto$1.weekdaysMin    =        localeWeekdaysMin;\nproto$1.weekdaysShort  =        localeWeekdaysShort;\nproto$1.weekdaysParse  =        localeWeekdaysParse;\n\nproto$1.weekdaysRegex       =        weekdaysRegex;\nproto$1.weekdaysShortRegex  =        weekdaysShortRegex;\nproto$1.weekdaysMinRegex    =        weekdaysMinRegex;\n\n// Hours\nproto$1.isPM = localeIsPM;\nproto$1.meridiem = localeMeridiem;\n\nfunction get$1 (format, index, field, setter) {\n    var locale = getLocale();\n    var utc = createUTC().set(setter, index);\n    return locale[field](utc, format);\n}\n\nfunction listMonthsImpl (format, index, field) {\n    if (isNumber(format)) {\n        index = format;\n        format = undefined;\n    }\n\n    format = format || '';\n\n    if (index != null) {\n        return 
get$1(format, index, field, 'month');\n    }\n\n    var i;\n    var out = [];\n    for (i = 0; i < 12; i++) {\n        out[i] = get$1(format, i, field, 'month');\n    }\n    return out;\n}\n\n// ()\n// (5)\n// (fmt, 5)\n// (fmt)\n// (true)\n// (true, 5)\n// (true, fmt, 5)\n// (true, fmt)\nfunction listWeekdaysImpl (localeSorted, format, index, field) {\n    if (typeof localeSorted === 'boolean') {\n        if (isNumber(format)) {\n            index = format;\n            format = undefined;\n        }\n\n        format = format || '';\n    } else {\n        format = localeSorted;\n        index = format;\n        localeSorted = false;\n\n        if (isNumber(format)) {\n            index = format;\n            format = undefined;\n        }\n\n        format = format || '';\n    }\n\n    var locale = getLocale(),\n        shift = localeSorted ? locale._week.dow : 0;\n\n    if (index != null) {\n        return get$1(format, (index + shift) % 7, field, 'day');\n    }\n\n    var i;\n    var out = [];\n    for (i = 0; i < 7; i++) {\n        out[i] = get$1(format, (i + shift) % 7, field, 'day');\n    }\n    return out;\n}\n\nfunction listMonths (format, index) {\n    return listMonthsImpl(format, index, 'months');\n}\n\nfunction listMonthsShort (format, index) {\n    return listMonthsImpl(format, index, 'monthsShort');\n}\n\nfunction listWeekdays (localeSorted, format, index) {\n    return listWeekdaysImpl(localeSorted, format, index, 'weekdays');\n}\n\nfunction listWeekdaysShort (localeSorted, format, index) {\n    return listWeekdaysImpl(localeSorted, format, index, 'weekdaysShort');\n}\n\nfunction listWeekdaysMin (localeSorted, format, index) {\n    return listWeekdaysImpl(localeSorted, format, index, 'weekdaysMin');\n}\n\ngetSetGlobalLocale('en', {\n    ordinalParse: /\\d{1,2}(th|st|nd|rd)/,\n    ordinal : function (number) {\n        var b = number % 10,\n            output = (toInt(number % 100 / 10) === 1) ? 'th' :\n            (b === 1) ? 
'st' :\n            (b === 2) ? 'nd' :\n            (b === 3) ? 'rd' : 'th';\n        return number + output;\n    }\n});\n\n// Side effect imports\nhooks.lang = deprecate('moment.lang is deprecated. Use moment.locale instead.', getSetGlobalLocale);\nhooks.langData = deprecate('moment.langData is deprecated. Use moment.localeData instead.', getLocale);\n\nvar mathAbs = Math.abs;\n\nfunction abs () {\n    var data           = this._data;\n\n    this._milliseconds = mathAbs(this._milliseconds);\n    this._days         = mathAbs(this._days);\n    this._months       = mathAbs(this._months);\n\n    data.milliseconds  = mathAbs(data.milliseconds);\n    data.seconds       = mathAbs(data.seconds);\n    data.minutes       = mathAbs(data.minutes);\n    data.hours         = mathAbs(data.hours);\n    data.months        = mathAbs(data.months);\n    data.years         = mathAbs(data.years);\n\n    return this;\n}\n\nfunction addSubtract$1 (duration, input, value, direction) {\n    var other = createDuration(input, value);\n\n    duration._milliseconds += direction * other._milliseconds;\n    duration._days         += direction * other._days;\n    duration._months       += direction * other._months;\n\n    return duration._bubble();\n}\n\n// supports only 2.0-style add(1, 's') or add(duration)\nfunction add$1 (input, value) {\n    return addSubtract$1(this, input, value, 1);\n}\n\n// supports only 2.0-style subtract(1, 's') or subtract(duration)\nfunction subtract$1 (input, value) {\n    return addSubtract$1(this, input, value, -1);\n}\n\nfunction absCeil (number) {\n    if (number < 0) {\n        return Math.floor(number);\n    } else {\n        return Math.ceil(number);\n    }\n}\n\nfunction bubble () {\n    var milliseconds = this._milliseconds;\n    var days         = this._days;\n    var months       = this._months;\n    var data         = this._data;\n    var seconds, minutes, hours, years, monthsFromDays;\n\n    // if we have a mix of positive and negative values, bubble 
down first\n    // check: https://github.com/moment/moment/issues/2166\n    if (!((milliseconds >= 0 && days >= 0 && months >= 0) ||\n            (milliseconds <= 0 && days <= 0 && months <= 0))) {\n        milliseconds += absCeil(monthsToDays(months) + days) * 864e5;\n        days = 0;\n        months = 0;\n    }\n\n    // The following code bubbles up values, see the tests for\n    // examples of what that means.\n    data.milliseconds = milliseconds % 1000;\n\n    seconds           = absFloor(milliseconds / 1000);\n    data.seconds      = seconds % 60;\n\n    minutes           = absFloor(seconds / 60);\n    data.minutes      = minutes % 60;\n\n    hours             = absFloor(minutes / 60);\n    data.hours        = hours % 24;\n\n    days += absFloor(hours / 24);\n\n    // convert days to months\n    monthsFromDays = absFloor(daysToMonths(days));\n    months += monthsFromDays;\n    days -= absCeil(monthsToDays(monthsFromDays));\n\n    // 12 months -> 1 year\n    years = absFloor(months / 12);\n    months %= 12;\n\n    data.days   = days;\n    data.months = months;\n    data.years  = years;\n\n    return this;\n}\n\nfunction daysToMonths (days) {\n    // 400 years have 146097 days (taking into account leap year rules)\n    // 400 years have 12 months === 4800\n    return days * 4800 / 146097;\n}\n\nfunction monthsToDays (months) {\n    // the reverse of daysToMonths\n    return months * 146097 / 4800;\n}\n\nfunction as (units) {\n    var days;\n    var months;\n    var milliseconds = this._milliseconds;\n\n    units = normalizeUnits(units);\n\n    if (units === 'month' || units === 'year') {\n        days   = this._days   + milliseconds / 864e5;\n        months = this._months + daysToMonths(days);\n        return units === 'month' ? 
months : months / 12;\n    } else {\n        // handle milliseconds separately because of floating point math errors (issue #1867)\n        days = this._days + Math.round(monthsToDays(this._months));\n        switch (units) {\n            case 'week'   : return days / 7     + milliseconds / 6048e5;\n            case 'day'    : return days         + milliseconds / 864e5;\n            case 'hour'   : return days * 24    + milliseconds / 36e5;\n            case 'minute' : return days * 1440  + milliseconds / 6e4;\n            case 'second' : return days * 86400 + milliseconds / 1000;\n            // Math.floor prevents floating point math errors here\n            case 'millisecond': return Math.floor(days * 864e5) + milliseconds;\n            default: throw new Error('Unknown unit ' + units);\n        }\n    }\n}\n\n// TODO: Use this.as('ms')?\nfunction valueOf$1 () {\n    return (\n        this._milliseconds +\n        this._days * 864e5 +\n        (this._months % 12) * 2592e6 +\n        toInt(this._months / 12) * 31536e6\n    );\n}\n\nfunction makeAs (alias) {\n    return function () {\n        return this.as(alias);\n    };\n}\n\nvar asMilliseconds = makeAs('ms');\nvar asSeconds      = makeAs('s');\nvar asMinutes      = makeAs('m');\nvar asHours        = makeAs('h');\nvar asDays         = makeAs('d');\nvar asWeeks        = makeAs('w');\nvar asMonths       = makeAs('M');\nvar asYears        = makeAs('y');\n\nfunction get$2 (units) {\n    units = normalizeUnits(units);\n    return this[units + 's']();\n}\n\nfunction makeGetter(name) {\n    return function () {\n        return this._data[name];\n    };\n}\n\nvar milliseconds = makeGetter('milliseconds');\nvar seconds      = makeGetter('seconds');\nvar minutes      = makeGetter('minutes');\nvar hours        = makeGetter('hours');\nvar days         = makeGetter('days');\nvar months       = makeGetter('months');\nvar years        = makeGetter('years');\n\nfunction weeks () {\n    return absFloor(this.days() / 
7);\n}\n\nvar round = Math.round;\nvar thresholds = {\n    s: 45,  // seconds to minute\n    m: 45,  // minutes to hour\n    h: 22,  // hours to day\n    d: 26,  // days to month\n    M: 11   // months to year\n};\n\n// helper function for moment.fn.from, moment.fn.fromNow, and moment.duration.fn.humanize\nfunction substituteTimeAgo(string, number, withoutSuffix, isFuture, locale) {\n    return locale.relativeTime(number || 1, !!withoutSuffix, string, isFuture);\n}\n\nfunction relativeTime$1 (posNegDuration, withoutSuffix, locale) {\n    var duration = createDuration(posNegDuration).abs();\n    var seconds  = round(duration.as('s'));\n    var minutes  = round(duration.as('m'));\n    var hours    = round(duration.as('h'));\n    var days     = round(duration.as('d'));\n    var months   = round(duration.as('M'));\n    var years    = round(duration.as('y'));\n\n    var a = seconds < thresholds.s && ['s', seconds]  ||\n            minutes <= 1           && ['m']           ||\n            minutes < thresholds.m && ['mm', minutes] ||\n            hours   <= 1           && ['h']           ||\n            hours   < thresholds.h && ['hh', hours]   ||\n            days    <= 1           && ['d']           ||\n            days    < thresholds.d && ['dd', days]    ||\n            months  <= 1           && ['M']           ||\n            months  < thresholds.M && ['MM', months]  ||\n            years   <= 1           && ['y']           || ['yy', years];\n\n    a[2] = withoutSuffix;\n    a[3] = +posNegDuration > 0;\n    a[4] = locale;\n    return substituteTimeAgo.apply(null, a);\n}\n\n// This function allows you to set the rounding function for relative time strings\nfunction getSetRelativeTimeRounding (roundingFunction) {\n    if (roundingFunction === undefined) {\n        return round;\n    }\n    if (typeof(roundingFunction) === 'function') {\n        round = roundingFunction;\n        return true;\n    }\n    return false;\n}\n\n// This function allows you to set a threshold 
for relative time strings\nfunction getSetRelativeTimeThreshold (threshold, limit) {\n    if (thresholds[threshold] === undefined) {\n        return false;\n    }\n    if (limit === undefined) {\n        return thresholds[threshold];\n    }\n    thresholds[threshold] = limit;\n    return true;\n}\n\nfunction humanize (withSuffix) {\n    var locale = this.localeData();\n    var output = relativeTime$1(this, !withSuffix, locale);\n\n    if (withSuffix) {\n        output = locale.pastFuture(+this, output);\n    }\n\n    return locale.postformat(output);\n}\n\nvar abs$1 = Math.abs;\n\nfunction toISOString$1() {\n    // for ISO strings we do not use the normal bubbling rules:\n    //  * milliseconds bubble up until they become hours\n    //  * days do not bubble at all\n    //  * months bubble up until they become years\n    // This is because there is no context-free conversion between hours and days\n    // (think of clock changes)\n    // and also not between days and months (28-31 days per month)\n    var seconds = abs$1(this._milliseconds) / 1000;\n    var days         = abs$1(this._days);\n    var months       = abs$1(this._months);\n    var minutes, hours, years;\n\n    // 3600 seconds -> 60 minutes -> 1 hour\n    minutes           = absFloor(seconds / 60);\n    hours             = absFloor(minutes / 60);\n    seconds %= 60;\n    minutes %= 60;\n\n    // 12 months -> 1 year\n    years  = absFloor(months / 12);\n    months %= 12;\n\n\n    // inspired by https://github.com/dordille/moment-isoduration/blob/master/moment.isoduration.js\n    var Y = years;\n    var M = months;\n    var D = days;\n    var h = hours;\n    var m = minutes;\n    var s = seconds;\n    var total = this.asSeconds();\n\n    if (!total) {\n        // this is the same as C#'s (Noda) and python (isodate)...\n        // but not other JS (goog.date)\n        return 'P0D';\n    }\n\n    return (total < 0 ? '-' : '') +\n        'P' +\n        (Y ? Y + 'Y' : '') +\n        (M ? 
M + 'M' : '') +\n        (D ? D + 'D' : '') +\n        ((h || m || s) ? 'T' : '') +\n        (h ? h + 'H' : '') +\n        (m ? m + 'M' : '') +\n        (s ? s + 'S' : '');\n}\n\nvar proto$2 = Duration.prototype;\n\nproto$2.abs            = abs;\nproto$2.add            = add$1;\nproto$2.subtract       = subtract$1;\nproto$2.as             = as;\nproto$2.asMilliseconds = asMilliseconds;\nproto$2.asSeconds      = asSeconds;\nproto$2.asMinutes      = asMinutes;\nproto$2.asHours        = asHours;\nproto$2.asDays         = asDays;\nproto$2.asWeeks        = asWeeks;\nproto$2.asMonths       = asMonths;\nproto$2.asYears        = asYears;\nproto$2.valueOf        = valueOf$1;\nproto$2._bubble        = bubble;\nproto$2.get            = get$2;\nproto$2.milliseconds   = milliseconds;\nproto$2.seconds        = seconds;\nproto$2.minutes        = minutes;\nproto$2.hours          = hours;\nproto$2.days           = days;\nproto$2.weeks          = weeks;\nproto$2.months         = months;\nproto$2.years          = years;\nproto$2.humanize       = humanize;\nproto$2.toISOString    = toISOString$1;\nproto$2.toString       = toISOString$1;\nproto$2.toJSON         = toISOString$1;\nproto$2.locale         = locale;\nproto$2.localeData     = localeData;\n\n// Deprecations\nproto$2.toIsoString = deprecate('toIsoString() is deprecated. 
Please use toISOString() instead (notice the capitals)', toISOString$1);\nproto$2.lang = lang;\n\n// Side effect imports\n\n// FORMATTING\n\naddFormatToken('X', 0, 0, 'unix');\naddFormatToken('x', 0, 0, 'valueOf');\n\n// PARSING\n\naddRegexToken('x', matchSigned);\naddRegexToken('X', matchTimestamp);\naddParseToken('X', function (input, array, config) {\n    config._d = new Date(parseFloat(input, 10) * 1000);\n});\naddParseToken('x', function (input, array, config) {\n    config._d = new Date(toInt(input));\n});\n\n// Side effect imports\n\n\nhooks.version = '2.17.1';\n\nsetHookCallback(createLocal);\n\nhooks.fn                    = proto;\nhooks.min                   = min;\nhooks.max                   = max;\nhooks.now                   = now;\nhooks.utc                   = createUTC;\nhooks.unix                  = createUnix;\nhooks.months                = listMonths;\nhooks.isDate                = isDate;\nhooks.locale                = getSetGlobalLocale;\nhooks.invalid               = createInvalid;\nhooks.duration              = createDuration;\nhooks.isMoment              = isMoment;\nhooks.weekdays              = listWeekdays;\nhooks.parseZone             = createInZone;\nhooks.localeData            = getLocale;\nhooks.isDuration            = isDuration;\nhooks.monthsShort           = listMonthsShort;\nhooks.weekdaysMin           = listWeekdaysMin;\nhooks.defineLocale          = defineLocale;\nhooks.updateLocale          = updateLocale;\nhooks.locales               = listLocales;\nhooks.weekdaysShort         = listWeekdaysShort;\nhooks.normalizeUnits        = normalizeUnits;\nhooks.relativeTimeRounding = getSetRelativeTimeRounding;\nhooks.relativeTimeThreshold = getSetRelativeTimeThreshold;\nhooks.calendarFormat        = getCalendarFormat;\nhooks.prototype             = proto;\n\nreturn hooks;\n\n})));\n"
  },
  {
    "path": "web_gui/gui_v3/js/npm.js",
    "content": "// This file is autogenerated via the `commonjs` Grunt task. You can require() this file in a CommonJS environment.\nrequire('../../js/transition.js')\nrequire('../../js/alert.js')\nrequire('../../js/button.js')\nrequire('../../js/carousel.js')\nrequire('../../js/collapse.js')\nrequire('../../js/dropdown.js')\nrequire('../../js/modal.js')\nrequire('../../js/tooltip.js')\nrequire('../../js/popover.js')\nrequire('../../js/scrollspy.js')\nrequire('../../js/tab.js')\nrequire('../../js/affix.js')"
  },
  {
    "path": "web_gui/gui_v3/lang/en.php",
    "content": "<?php\n/*\n * Copyright (C) 2016 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n\n$lang['uid'] = \"Owner\";\n$lang['gid'] = \"Group\";\n$lang['lhsm_status'] = \"LHSM Status\";\n$lang['checksum_status'] = \"Checksum Status\";\n\n?>\n"
  },
  {
    "path": "web_gui/gui_v3/lang/fr.php",
    "content": "<?php\n/*\n * Copyright (C) 2016 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n\n$lang['uid'] = \"Proprietaire\";\n$lang['gid'] = \"Groupe\";\n$lang['lhsm_status'] = \"Status LHSM\";\n$lang['checksum_status'] = \"Status Checksum\";\n\n?>\n"
  },
  {
    "path": "web_gui/gui_v3/lang/sys.php",
    "content": "<?php\n/*\n * Copyright (C) 2016 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n\n$lang = array();\n\n$lang['sz0'] = \"Empty\";\n$lang['sz1'] = \"1B-32B\";\n$lang['sz32'] = \"32B-1KB\";\n$lang['sz1K'] = \"1KB-32KB\";\n$lang['sz32K'] = \"32KB-1MB\";\n$lang['sz1M'] = \"1MB-32MB\";\n$lang['sz32M'] = \"32MB-1GB\";\n$lang['sz1G'] = \"1GB-32GB\";\n$lang['sz32G'] = \"32GB-1TB\";\n$lang['sz1T'] = \"1TB+\";\n?>\n"
  },
  {
    "path": "web_gui/gui_v3/plugin.php",
    "content": "<?php\n/*\n * Copyright (C) 2016-2017 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n\n/* Plugin template */\n\nclass Plugin {\n    public $Name = \"Generic\";\n    public $Description = \"Something about your plugin\";\n    public $Version = \"0\";\n\n    /* php lib required to run the plugin */\n    public $Req_lib = array();\n    /* plugin required by this plugin */\n    public $Req_plug = array();\n\n    /* Required table for plugin */\n    /* *Create them if they don't exist */\n    public $Req_table = array();\n\n    /* pre_init status */\n    const INIT_OK = 0;\n    /* Non present php lib */\n    const INIT_REQLIB = 1;\n    /* Waiting for another plugin */\n    const INIT_WAITPLUG = 2;\n    /* The other plugin is missing */\n    const INIT_MISSPLUG = 3;\n\n\n    /* Called from UI and api */\n    function pre_init() {\n\tglobal $db;\n\tglobal $DBA;\n         /* Check if the plugin est loadable */\n\tforeach ($this->Req_lib as $lib) {\n\t\tif (!extension_loaded($lib)) {\n\t\t\treturn $this::INIT_REQLIB;\n\t\t}\n\t}\n\n\t$confdb = getDB(\"config\")[0];\n\t/* @TODO should failed if db doesn't work */\n\tforeach ($this->Req_table as $table=>$fields) {\n\t\t$result = $db[$confdb]->query(\"SELECT * FROM information_schema.columns WHERE (table_name = '$table') AND TABLE_SCHEMA = '\".$DBA[$confdb][\"DB_NAME\"].\"';\");\n\t\tif ($result->rowCount()<1) {\n\t\t\t$db[$confdb]->query(\"CREATE TABLE IF NOT EXISTS $table $fields\");\n\t\t}\n\t}\n\n\treturn $this::INIT_OK;\n    }\n\n    /* Called from UI and api */\n    function init() {\n            /* Plugin init */\n    }\n\n    /* Called from jscript customlib */\n    function jscript($param) {\n        /* JS Required by the plugin\n         * 
Called from param.php  */\n    }\n\n    /* Called from api before processing data */\n    function api_preprocess() {\n        /* Just before the API args parsing\n         * Called from api/robinhood.php */\n    }\n\n    /* Called from api just before processing data */\n    function api_process($param) {\n           /* Just after API args parsing\n            * $param = parsed args */\n\n    }\n\n    /* Called from api when sending the header */\n    function api_header_type($param) {\n            /* Just before sending the data\n             * it's the html header (file type, ...) */\n    }\n\n    /* Called from api just before sending data */\n    function api_response($param) {\n           /* Just before API send data */\n           /* $param = data */\n    }\n\n    /* Custom api call */\n    function api_native($param) {\n           /* Custom api call\n            * $param = (request, parsed args)\n            * Called when reaching something still undeclared from api/native/foo\n            */\n\n    }\n\n    /* Called from API before sending json data of uid/gid for graphs*/\n    function graph_postdata_uid($param) {\n            /* Just before sending uid graph data */\n            /* Whole json as args, for compatibility */\n    }\n\n    /* Called from API before sending json data of sizes for graphs */\n    function graph_postdata_sizes($param) {\n            /* Just before sending sizes graph data */\n            /* Whole json as args, for compatibility */\n    }\n\n    /* Called from API before doing initial SQL request for uid/gid graphs */\n    function graph_presql_uid($param) {\n            /* The sql request as string */\n    }\n\n    /* Called from API before doing initial SQL request for sizes graphs */\n    function graph_presql_sizes($param) {\n            /* The sql request as string */\n    }\n\n    /* Called at each sql request to add where clause for access control */\n    function access_sql_filter($param) {\n            /* The sql filter 
for clause, should start with \" AND \" */\n    }\n\n    /* Called from UI in form filter */\n    function ui_form_filter($param) {\n            /* HTML in the filter form */\n    }\n\n    /* Called from UI in menu */\n    function ui_menu_bottom($param) {\n            /* HTML */\n    }\n\n    /* Called from UI in menu */\n    function ui_menu_top($param) {\n            /* HTML */\n    }\n\n    /* Called from UI in menu */\n    function ui_header($param) {\n            /* HTML inside <header></header> */\n    }\n\n    /* Called from Common to identify the user */\n    function get_user($param) {\n\t/* User identity */\n    }\n\n    /* Called from cron */\n    function cron() {\n\t/* called each time cron.php is run */\n    }\n\n}\n\n/**\n *\n * Load all the plugins\n *\n * @return nothing\n */\nfunction plugins_load() {\n    global $PLUGINS_INST;\n    global $PLUGINS_REG;\n\n    foreach ($PLUGINS_REG as $p) {\n        require_once \"plugins/$p/plugin.php\";\n\t$new_plugin = new $p();\n\t$init_result = $new_plugin->pre_init();\n\tif ($init_result==$new_plugin::INIT_REQLIB) {\n\n\t} elseif ($init_result==$new_plugin::INIT_OK) {\n\t        $PLUGINS_INST[] = $new_plugin;\n\t}\n\n    }\n}\n\n/**\n *\n * Call specific function from plugins\n *\n * @return data\n */\nfunction plugins_call($function, $param = NULL) {\n   global $PLUGINS_INST;\n   foreach ($PLUGINS_INST as $p) {\n           $reflection = new ReflectionMethod($p, $function);\n           if (count($reflection->getParameters()) == 0) {\n                $p->$function();\n        } else {\n                $ret = $p->$function($param);\n                if ($ret)\n                    $param = $ret;\n        }\n   }\n   return $param;\n}\n\nplugins_load();\nplugins_call(\"init\");\n"
  },
  {
    "path": "web_gui/gui_v3/plugins/README.txt",
    "content": "Robinhood new web interface (gui_v3) plugins\n\nI - INTRODUCTION\n===========\n\nThis is a very simple plugin system which allows some customization in the robinhood gui.\n\nII - USE\n=================\n\nIn config.php or config_local.php just add the name of the plugins to $PLUGINS_REG.\n\nPlugins are run in the $PLUGINS_REG list order. Some plugins might not work with other plugins\n\nIII - WRITE\n===================\n\nThe requirements for a plugin are:\n -Folder with the name of the plugin\n -php file called plugin.php which contains a class which matches your plugin name\n\nThe plugin class overloads a class called plugin which contains all the existing callbacks.\nMethods are described in plugin.php at the root of the website.\n\n"
  },
  {
    "path": "web_gui/gui_v3/plugins/browser/css/bootstrap-treeview.css",
    "content": "/* =========================================================\n * patternfly-bootstrap-treeview.css v2.1.0\n * =========================================================\n * Copyright 2013 Jonathan Miles\n * Project URL : http://www.jondmiles.com/bootstrap-treeview\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * ========================================================= */\n\n.treeview .list-group-item {\n\tcursor: pointer;\n}\n\n.treeview span.indent {\n\tmargin-left: 10px;\n\tmargin-right: 10px;\n}\n\n.treeview span.icon {\n\twidth: 12px;\n\tmargin-right: 5px;\n}\n\n.treeview .node-disabled {\n\tcolor: silver;\n\tcursor: not-allowed;\n}\n\n.treeview .node-hidden {\n\tdisplay: none;\n}\n\n.treeview span.image {\n  display: inline-block;\n  width: 12px;\n  height: 1.19em;\n  vertical-align: middle;\n  background-size: contain;\n  background-repeat: no-repeat;\n  margin-right: 5px;\n  line-height: 1em;\n}\n"
  },
  {
    "path": "web_gui/gui_v3/plugins/browser/help.html",
    "content": "<h1>Browser plugin overview</h1>\n\n<p>In order to use the Browser plugin your rootid must be correct. You can check it in internal stats. </p>\n\n\n<p> Screenshot </p>\n\n<img src=\"plugins/browser/images/browser.png\">\n"
  },
  {
    "path": "web_gui/gui_v3/plugins/browser/js/bootstrap-treeview.js",
    "content": "/* =========================================================\n * patternfly-bootstrap-treeview.js v2.1.0\n * =========================================================\n * Copyright 2013 Jonathan Miles\n * Project URL : http://www.jondmiles.com/bootstrap-treeview\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * ========================================================= */\n\n;(function ($, window, document, undefined) {\n\n\t/*global jQuery, console*/\n\n\t'use strict';\n\n\tvar pluginName = 'treeview';\n\n\tvar _default = {};\n\n\t_default.settings = {\n\n\t\tinjectStyle: true,\n\n\t\tlevels: 2,\n\n\t\texpandIcon: 'glyphicon glyphicon-plus',\n\t\tcollapseIcon: 'glyphicon glyphicon-minus',\n\t\tloadingIcon: 'glyphicon glyphicon-hourglass',\n\t\temptyIcon: 'glyphicon',\n\t\tnodeIcon: '',\n\t\tselectedIcon: '',\n\t\tcheckedIcon: 'glyphicon glyphicon-check',\n\t\tpartiallyCheckedIcon: 'glyphicon glyphicon-expand',\n\t\tuncheckedIcon: 'glyphicon glyphicon-unchecked',\n\t\ttagsClass: 'badge',\n\n\t\tcolor: undefined,\n\t\tbackColor: undefined,\n\t\tborderColor: undefined,\n\t\tchangedNodeColor: '#39A5DC',\n\t\tonhoverColor: '#F5F5F5',\n\t\tselectedColor: '#FFFFFF',\n\t\tselectedBackColor: '#428bca',\n\t\tsearchResultColor: '#D9534F',\n\t\tsearchResultBackColor: undefined,\n\n\t\thighlightSelected: true,\n\t\thighlightSearchResults: true,\n\t\tshowBorder: true,\n\t\tshowIcon: true,\n\t\tshowImage: false,\n\t\tshowCheckbox: 
false,\n\t\tcheckboxFirst: false,\n\t\thighlightChanges: false,\n\t\tshowTags: false,\n\t\tmultiSelect: false,\n\t\tpreventUnselect: false,\n\t\tallowReselect: false,\n\t\thierarchicalCheck: false,\n\t\tpropagateCheckEvent: false,\n\t\twrapNodeText: false,\n\n\t\t// Event handlers\n\t\tonLoading: undefined,\n\t\tonLoadingFailed: undefined,\n\t\tonInitialized: undefined,\n\t\tonNodeRendered: undefined,\n\t\tonRendered: undefined,\n\t\tonDestroyed: undefined,\n\n\t\tonNodeChecked: undefined,\n\t\tonNodeCollapsed: undefined,\n\t\tonNodeDisabled: undefined,\n\t\tonNodeEnabled: undefined,\n\t\tonNodeExpanded: undefined,\n\t\tonNodeSelected: undefined,\n\t\tonNodeUnchecked: undefined,\n\t\tonNodeUnselected: undefined,\n\n\t\tonSearchComplete: undefined,\n\t\tonSearchCleared: undefined\n\t};\n\n\t_default.options = {\n\t\tsilent: false,\n\t\tignoreChildren: false\n\t};\n\n\t_default.searchOptions = {\n\t\tignoreCase: true,\n\t\texactMatch: false,\n\t\trevealResults: true\n\t};\n\n\t_default.dataUrl = {\n\t\tmethod: 'GET',\n\t\tdataType: 'json',\n\t\tcache: false\n\t};\n\n\tvar Tree = function (element, options) {\n\t\tthis.$element = $(element);\n\t\tthis._elementId = element.id;\n\t\tthis._styleId = this._elementId + '-style';\n\n\t\tthis._init(options);\n\n\t\treturn {\n\n\t\t\t// Options (public access)\n\t\t\toptions: this._options,\n\n\t\t\t// Initialize / destroy methods\n\t\t\tinit: $.proxy(this._init, this),\n\t\t\tremove: $.proxy(this._remove, this),\n\n\t\t\t// Query methods\n\t\t\tfindNodes: $.proxy(this.findNodes, this),\n\t\t\tgetNodes: $.proxy(this.getNodes, this), // todo document + test\n\t\t\tgetParents: $.proxy(this.getParents, this),\n\t\t\tgetSiblings: $.proxy(this.getSiblings, this),\n\t\t\tgetSelected: $.proxy(this.getSelected, this),\n\t\t\tgetUnselected: $.proxy(this.getUnselected, this),\n\t\t\tgetExpanded: $.proxy(this.getExpanded, this),\n\t\t\tgetCollapsed: $.proxy(this.getCollapsed, this),\n\t\t\tgetChecked: $.proxy(this.getChecked, 
this),\n\t\t\tgetUnchecked: $.proxy(this.getUnchecked, this),\n\t\t\tgetDisabled: $.proxy(this.getDisabled, this),\n\t\t\tgetEnabled: $.proxy(this.getEnabled, this),\n\n\t\t\t// Tree manipulation methods\n\t\t\taddNode: $.proxy(this.addNode, this),\n\t\t\taddNodeAfter: $.proxy(this.addNodeAfter, this),\n\t\t\taddNodeBefore: $.proxy(this.addNodeBefore, this),\n\t\t\tremoveNode: $.proxy(this.removeNode, this),\n\t\t\tupdateNode: $.proxy(this.updateNode, this),\n\n\t\t\t// Select methods\n\t\t\tselectNode: $.proxy(this.selectNode, this),\n\t\t\tunselectNode: $.proxy(this.unselectNode, this),\n\t\t\ttoggleNodeSelected: $.proxy(this.toggleNodeSelected, this),\n\n\t\t\t// Expand / collapse methods\n\t\t\tcollapseAll: $.proxy(this.collapseAll, this),\n\t\t\tcollapseNode: $.proxy(this.collapseNode, this),\n\t\t\texpandAll: $.proxy(this.expandAll, this),\n\t\t\texpandNode: $.proxy(this.expandNode, this),\n\t\t\ttoggleNodeExpanded: $.proxy(this.toggleNodeExpanded, this),\n\t\t\trevealNode: $.proxy(this.revealNode, this),\n\n\t\t\t// Check / uncheck methods\n\t\t\tcheckAll: $.proxy(this.checkAll, this),\n\t\t\tcheckNode: $.proxy(this.checkNode, this),\n\t\t\tuncheckAll: $.proxy(this.uncheckAll, this),\n\t\t\tuncheckNode: $.proxy(this.uncheckNode, this),\n\t\t\ttoggleNodeChecked: $.proxy(this.toggleNodeChecked, this),\n\t\t\tunmarkCheckboxChanges: $.proxy(this.unmarkCheckboxChanges, this),\n\n\t\t\t// Disable / enable methods\n\t\t\tdisableAll: $.proxy(this.disableAll, this),\n\t\t\tdisableNode: $.proxy(this.disableNode, this),\n\t\t\tenableAll: $.proxy(this.enableAll, this),\n\t\t\tenableNode: $.proxy(this.enableNode, this),\n\t\t\ttoggleNodeDisabled: $.proxy(this.toggleNodeDisabled, this),\n\n\t\t\t// Search methods\n\t\t\tsearch: $.proxy(this.search, this),\n\t\t\tclearSearch: $.proxy(this.clearSearch, this)\n\t\t};\n\t};\n\n\tTree.prototype._init = function (options) {\n\t\tthis._tree = [];\n\t\tthis._initialized = false;\n\n\t\tthis._options = $.extend({}, 
_default.settings, options);\n\n\t\t// Cache empty icon DOM template\n\t\tthis._template.icon.empty.addClass(this._options.emptyIcon);\n\n\t\tthis._destroy();\n\t\tthis._subscribeEvents();\n\n\t\tthis._triggerEvent('loading', null, _default.options);\n\t\tthis._load(options)\n\t\t\t.then($.proxy(function (data) {\n\t\t\t\t// load done\n\t\t\t\treturn this._tree = $.extend(true, [], data);\n\t\t\t}, this), $.proxy(function (error) {\n\t\t\t\t// load fail\n\t\t\t\tthis._triggerEvent('loadingFailed', error, _default.options);\n\t\t\t}, this))\n\t\t\t.then($.proxy(function (treeData) {\n\t\t\t\t// initialize data\n\t\t\t\treturn this._setInitialStates({ nodes: treeData }, 0);\n\t\t\t}, this))\n\t\t\t.then($.proxy(function () {\n\t\t\t\t// render to DOM\n\t\t\t\tthis._render();\n\t\t\t}, this));\n\t};\n\n\tTree.prototype._load = function (options) {\n\t\tvar done = new $.Deferred();\n\t\tif (options.data) {\n\t\t\tthis._loadLocalData(options, done);\n\t\t} else if (options.dataUrl) {\n\t\t\tthis._loadRemoteData(options, done);\n\t\t}\n\t\treturn done.promise();\n\t};\n\n\tTree.prototype._loadRemoteData = function (options, done) {\n\t\t$.ajax($.extend(true, {}, _default.dataUrl, options.dataUrl))\n\t\t\t.done(function (data) {\n\t\t\t\tdone.resolve(data);\n\t\t\t})\n\t\t\t.fail(function (xhr, status, error) {\n\t\t\t\tdone.reject(error);\n\t\t\t});\n\t};\n\n\tTree.prototype._loadLocalData = function (options, done) {\n\t\tdone.resolve((typeof options.data === 'string') ?\n\t\t\t\t\t\t\t\tJSON.parse(options.data) :\n\t\t\t\t\t\t\t\t$.extend(true, [], options.data));\n\t};\n\n\tTree.prototype._remove = function () {\n\t\tthis._destroy();\n\t\t$.removeData(this, pluginName);\n\t\t$('#' + this._styleId).remove();\n\t};\n\n\tTree.prototype._destroy = function () {\n\t\tif (!this._initialized) return;\n\t\tthis._initialized = false;\n\n\t\tthis._triggerEvent('destroyed', null, _default.options);\n\n\t\t// Switch off events\n\t\tthis._unsubscribeEvents();\n\n\t\t// Tear 
down\n\t\tthis.$wrapper.remove();\n\t\tthis.$wrapper = null;\n\t};\n\n\tTree.prototype._unsubscribeEvents = function () {\n\t\tthis.$element.off('loading');\n\t\tthis.$element.off('loadingFailed');\n\t\tthis.$element.off('initialized');\n\t\tthis.$element.off('nodeRendered');\n\t\tthis.$element.off('rendered');\n\t\tthis.$element.off('destroyed');\n\t\tthis.$element.off('click');\n\t\tthis.$element.off('nodeChecked');\n\t\tthis.$element.off('nodeCollapsed');\n\t\tthis.$element.off('nodeDisabled');\n\t\tthis.$element.off('nodeEnabled');\n\t\tthis.$element.off('nodeExpanded');\n\t\tthis.$element.off('nodeSelected');\n\t\tthis.$element.off('nodeUnchecked');\n\t\tthis.$element.off('nodeUnselected');\n\t\tthis.$element.off('searchComplete');\n\t\tthis.$element.off('searchCleared');\n\t};\n\n\tTree.prototype._subscribeEvents = function () {\n\t\tthis._unsubscribeEvents();\n\n\t\tif (typeof (this._options.onLoading) === 'function') {\n\t\t\tthis.$element.on('loading', this._options.onLoading);\n\t\t}\n\n\t\tif (typeof (this._options.onLoadingFailed) === 'function') {\n\t\t\tthis.$element.on('loadingFailed', this._options.onLoadingFailed);\n\t\t}\n\n\t\tif (typeof (this._options.onInitialized) === 'function') {\n\t\t\tthis.$element.on('initialized', this._options.onInitialized);\n\t\t}\n\n\t\tif (typeof (this._options.onNodeRendered) === 'function') {\n\t\t\tthis.$element.on('nodeRendered', this._options.onNodeRendered);\n\t\t}\n\n\t\tif (typeof (this._options.onRendered) === 'function') {\n\t\t\tthis.$element.on('rendered', this._options.onRendered);\n\t\t}\n\n\t\tif (typeof (this._options.onDestroyed) === 'function') {\n\t\t\tthis.$element.on('destroyed', this._options.onDestroyed);\n\t\t}\n\n\t\tthis.$element.on('click', $.proxy(this._clickHandler, this));\n\n\t\tif (typeof (this._options.onNodeChecked) === 'function') {\n\t\t\tthis.$element.on('nodeChecked', this._options.onNodeChecked);\n\t\t}\n\n\t\tif (typeof (this._options.onNodeCollapsed) === 'function') 
{\n\t\t\tthis.$element.on('nodeCollapsed', this._options.onNodeCollapsed);\n\t\t}\n\n\t\tif (typeof (this._options.onNodeDisabled) === 'function') {\n\t\t\tthis.$element.on('nodeDisabled', this._options.onNodeDisabled);\n\t\t}\n\n\t\tif (typeof (this._options.onNodeEnabled) === 'function') {\n\t\t\tthis.$element.on('nodeEnabled', this._options.onNodeEnabled);\n\t\t}\n\n\t\tif (typeof (this._options.onNodeExpanded) === 'function') {\n\t\t\tthis.$element.on('nodeExpanded', this._options.onNodeExpanded);\n\t\t}\n\n\t\tif (typeof (this._options.onNodeSelected) === 'function') {\n\t\t\tthis.$element.on('nodeSelected', this._options.onNodeSelected);\n\t\t}\n\n\t\tif (typeof (this._options.onNodeUnchecked) === 'function') {\n\t\t\tthis.$element.on('nodeUnchecked', this._options.onNodeUnchecked);\n\t\t}\n\n\t\tif (typeof (this._options.onNodeUnselected) === 'function') {\n\t\t\tthis.$element.on('nodeUnselected', this._options.onNodeUnselected);\n\t\t}\n\n\t\tif (typeof (this._options.onSearchComplete) === 'function') {\n\t\t\tthis.$element.on('searchComplete', this._options.onSearchComplete);\n\t\t}\n\n\t\tif (typeof (this._options.onSearchCleared) === 'function') {\n\t\t\tthis.$element.on('searchCleared', this._options.onSearchCleared);\n\t\t}\n\t};\n\n\tTree.prototype._triggerEvent = function (event, data, options) {\n\t\tif (options && !options.silent) {\n\t\t\tthis.$element.trigger(event, $.extend(true, {}, data));\n\t\t}\n\t}\n\n\t/*\n\t\tRecurse the tree structure and ensure all nodes have\n\t\tvalid initial states.  
User defined states will be preserved.\n\t\tFor performance we also take this opportunity to\n\t\tindex nodes in a flattened ordered structure\n\t*/\n\tTree.prototype._setInitialStates = function (node, level) {\n\t\tthis._nodes = {};\n\t\treturn $.when.apply(this, this._setInitialState(node, level))\n\t\t\t.done($.proxy(function () {\n\t\t\t\tthis._orderedNodes = this._sortNodes();\n\t\t\t\tthis._inheritCheckboxChanges();\n\t\t\t\tthis._triggerEvent('initialized', this._orderedNodes, _default.options);\n\t\t\t\treturn;\n\t\t\t}, this));\n\t};\n\n\tTree.prototype._setInitialState = function (node, level, done) {\n\t\tif (!node.nodes) return;\n\t\tlevel += 1;\n\t\tdone = done || [];\n\n\t\tvar parent = node;\n\t\t$.each(node.nodes, $.proxy(function (index, node) {\n\t\t\tvar deferred = new $.Deferred();\n\t\t\tdone.push(deferred.promise());\n\n\t\t\t// level : hierarchical tree level, starts at 1\n\t\t\tnode.level = level;\n\n\t\t\t// index : relative to siblings\n\t\t\tnode.index = index;\n\n\t\t\t// nodeId : unique, hierarchical identifier\n\t\t\tnode.nodeId = (parent && parent.nodeId) ?\n\t\t\t\t\t\t\t\t\t\t\tparent.nodeId + '.' + node.index :\n\t\t\t\t\t\t\t\t\t\t\t(level - 1) + '.' 
+ node.index;\n\n\t\t\t// parentId : transversing up the tree\n\t\t\tnode.parentId = parent.nodeId;\n\n\t\t\t// if not provided set selectable default value\n\t\t\tif (!node.hasOwnProperty('selectable')) {\n\t\t\t\tnode.selectable = true;\n\t\t\t}\n\n\t\t\t// if not provided set checkable default value\n\t\t\tif (!node.hasOwnProperty('checkable')) {\n\t\t\t\tnode.checkable = true;\n\t\t\t}\n\n\t\t\t// where provided we should preserve states\n\t\t\tnode.state = node.state || {};\n\n\t\t\t// set checked state; unless set always false\n\t\t\tif (!node.state.hasOwnProperty('checked')) {\n\t\t\t\tnode.state.checked = false;\n\t\t\t}\n\n\t\t\t// convert the undefined string if hierarchical checks are enabled\n\t\t\tif (this._options.hierarchicalCheck && node.state.checked === 'undefined') {\n\t\t\t\tnode.state.checked = undefined;\n\t\t\t}\n\n\t\t\t// set enabled state; unless set always false\n\t\t\tif (!node.state.hasOwnProperty('disabled')) {\n\t\t\t\tnode.state.disabled = false;\n\t\t\t}\n\n\t\t\t// set expanded state; if not provided based on levels\n\t\t\tif (!node.state.hasOwnProperty('expanded')) {\n\t\t\t\tif (!node.state.disabled &&\n\t\t\t\t\t\t(level < this._options.levels) &&\n\t\t\t\t\t\t(node.nodes && node.nodes.length > 0)) {\n\t\t\t\t\tnode.state.expanded = true;\n\t\t\t\t}\n\t\t\t\telse {\n\t\t\t\t\tnode.state.expanded = false;\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// set selected state; unless set always false\n\t\t\tif (!node.state.hasOwnProperty('selected')) {\n\t\t\t\tnode.state.selected = false;\n\t\t\t}\n\n\t\t\t// set visible state; based parent state plus levels\n\t\t\tif ((parent && parent.state && parent.state.expanded) ||\n\t\t\t\t\t(level <= this._options.levels)) {\n\t\t\t\tnode.state.visible = true;\n\t\t\t}\n\t\t\telse {\n\t\t\t\tnode.state.visible = false;\n\t\t\t}\n\n\t\t\t// recurse child nodes and transverse the tree, depth-first\n\t\t\tif (node.nodes) {\n\t\t\t\tif (node.nodes.length > 0) {\n\t\t\t\t\tthis._setInitialState(node, level, 
done);\n\t\t\t\t}\n\t\t\t\telse {\n\t\t\t\t\tdelete node.nodes;\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// add / update indexed collection\n\t\t\tthis._nodes[node.nodeId] = node;\n\n\t\t\t// mark task as complete\n\t\t\tdeferred.resolve();\n\t\t}, this));\n\n\t\treturn done;\n\t};\n\n\tTree.prototype._sortNodes = function () {\n\t\treturn $.map(Object.keys(this._nodes).sort(function (a, b) {\n\t\t\tif (a === b) return 0;\n\t\t\tvar a = a.split('.').map(function (level) { return parseInt(level); });\n\t\t\tvar b = b.split('.').map(function (level) { return parseInt(level); });\n\n\t\t\tvar c = Math.max(a.length, b.length);\n\t\t\tfor (var i=0; i<c; i++) {\n\t\t\t\tif (a[i] === undefined) return -1;\n\t\t\t\tif (b[i] === undefined) return +1;\n\t\t\t\tif (a[i] - b[i] > 0) return +1;\n\t\t\t\tif (a[i] - b[i] < 0) return -1;\n\t\t\t};\n\n\t\t}), $.proxy(function (value, index) {\n\t\t  return this._nodes[value];\n\t\t}, this));\n\t};\n\n\tTree.prototype._clickHandler = function (event) {\n\n\t\tvar target = $(event.target);\n\t\tvar node = this.targetNode(target);\n\t\tif (!node || node.state.disabled) return;\n\n\t\tvar classList = target.attr('class') ? 
target.attr('class').split(' ') : [];\n\t\tif ((classList.indexOf('expand-icon') !== -1)) {\n\t\t\tthis._toggleExpanded(node, $.extend({}, _default.options));\n\t\t}\n\t\telse if ((classList.indexOf('check-icon') !== -1)) {\n\t\t\tif (node.checkable) {\n\t\t\t\tthis._toggleChecked(node, $.extend({}, _default.options));\n\t\t\t}\n\t\t}\n\t\telse {\n\t\t\tif (node.selectable) {\n\t\t\t\tthis._toggleSelected(node, $.extend({}, _default.options));\n\t\t\t} else {\n\t\t\t\tthis._toggleExpanded(node, $.extend({}, _default.options));\n\t\t\t}\n\t\t}\n\t};\n\n\t// Looks up the DOM for the closest parent list item to retrieve the\n\t// data attribute nodeid, which is used to lookup the node in the flattened structure.\n\tTree.prototype.targetNode = function (target) {\n\t\tvar nodeId = target.closest('li.list-group-item').attr('data-nodeId');\n\t\tvar node = this._nodes[nodeId];\n\t\tif (!node) {\n\t\t\tconsole.log('Error: node does not exist');\n\t\t}\n\t\treturn node;\n\t};\n\n\tTree.prototype._toggleExpanded = function (node, options) {\n\n\t\t// Show a different icon while loading the child nodes\n\t\tif (!node) return;\n\n\t\t// Lazy-load the child nodes if possible\n\t\tif (typeof(this._options.lazyLoad) === 'function' && node.lazyLoad) {\n\t\t\tthis._lazyLoad(node);\n\t\t} else {\n\t\t\tthis._setExpanded(node, !node.state.expanded, options);\n\t\t}\n\t};\n\n\tTree.prototype._lazyLoad = function (node) {\n\t\t// Show a different icon while loading the child nodes\n\t\tnode.$el.children('span.expand-icon')\n\t\t\t.removeClass(this._options.expandIcon)\n\t\t\t.addClass(this._options.loadingIcon);\n\n\t\tvar _this = this;\n\t\tthis._options.lazyLoad(node, function (nodes) {\n\t\t\t// Adding the node will expand its parent automatically\n\t\t\t_this.addNode(nodes, node);\n\t\t});\n\t\t// Only the first expand should do a lazy-load\n\t\tdelete node.lazyLoad;\n\t};\n\n\tTree.prototype._setExpanded = function (node, state, options) {\n\t\t// We never pass options when 
rendering, so the only time\n\t\t// we need to validate state is from user interaction\n\t\tif (options && state === node.state.expanded) return;\n\n\t\tif (state && node.nodes) {\n\n\t\t\t// Set node state\n\t\t\tnode.state.expanded = true;\n\n\t\t\t// Set element\n\t\t\tif (node.$el) {\n\t\t\t\tnode.$el.children('span.expand-icon')\n\t\t\t\t\t.removeClass(this._options.expandIcon)\n\t\t\t\t\t.removeClass(this._options.loadingIcon)\n\t\t\t\t\t.addClass(this._options.collapseIcon);\n\t\t\t}\n\n\t\t\t// Expand children\n\t\t\tif (node.nodes && options) {\n\t\t\t\t$.each(node.nodes, $.proxy(function (index, node) {\n\t\t\t\t\tthis._setVisible(node, true, options);\n\t\t\t\t}, this));\n\t\t\t}\n\n\t\t\t// Optionally trigger event\n\t\t\tthis._triggerEvent('nodeExpanded', node, options);\n\t\t}\n\t\telse if (!state) {\n\n\t\t\t// Set node state\n\t\t\tnode.state.expanded = false;\n\n\t\t\t// Set element\n\t\t\tif (node.$el) {\n\n\t\t\t\tnode.$el.children('span.expand-icon')\n\t\t\t\t\t.removeClass(this._options.collapseIcon)\n\t\t\t\t\t.addClass(this._options.expandIcon);\n\t\t\t}\n\n\t\t\t// Collapse children\n\t\t\tif (node.nodes && options) {\n\t\t\t\t$.each(node.nodes, $.proxy(function (index, node) {\n\t\t\t\t\tthis._setVisible(node, false, options);\n\t\t\t\t\tthis._setExpanded(node, false, options);\n\t\t\t\t}, this));\n\t\t\t}\n\n\t\t\t// Optionally trigger event\n\t\t\tthis._triggerEvent('nodeCollapsed', node, options);\n\t\t}\n\t};\n\n\tTree.prototype._setVisible = function (node, state, options) {\n\n\t\tif (options && state === node.state.visible) return;\n\n\t\tif (state) {\n\n\t\t\t// Set node state\n\t\t\tnode.state.visible = true;\n\n\t\t\t// Set element\n\t\t\tif (node.$el) {\n\t\t\t\tnode.$el.removeClass('node-hidden');\n\t\t\t}\n\t\t}\n\t\telse {\n\n\t\t\t// Set node state to unchecked\n\t\t\tnode.state.visible = false;\n\n\t\t\t// Set element\n\t\t\tif (node.$el) 
{\n\t\t\t\tnode.$el.addClass('node-hidden');\n\t\t\t}\n\t\t}\n\t};\n\n\tTree.prototype._toggleSelected = function (node, options) {\n\t\tif (!node) return;\n\t\tthis._setSelected(node, !node.state.selected, options);\n\t\treturn this;\n\t};\n\n\tTree.prototype._setSelected = function (node, state, options) {\n\n\t\t// We never pass options when rendering, so the only time\n\t\t// we need to validate state is from user interaction\n\t\tif (options && (state === node.state.selected)) return;\n\n\t\tif (state) {\n\n\t\t\t// If multiSelect false, unselect previously selected\n\t\t\tif (!this._options.multiSelect) {\n\t\t\t\t$.each(this._findNodes('true', 'state.selected'), $.proxy(function (index, node) {\n\t\t\t\t\tthis._setSelected(node, false, $.extend(options, {unselecting: true}));\n\t\t\t\t}, this));\n\t\t\t}\n\n\t\t\t// Set node state\n\t\t\tnode.state.selected = true;\n\n\t\t\t// Set element\n\t\t\tif (node.$el) {\n\t\t\t\tnode.$el.addClass('node-selected');\n\n\t\t\t\tif (node.selectedIcon || this._options.selectedIcon) {\n\t\t\t\t\tnode.$el.children('span.node-icon')\n\t\t\t\t\t\t.removeClass(node.icon || this._options.nodeIcon)\n\t\t\t\t\t\t.addClass(node.selectedIcon || this._options.selectedIcon);\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Optionally trigger event\n\t\t\tthis._triggerEvent('nodeSelected', node, options);\n\t\t}\n\t\telse {\n\n\t\t\t// If preventUnselect true + only one remaining selection, disable unselect\n\t\t\tif (this._options.preventUnselect &&\n\t\t\t\t\t(options && !options.unselecting) &&\n\t\t\t\t\t(this._findNodes('true', 'state.selected').length === 1)) {\n\t\t\t\t// Fire the nodeSelected event if reselection is allowed\n\t\t\t\tif (this._options.allowReselect) {\n\t\t\t\t\tthis._triggerEvent('nodeSelected', node, options);\n\t\t\t\t}\n\t\t\t\treturn this;\n\t\t\t}\n\n\t\t\t// Set node state\n\t\t\tnode.state.selected = false;\n\n\t\t\t// Set element\n\t\t\tif (node.$el) {\n\t\t\t\tnode.$el.removeClass('node-selected');\n\n\t\t\t\tif 
(node.selectedIcon || this._options.selectedIcon) {\n\t\t\t\t\tnode.$el.children('span.node-icon')\n\t\t\t\t\t\t.removeClass(node.selectedIcon || this._options.selectedIcon)\n\t\t\t\t\t\t.addClass(node.icon || this._options.nodeIcon);\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Optionally trigger event\n\t\t\tthis._triggerEvent('nodeUnselected', node, options);\n\t\t}\n\n\t\treturn this;\n\t};\n\n\tTree.prototype._inheritCheckboxChanges = function () {\n\t\tif (this._options.showCheckbox && this._options.highlightChanges) {\n\t\t\tthis._checkedNodes = $.grep(this._orderedNodes, function (node) {\n\t\t\t\treturn node.state.checked;\n\t\t\t});\n\t\t}\n\t};\n\n\tTree.prototype._toggleChecked = function (node, options) {\n\t\tif (!node) return;\n\n\t\tif (this._options.hierarchicalCheck) {\n\t\t\t// Event propagation to the parent/child nodes\n\t\t\tvar childOptions = $.extend({}, options, {silent: options.silent || !this._options.propagateCheckEvent});\n\n\t\t\tvar state, currentNode = node;\n\t\t\t// Temporarily swap the tree state\n\t\t\tnode.state.checked = !node.state.checked;\n\n\t\t\t// Iterate through each parent node\n\t\t\twhile (currentNode = this._nodes[currentNode.parentId]) {\n\n\t\t\t\t// Calculate the state\n\t\t\t\tstate = currentNode.nodes.reduce(function (acc, curr) {\n\t\t\t\t\treturn (acc === curr.state.checked) ? 
acc : undefined;\n\t\t\t\t}, currentNode.nodes[0].state.checked);\n\n\t\t\t\t// Set the state\n\t\t\t\tthis._setChecked(currentNode, state, childOptions);\n\t\t\t}\n\n\t\t\tif (node.nodes && node.nodes.length > 0) {\n\t\t\t\t// Copy the content of the array\n\t\t\t\tvar child, children = node.nodes.slice();\n\t\t\t\t// Iterate through each child node\n\t\t\t\twhile (children && children.length > 0) {\n\t\t\t\t\tchild = children.pop();\n\n\t\t\t\t\t// Set the state\n\t\t\t\t\tthis._setChecked(child, node.state.checked, childOptions);\n\n\t\t\t\t\t// Append children to the end of the list\n\t\t\t\t\tif (child.nodes && child.nodes.length > 0) {\n\t\t\t\t\t\tchildren = children.concat(child.nodes.slice());\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\t// Swap back the tree state\n\t\t\tnode.state.checked = !node.state.checked;\n\t\t}\n\n\t\tthis._setChecked(node, !node.state.checked, options);\n\t};\n\n\tTree.prototype._setChecked = function (node, state, options) {\n\n\t\t// We never pass options when rendering, so the only time\n\t\t// we need to validate state is from user interaction\n\t\tif (options && state === node.state.checked) return;\n\n\t\t// Highlight the node if its checkbox has unsaved changes\n\t\tif (this._options.highlightChanges) {\n\t\t\tnode.$el.toggleClass('node-check-changed', (this._checkedNodes.indexOf(node) == -1) == state);\n\t\t}\n\n\t\tif (state) {\n\n\t\t\t// Set node state\n\t\t\tnode.state.checked = true;\n\n\t\t\t// Set element\n\t\t\tif (node.$el) {\n\t\t\t\tnode.$el.addClass('node-checked').removeClass('node-checked-partial');\n\t\t\t\tnode.$el.children('span.check-icon')\n\t\t\t\t\t.removeClass(this._options.uncheckedIcon)\n\t\t\t\t\t.removeClass(this._options.partiallyCheckedIcon)\n\t\t\t\t\t.addClass(this._options.checkedIcon);\n\t\t\t}\n\n\t\t\t// Optionally trigger event\n\t\t\tthis._triggerEvent('nodeChecked', node, options);\n\t\t}\n\t\telse if (state === undefined && this._options.hierarchicalCheck) {\n\n\t\t\t// Set node state to 
partially checked\n\t\t\tnode.state.checked = undefined;\n\n\t\t\t// Set element\n\t\t\tif (node.$el) {\n\t\t\t\tnode.$el.addClass('node-checked-partial').removeClass('node-checked');\n\t\t\t\tnode.$el.children('span.check-icon')\n\t\t\t\t\t.removeClass(this._options.uncheckedIcon)\n\t\t\t\t\t.removeClass(this._options.checkedIcon)\n\t\t\t\t\t.addClass(this._options.partiallyCheckedIcon);\n\t\t\t}\n\n\t\t\t// Optionally trigger event, partially checked is technically unchecked\n\t\t\tthis._triggerEvent('nodeUnchecked', node, options);\n\t\t} else {\n\n\t\t\t// Set node state to unchecked\n\t\t\tnode.state.checked = false;\n\n\t\t\t// Set element\n\t\t\tif (node.$el) {\n\t\t\t\tnode.$el.removeClass('node-checked node-checked-partial');\n\t\t\t\tnode.$el.children('span.check-icon')\n\t\t\t\t\t.removeClass(this._options.checkedIcon)\n\t\t\t\t\t.removeClass(this._options.partiallyCheckedIcon)\n\t\t\t\t\t.addClass(this._options.uncheckedIcon);\n\t\t\t}\n\n\t\t\t// Optionally trigger event\n\t\t\tthis._triggerEvent('nodeUnchecked', node, options);\n\t\t}\n\t};\n\n\tTree.prototype._setDisabled = function (node, state, options) {\n\n\t\t// We never pass options when rendering, so the only time\n\t\t// we need to validate state is from user interaction\n\t\tif (options && state === node.state.disabled) return;\n\n\t\tif (state) {\n\n\t\t\t// Set node state to disabled\n\t\t\tnode.state.disabled = true;\n\n\t\t\t// Disable all other states\n\t\t\tif (options && !options.keepState) {\n\t\t\t\tthis._setSelected(node, false, options);\n\t\t\t\tthis._setChecked(node, false, options);\n\t\t\t\tthis._setExpanded(node, false, options);\n\t\t\t}\n\n\t\t\t// Set element\n\t\t\tif (node.$el) {\n\t\t\t\tnode.$el.addClass('node-disabled');\n\t\t\t}\n\n\t\t\t// Optionally trigger event\n\t\t\tthis._triggerEvent('nodeDisabled', node, options);\n\t\t}\n\t\telse {\n\n\t\t\t// Set node state to enabled\n\t\t\tnode.state.disabled = false;\n\n\t\t\t// Set element\n\t\t\tif (node.$el) 
{\n\t\t\t\tnode.$el.removeClass('node-disabled');\n\t\t\t}\n\n\t\t\t// Optionally trigger event\n\t\t\tthis._triggerEvent('nodeEnabled', node, options);\n\t\t}\n\t};\n\n\tTree.prototype._setSearchResult = function (node, state, options) {\n\t\tif (options && state === node.searchResult) return;\n\n\t\tif (state) {\n\n\t\t\tnode.searchResult = true;\n\n\t\t\tif (node.$el) {\n\t\t\t\tnode.$el.addClass('node-result');\n\t\t\t}\n\t\t}\n\t\telse {\n\n\t\t\tnode.searchResult = false;\n\n\t\t\tif (node.$el) {\n\t\t\t\tnode.$el.removeClass('node-result');\n\t\t\t}\n\t\t}\n\t};\n\n\tTree.prototype._render = function () {\n\t\tif (!this._initialized) {\n\n\t\t\t// Setup first time only components\n\t\t\tthis.$wrapper = this._template.tree.clone();\n\t\t\tthis.$element.empty()\n\t\t\t\t.addClass(pluginName)\n\t\t\t\t.append(this.$wrapper);\n\n\t\t\tthis._injectStyle();\n\n\t\t\tthis._initialized = true;\n\t\t}\n\n\t\tvar previousNode;\n\t\t$.each(this._orderedNodes, $.proxy(function (id, node) {\n\t\t\tthis._renderNode(node, previousNode);\n\t\t\tpreviousNode = node;\n\t\t}, this));\n\n\t\tthis._triggerEvent('rendered', this._orderedNodes, _default.options);\n\t};\n\n\tTree.prototype._renderNode = function (node, previousNode) {\n\t\tif (!node) return;\n\n\t\tif (!node.$el) {\n\t\t\tnode.$el = this._newNodeEl(node, previousNode)\n\t\t\t\t.addClass('node-' + this._elementId);\n\t\t}\n\t\telse {\n\t\t\tnode.$el.empty();\n\t\t}\n\n\t\t// Append .classes to the node\n\t\tnode.$el.addClass(node.class);\n\n\t\t// Set the #id of the node if specified\n\t\tif (node.id) {\n\t\t\tnode.$el.attr('id', node.id);\n\t\t}\n\n\t\t// Append custom data- attributes to the node\n\t\tif (node.dataAttr) {\n\t\t\t$.each(node.dataAttr, function (key, value) {\n\t\t\t\tnode.$el.attr('data-' + key, value);\n\t\t\t});\n\t\t}\n\n\t\t// Set / update nodeid; it can change as a result of addNode etc.\n\t\tnode.$el.attr('data-nodeId', node.nodeId);\n\n\t\t// Set the tooltip attribute if present\n\t\tif 
(node.tooltip) {\n\t\t\tnode.$el.attr('title', node.tooltip);\n\t\t}\n\n\t\t// Add indent/spacer to mimic tree structure\n\t\tfor (var i = 0; i < (node.level - 1); i++) {\n\t\t\tnode.$el.append(this._template.indent.clone());\n\t\t}\n\n\t\t// Add expand / collapse or empty spacer icons\n\t\tnode.$el\n\t\t\t.append(\n\t\t\t\tnode.nodes || node.lazyLoad ? this._template.icon.expand.clone() : this._template.icon.empty.clone()\n\t\t\t);\n\n\t\t// Add checkbox and node icons\n\t\tif (this._options.checkboxFirst) {\n\t\t\tthis._addCheckbox(node);\n\t\t\tthis._addIcon(node);\n\t\t\tthis._addImage(node);\n\t\t} else {\n\t\t\tthis._addIcon(node);\n\t\t\tthis._addImage(node);\n\t\t\tthis._addCheckbox(node);\n\t\t}\n\n\t\t// Add text\n\t\tif (this._options.wrapNodeText) {\n\t\t\tvar wrapper = this._template.text.clone();\n\t\t\tnode.$el.append(wrapper);\n\t\t\twrapper.append(node.text);\n\t\t} else {\n\t\t\tnode.$el.append(node.text);\n\t\t}\n\n\t\t// Add tags as badges\n\t\tif (this._options.showTags && node.tags) {\n\t\t\t$.each(node.tags, $.proxy(function addTag(id, tag) {\n\t\t\t\tnode.$el\n\t\t\t\t\t.append(this._template.badge.clone()\n\t\t\t\t\t\t.addClass(\n\t\t\t\t\t\t\t(typeof tag === 'object' ? tag.class : undefined)\n\t\t\t\t\t\t\t|| node.tagsClass\n\t\t\t\t\t\t\t|| this._options.tagsClass\n\t\t\t\t\t\t)\n\t\t\t\t\t\t.append(\n\t\t\t\t\t\t\t(typeof tag === 'object' ? 
tag.text : undefined)\n\t\t\t\t\t\t\t|| tag\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t}, this));\n\t\t}\n\n\t\t// Set various node states\n\t\tthis._setSelected(node, node.state.selected);\n\t\tthis._setChecked(node, node.state.checked);\n\t\tthis._setSearchResult(node, node.searchResult);\n\t\tthis._setExpanded(node, node.state.expanded);\n\t\tthis._setDisabled(node, node.state.disabled);\n\t\tthis._setVisible(node, node.state.visible);\n\n\t\t// Trigger nodeRendered event\n\t\tthis._triggerEvent('nodeRendered', node, _default.options);\n\t};\n\n\t// Add checkable icon\n\tTree.prototype._addCheckbox = function (node) {\n\t\tif (this._options.showCheckbox && (node.hideCheckbox === undefined || node.hideCheckbox === false)) {\n\t\t\tnode.$el\n\t\t\t\t.append(this._template.icon.check.clone());\n\t\t}\n\t}\n\n\t// Add node icon\n\tTree.prototype._addIcon = function (node) {\n\t\tif (this._options.showIcon && !(this._options.showImage && node.image)) {\n\t\t\tnode.$el\n\t\t\t\t.append(this._template.icon.node.clone()\n\t\t\t\t\t.addClass(node.icon || this._options.nodeIcon)\n\t\t\t\t);\n\t\t}\n\t}\n\n\tTree.prototype._addImage = function (node) {\n \t\tif (this._options.showImage && node.image) {\n \t\t\tnode.$el\n \t\t\t\t.append(this._template.image.clone()\n \t\t\t\t\t.addClass('node-image')\n \t\t\t\t\t.css('background-image', \"url('\" + node.image + \"')\")\n \t\t\t\t);\n \t\t}\n \t}\n\n\t// Creates a new node element from template and\n\t// ensures the template is inserted at the correct position\n\tTree.prototype._newNodeEl = function (node, previousNode) {\n\t\tvar $el = this._template.node.clone();\n\n\t\tif (previousNode) {\n\t\t\t// typical usage, as nodes are rendered in\n\t\t\t// sort order we add after the previous element\n\t\t\tpreviousNode.$el.after($el);\n\t\t} else {\n\t\t\t// we use prepend instead of append,\n\t\t\t// to cater for root inserts i.e. 
nodeId 0.0\n\t\t\tthis.$wrapper.prepend($el);\n\t\t}\n\n\t\treturn $el;\n\t};\n\n\t// Recursively remove node elements from DOM\n\tTree.prototype._removeNodeEl = function (node) {\n\t\tif (!node) return;\n\n\t\tif (node.nodes) {\n\t\t\t$.each(node.nodes, $.proxy(function (index, node) {\n\t\t\t\tthis._removeNodeEl(node);\n\t\t\t}, this));\n\t\t}\n\t\tnode.$el.remove();\n\t};\n\n\t// Expand node, rendering it's immediate children\n\tTree.prototype._expandNode = function (node) {\n\t\tif (!node.nodes) return;\n\n\t\t$.each(node.nodes.slice(0).reverse(), $.proxy(function (index, childNode) {\n\t\t\tchildNode.level = node.level + 1;\n\t\t\tthis._renderNode(childNode, node.$el);\n\t\t}, this));\n\t};\n\n\t// Add inline style into head\n\tTree.prototype._injectStyle = function () {\n\t\tif (this._options.injectStyle && !document.getElementById(this._styleId)) {\n\t\t\t$('<style type=\"text/css\" id=\"' + this._styleId + '\"> ' + this._buildStyle() + ' </style>').appendTo('head');\n\t\t}\n\t};\n\n\t// Construct trees style based on user options\n\tTree.prototype._buildStyle = function () {\n\t\tvar style = '.node-' + this._elementId + '{';\n\n\t\t// Basic bootstrap style overrides\n\t\tif (this._options.color) {\n\t\t\tstyle += 'color:' + this._options.color + ';';\n\t\t}\n\n\t\tif (this._options.backColor) {\n\t\t\tstyle += 'background-color:' + this._options.backColor + ';';\n\t\t}\n\n\t\tif (!this._options.showBorder) {\n\t\t\tstyle += 'border:none;';\n\t\t}\n\t\telse if (this._options.borderColor) {\n\t\t\tstyle += 'border:1px solid ' + this._options.borderColor + ';';\n\t\t}\n\t\tstyle += '}';\n\n\t\tif (this._options.onhoverColor) {\n\t\t\tstyle += '.node-' + this._elementId + ':not(.node-disabled):hover{' +\n\t\t\t\t'background-color:' + this._options.onhoverColor + ';' +\n\t\t\t'}';\n\t\t}\n\n\t\t// Style search results\n\t\tif (this._options.highlightSearchResults && (this._options.searchResultColor || this._options.searchResultBackColor)) {\n\n\t\t\tvar 
innerStyle = ''\n\t\t\tif (this._options.searchResultColor) {\n\t\t\t\tinnerStyle += 'color:' + this._options.searchResultColor + ';';\n\t\t\t}\n\t\t\tif (this._options.searchResultBackColor) {\n\t\t\t\tinnerStyle += 'background-color:' + this._options.searchResultBackColor + ';';\n\t\t\t}\n\n\t\t\tstyle += '.node-' + this._elementId + '.node-result{' + innerStyle + '}';\n\t\t\tstyle += '.node-' + this._elementId + '.node-result:hover{' + innerStyle + '}';\n\t\t}\n\n\t\t// Style selected nodes\n\t\tif (this._options.highlightSelected && (this._options.selectedColor || this._options.selectedBackColor)) {\n\n\t\t\tvar innerStyle = ''\n\t\t\tif (this._options.selectedColor) {\n\t\t\t\tinnerStyle += 'color:' + this._options.selectedColor + ';';\n\t\t\t}\n\t\t\tif (this._options.selectedBackColor) {\n\t\t\t\tinnerStyle += 'background-color:' + this._options.selectedBackColor + ';';\n\t\t\t}\n\n\t\t\tstyle += '.node-' + this._elementId + '.node-selected{' + innerStyle + '}';\n\t\t\tstyle += '.node-' + this._elementId + '.node-selected:hover{' + innerStyle + '}';\n\t\t}\n\n\t\t// Style changed nodes\n\t\tif (this._options.highlightChanges) {\n\t\t\tvar innerStyle = 'color: ' + this._options.changedNodeColor + ';';\n\t\t\tstyle += '.node-' + this._elementId + '.node-check-changed{' + innerStyle + '}';\n\t\t}\n\n\t\t// Node level style overrides\n\t\t$.each(this._orderedNodes, $.proxy(function (index, node) {\n\t\t\tif (node.color || node.backColor) {\n\t\t\t\tvar innerStyle = '';\n\t\t\t\tif (node.color) {\n\t\t\t\t\tinnerStyle += 'color:' + node.color + ';';\n\t\t\t\t}\n\t\t\t\tif (node.backColor) {\n\t\t\t\t\tinnerStyle += 'background-color:' + node.backColor + ';';\n\t\t\t\t}\n\t\t\t\tstyle += '.node-' + this._elementId + '[data-nodeId=\"' + node.nodeId + '\"]{' + innerStyle + '}';\n\t\t\t}\n\n\t\t\tif (node.iconColor) {\n\t\t\t\tvar innerStyle = 'color:' + node.iconColor + ';';\n\t\t\t\tstyle += '.node-' + this._elementId + '[data-nodeId=\"' + node.nodeId + '\"] 
.node-icon{' + innerStyle + '}';\n\t\t\t}\n\t\t}, this));\n\n\t\treturn this._css + style;\n\t};\n\n\tTree.prototype._template = {\n\t\ttree: $('<ul class=\"list-group\"></ul>'),\n\t\tnode: $('<li class=\"list-group-item\"></li>'),\n\t\tindent: $('<span class=\"indent\"></span>'),\n\t\ticon: {\n\t\t\tnode: $('<span class=\"icon node-icon\"></span>'),\n\t\t\texpand: $('<span class=\"icon expand-icon\"></span>'),\n\t\t\tcheck: $('<span class=\"icon check-icon\"></span>'),\n\t\t\tempty: $('<span class=\"icon\"></span>')\n\t\t},\n\t\timage: $('<span class=\"image\"></span>'),\n\t\tbadge: $('<span></span>'),\n\t\ttext: $('<span class=\"text\"></span>')\n\t};\n\n\tTree.prototype._css = '.treeview .list-group-item{cursor:pointer}.treeview span.indent{margin-left:10px;margin-right:10px}.treeview span.icon{width:12px;margin-right:5px}.treeview .node-disabled{color:silver;cursor:not-allowed}'\n\n\n\t/**\n\t\tReturns an array of matching node objects.\n\t\t@param {String} pattern - A pattern to match against a given field\n\t\t@return {String} field - Field to query pattern against\n\t*/\n\tTree.prototype.findNodes = function (pattern, field) {\n\t\treturn this._findNodes(pattern, field);\n\t};\n\n\n\t/**\n\t\tReturns an ordered aarray of node objects.\n\t\t@return {Array} nodes - An array of all nodes\n\t*/\n\tTree.prototype.getNodes = function () {\n\t\treturn this._orderedNodes;\n\t};\n\n\t/**\n\t\tReturns parent nodes for given nodes, if valid otherwise returns undefined.\n\t\t@param {Array} nodes - An array of nodes\n\t\t@returns {Array} nodes - An array of parent nodes\n\t*/\n\tTree.prototype.getParents = function (nodes) {\n\t\tif (!(nodes instanceof Array)) {\n\t\t\tnodes = [nodes];\n\t\t}\n\n\t\tvar parentNodes = [];\n\t\t$.each(nodes, $.proxy(function (index, node) {\n\t\t\tvar parentNode = node.parentId ? 
this._nodes[node.parentId] : false;\n\t\t\tif (parentNode) {\n\t\t\t\tparentNodes.push(parentNode);\n\t\t\t}\n\t\t}, this));\n\t\treturn parentNodes;\n\t};\n\n\t/**\n\t\tReturns an array of sibling nodes for given nodes, if valid otherwise returns undefined.\n\t\t@param {Array} nodes - An array of nodes\n\t\t@returns {Array} nodes - An array of sibling nodes\n\t*/\n\tTree.prototype.getSiblings = function (nodes) {\n\t\tif (!(nodes instanceof Array)) {\n\t\t\tnodes = [nodes];\n\t\t}\n\n\t\tvar siblingNodes = [];\n\t\t$.each(nodes, $.proxy(function (index, node) {\n\t\t\tvar parent = this.getParents([node]);\n\t\t\tvar nodes = parent[0] ? parent[0].nodes : this._tree;\n\t\t\tsiblingNodes = nodes.filter(function (obj) {\n\t\t\t\treturn obj.nodeId !== node.nodeId;\n\t\t\t});\n\t\t}, this));\n\n\t\t// flatten possible nested array before returning\n\t\treturn $.map(siblingNodes, function (obj) {\n\t\t\treturn obj;\n\t\t});\n\t};\n\n\t/**\n\t\tReturns an array of selected nodes.\n\t\t@returns {Array} nodes - Selected nodes\n\t*/\n\tTree.prototype.getSelected = function () {\n\t\treturn this._findNodes('true', 'state.selected');\n\t};\n\n\t/**\n\t\tReturns an array of unselected nodes.\n\t\t@returns {Array} nodes - Unselected nodes\n\t*/\n\tTree.prototype.getUnselected = function () {\n\t\treturn this._findNodes('false', 'state.selected');\n\t};\n\n\t/**\n\t\tReturns an array of expanded nodes.\n\t\t@returns {Array} nodes - Expanded nodes\n\t*/\n\tTree.prototype.getExpanded = function () {\n\t\treturn this._findNodes('true', 'state.expanded');\n\t};\n\n\t/**\n\t\tReturns an array of collapsed nodes.\n\t\t@returns {Array} nodes - Collapsed nodes\n\t*/\n\tTree.prototype.getCollapsed = function () {\n\t\treturn this._findNodes('false', 'state.expanded');\n\t};\n\n\t/**\n\t\tReturns an array of checked nodes.\n\t\t@returns {Array} nodes - Checked nodes\n\t*/\n\tTree.prototype.getChecked = function () {\n\t\treturn this._findNodes('true', 
'state.checked');\n\t};\n\n\t/**\n\t\tReturns an array of unchecked nodes.\n\t\t@returns {Array} nodes - Unchecked nodes\n\t*/\n\tTree.prototype.getUnchecked = function () {\n\t\treturn this._findNodes('false', 'state.checked');\n\t};\n\n\t/**\n\t\tReturns an array of disabled nodes.\n\t\t@returns {Array} nodes - Disabled nodes\n\t*/\n\tTree.prototype.getDisabled = function () {\n\t\treturn this._findNodes('true', 'state.disabled');\n\t};\n\n\t/**\n\t\tReturns an array of enabled nodes.\n\t\t@returns {Array} nodes - Enabled nodes\n\t*/\n\tTree.prototype.getEnabled = function () {\n\t\treturn this._findNodes('false', 'state.disabled');\n\t};\n\n\n\t/**\n\t \tAdd nodes to the tree.\n\t\t@param {Array} nodes  - An array of nodes to add\n\t\t@param {optional Object} parentNode  - The node to which nodes will be added as children\n\t\t@param {optional number} index  - Zero based insert index\n\t\t@param {optional Object} options\n\t*/\n\tTree.prototype.addNode = function (nodes, parentNode, index, options) {\n\t\tif (!(nodes instanceof Array)) {\n\t\t\tnodes = [nodes];\n\t\t}\n\n\t\tif (parentNode instanceof Array) {\n\t\t\tparentNode = parentNode[0];\n\t\t}\n\n\t\toptions = $.extend({}, _default.options, options);\n\n\t\t// identify target nodes; either the tree's root or a parent's child nodes\n\t\tvar targetNodes;\n\t\tif (parentNode && parentNode.nodes) {\n\t\t\ttargetNodes = parentNode.nodes;\n\t\t} else if (parentNode) {\n\t\t\ttargetNodes = parentNode.nodes = [];\n\t\t} else {\n\t\t\ttargetNodes = this._tree;\n\t\t}\n\n\t\t// inserting nodes at specified positions\n\t\t$.each(nodes, $.proxy(function (i, node) {\n\t\t\tvar insertIndex = (typeof(index) === 'number') ? 
(index + i) : (targetNodes.length + 1);\n\t\t\ttargetNodes.splice(insertIndex, 0, node);\n\t\t}, this));\n\n\t\t// initialize new state and render changes\n\t\tthis._setInitialStates({nodes: this._tree}, 0)\n\t\t\t.done($.proxy(function () {\n\t\t\t\tif (parentNode && !parentNode.state.expanded) {\n\t\t\t\t\tthis._setExpanded(parentNode, true, options);\n\t\t\t\t}\n\t\t\t\tthis._render();\n\t\t\t}, this));\n\t}\n\n\t/**\n\t \tAdd nodes to the tree after given node.\n\t\t@param {Array} nodes  - An array of nodes to add\n\t\t@param {Object} node  - The node to which nodes will be added after\n\t\t@param {optional Object} options\n\t*/\n\tTree.prototype.addNodeAfter = function (nodes, node, options) {\n\t\tif (!(nodes instanceof Array)) {\n\t\t\tnodes = [nodes];\n\t\t}\n\n\t\tif (node instanceof Array) {\n\t\t\tnode = node[0];\n\t\t}\n\n\t\toptions = $.extend({}, _default.options, options);\n\n\t\tthis.addNode(nodes, this.getParents(node)[0], (node.index + 1), options);\n\t}\n\n\t/**\n\t \tAdd nodes to the tree before given node.\n\t\t@param {Array} nodes  - An array of nodes to add\n\t\t@param {Object} node  - The node to which nodes will be added before\n\t\t@param {optional Object} options\n\t*/\n\tTree.prototype.addNodeBefore = function (nodes, node, options) {\n\t\tif (!(nodes instanceof Array)) {\n\t\t\tnodes = [nodes];\n\t\t}\n\n\t\tif (node instanceof Array) {\n\t\t\tnode = node[0];\n\t\t}\n\n\t\toptions = $.extend({}, _default.options, options);\n\n\t\tthis.addNode(nodes, this.getParents(node)[0], node.index, options);\n\t}\n\n\t/**\n\t \tRemoves given nodes from the tree.\n\t\t@param {Array} nodes  - An array of nodes to remove\n\t\t@param {optional Object} options\n\t*/\n\tTree.prototype.removeNode = function (nodes, options) {\n\t\tif (!(nodes instanceof Array)) {\n\t\t\tnodes = [nodes];\n\t\t}\n\n\t\toptions = $.extend({}, _default.options, options);\n\n\t\tvar targetNodes, parentNode;\n\t\t$.each(nodes, $.proxy(function (index, node) {\n\n\t\t\t// remove 
nodes from tree\n\t\t\tparentNode = this._nodes[node.parentId];\n\t\t\tif (parentNode) {\n\t\t\t\ttargetNodes = parentNode.nodes;\n\t\t\t} else {\n\t\t\t\ttargetNodes = this._tree;\n\t\t\t}\n\t\t\ttargetNodes.splice(node.index, 1);\n\n\t\t\t// remove node from DOM\n\t\t\tthis._removeNodeEl(node);\n\t\t}, this));\n\n\t\t// initialize new state and render changes\n\t\tthis._setInitialStates({nodes: this._tree}, 0)\n\t\t\t.done(this._render.bind(this));\n\t};\n\n\t/**\n\t \tUpdates / replaces a given tree node\n\t\t@param {Object} node  - A single node to be replaced\n\t\t@param {Object} newNode  - THe replacement node\n\t\t@param {optional Object} options\n\t*/\n\tTree.prototype.updateNode = function (node, newNode, options) {\n\t\tif (node instanceof Array) {\n\t\t\tnode = node[0];\n\t\t}\n\n\t\toptions = $.extend({}, _default.options, options);\n\n\t\t// insert new node\n\t\tvar targetNodes;\n\t\tvar parentNode = this._nodes[node.parentId];\n\t\tif (parentNode) {\n\t\t\ttargetNodes = parentNode.nodes;\n\t\t} else {\n\t\t\ttargetNodes = this._tree;\n\t\t}\n\t\ttargetNodes.splice(node.index, 1, newNode);\n\n\t\t// remove old node from DOM\n\t\tthis._removeNodeEl(node);\n\n\t\t// initialize new state and render changes\n\t\tthis._setInitialStates({nodes: this._tree}, 0)\n\t\t\t.done(this._render.bind(this));\n\t};\n\n\n\t/**\n\t\tSelects given tree nodes\n\t\t@param {Array} nodes - An array of nodes\n\t\t@param {optional Object} options\n\t*/\n\tTree.prototype.selectNode = function (nodes, options) {\n\t\tif (!(nodes instanceof Array)) {\n\t\t\tnodes = [nodes];\n\t\t}\n\n\t\toptions = $.extend({}, _default.options, options);\n\n\t\t$.each(nodes, $.proxy(function (index, node) {\n\t\t\tthis._setSelected(node, true, options);\n\t\t}, this));\n\t};\n\n\t/**\n\t\tUnselects given tree nodes\n\t\t@param {Array} nodes - An array of nodes\n\t\t@param {optional Object} options\n\t*/\n\tTree.prototype.unselectNode = function (nodes, options) {\n\t\tif (!(nodes instanceof 
Array)) {\n\t\t\tnodes = [nodes];\n\t\t}\n\n\t\toptions = $.extend({}, _default.options, options);\n\n\t\t$.each(nodes, $.proxy(function (index, node) {\n\t\t\tthis._setSelected(node, false, options);\n\t\t}, this));\n\t};\n\n\t/**\n\t\tToggles a node selected state; selecting if unselected, unselecting if selected.\n\t\t@param {Array} nodes - An array of nodes\n\t\t@param {optional Object} options\n\t*/\n\tTree.prototype.toggleNodeSelected = function (nodes, options) {\n\t\tif (!(nodes instanceof Array)) {\n\t\t\tnodes = [nodes];\n\t\t}\n\n\t\toptions = $.extend({}, _default.options, options);\n\n\t\t$.each(nodes, $.proxy(function (index, node) {\n\t\t\tthis._toggleSelected(node, options);\n\t\t}, this));\n\t};\n\n\n\t/**\n\t\tCollapse all tree nodes\n\t\t@param {optional Object} options\n\t*/\n\tTree.prototype.collapseAll = function (options) {\n\t\toptions = $.extend({}, _default.options, options);\n\t\toptions.levels = options.levels || 999;\n\t\tthis.collapseNode(this._tree, options);\n\t};\n\n\t/**\n\t\tCollapse a given tree node\n\t\t@param {Array} nodes - An array of nodes\n\t\t@param {optional Object} options\n\t*/\n\tTree.prototype.collapseNode = function (nodes, options) {\n\t\toptions = $.extend({}, _default.options, options);\n\n\t\t$.each(nodes, $.proxy(function (index, node) {\n\t\t\tthis._setExpanded(node, false, options);\n\t\t}, this));\n\t};\n\n\t/**\n\t\tExpand all tree nodes\n\t\t@param {optional Object} options\n\t*/\n\tTree.prototype.expandAll = function (options) {\n\t\toptions = $.extend({}, _default.options, options);\n\t\toptions.levels = options.levels || 999;\n\t\tthis.expandNode(this._tree, options);\n\t};\n\n\t/**\n\t\tExpand given tree nodes\n\t\t@param {Array} nodes - An array of nodes\n\t\t@param {optional Object} options\n\t*/\n\tTree.prototype.expandNode = function (nodes, options) {\n\t\tif (!(nodes instanceof Array)) {\n\t\t\tnodes = [nodes];\n\t\t}\n\n\t\toptions = $.extend({}, _default.options, options);\n\n\t\t$.each(nodes, 
$.proxy(function (index, node) {\n\t\t\t// Do not re-expand already expanded nodes\n\t\t\tif (node.state.expanded) return;\n\n\t\t\tif (typeof(this._options.lazyLoad) === 'function' && node.lazyLoad) {\n\t\t\t\tthis._lazyLoad(node);\n\t\t\t}\n\n\t\t\tthis._setExpanded(node, true, options);\n\t\t\tif (node.nodes) {\n\t\t\t\tthis._expandLevels(node.nodes, options.levels-1, options);\n\t\t\t}\n\t\t}, this));\n\t};\n\n\tTree.prototype._expandLevels = function (nodes, level, options) {\n\t\tif (!(nodes instanceof Array)) {\n\t\t\tnodes = [nodes];\n\t\t}\n\n\t\toptions = $.extend({}, _default.options, options);\n\n\t\t$.each(nodes, $.proxy(function (index, node) {\n\t\t\tthis._setExpanded(node, (level > 0) ? true : false, options);\n\t\t\tif (node.nodes) {\n\t\t\t\tthis._expandLevels(node.nodes, level-1, options);\n\t\t\t}\n\t\t}, this));\n\t};\n\n\t/**\n\t\tReveals given tree nodes, expanding the tree from node to root.\n\t\t@param {Array} nodes - An array of nodes\n\t\t@param {optional Object} options\n\t*/\n\tTree.prototype.revealNode = function (nodes, options) {\n\t\tif (!(nodes instanceof Array)) {\n\t\t\tnodes = [nodes];\n\t\t}\n\n\t\toptions = $.extend({}, _default.options, options);\n\n\t\t$.each(nodes, $.proxy(function (index, node) {\n\t\t\tvar parentNode = node;\n\t\t\tvar tmpNode;\n\t\t\twhile (tmpNode = this.getParents([parentNode])[0]) {\n\t\t\t\tparentNode = tmpNode;\n\t\t\t\tthis._setExpanded(parentNode, true, options);\n\t\t\t};\n\t\t}, this));\n\t};\n\n\t/**\n\t\tToggles a node's expanded state; collapsing if expanded, expanding if collapsed.\n\t\t@param {Array} nodes - An array of nodes\n\t\t@param {optional Object} options\n\t*/\n\tTree.prototype.toggleNodeExpanded = function (nodes, options) {\n\t\tif (!(nodes instanceof Array)) {\n\t\t\tnodes = [nodes];\n\t\t}\n\n\t\toptions = $.extend({}, _default.options, options);\n\n\t\t$.each(nodes, $.proxy(function (index, node) {\n\t\t\tthis._toggleExpanded(node, options);\n\t\t}, 
this));\n\t};\n\n\n\t/**\n\t\tCheck all tree nodes\n\t\t@param {optional Object} options\n\t*/\n\tTree.prototype.checkAll = function (options) {\n\t\toptions = $.extend({}, _default.options, options);\n\n\t\tvar nodes = $.grep(this._orderedNodes, function (node) {\n\t\t\treturn !node.state.checked;\n\t\t});\n\t\t$.each(nodes, $.proxy(function (index, node) {\n\t\t\tthis._setChecked(node, true, options);\n\t\t}, this));\n\t};\n\n\t/**\n\t\tChecks given tree nodes\n\t\t@param {Array} nodes - An array of nodes\n\t\t@param {optional Object} options\n\t*/\n\tTree.prototype.checkNode = function (nodes, options) {\n\t\tif (!(nodes instanceof Array)) {\n\t\t\tnodes = [nodes];\n\t\t}\n\n\t\toptions = $.extend({}, _default.options, options);\n\n\t\t$.each(nodes, $.proxy(function (index, node) {\n\t\t\tthis._setChecked(node, true, options);\n\t\t}, this));\n\t};\n\n\t/**\n\t\tUncheck all tree nodes\n\t\t@param {optional Object} options\n\t*/\n\tTree.prototype.uncheckAll = function (options) {\n\t\toptions = $.extend({}, _default.options, options);\n\n\t\tvar nodes = $.grep(this._orderedNodes, function (node) {\n\t\t\treturn node.state.checked || node.state.checked === undefined;\n\t\t});\n\t\t$.each(nodes, $.proxy(function (index, node) {\n\t\t\tthis._setChecked(node, false, options);\n\t\t}, this));\n\t};\n\n\t/**\n\t\tUncheck given tree nodes\n\t\t@param {Array} nodes - An array of nodes\n\t\t@param {optional Object} options\n\t*/\n\tTree.prototype.uncheckNode = function (nodes, options) {\n\t\tif (!(nodes instanceof Array)) {\n\t\t\tnodes = [nodes];\n\t\t}\n\n\t\toptions = $.extend({}, _default.options, options);\n\n\t\t$.each(nodes, $.proxy(function (index, node) {\n\t\t\tthis._setChecked(node, false, options);\n\t\t}, this));\n\t};\n\n\t/**\n\t\tToggles a node's checked state; checking if unchecked, unchecking if checked.\n\t\t@param {Array} nodes - An array of nodes\n\t\t@param {optional Object} options\n\t*/\n\tTree.prototype.toggleNodeChecked = function (nodes, 
options) {\n\t\tif (!(nodes instanceof Array)) {\n\t\t\tnodes = [nodes];\n\t\t}\n\n\t\toptions = $.extend({}, _default.options, options);\n\n\t\t$.each(nodes, $.proxy(function (index, node) {\n\t\t\tthis._toggleChecked(node, options);\n\t\t}, this));\n\t};\n\n\t/**\n\t\tSaves the current state of checkboxes as default, cleaning up any highlighted changes\n\t*/\n\tTree.prototype.unmarkCheckboxChanges = function () {\n\t\tthis._inheritCheckboxChanges();\n\n\t\t$.each(this._nodes, function (index, node) {\n\t\t\tnode.$el.removeClass('node-check-changed');\n\t\t});\n\t};\n\n\t/**\n\t\tDisable all tree nodes\n\t\t@param {optional Object} options\n\t*/\n\tTree.prototype.disableAll = function (options) {\n\t\toptions = $.extend({}, _default.options, options);\n\n\t\tvar nodes = this._findNodes('false', 'state.disabled');\n\t\t$.each(nodes, $.proxy(function (index, node) {\n\t\t\tthis._setDisabled(node, true, options);\n\t\t}, this));\n\t};\n\n\t/**\n\t\tDisable given tree nodes\n\t\t@param {Array} nodes - An array of nodes\n\t\t@param {optional Object} options\n\t*/\n\tTree.prototype.disableNode = function (nodes, options) {\n\t\tif (!(nodes instanceof Array)) {\n\t\t\tnodes = [nodes];\n\t\t}\n\n\t\toptions = $.extend({}, _default.options, options);\n\n\t\t$.each(nodes, $.proxy(function (index, node) {\n\t\t\tthis._setDisabled(node, true, options);\n\t\t}, this));\n\t};\n\n\t/**\n\t\tEnable all tree nodes\n\t\t@param {optional Object} options\n\t*/\n\tTree.prototype.enableAll = function (options) {\n\t\toptions = $.extend({}, _default.options, options);\n\n\t\tvar nodes = this._findNodes('true', 'state.disabled');\n\t\t$.each(nodes, $.proxy(function (index, node) {\n\t\t\tthis._setDisabled(node, false, options);\n\t\t}, this));\n\t};\n\n\t/**\n\t\tEnable given tree nodes\n\t\t@param {Array} nodes - An array of nodes\n\t\t@param {optional Object} options\n\t*/\n\tTree.prototype.enableNode = function (nodes, options) {\n\t\tif (!(nodes instanceof Array)) {\n\t\t\tnodes = 
[nodes];\n\t\t}\n\n\t\toptions = $.extend({}, _default.options, options);\n\n\t\t$.each(nodes, $.proxy(function (index, node) {\n\t\t\tthis._setDisabled(node, false, options);\n\t\t}, this));\n\t};\n\n\t/**\n\t\tToggles a node's disabled state; disabling is enabled, enabling if disabled.\n\t\t@param {Array} nodes - An array of nodes\n\t\t@param {optional Object} options\n\t*/\n\tTree.prototype.toggleNodeDisabled = function (nodes, options) {\n\t\tif (!(nodes instanceof Array)) {\n\t\t\tnodes = [nodes];\n\t\t}\n\n\t\toptions = $.extend({}, _default.options, options);\n\n\t\t$.each(nodes, $.proxy(function (index, node) {\n\t\t\tthis._setDisabled(node, !node.state.disabled, options);\n\t\t}, this));\n\t};\n\n\n\t/**\n\t\tSearches the tree for nodes (text) that match given criteria\n\t\t@param {String} pattern - A given string to match against\n\t\t@param {optional Object} options - Search criteria options\n\t\t@return {Array} nodes - Matching nodes\n\t*/\n\tTree.prototype.search = function (pattern, options) {\n\t\toptions = $.extend({}, _default.searchOptions, options);\n\n\t\tvar previous = this._getSearchResults();\n\t\tvar results = [];\n\n\t\tif (pattern && pattern.length > 0) {\n\n\t\t\tif (options.exactMatch) {\n\t\t\t\tpattern = '^' + pattern + '$';\n\t\t\t}\n\n\t\t\tvar modifier = 'g';\n\t\t\tif (options.ignoreCase) {\n\t\t\t\tmodifier += 'i';\n\t\t\t}\n\n\t\t\tresults = this._findNodes(pattern, 'text', modifier);\n\t\t}\n\n\t\t// Clear previous results no longer matched\n\t\t$.each(this._diffArray(results, previous), $.proxy(function (index, node) {\n\t\t\tthis._setSearchResult(node, false, options);\n\t\t}, this));\n\n\t\t// Set new results\n\t\t$.each(this._diffArray(previous, results), $.proxy(function (index, node) {\n\t\t\tthis._setSearchResult(node, true, options);\n\t\t}, this));\n\n\t\t// Reveal hidden nodes\n\t\tif (results && options.revealResults) {\n\t\t\tthis.revealNode(results);\n\t\t}\n\n\t\tthis._triggerEvent('searchComplete', results, 
options);\n\n\t\treturn results;\n\t};\n\n\t/**\n\t\tClears previous search results\n\t*/\n\tTree.prototype.clearSearch = function (options) {\n\t\toptions = $.extend({}, { render: true }, options);\n\n\t\tvar results = $.each(this._getSearchResults(), $.proxy(function (index, node) {\n\t\t\tthis._setSearchResult(node, false, options);\n\t\t}, this));\n\n\t\tthis._triggerEvent('searchCleared', results, options);\n\t};\n\n\tTree.prototype._getSearchResults = function () {\n\t\treturn this._findNodes('true', 'searchResult');\n\t};\n\n\tTree.prototype._diffArray = function (a, b) {\n\t\tvar diff = [];\n\t\t$.grep(b, function (n) {\n\t\t\tif ($.inArray(n, a) === -1) {\n\t\t\t\tdiff.push(n);\n\t\t\t}\n\t\t});\n\t\treturn diff;\n\t};\n\n\t/**\n\t\tFind nodes that match a given criteria\n\t\t@param {String} pattern - A given string to match against\n\t\t@param {optional String} attribute - Attribute to compare pattern against\n\t\t@param {optional String} modifier - Valid RegEx modifiers\n\t\t@return {Array} nodes - Nodes that match your criteria\n\t*/\n\tTree.prototype._findNodes = function (pattern, attribute, modifier) {\n\t\tattribute = attribute || 'text';\n\t\tmodifier = modifier || 'g';\n\t\treturn $.grep(this._orderedNodes, $.proxy(function (node) {\n\t\t\tvar val = this._getNodeValue(node, attribute);\n\t\t\tif (typeof val === 'string') {\n\t\t\t\treturn val.match(new RegExp(pattern, modifier));\n\t\t\t}\n\t\t}, this));\n\t};\n\n\t/**\n\t\tRecursive find for retrieving nested attributes values\n\t\tAll values are return as strings, unless invalid\n\t\t@param {Object} obj - Typically a node, could be any object\n\t\t@param {String} attr - Identifies an object property using dot notation\n\t\t@return {String} value - Matching attributes string representation\n\t*/\n\tTree.prototype._getNodeValue = function (obj, attr) {\n\t\tvar index = attr.indexOf('.');\n\t\tif (index > 0) {\n\t\t\tvar _obj = obj[attr.substring(0, index)];\n\t\t\tvar _attr = attr.substring(index 
+ 1, attr.length);\n\t\t\treturn this._getNodeValue(_obj, _attr);\n\t\t}\n\t\telse {\n\t\t\tif (obj.hasOwnProperty(attr) && obj[attr] !== undefined) {\n\t\t\t\treturn obj[attr].toString();\n\t\t\t}\n\t\t\telse {\n\t\t\t\treturn undefined;\n\t\t\t}\n\t\t}\n\t};\n\n\tvar logError = function (message) {\n\t\tif (window.console) {\n\t\t\twindow.console.error(message);\n\t\t}\n\t};\n\n\t// Prevent against multiple instantiations,\n\t// handle updates and method calls\n\t$.fn[pluginName] = function (options, args) {\n\n\t\tvar result;\n\t\tif (this.length == 0) {\n\t\t\tthrow \"No element has been found!\";\n\t\t}\n\n\t\tthis.each(function () {\n\t\t\tvar _this = $.data(this, pluginName);\n\t\t\tif (typeof options === 'string') {\n\t\t\t\tif (!_this) {\n\t\t\t\t\tlogError('Not initialized, can not call method : ' + options);\n\t\t\t\t}\n\t\t\t\telse if (!$.isFunction(_this[options]) || options.charAt(0) === '_') {\n\t\t\t\t\tlogError('No such method : ' + options);\n\t\t\t\t}\n\t\t\t\telse {\n\t\t\t\t\tif (!(args instanceof Array)) {\n\t\t\t\t\t\targs = [ args ];\n\t\t\t\t\t}\n\t\t\t\t\tresult = _this[options].apply(_this, args);\n\t\t\t\t}\n\t\t\t}\n\t\t\telse if (typeof options === 'boolean') {\n\t\t\t\tresult = _this;\n\t\t\t}\n\t\t\telse {\n\t\t\t\t$.data(this, pluginName, new Tree(this, $.extend(true, {}, options)));\n\t\t\t}\n\t\t});\n\n\t\treturn result || this;\n\t};\n\n})(jQuery, window, document);\n"
  },
  {
    "path": "web_gui/gui_v3/plugins/browser/plugin.php",
    "content": "<?php\n/*\n * Copyright (C) 2016-2017 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n\n/*\n * Browser V0.1\n * Just browse your file system\n *\n */\n\nclass browser extends Plugin {\n    public $Name = \"Browser\";\n    public $Description = \"Browser\";\n    public $Version = \"V0.1\";\n\n    /*\n     * Plugin options\n     */\n\n    public function init() {\n    }\n\n    /* Called from UI menu */\n    function ui_header($param) {\n            $newparam = '<link rel=\"stylesheet\" href=\"plugins/browser/css/bootstrap-treeview.css\">'.\"\\n\";\n            $newparam.= '<script src=\"plugins/browser/js/bootstrap-treeview.js\"></script>'.\"\\n\";\n            $newparam.= '<script src=\"plugins/browser/script.js\"></script>'.\"\\n\";\n            $param = $param.$newparam;\n            return $param;\n    }\n\n    /* Called from UI menu */\n    function ui_menu_top($param) {\n            $newparam = \"<li><a href='#' onclick='browser_GetInfo()'>Browser</a></li>\\n\";\n            $param = $param.$newparam;\n            return $param;\n    }\n\n}\n\n"
  },
  {
    "path": "web_gui/gui_v3/plugins/browser/script.js",
    "content": "/*\n * Copyright (C) 2016-2017 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n\n/***********************************\n * Build table and get data from API\n * Use lazyload\n **********************************/\nfunction browser_GetInfo(){\n\n        /* Create the table template */\n        document.getElementById(\"main_content\").innerHTML = `\n                <h1>Browser</h1>\n                <div id=\"tree\"></div>\n                `;\n\n\n        /* Retrieve the fid of the root */\n        $.ajax({\n                url: \"api/index.php?request=native/vars\"\n        }).then(function(data) {\n                var RootId=data['RootId'];\n                var tree = [{\n                        text: \"/\",\n                nodes: [],\n                id: data['RootId'],\n                lazyLoad: true,\n\n                }];\n                $('#tree').treeview({\n                        data: tree,\n                        showTags: true,\n                        lazyLoad: function(node, loader) {\n\n                                $.ajax({\n                                        url: \"api/index.php?request=native/files/parent_id.filter/\" + node.id\n                                }).then(function(data) {\n                                        nodes=[];\n\n                                        for (var key in data) {\n                                                tnode={}\n                                                tnode['text']=data[key].name;\n                                                tnode['id']=data[key].id;\n                                                tnode['tags']=[]\n                                                tnode['tags'].push(data[key].uid+\" : \" + 
data[key].gid + \" : \" + parseInt(data[key].mode,10).toString(8));\n                                                if (data[key]['type']==\"dir\") {\n                                                        tnode['icon']=\"glyphicon glyphicon-folder-close\";\n                                                        tnode['lazyLoad']=true;\n                                                        tnode['showTags']=true;\n                                                        /* You can add more tags for folders here */\n                                                } else {\n                                                        tnode['icon']=\"glyphicon glyphicon-open-file\";\n                                                        tnode['tags'].push(formatCount(data[key].blocks + \"Blks\"));\n                                                        tnode['tags'].push(formatBytes(data[key].size));\n                                                        /* You can add more tags for files here */\n\n                                                }\n\n                                                nodes.push(tnode);\n                                        }\n                                        loader(nodes);\n                                });\n\n                        }, /* end of lazyload */\n\n                    });\n        });\n\n}\n"
  },
  {
    "path": "web_gui/gui_v3/plugins/colorgraph/plugin.php",
    "content": "<?php\n/*\n * Copyright (C) 2016-2017 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n\n/*\n * ColorGraph V0.1\n * Replace random colors\n *\n */\n\nclass colorgraph extends Plugin {\n    public $Name = \"Color Graph\";\n    public $Description = \"Clean Graph colors\";\n    public $Version = \"V0.1\";\n\n    public function graph_postdata_uid($param) {\n\n        $c_count = count($param['labels']);\n\n        for($i = 0; $i < $c_count; $i++) {\n\n            /* Fancy color mix */\n            $alt = $i%2;\n            $r = round(($i+1)*220/$c_count)+16;\n            $g = (240-$r)+16;\n            $b = 230*$alt+16;\n            $color = \"#\".dechex($r).dechex($g).dechex($b);\n\n            /* Replace the previous color */\n            $param['datasets'][0]['backgroundColor'][$i] = $color;\n            $param['datasets'][1]['backgroundColor'][$i] = $color;\n        }\n\n        return $param;\n    }\n\n}\n\n"
  },
  {
    "path": "web_gui/gui_v3/plugins/console/plugin.php",
    "content": "<?php\n/*\n * Copyright (C) 2016-2017 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n\n/*\n * Console V0.1\n * Use the API to export data\n *\n */\n\nclass console extends Plugin {\n    public $Name = \"Console\";\n    public $Description = \"Use API to Export Data\";\n    public $Version = \"V0.1\";\n\n    private $output = false;\n\n    /* Called from UI menu */\n    function ui_header($param) {\n            $newparam = '<script src=\"plugins/console/script.js\"></script>'.\"\\n\";\n            $param = $param.$newparam;\n            return $param;\n    }\n\n    /* Called from UI menu */\n    function ui_menu_top($param) {\n            $newparam = \"<li><a href='#' onclick='console_GetInfo()'>Console</a></li>\\n\";\n            $param = $param.$newparam;\n            return $param;\n    }\n\n}\n\n"
  },
  {
    "path": "web_gui/gui_v3/plugins/console/script.js",
    "content": "/*\n * Copyright (C) 2016-2017 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n\nfunction ConsoleRun() {\n        var request = document.getElementById(\"consolerequest\").value;\n        $.ajax({\n                url: \"api/index.php?request=\" + request\n        }).then(function(data) {\n                defaultx = \"\"\n                document.getElementById(\"consoledata\").innerHTML=\"Number of result: \" + data.length;\n                if (Array.isArray(data)) {\n                        document.getElementById(\"consoledatabox\").value=JSON.stringify(data,null,2);\n                        graphx = document.getElementById(\"consolerequestgraphx\")\n                        graphy = document.getElementById(\"consolerequestgraphy\")\n                        graphs = document.getElementById(\"consolerequestseries\")\n                        while (graphx.options.length) graphx.remove(0);\n                        while (graphy.options.length) graphy.remove(0);\n                        while (graphs.options.length) graphs.remove(0);\n                        graphs.add(new Option(\"None\", \"None\"));\n                        for (var item in data[0]) {\n                                if (defaultx==\"\" || item==\"CronDate\") { \n                                    defaultx=item\n                                }\n                                graphx.add(new Option(item, item));\n                                graphy.add(new Option(item, item));\n                                graphs.add(new Option(item, item));\n                        }\n                graphx.value=defaultx\n\n                } else {\n                        document.getElementById(\"consoledatabox\").value=data;\n   
             }\n        });\n}\n\nfunction ConsoleGraphRun() {\n        document.getElementById(\"consolegraph\").innerHTML='<canvas style=\"max-height:640px; min-height:320px\" id=\"ctx\"></canvas> <!-- Canvas for Graph -->'\n\n        var request = document.getElementById(\"consolerequest\").value;\n        $.ajax({\n                url: \"api/index.php?request=\" + request\n        }).then(function(data) {\n\n        xitem = document.getElementById(\"consolerequestgraphx\").value;\n        yitem = document.getElementById(\"consolerequestgraphy\").value;\n        type = document.getElementById(\"consolerequestgraph\").value;\n        series = document.getElementById(\"consolerequestseries\").value;\n        mapping = document.getElementById(\"mapping\").value;\n        /* scatter graph */\n        if (type==\"scatter\") {\n\n        dataxy = []\n        for (var item in data)\n        {\n                dataxy.push({x: data[item][xitem], y: data[item][yitem]})\n        }\n\n       scatterChartData = {\n            datasets: [{\n                label: \"Dataset\",\n                borderColor: \"FF0000\",\n                backgroundColor: \"00FF00\",\n                data: dataxy,\n                }]};\n\n        var ctx = document.getElementById(\"ctx\").getContext(\"2d\");\n        window.myScatter = Chart.Scatter(ctx, {\n                data: scatterChartData,\n                options: {\n                    title: {\n                         display: true,\n                         text: 'Chart.js Scatter Chart'\n                        },\n                }\n        });\n\n        }\n\n        /* line graph */\n        if (type==\"line\" || type==\"line stacked\" || type==\"bar\") {\n\n        if (series != 'None')\n        {\n \n        //series_label =  {}\n        series_title = {}\n        series_x = {}\n        series_index={}\n        for (var item in data)\n        {\n            series_title[data[item][series]]=\"\"\n            
series_x[data[item][xitem]]=\"\"\n            key = data[item][series]+\"_\"+data[item][xitem]\n            series_index[key] = data[item][yitem]\n        }\n\n        labels = []\n        for (var t in series_x) \n        {\n            labels.push(t)\n        }\n\n        datasets=[]\n        for (var s in series_title)\n        {\n                datay=[]\n                for (var t in series_x) \n                {\n                    key = s+\"_\"+t\n                    if (key in series_index) {\n                        datay.push(series_index[key])\n                    } else {\n                        datay.push(0)\n                    }\n                }\n                \n                if (mapping != \"None\") {\n                    mapp = mapping.split(\",\");\n                    imapp = mapp.indexOf(s);\n                    \n                    if (imapp==-1) {\n                        label_s = s;\n                    } else {\n                        label_s = mapp[imapp+1];\n                    } \n                } else {\n                    label_s = s;\n                }\n                \n\n                if (type==\"line\") {\n                    dataset = {\n                        label: label_s,\n                        borderColor: stringToColour(s),\n                        backgroundColor: \"#00000000\",\n                        data: datay,\n                    }\n                } else {\n                    dataset = {\n                        label: label_s,\n                        borderColor: \"#000000\",\n                        backgroundColor: stringToColour(s),\n                        data: datay,\n                    }\n\n                }\n\n                datasets.push(dataset)\n        \n        }        \n\n       lineChartData = {\n            labels: labels,\n            datasets: datasets\n       };\n\n        } else {\n        datay = []\n        labels = []\n        for (var item in data)\n        {\n          
      datay.push(data[item][yitem])\n                labels.push(data[item][xitem])\n        }\n        \n\n       lineChartData = {\n            labels: labels,\n            datasets: [{\n                label: \"Dataset\",\n                borderColor: \"FF0000\",\n                backgroundColor: \"00FF00\",\n                data: datay,\n                }]};\n        }\n\n\n        var ctx = document.getElementById(\"ctx\").getContext(\"2d\");\n\n        if (type==\"line\") {\n        window.myBar = Chart.Line(ctx, {\n                data: lineChartData,\n                options: {\n                    title: {\n                         display: true,\n                         text: 'Chart.js line Chart'\n                        },\n                }\n        });\n        } else if (type==\"line stacked\") {\n        window.myBar = Chart.Line(ctx, {\n                data: lineChartData,\n                options: {\n                    title: {\n                         display: true,\n                         text: 'Chart.js line stacked Chart'\n                        },\n                    scales: {\n                        yAxes: [{stacked: true,}]\n                    },\n                }\n        });\n        } else if (type==\"bar\") {\n\n        window.mybar = Chart.Bar(ctx, {\n                data: lineChartData,\n                options: {\n                    title: {\n                         display: true,\n                         text: 'chart.js bar chart'\n                        },\n                }\n        });\n\n\n        }\n\n\n        }\n\n        });\n}\n\nfunction console_GetInfo(request){\n\n        /* Create the table template */\n        document.getElementById(\"main_content\").innerHTML = `\n                <h1>Console</h1>\n                <h2>Request</h2>\n                <form id=\"consoleform\" name=\"consoleform\" class=\"form-inline\">\n                <fieldset class=\"form-group\">\n                <label 
for=\"interative\">Filter</label>\n                <input type=\"text\" class=\"form-control\" id=\"consolerequest\" name=\"consolerequest\" placeholder=\"request\" size=64>\n                </fieldset>\n                <button type=\"button\" id=\"Run\" class=\"btn btn-primary\"  autocomplete=\"off\" onclick=\"ConsoleRun()\">Run</button>\n                </form>\n                <h2>Data</h2>\n                <div name=\"consoledata\" id=\"consoledata\"></div>\n                <textarea class=\"consoledatabox\" name=\"consoledatabox\" id=\"consoledatabox\" rows=\"24\" style='width:100%;'>\n                </textarea>\n                <h2>Graph</h2>\n                <form id=\"consoleformgraph\" name=\"consoleformgraph\" class=\"form-inline\">\n                <fieldset class=\"form-group\">\n                 <div class=\"form-group\">\n                <label>Type</label>\n                <select  class=\"form-control\" id=\"consolerequestgraph\" name=\"consolerequestgraph\" placeholder=\"Type\">\n                    <option value=\"line\">Line</option>\n                    <option value=\"line stacked\">Line stacked</option>\n                    <option value=\"bar\">Bar</option>\n                    <option value=\"scatter\">Scatter</option>\n                </select>\n                </div>\n                 <div class=\"form-group\">\n                    <label>Label/X</label>\n                    <select  class=\"form-control\" id=\"consolerequestgraphx\" name=\"consolerequestgraphx\" placeholder=\"fieldx\" placeholder=\"Label/X\" ></select>\n                </div>\n                 <div class=\"form-group\">\n            <label>Values/Y</label>\n                    <select  class=\"form-control\" id=\"consolerequestgraphy\" name=\"consolerequestgraphy\" placeholder=\"fieldy\" placeholder=\"Y\"></select>\n                </div>\n                 <div class=\"form-group\">\n            <label>Series</label>\n                    <select  class=\"form-control\" 
id=\"consolerequestseries\" name=\"consolerequestseries\" placeholder=\"series\" placeholder=\"series\"></select>\n                </div>\n                 <div class=\"form-group\">\n                <button type=\"button\" id=\"RunGraph\" class=\"btn btn-primary\" autocomplete=\"off\" onclick=\"ConsoleGraphRun()\">Refresh</button>\n                <input type=\"text\" class=\"form-control\" id=\"graphname\" name=\"graphname\" placeholder=\"Name\" size=24>\n                <input type=\"text\" class=\"form-control\" id=\"mapping\" name=\"mapping\" placeholder=\"Mapping\" size=24>\n                <button type=\"button\" id=\"AddGraph\" class=\"btn btn-primary\" autocomplete=\"off\" onclick=\"AddCustomGraph()\">Add to custom graphs</button>\n                </div>\n                </fieldset>\n                </form>\n                <div name=\"consolegraph\" id=\"consolegraph\"></div>\n                <h2>Help</h2>\n                <h3>Request</h3>\n                <p>Example: native/acct/uid.group to get the stats by user.</p>\n                <p>Example: native/files/uid.filter/robin/ to get all the files and directories of robin.</p>\n                <p>Please refer to README.txt for more filter and request</p>\n                <h3>Graph</h3>\n                <p> Use series to automaticaly create differents dataset using with of the field </p>\n                <p> Mapping is a coma separed list to change dataset labels </p>\n                <p> Example: if you use groupbysize you can replace 0,1,2,... 
by B,KB,MB with 0,K,1,KB,2,MB, ...</p>\n                `;\n\n        if (request) {\n            document.getElementById(\"consolerequest\").value = request;\n            ConsoleRun();\n        }\n\n        //Add keypress event for filter form\n        $('#consoleform input').on('keypress', function(event){\n                if(event.key == \"Enter\" && !event.shiftKey){\n                        ConsoleRun();\n                        return false;\n                }\n        });\n\n}\n\n\nfunction AddCustomGraph(){\n\n    var queryString=\"\";\n    var myForm = document.getElementById(\"consoleformgraph\");\n    var myFormc = document.getElementById(\"consoleform\");\n    queryString = queryString + \"/\";\n    queryString = queryString + \"Name/\" + myForm.elements[\"graphname\"].value+ \"/\";\n    queryString = queryString + \"ChartType/\" + myForm.elements[\"consolerequestgraph\"].value+ \"/\";\n    queryString = queryString + \"Request/\" + myFormc.elements[\"consolerequest\"].value.replace(/\\//g, \"---\")+ \"/\";\n    queryString = queryString + \"X/\" + myForm.elements[\"consolerequestgraphx\"].value+ \"/\";\n    queryString = queryString + \"Y/\" + myForm.elements[\"consolerequestgraphy\"].value+ \"/\";\n    queryString = queryString + \"Series/\" + myForm.elements[\"consolerequestseries\"].value+ \"/\";\n    queryString = queryString + \"Mapping/\" + myForm.elements[\"mapping\"].value+ \"/\";\n   $.ajax({\n        url: \"api/index.php?request=native/customgraph/add\" + \"/\" + queryString\n    }).then(function(data) {\n    customgraph_GetInfo()\n    });\n}\n\n"
  },
  {
    "path": "web_gui/gui_v3/plugins/customgraph/help.html",
    "content": "<h1>Custom Graph</h1>\n<strong> Beta version </strong>\n"
  },
  {
    "path": "web_gui/gui_v3/plugins/customgraph/plugin.php",
    "content": "<?php\n/*\n * Copyright (C) 2016-2017 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n\n/*\n * Custom Graph V0.1\n * Create Custom Graphs\n *\n */\n\nclass customgraph extends Plugin {\n    public $Name = \"Custom Graph\";\n    public $Description = \"Manage Custom Graph\";\n    public $Version = \"V0.1\";\n\n\n    public $Req_table = array(\n            \"CUSTOMGRAPH\" => \"(Id INT NOT NULL AUTO_INCREMENT, Name VARCHAR(255), ChartType VARCHAR(255), Request VARCHAR(255), X VARCHAR(1024), Y VARCHAR(255), Series VARCHAR(255), Mapping VARCHAR(512), PRIMARY KEY(Id))\",\n            );\n\n    function init() {\n    }\n\n    /* Called from UI menu */\n    function ui_header($param) {\n        $newparam = '<script src=\"plugins/customgraph/script.js\"></script>'.\"\\n\";\n        $param = $param.$newparam;\n        return $param;\n    }\n\n    /* Called from UI menu */\n    function ui_menu_top($param) {\n        global $db;\n        if (!check_access(\"customgraph\")) {\n            return $param;\n        }\n        $newparam = \"<li><a href='#' onclick='customgraph_GetInfo()'>Custom Graph</a></li>\\n\";\n        $param = $param.$newparam;\n        $confdb = getDB(\"config\")[0];\n        $req = $db[$confdb]->prepare(\"SELECT * FROM CUSTOMGRAPH;\");\n        $req->execute();\n        $data = $req->fetchall(PDO::FETCH_ASSOC);\n        foreach ($data as $v) {\n            $newparam = \"<li><a href='#' onclick='customgraph_GetGraph(\\\"\".$v['Id'].\"\\\")'>*\".$v['Name'].\"</a></li>\\n\";\n            $param = $param.$newparam;\n        }\n        return $param;\n    }\n\n    /* Called from api just processing data */\n    function api_native($param) {\n        global $db;\n        global 
$CURRENT_DB;\n        if (!check_access(\"customgraph\")) {\n            return \"Permission denied\";\n        }\n        if ($param[0] == \"customgraph\" && $param[1][0]== \"get\") {\n            $confdb = getDB(\"config\")[0];\n            $req = $db[$confdb]->prepare(\"SELECT * FROM CUSTOMGRAPH;\");\n            $req->execute();\n            $data = $req->fetchall(PDO::FETCH_ASSOC);\n            return $data;\n        }\n        if ($param[0] == \"customgraph\" && $param[1][0]== \"add\") {\n            $confdb = getDB(\"config\")[0];\n\n            $keys = Array();\n            $vals = Array();\n            $flip = True;\n            $last_item=\"\";\n            foreach (array_slice($param[1],2) as $item) {\n                if ($flip == True) {\n                    $keys[]=\":\".$item;\n                    $last_item=$item;\n                    $flip=False;\n                } else {\n                    $vals[\":\".$last_item]=str_replace(\"---\",\"/\",$item);\n                    $flip=True;\n                }\n            }\n\n            $request = \"INSERT INTO CUSTOMGRAPH ( Id,Name, ChartType, Request, X, Y, Series, Mapping) VALUES ('', \".join(\",\",$keys).\");\";\n            $req = $db[$confdb]->prepare($request);\n            $req->execute($vals);\n        }\n        if ($param[0] == \"customgraph\" && $param[1][0]== \"del\") {\n            $confdb = getDB(\"config\")[0];\n            $request = \"DELETE FROM CUSTOMGRAPH WHERE Id = :Id\";\n            $vals = Array();\n            $vals[\":Id\"]=$param[1][1];\n            $req = $db[$confdb]->prepare($request);\n            $req->execute($vals);\n        }\n\n\n\n    }\n\n}\n"
  },
  {
    "path": "web_gui/gui_v3/plugins/customgraph/script.js",
    "content": "/*\n * Copyright (C) 2016-2017 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n/***********************************\n * Build table and get data from API\n **********************************/\nfunction customgraph_GetInfo(){\n\n/* Create the table template */\n    document.getElementById(\"main_content\").innerHTML = `\n<h1>Custom Graph</h1>\n <form id=\"graphform\" name=\"graphform\">\n <table class=\"table\">\n    <thead>\n      <tr>\n        <th>Id</th>\n        <th>Graph name</th>\n        <th>Chart type</th>\n        <th>Request</th>\n        <th>X</th>\n        <th>Y</th>\n        <th>Series</th>\n        <th>Mapping</th>\n        <th>Action</th>\n      </tr>\n    </thead>\n    <tbody id=fsinfobody name=fsinfobody>\n    </tbody>\n  </table>\n  </form>\n `;\n\n\n/* Retrieve the data */\n    $.ajax({\n        url: \"api/index.php?request=native/customgraph/get\"\n    }).then(function(data) {\n    if (data[0]!='customgraph') {\n\t    for (var key in data) {\n\n\t$(\"#fsinfobody\").append(`\n\t      <tr>\n\t\t<td>`+data[key]['Id']+` <a href='#' onclick='customgraph_GetGraph(\"`+data[key]['Id']+`\")'><span class=\"glyphicon glyphicon-signal\"></span></a> </td>\n\t\t<th>\n\t\t\t<input type=\"text\" class=\"form-control\" id=\"Name_`+data[key]['Id']+`\" name=\"Name_`+data[key]['Id']+`\" placeholder=\"Name\" value=`+data[key]['Name']+`>\n\t\t</th>\n\t\t<th>\n\t\t\t<input type=\"text\" class=\"form-control\" id=\"ChartType_`+data[key]['Id']+`\" name=\"ChartType_`+data[key]['Id']+`\" placeholder=\"ChartType\" value=`+data[key]['ChartType']+`>\n\t\t</th>\n\t\t<th>\n\t\t\t<input type=\"text\" class=\"form-control\" id=\"Request_`+data[key]['Id']+`\" name=\"Request_`+data[key]['Id']+`\" 
placeholder=\"Request\" value=`+data[key]['Request']+`>\n\t\t</th>\n\t\t<th>\n\t\t\t<input type=\"text\" class=\"form-control\" id=\"X_`+data[key]['Id']+`\" name=\"X_`+data[key]['Id']+`\" placeholder=\"X\" value=`+data[key]['X']+`>\n\t\t</th>\n\t\t<th>\n\t\t\t<input type=\"text\" class=\"form-control\" id=\"Y_`+data[key]['Id']+`\" name=\"Y_`+data[key]['Id']+`\" placeholder=\"Y\" value=`+data[key]['Y']+`>\n\t\t</th>\n\t\t<th>\n\t\t\t<input type=\"text\" class=\"form-control\" id=\"Series_`+data[key]['Id']+`\" name=\"Series_`+data[key]['Id']+`\" placeholder=\"Series\" value=`+data[key]['Series']+`>\n\t\t</th>\n\t\t<th>\n\t\t\t<input type=\"text\" class=\"form-control\" id=\"Mapping_`+data[key]['Id']+`\" name=\"Mapping_`+data[key]['Id']+`\" placeholder=\"Mapping\" value=`+data[key]['Mapping']+`>\n\t\t</th>\n\t\t<th> \n            <button type=\"button\" class=\"btn btn-primary\" onclick=\"updateCustomGraph(`+data[key]['Id']+`)\">Modify</button>\n            <button type=\"button\" class=\"btn btn-primary\" onclick=\"delCustomGraph(`+data[key]['Id']+`)\">Delete</button>\n        </th>\n\t      </tr>\n\t`);\n    }\n    }\n$(\"#fsinfobody\").append(`\n      <tr>\n        <th>#</th>\n        <th>\n                <input type=\"text\" class=\"form-control\" id=\"Name\" name=\"Name\" placeholder=\"Name\">\n\t</th>\n        <th>\n                <input type=\"text\" class=\"form-control\" id=\"ChartType\" name=\"ChartType\" placeholder=\"ChartType\">\n\t</th>\n\t<th>\n\t\t<input type=\"text\" class=\"form-control\" id=\"Request\" name=\"Request\" placeholder=\"Request\">\n\t</th>\n\t<th>\n\t\t<input type=\"text\" class=\"form-control\" id=\"X\" name=\"X\" placeholder=\"X\">\n\t</th>\n\t<th>\n\t\t<input type=\"text\" class=\"form-control\" id=\"Y\" name=\"Y\" placeholder=\"Y\">\n\t</th>\n\t<th>\n\t\t<input type=\"text\" class=\"form-control\" id=\"Series\" name=\"Series\" placeholder=\"Series\">\n\t</th>\n\t<th>\n\t\t<input type=\"text\" class=\"form-control\" id=\"Mapping\" 
name=\"Mapping\" placeholder=\"Mapping\">\n\t</th>\n        <th> <button type=\"button\" class=\"btn btn-primary\" onclick=\"setCustomGraph('')\">Add</button></th>\n      </tr>\n\n`);\n    });\n}\n\n/***********************************\n * Add or modify graphs\n **********************************/\n\nfunction setCustomGraph(item){\n\n\tvar queryString=\"/\";\n\tvar myForm = document.getElementById(\"graphform\");\n\tfor (var i = 0; i < myForm.elements.length; i++) {\n\t\tif (myForm.elements[i].name.length>0 && myForm.elements[i].name.search(\"_\")==-1) {\n            if (myForm.elements[i].value == \"\") {\n                myForm.elements[i].value=\"None\";\n            }\n\t\t\tqueryString = queryString + \"\" + myForm.elements[i].name + \"/\" + myForm.elements[i].value.replace(/\\//g, \"---\") + \"/\";\n\t\t}\n\t}\n   $.ajax({\n        url: \"api/index.php?request=native/customgraph/add\" + \"/\" + queryString\n    }).then(function(data) {\n\tcustomgraph_GetInfo()\n\t});\n}\n\n\nfunction updateCustomGraph(item){\n\n\nvar queryString=\"\";\n    var myForm = document.getElementById(\"graphform\");\n    for (var i = 0; i < myForm.elements.length; i++) {\n        if (myForm.elements[i].name.length>0 && myForm.elements[i].name.search(\"_\")==-1) {\n            if (myForm.elements[myForm.elements[i].name+\"_\"+item].value==\"\") {\n                myForm.elements[myForm.elements[i].name+\"_\"+item].value=\"None\";\n            }\n            queryString = queryString + \"/\" + myForm.elements[i].name + \"/\" + myForm.elements[myForm.elements[i].name+\"_\"+item].value.replace(/\\//g, \"---\");\n        }\n    }\n   $.ajax({\n        url: \"api/index.php?request=native/customgraph/add\" + \"/\" + queryString\n    }).then(function(data) {\n        delCustomGraph(item);\n    })\n\n\n\n}\n\n\n\n\nfunction delCustomGraph(item){\n   $.ajax({\n        url: \"api/index.php?request=native/customgraph/del/\" + item\n    }).then(function(data) 
{\n\tcustomgraph_GetInfo()\n\t});\n}\n\n\n\n/***********************************\n * Display Graph\n **********************************/\n\nfunction customgraph_GetGraph(Id){\n    $.ajax({\n        url: \"api/index.php?request=native/customgraph/get\"\n    }).then(function(data) {\n    if (data[0]!='customgraph') {\n        for (var key in data) {\n            if (data[key][`Id`]==Id) {\n        document.getElementById(\"main_content\").innerHTML = `\n                <h1>Graph `+data[key]['Name']+`</h1>\n                <div name=\"cgraph\" id=\"cgraph\"></div>\n                `;\n            ConsoleCGraphRun(data[key]);\n            }\n\n\n    }\n\n}\n\n})\n}\n\n\n\n\nfunction ConsoleCGraphRun(dgraph) {\n        document.getElementById(\"cgraph\").innerHTML='<canvas style=\"max-height:640px; min-height:320px\" id=\"ctx\"></canvas> <!-- Canvas for Graph -->'\n\n        var request = dgraph['Request'];\n        $.ajax({\n                url: \"api/index.php?request=\" + request\n        }).then(function(data) {\n\n        xitem = dgraph['X'];\n        yitem = dgraph['Y'];\n        type = dgraph['ChartType'];\n        series = dgraph['Series'];\n        mapping = dgraph['Mapping'];\n        /* scatter graph */\n        if (type==\"scatter\") {\n\n        dataxy = []\n        for (var item in data)\n        {\n                dataxy.push({x: data[item][xitem], y: data[item][yitem]})\n        }\n\n       scatterChartData = {\n            datasets: [{\n                label: \"Dataset\",\n                borderColor: \"FF0000\",\n                backgroundColor: \"00FF00\",\n                data: dataxy,\n                }]};\n\n        var ctx = document.getElementById(\"ctx\").getContext(\"2d\");\n        window.myScatter = Chart.Scatter(ctx, {\n                data: scatterChartData,\n                options: {\n                    title: {\n                         display: true,\n                         text: 'Chart.js Scatter Chart'\n                        },\n 
               }\n        });\n\n        }\n\n        /* line graph */\n        if (type==\"line\" || type==\"line stacked\" || type==\"bar\") {\n\n        if (series != 'None')\n        {\n        //series_label =  {}\n        series_title = {}\n        series_x = {}\n        series_index={}\n        for (var item in data)\n        {\n            series_title[data[item][series]]=\"\"\n            series_x[data[item][xitem]]=\"\"\n            key = data[item][series]+\"_\"+data[item][xitem]\n            series_index[key] = data[item][yitem]\n        }\n\n        labels = []\n        for (var t in series_x)\n        {\n            labels.push(t)\n        }\n        datasets=[]\n        for (var s in series_title)\n        {\n                datay=[]\n                for (var t in series_x)\n                {\n                    key = s+\"_\"+t\n                    if (key in series_index) {\n                        datay.push(series_index[key])\n                    } else {\n                        datay.push(0)\n                    }\n                }\n\n                if (mapping != \"None\") {\n                    mapp = mapping.split(\",\");\n                    imapp = mapp.indexOf(s);\n                    \n                    if (imapp==-1) {\n                        label_s = s;\n                    } else {\n                        label_s = mapp[imapp+1];\n                    } \n                } else {\n                    label_s = s;\n                }\n\n                if (type==\"line\") {\n                    dataset = {\n                        label: label_s,\n                        borderColor: stringToColour(s),\n                        backgroundColor: \"#00000000\",\n                        data: datay,\n                    }\n                } else {\n                    dataset = {\n                        label: label_s,\n                        borderColor: \"#000000\",\n                        backgroundColor: stringToColour(s),\n    
                    data: datay,\n                    }\n\n                }\n\n                datasets.push(dataset)\n        }\n\n       lineChartData = {\n            labels: labels,\n            datasets: datasets\n       };\n\n        } else {\n        datay = []\n        labels = []\n        for (var item in data)\n        {\n                datay.push(data[item][yitem])\n                labels.push(data[item][xitem])\n        }\n\n       lineChartData = {\n            labels: labels,\n            datasets: [{\n                label: \"Dataset\",\n                borderColor: \"FF0000\",\n                backgroundColor: \"00FF00\",\n                data: datay,\n                }]};\n        }\n\n\n        var ctx = document.getElementById(\"ctx\").getContext(\"2d\");\n\n        if (type==\"line\") {\n        window.myBar = Chart.Line(ctx, {\n                data: lineChartData,\n                options: {\n                    title: {\n                         display: true,\n                         text: 'Chart.js line Chart'\n                        },\n                }\n        });\n        } else if (type==\"line stacked\") {\n        window.myBar = Chart.Line(ctx, {\n                data: lineChartData,\n                options: {\n                    title: {\n                         display: true,\n                         text: 'Chart.js line stacked Chart'\n                        },\n                    scales: {\n                        yAxes: [{stacked: true,}]\n                    },\n                }\n        });\n        } else if (type==\"bar\") {\n\n        window.mybar = Chart.Bar(ctx, {\n                data: lineChartData,\n                options: {\n                    title: {\n                         display: true,\n                         text: 'chart.js bar chart'\n                        },\n                }\n        });\n\n\n        }\n\n\n        }\n\n        });\n}\n"
  },
  {
    "path": "web_gui/gui_v3/plugins/internalstats/plugin.php",
    "content": "<?php\n/*\n * Copyright (C) 2016-2017 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n\n/*\n * InternalStats V0.1\n * Just display informations from var table\n *\n */\n\nclass internalstats extends Plugin {\n    public $Name = \"Internal Stats\";\n    public $Description = \"Add page with FS Information\";\n    public $Version = \"V0.1\";\n\n    /* Called from UI menu */\n    function ui_header($param) {\n            $newparam = '<script src=\"plugins/internalstats/script.js\"></script>'.\"\\n\";\n            $param = $param.$newparam;\n            return $param;\n    }\n\n    /* Called from UI menu */\n    function ui_menu_top($param) {\n            $newparam = \"<li><a href='#' onclick='internalstats_GetInfo()'>Internal Stats</a></li>\\n\";\n            $param = $param.$newparam;\n            return $param;\n    }\n\n}\n\n"
  },
  {
    "path": "web_gui/gui_v3/plugins/internalstats/script.js",
    "content": "/*\n * Copyright (C) 2016-2017 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n/***********************************\n * Build table and get data from API\n **********************************/\nfunction internalstats_GetInfo(){\n\n/* Create the table template */\n    document.getElementById(\"main_content\").innerHTML = `\n<h1>Internal Stats</h1>\n <table class=\"table\">\n    <thead>\n      <tr>\n        <th>Key</th>\n        <th>Val</th>\n      </tr>\n    </thead>\n    <tbody id=fsinfobody name=fsinfobody>\n    </tbody>\n  </table>\n `;\n\n/* Retrieve the data */\n    $.ajax({\n        url: \"api/index.php?request=native/vars\"\n    }).then(function(data) {\n    console.log(data);\n    for (var key in data) {\n        $(\"#fsinfobody\").append(\"<tr><td>\"+key+\"</td><td>\"+data[key]+\"</td></tr>\")\n    }\n    });\n}\n"
  },
  {
    "path": "web_gui/gui_v3/plugins/ldapauth/plugin.php",
    "content": "<?php\n/*\n * Copyright (C) 2016-2017 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n\n/*\n * ldapauth V0.1\n * Use ldap informations for access control\n * requirements: php-ldap\n * WARNING: This version has a simplied access control for files\n * which allows users to see all files with r/x on others.\n */\n\nclass ldapauth extends Plugin {\n    public $Name = \"LDAPAuth\";\n    public $Description = \"Use ldap informations for access control\";\n    public $Version = \"V0.1\";\n\n    public $Req_lib = array('ldap');\n\n\n    /* The ui page which allows the user to check his access */\n    public $ui_page = true;\n\n    /* Allow to access to ldap informations with the API */\n    public $api_export = true;\n\n    /* You need to configure the host and dn before using the plugin */\n    private $ldap_host = \"ldapserver\";\n    private $ldap_dn = \"dc=foo,dc=bar,dc=foo,dc=com\";\n\n    private $ldap = null;\n\n\n    /*\n     * Plugin options\n     */\n\n    public function init() {\n        $this->ldap_host = $this->ldap_host;\n        $this->ldap = ldap_connect($this->ldap_host) or die(\"LDAPAuth: Could not connect to LDAP\");\n        ldap_set_option($this->ldap,LDAP_OPT_PROTOCOL_VERSION,3);\n        ldap_bind($this->ldap) or die(\"LDAPAuth: Could not bind to LDAP\");\n    }\n\n\n    /* Called from UI menu */\n    function ui_header($param) {\n        $newparam = '<script src=\"plugins/ldapauth/script.js\"></script>'.\"\\n\";\n        $param = $param.$newparam;\n        return $param;\n    }\n    private function get_user_info_int($uid) {\n        $data =array();\n\n        $results = ldap_search($this->ldap,$this->ldap_dn,\"(uid=$uid)\");\n        $entries = 
ldap_get_entries($this->ldap, $results);\n        array_shift($entries);\n        $data['uid'] = $uid;\n        $data['uidnumber']= $entries[0]['uidnumber'][0];\n        $data['gidnumber']= $entries[0]['gidnumber'][0];\n\n        $data['groups'] = array();\n        $results = ldap_search($this->ldap,$this->ldap_dn,\"(memberuid=$uid)\");\n        $entries = ldap_get_entries($this->ldap, $results);\n        array_shift($entries);\n        foreach ($entries as $g) {\n            $data['groups'][]=$g['cn'][0];\n        }\n\n        return $data;\n    }\n\n    public function get_user_info($uid) {\n        $data = $this->get_user_info_int($uid);\n        $newdata = array();\n        $newdata['uids'] = array();\n        $newdata['uids'][] = $data['uid'];\n        $newdata['uids'][] = $data['uidnumber'];\n        $newdata['groups'] = $data['groups'];\n        $newdata['groups'][] = $data['gidnumber'];\n        return $newdata;\n    }\n\n\n    public function access_sql_filter($param)\n    {\n        $sql_where = $param[0];\n\n        /* just ignore the standard filter */\n        $sql_where = \"\";\n        unset($param[2][\"k_uid\"]);\n\n        /* build a new filter from ldap informations */\n        $part = $this->get_user_info(get_user());\n        $sql_where.= \"(\";\n        $sql_where.= \"uid IN ('\".implode(\"','\",$part['uids']).\"') OR gid IN ('\";\n        $sql_where.= implode(\"','\",$part['groups']).\"')\";\n\n        /* Simple access control for files */\n        if ($param[1] == \"NAMES\") {\n            $sql_where.= ' OR (mode & 3)>0)';\n        } else {\n            $sql_where.= ')';\n        }\n\n        $param[0] = $sql_where;\n        return $param;\n    }\n\n    /* Custom api call */\n    public function api_native($param) {\n        if (! 
$this->api_export)\n            return;\n        /* Custom api call */\n        if ($param[0] == \"ldapauth\") {\n            $data = $this->get_user_info_int(get_user());\n            return $data;\n        }\n\n        if ($param[0] == \"ldapauth_sqlfilter\") {\n            $data = $this->access_sql_filter(get_user());\n            return $data;\n        }\n    }\n\n    /* Called from UI menu */\n    public function ui_menu_top($param) {\n        if (! $this->ui_page || ! $this->api_export)\n            return;\n        $newparam = \"<li><a href='#' onclick='ldapauth_GetInfo()'>WhoAmI</a></li>\\n\";\n        $param = $param.$newparam;\n        return $param;\n    }\n\n}\n\n"
  },
  {
    "path": "web_gui/gui_v3/plugins/ldapauth/script.js",
    "content": "/*\n * Copyright (C) 2016-2017 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n/***********************************\n * Build table and get data from API\n **********************************/\nfunction ldapauth_GetInfo(){\n\n        /* Create the table template */\n        document.getElementById(\"main_content\").innerHTML = `\n\n                <h1>WhoAmI</h1>\n                <div id=\"ldapauth\" name=\"ldapauth\"></div>\n                <table class=\"table\">\n                <thead>\n                <tr>\n                <th>Group</th>\n                </tr>\n                </thead>\n                <tbody id=fsinfobody name=fsinfobody>\n                </tbody>\n                </table>\n                `;\n\n        /* Retrieve the data */\n        $.ajax({\n                url: \"api/index.php?request=native/ldapauth\"\n        }).then(function(data) {\n                document.getElementById(\"ldapauth\").innerHTML = \"<h3> User: \" + data.uid + \" - UidNumber: \" + data.uidnumber + \" - GidNumber: \" + data.gidnumber +\"</h3>\";\n                console.log(data);\n                for (var key in data.groups) {\n                        $(\"#fsinfobody\").append(\"<tr><td>\"+data.groups[key]+\"</td></tr>\")\n                }\n        });\n}\n"
  },
  {
    "path": "web_gui/gui_v3/plugins/netauth/help.html",
    "content": "<h1>netauth</h1>\n\n<p> When the other plugins cannot identify the user, use the remote ip address or reverse dns as login.\nUse standard ACL with ip address or DNS</p>\n"
  },
  {
    "path": "web_gui/gui_v3/plugins/netauth/plugin.php",
    "content": "<?php\n/*\n * Copyright (C) 2018 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n\n/*\n * netauth V0.1\n * Use remote_addr informations for access control\n */\n\nclass netauth extends Plugin {\n    public $Name = \"IPAuth\";\n    public $Description = \"Use IP for access control\";\n    public $Version = \"V0.1\";\n\n    /* Use DNS name instead */\n    public $dns_resolv = true;\n\n    /* Force IP Auth even if we already have an uid*/\n    public $force_netauth = false;\n\n\n    /*\n     * Plugin options\n     */\n\n    public function init() {\n    }\n\n\n    public function get_user($uid) {\n\tif ($uid == '$NOAUTH' || $this->force_netauth) {\n\t\tif ($this->dns_resolv  && array_key_exists('REMOTE_HOST', $_SERVER))\n\t\t\treturn $_SERVER['REMOTE_HOST'];\n\t\telse\n\t\t\treturn $_SERVER['REMOTE_ADDR'];\n\t}\n        return $uid;\n    }\n\n\n\n}\n\n"
  },
  {
    "path": "web_gui/gui_v3/plugins/output/help.html",
    "content": "<h1>Import robinhood data with the output plugin in MS Office</h1>\n\n<p>In MS Office, in Data, Select Import->from text </p>\n\n<img src=\"plugins/output/images/1.png\">\n\n<p>Use your URL instead of a local file, you can test your request with the console plugin</p>\n\n<img src=\"plugins/output/images/2.png\">\n\n<p> Don't change anything (Delimited) , just click next </p>\n\n<img src=\"plugins/output/images/3.png\">\n\n<p> Change to semi-colon , then click on finish</p>\n\n<img src=\"plugins/output/images/4.png\">\n\n<p> Select where you want to put the data </p>\n\n<img src=\"plugins/output/images/5.png\">\n\n<p> In order to do a pivot table, select all your data and click on filter in data</p>\n\n<img src=\"plugins/output/images/6.png\">\n<img src=\"plugins/output/images/7.png\">\n\n<p> To create the pivot table, in insert select pivot table</p>\n\n<img src=\"plugins/output/images/8.png\">\n\n<p> let the default value to create the chart in a new sheet</p>\n\n<img src=\"plugins/output/images/9.png\">\n\n<p> Select axis and value (ex. gid and sid) and you have the size usage by group </p>\n\n<img src=\"plugins/output/images/10.png\">\n\n<img src=\"plugins/output/images/11.png\">\n"
  },
  {
    "path": "web_gui/gui_v3/plugins/output/plugin.php",
    "content": "<?php\n/*\n * Copyright (C) 2016-2017 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n\n/*\n * Output V0.1\n * Use the API to export data\n *\n */\n\nclass output extends Plugin {\n    public $Name = \"Output\";\n    public $Description = \"Use API to Export Data\";\n    public $Version = \"V0.1\";\n\n    private $output = false;\n    /*\n     * Plugin options\n     */\n\n    public function init() {\n    }\n\n    /* Called from api just processing data */\n    function api_process($param) {\n        if ($param[0] == \"native\")\n        {\n            $p_count=count($param[1]);\n            for ($i=0; $i<$p_count-1; $i++) {\n                if ($param[1][$i] == \"output\") {\n                    if ($param[1][$i+1] == \"csv\")\n                        $this->output = \"csv\";\n                }\n            }\n        }\n    }\n\n    /* Change the HTTP Header type */\n    function api_header_type($param) {\n\n            if ($this->output == \"csv\") {\n                    $param = \"Content-Type: text\";\n            }\n            return $param;\n    }\n\n    /* Called from api just before sending data */\n    function api_response($param) {\n            if ($this->output == \"csv\") {\n                    $out = \"\";\n                    if ( count($param) == 0) {\n                            return null;\n                    }\n                    foreach ($param[0] as $key => $val) {\n                            $out.= $key.\";\";\n                    }\n                    $out.= \"\\n\";\n                    foreach ($param as $item) {\n                            foreach ($item as $key => $val) {\n                                    $out.= $val.\";\";\n                            
}\n                            $out.= \"\\n\";\n                    }\n                    return $out;\n            }\n\n    }\n\n}\n\n"
  },
  {
    "path": "web_gui/gui_v3/plugins/plugdisplay/help.html",
    "content": "<h1>Plugins Display</h1>\n\n<p> You can click on plugins names in the navbar to view the doc.</p>\n"
  },
  {
    "path": "web_gui/gui_v3/plugins/plugdisplay/plugin.php",
    "content": "<?php\n/*\n * Copyright (C) 2016-2017 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n\n/*\n * plugdisplay V0.1\n * Just keep the n in uid/gid charts and stack the others\n *\n */\n\nclass plugdisplay extends Plugin {\n    public $Name = \"Plugins Display\";\n    public $Description = \"Display Information about plugins\";\n    public $Version = \"V0.1\";\n\n\n    /* Called from UI menu */\n    function ui_header($param) {\n        $newparam = '<script src=\"plugins/plugdisplay/script.js\"></script>'.\"\\n\";\n        $param = $param.$newparam;\n        return $param;\n    }\n\n    /* Called from UI menu */\n    function ui_menu_bottom($param) {\n            global $PLUGINS_INST;\n            $newparam = \"<div>\\n\";\n            $newparam.= \"<br><label>Plugins</label>\\n\";\n            $newparam.= '<div class=\"list-group\">'.\"\\n\";\n            foreach ($PLUGINS_INST as $p) {\n                    $newparam.= '<a href=\"#\" onclick=\"plugdisplay_GetInfo(\\''.get_class($p).'\\')\" class=\"list-group-item\">'.$p->Name.' - '.$p->Version.'</a>'.\"\\n\";\n            }\n            $newparam.= \"</div>\\n\";\n            $newparam.= \"</div>\\n\";\n            $param = $param.$newparam;\n            return $param;\n    }\n\n}\n\n"
  },
  {
    "path": "web_gui/gui_v3/plugins/plugdisplay/script.js",
    "content": "/*\n * Copyright (C) 2016-2017 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\nfunction plugdisplay_GetInfo(plugin){\n        /* Retrieve the data */\n        $.ajax({\n                url: \"plugins/\"+plugin+\"/help.html\",\n                statusCode: {\n                        404: function () {\n                            document.getElementById(\"main_content\").innerHTML = \"<h1>Help not available</h1><h2>Read the sources</h2><h3>The hospitality in this country is as warm as the weather.</h3>\"\n                        }\n                }\n        }).then(function(data) {\n                document.getElementById(\"main_content\").innerHTML = data;\n        });\n}\n"
  },
  {
    "path": "web_gui/gui_v3/plugins/stackgraph/plugin.php",
    "content": "<?php\n/*\n * Copyright (C) 2016-2017 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n\n/*\n * StackGraph V0.1\n * Just keep the n in uid/gid charts and stack the others\n *\n */\n\nclass stackgraph extends Plugin {\n    public $Name = \"Stack Graph\";\n    public $Description = \"Stack users\";\n    public $Version = \"V0.1\";\n\n    /* sort key, ssize, scount or off  */\n    public $uid_sortby='ssize';\n    /* max_items to display, stack the others */\n    public $max_items=10;\n    /* strings for others */\n    public $other_string=\"Others\";\n\n    /* Called from api just processing data */\n    function api_process($param) {\n        if ($param[0] == \"graph\")\n        {\n            $p_count = count($param[1]);\n            for ($i=0; $i<$p_count-1; $i++) {\n                if ($param[1][$i] == \"stackgraph\") {\n                    switch ($param[1][$i+1]) {\n                    case \"size\":\n                        $this->uid_sortby = \"ssize\";\n                        break;\n                    case \"count\":\n                        $this->uid_sortby = \"scount\";\n                        break;\n                    case \"off\":\n                        $this->uid_sortby = \"off\";\n                        break;\n                    }\n                }\n            }\n        }\n    }\n\n    public function graph_presql_uid($param) {\n        if ($this->uid_sortby == \"off\")\n            return NULL;\n\n        $param = $param.\" ORDER BY \".$this->uid_sortby.\" DESC\";\n        return $param;\n    }\n\n    public function graph_postdata_uid($param) {\n        if ($this->uid_sortby == \"off\")\n            return NULL;\n\n        $c_count = count($param['labels']);\n     
   $c_trunk = $this->max_items;\n        if ($c_count > $this->max_items) {\n            $param['labels'][$this->max_items] = $this->other_string;\n            for($i = $c_trunk; $i<$c_count-1;$i++) {\n                $param['datasets'][0]['data'][$c_trunk] += $param['datasets'][0]['data'][$i+1];\n                $param['datasets'][1]['data'][$c_trunk] += $param['datasets'][1]['data'][$i+1];\n                unset($param['datasets'][0]['data'][$i+1]);\n                unset($param['datasets'][1]['data'][$i+1]);\n                unset($param['datasets'][0]['backgroundColor'][$i+1]);\n                unset($param['datasets'][1]['backgroundColor'][$i+1]);\n                unset($param['labels'][$i+1]);\n            }\n        }\n        return $param;\n    }\n\n    /* Called from UI in form filter */\n    function ui_form_filter($param) {\n        $newparam= <<<EOT\n            <fieldset class=\"form-group\">\n            <label>StackGraph</label>\n            <select class=\"form-control\" id=\"stackgraph\" name=\"formstackgraph\">\n                <option value=\"off\">Off</option>\n                <option selected value=\"size\">Size</option>\n                <option value=\"count\">Count</option>\n            </select>\n            </fieldset>\nEOT;\n        $param = $param.$newparam;\n        return $param;\n    }\n}\n\n"
  },
  {
    "path": "web_gui/gui_v3/plugins/tasks/help.html",
    "content": "<h1>Tasks</h1>\n<strong> Beta version </strong>\n<p>You need to configure a crontab on your system to run 'php /var/www/robinhood/cron.php' as a user every hour.</p>\n<p>You also need to give access to the api for the local server. You can allow 127.0.0.1 to access the api with the netauth plugin</p>\n"
  },
  {
    "path": "web_gui/gui_v3/plugins/tasks/plugin.php",
    "content": "<?php\n/*\n * Copyright (C) 2018 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n\n/*\n * Tasks V0.1\n * Run tasks\n *\n */\n\nclass tasks extends Plugin {\n    public $Name = \"Tasks\";\n    public $Description = \"Manage and run tasks\";\n    public $Version = \"V0.1\";\n\n\n    public $Req_table = array(\n            \"TASKS\" => \"(Id INT NOT NULL AUTO_INCREMENT, Name VARCHAR(255), Trig VARCHAR(255), Request VARCHAR(1024), Action VARCHAR(255), TargetDB VARCHAR(255), LastRun TIMESTAMP, ExecTime INT, PRIMARY KEY(Id))\",\n            );\n\n    function init() {\n    }\n\n    /* Called from UI menu */\n    function ui_header($param) {\n        $newparam = '<script src=\"plugins/tasks/script.js\"></script>'.\"\\n\";\n        $param = $param.$newparam;\n        return $param;\n    }\n\n    /* Called from UI menu */\n    function ui_menu_top($param) {\n        $newparam = \"<li><a href='#' onclick='tasks_GetInfo()'>Tasks</a></li>\\n\";\n        $param = $param.$newparam;\n        return $param;\n    }\n\n    /* Called from api just processing data */\n    function api_native($param) {\n        global $db;\n        global $CURRENT_DB;\n        if (!check_access(\"tasks\")) {\n            return \"Permission denied\";\n        }\n        if ($param[0] == \"tasks\" && $param[1][0]== \"get\") {\n            $confdb = getDB(\"config\")[0];\n            $req = $db[$confdb]->prepare(\"SELECT * FROM TASKS;\");\n            $req->execute();\n            $data = $req->fetchall(PDO::FETCH_ASSOC);\n            return $data;\n        }\n        if ($param[0] == \"tasks\" && $param[1][0]== \"add\") {\n            $confdb = getDB(\"config\")[0];\n\n            $keys = Array();\n            $vals = Array();\n 
           $flip = True;\n            $last_item=\"\";\n            foreach (array_slice($param[1],2) as $item) {\n                if ($flip == True) {\n                    $keys[]=\":\".$item;\n                    $last_item=$item;\n                    $flip=False;\n                } else {\n                    $vals[\":\".$last_item]=str_replace(\"---\",\"/\",$item);\n                    $flip=True;\n                }\n            }\n\n            $request = \"INSERT INTO TASKS ( Id,ExecTime, LastRun, Name, Trig, Request, Action, TargetDB) VALUES ('',0,0, \".join(\",\",$keys).\");\";\n            $req = $db[$confdb]->prepare($request);\n            $req->execute($vals);\n        }\n        if ($param[0] == \"tasks\" && $param[1][0]== \"del\") {\n            $confdb = getDB(\"config\")[0];\n            $request = \"DELETE FROM TASKS WHERE Id = :Id\";\n            $vals = Array();\n            $vals[\":Id\"]=$param[1][1];\n            $req = $db[$confdb]->prepare($request);\n            $req->execute($vals);\n        }\n\n\n        if ($param[0] == \"task\") {\n            $self = '$SELF';\n            if (!check_access(\"tasks\")) {\n                $self = check_self_access(\"tasks\");\n                if (!$self)\n                    return \"Permission denied\";\n            }\n            $confdb = getDB(\"config\")[0];\n            $request = \"SELECT * FROM TASKS WHERE Name = :Name\";\n            $vals = Array();\n            $vals[\":Name\"]=$param[1][0];\n            $req = $db[$confdb]->prepare($request);\n            $req->execute($vals);\n            $data = $req->fetchall(PDO::FETCH_ASSOC);\n            $confdb = $data[0]['TargetDB'];\n            $CURRENT_DB=$confdb;\n            $fullfilter = build_advanced_filter($param[1], $self, $param[1][0]);\n\n            $req = $db[$confdb]->prepare($fullfilter[0]);\n            $req->execute($fullfilter[1]);\n            $data = $req->fetchall(PDO::FETCH_ASSOC);\n            return $data;\n        }\n\n    
}\n\n    function cron() {\n        global $db;\n        global $DBA;\n        echo \"Starting CRON from Console\\n\";\n        $confdb = getDB(\"config\")[0];\n        $req = $db[$confdb]->prepare(\"SELECT *, TIMESTAMPDIFF(MINUTE,`LastRun`,NOW()) AS Delta FROM TASKS WHERE Trig != 'Never' ORDER BY LastRun DESC;\");\n        $req->execute();\n        $data = $req->fetchall(PDO::FETCH_ASSOC);\n        foreach ($data as $task) {\n            if ( $task['Delta'] == \"\"\n                 || $task['Delta'] == \"NULL\"\n                 || ($task['Delta']>=60 && $task['Trig']==\"hourly\")\n                 || ($task['Delta']>60*24 && $task['Trig']==\"daily\")) {\n\n                echo \"Task \".$task['Name'].\" is starting \".$task['Delta'].\" M. Late\\n\";\n                $start = microtime(true);\n                $req = $db[$confdb]->prepare(\"UPDATE TASKS SET LastRun=NOW() WHERE Id=\".$task['Id'].\";\");\n                $req->execute();\n\n                /* Run request / Might be changed to internal request */\n                echo \"Run query: http://127.0.0.1/robinhood/api/index.php?request=\".$task['Request'].\"\\n\";\n                $ctx = stream_context_create(array('http'=>\n                            array(\n                                'timeout' => 1200,\n                                )\n                            ));\n\n                $data = json_decode(file_get_contents(\"http://127.0.0.1/robinhood/api/index.php?request=\".$task['Request'], false, $ctx), true);\n\n                /* Build fields array for table create and insert */\n                $fields = Array();\n                $build = Array();\n\n                $insert_fields = Array();\n                $value_fields = Array();\n\n\n\n                foreach ($data[0] as $k=>$v) {\n                    if (is_numeric($v)) {\n                        $fields[$k]=\"INT\";\n\n                        foreach ($data as $dk=>$dv) {\n                            if (!is_numeric($dv[$k]) && 
$dv[$k]!=\"\") {\n                                $fields[$k]=\"VARCHAR(255)\";\n                            }\n                        }\n                    } else {\n                        $fields[$k]=\"VARCHAR(255)\";\n                    }\n\n                    array_push($build,$k.\" \".$fields[$k]);\n                    array_push($insert_fields,$k);\n                    array_push($value_fields,\":\".$k);\n                }\n                if ($task['Action']=='history') {\n                    echo \"Task: History action\\n\";\n                    $bfields=\"( CronDate TIMESTAMP, \".join(\", \",$build).\")\";\n                    $ifields=\"( CronDate, \".join(\", \",$insert_fields).\")\";\n                    $vfields=\"( NOW(), \".join(\", \",$value_fields).\")\";\n                } else {\n                    echo \"Task: Copy action\\n\";\n                    $bfields=\"(\".join(\", \",$build).\")\";\n                    $ifields=\"(\".join(\", \",$insert_fields).\")\";\n                    $vfields=\"(\".join(\", \",$value_fields).\")\";\n                }\n\n\n                $table = $task['Name'];\n                /* Check target DB & Table */\n                $result = $db[$task['TargetDB']]->query(\"SELECT * FROM information_schema.columns WHERE (table_name = '$table') AND TABLE_SCHEMA = '\".$DBA[$task['TargetDB']][\"DB_NAME\"].\"';\");\n                if ($result->rowCount()<1) {\n                    echo \"Task: Create new table: CREATE TABLE IF NOT EXISTS $table $bfields\";\n                    $db[$task['TargetDB']]->query(\"CREATE TABLE IF NOT EXISTS $table $bfields\");\n                }\n\n                if ($task['Action']=='History') {\n                    $db[$task['TargetDB']]->query(\"TRUNCATE TABLE $table\");\n                }\n                foreach ($data as $line) {\n                    $dict = Array();\n                    foreach ($insert_fields as $f) {\n                        $val =  $line[$f];\n                        
$dict[\":\".$f] = $line[$f];\n                    }\n                    $req = $db[$task['TargetDB']]->prepare(\"INSERT INTO $table $ifields VALUES $vfields\");\n                    $req->execute($dict);\n                }\n\n\n                $req = $db[$confdb]->prepare(\"UPDATE TASKS SET ExecTime=\".(microtime(true)-$start).\" WHERE Id=\".$task['Id'].\";\");\n                $req->execute();\n                echo \"Task Done \\n\";\n\n            }\n        }\n        return $data;\n    }\n\n}\n"
  },
  {
    "path": "web_gui/gui_v3/plugins/tasks/script.js",
    "content": "/*\n * Copyright (C) 2016-2017 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n/***********************************\n * Build table and get data from API\n **********************************/\nfunction tasks_GetInfo(){\n\n/* Create the table template */\n    document.getElementById(\"main_content\").innerHTML = `\n<h1>Tasks</h1>\n <form id=\"taskform\" name=\"taskform\">\n <table class=\"table\">\n    <thead>\n      <tr>\n        <th>Id</th>\n        <th>Name</th>\n        <th>Trigger</th>\n        <th>Request</th>\n        <th>Action</th>\n        <th>Database</th>\n        <th>Last Run</th>\n        <th>Last execution time</th>\n        <th>Modify</th>\n      </tr>\n    </thead>\n    <tbody id=fsinfobody name=fsinfobody>\n    </tbody>\n  </table>\n  </form>\n `;\n\n\n/* Retrieve the data */\n    $.ajax({\n        url: \"api/index.php?request=native/tasks/get\"\n    }).then(function(data) {\n    if (data[0]!='tasks') {\n\t    for (var key in data) {\n\n\t$(\"#fsinfobody\").append(`\n\t      <tr>\n\t\t<td>`+data[key]['Id']+`<a href='#' onclick='console_GetInfo(\"native/task/`+data[key]['Name']+`\")'><span class=\"glyphicon glyphicon-signal\"></span></a></td>\n\t\t<th>\n\t\t\t<input type=\"text\" class=\"form-control\" id=\"Name_`+data[key]['Id']+`\" name=\"Name_`+data[key]['Id']+`\" placeholder=\"Name\" value=`+data[key]['Name']+`>\n\t\t</th>\n\t\t<th>\n\n\t\t<select class=\"form-control\" id=\"Trig_`+data[key]['Id']+`\" name=\"Trig_`+data[key]['Id']+`\">\n\t\t\t<option value=\"hourly\">Hourly</option>\n\t\t\t<option value=\"daily\">Daily</option>\n\t\t\t<option 
value=\"never\">Never</option>\n\t\t</select>\n\t\t<script>document.getElementById(\"Trig_`+data[key]['Id']+`\").value=\"`+data[key]['Trig']+`\"</script>\n\t\t</th>\n\t\t<th>\n\t\t\t<input type=\"text\" class=\"form-control\" id=\"Request_`+data[key]['Id']+`\" name=\"Request_`+data[key]['Id']+`\" placeholder=\"Request\" value=`+data[key]['Request']+`>\n\t\t</th>\n\t\t<th>\n\t\t<select class=\"form-control\" id=\"Action_`+data[key]['Id']+`\" name=\"Action_`+data[key]['Id']+`\" value=`+data[key]['Action']+`>\n\t\t\t<option value=\"history\">History</option>\n\t\t\t<option value=\"copy\">Copy</option>\n\t\t</select>\n\t\t<script>document.getElementById(\"Action_`+data[key]['Id']+`\").value=\"`+data[key]['Action']+`\"</script>\n\t\t</th>\n\t\t<th>\n\t\t<select class=\"form-control\" id=\"TargetDB_`+data[key]['Id']+`\" name=\"TargetDB_`+data[key]['Id']+`\">\n        <script>\n        $.ajax({\n                url: \"api/index.php?request=db_info\"\n            }).then(function(data) {\n            for (var key in data) {\n                var option = document.createElement('option');\n                option.text = option.value = key;\n                document.getElementById(\"TargetDB_`+data[key]['Id']+`\").add(option,0);\n                document.getElementById(\"TargetDB_`+data[key]['Id']+`\").value=\"`+data[key]['TargetDB']+`\";\n            }\n        });\n        </script>\n\t\t</select>\n\t\t</th>\n\t\t<th> `+data[key]['LastRun']+` </th>\n\t\t<th> `+data[key]['ExecTime']+` Seconds </th>\n\t\t<th> <button type=\"button\" class=\"btn btn-primary\" onclick=\"updateTask(`+data[key]['Id']+`)\">Modify</button>\n             <button type=\"button\" class=\"btn btn-primary\" onclick=\"delTask(`+data[key]['Id']+`)\">Delete</button>\n        </th>\n\t      </tr>\n\t`);\n    }\n      //</form>\n    }\n$(\"#fsinfobody\").append(`\n      <tr>\n        <th>#</th>\n        <th>\n                <input type=\"text\" class=\"form-control\" id=\"Name\" name=\"Name\" 
placeholder=\"Name\">\n\t</th>\n        <th>\n\n        <select class=\"form-control\" id=\"Trig\" name=\"Trig\">\n                <option value=\"hourly\">Hourly</option>\n                <option selected value=\"daily\">Daily</option>\n                <option value=\"never\">Never</option>\n        </select>\n\t</th>\n\t<th>\n\t\t<input type=\"text\" class=\"form-control\" id=\"Request\" name=\"Request\" placeholder=\"Request\">\n\t</th>\n        <th>\n        <select class=\"form-control\" id=\"Action\" name=\"Action\">\n                <option value=\"history\">History</option>\n                <option selected value=\"copy\">Copy</option>\n        </select>\n\t</th>\n        <th>\n        <select class=\"form-control\" id=\"TargetDB\" name=\"TargetDB\">\n        </select>\n        <script>\n        $.ajax({\n                url: \"api/index.php?request=db_info\"\n            }).then(function(data) {\n            for (var key in data) {\n                var option = document.createElement('option');\n                option.text = option.value = key;\n                document.getElementById(\"TargetDB\").add(option,0);\n        }\n        });\n        </script>\n\t</th>\n        <th> # </th>\n        <th> # </th>\n        <th> <button type=\"button\" class=\"btn btn-primary\" onclick=\"setTask('')\">Add</button></th>\n      </tr>\n\n`);\n    });\n}\n\n/***********************************\n * Add or modify tasks\n **********************************/\n\nfunction setTask(item){\n\n\tvar queryString=\"\";\n\tvar myForm = document.getElementById(\"taskform\");\n\tfor (var i = 0; i < myForm.elements.length; i++) {\n\t\tif (myForm.elements[i].name.length>0 && myForm.elements[i].name.search(\"_\")==-1) {\n\t\t\tqueryString = queryString + \"/\" + myForm.elements[i].name + \"/\" + myForm.elements[i].value.replace(/\\//g, \"---\");\n\t\t}\n\t}\n   $.ajax({\n        url: \"api/index.php?request=native/tasks/add\" + \"/\" + queryString\n    }).then(function(data) 
{\n\ttasks_GetInfo()\n\t})\n}\n\nfunction updateTask(item){\n\n\nvar queryString=\"\";\n    var myForm = document.getElementById(\"taskform\");\n    for (var i = 0; i < myForm.elements.length; i++) {\n        if (myForm.elements[i].name.length>0 && myForm.elements[i].name.search(\"_\")==-1) {\n            queryString = queryString + \"/\" + myForm.elements[i].name + \"/\" + myForm.elements[myForm.elements[i].name+\"_\"+item].value.replace(/\\//g, \"---\");\n        }\n    }\n   $.ajax({\n        url: \"api/index.php?request=native/tasks/add\" + \"/\" + queryString\n    }).then(function(data) {\n        delTask(item);\n    })\n\n\n\n}\n\nfunction delTask(item){\n\n   $.ajax({\n        url: \"api/index.php?request=native/tasks/del/\" + item \n    }).then(function(data) {\n\ttasks_GetInfo()\n\t})\n}\n"
  },
  {
    "path": "web_gui/robinhood.conf",
    "content": "#\n#\tRobinhood gui_v3 default apache config\n#\n#\tYou need to change the ServerName\n#\n#\n<VirtualHost *:80>\nServerName localhost\nDocumentRoot /var/www/\n\nAlias /robinhood /var/www/robinhood/\n<Directory \"/var/www/robinhood\">\n\n   <IfModule mod_authz_core.c>\n     # Apache 2.4\n     AllowOverride All\n     Require all granted\n   </IfModule>\n   <IfModule !mod_authz_core.c>\n     # Apache 2.2\n     Order allow,deny\n     Allow from All\n   </IfModule>\n\n</Directory>\n\n\n</VirtualHost>\n"
  },
  {
    "path": "web_gui/scripts/check_robinhood.py",
    "content": "#!/usr/bin/python\n\n# Copyright (C) 2016 CEA/DAM\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the CeCILL License.\n#\n# The fact that you are presently reading this means that you have had\n# knowledge of the CeCILL license (http://www.cecill.info) and that you\n# accept its terms.\n\n\"\"\"\nRobinhood v3 API Nagios plugin\n\nThis script queries a robinhood v3 database through the web API.\nIt returns the status in nagios format with optional perf data.\n\"\"\"\n\nimport requests\nimport json\nimport sys\nimport time,datetime\nfrom optparse import OptionParser\n\nclass Convert():\n    \"\"\"\n    Simple class to convert size\n    \"\"\"\n    units = ('B', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')\n\n    def h2b(self, string):\n        \"\"\"\n        Convert human readable size to bytes\n        \"\"\"\n        num = float(string[:-1])\n        p = {self.units[0]:1}\n        for i, s in enumerate(self.units[1:]):\n            p[s] = 1024 ** (i+1)\n        return int(num * p[string[-1:].upper()])\n\n    def b2h(self, n):\n        \"\"\"\n        Convert bytes to human readable size\n        \"\"\"\n        p = {}\n        for i, s in enumerate(self.units[1:]):\n            p[s] = 1024 ** (i+1)\n        for unit in reversed(self.units[1:]):\n            if int(n) >= p[unit]:\n                val = float(n) / p[unit]\n                return \"%f%s\" % (val, unit)\n        return \"%dB\" % n\n\nclass RobinHood():\n    args = {}\n    options = {}\n    \n    SECONDS = 1\n    MINUTES = SECONDS * 60\n    HOURS = MINUTES * 60\n    DAYS = HOURS * 24\n    WEEK = DAYS * 7\n\n    def __init__(self):\n        parser = OptionParser(usage=\"usage: %prog [options] server\",\n                version = \"%prog 1.0\")\n        parser.add_option(\"-p\", \"--perf\",\n                action = \"store_true\",\n                dest = \"perf\",\n                default = False,\n                help = \"Add perfstats\")\n 
       parser.add_option(\"-q\", \"--query\",\n                action = \"store\",\n                dest = \"query\",\n                default = \"scan\",\n                help = \"scan or user_size\",)\n        parser.add_option(\"-w\", \"--warning\",\n                action = \"store\",\n                dest = \"warning\",\n                default = \"100T\",\n                help = \"\",)\n        parser.add_option(\"-c\", \"--critical\",\n                action = \"store\",\n                dest = \"critical\",\n                default = \"1P\",\n                help = \"\",)\n        (self.options, self.args) = parser.parse_args()\n\n        if len(self.args) != 1:\n            parser.error(\"wrong number of arguments\")\n            sys.exit(2)\n\n    def getData(self,request):\n        try:\n            r = requests.get(\"http://%s/robinhood/api/index.php?request=%s\" % (self.args[0], request))\n            return json.loads(r.text)\n        except:\n            print \"Can't retrieve data: \",sys.exc_info()\n            sys.exit(2)\n\n\n    def runQuery(self):\n        code = 0\n        text = \"\"\n\n        if self.options.query == \"scan\":\n            data = self.getData(\"native/vars\")\n            nextscan = int(data['LastScanEndTime']) + int(data['ScanInterval'])\n            #Warning if the lastscan is 10 minutes to 10 hours old.\n            if int(time.time()) > nextscan+self.MINUTES*10 and int(time.time()) < nextscan+self.HOURS*10:\n                if data['LastScanStatus'] == 'running':\n                    text = text + \"OK: Scan is running\"\n                else:\n                    text = text + \"WARNING: Scan is lightly overdue\"\n                    code = 1\n            #Critical if the lastscan is older than 10 hours\n            elif int(time.time()) > nextscan+self.HOURS*10:\n                text = text + \"CRITICAL: Scan is overdue\"\n                code = 2\n            else:\n                text = text + \"OK: On time\"\n\n   
         text = text + \" / Nextscan: \" + datetime.datetime.utcfromtimestamp(nextscan).strftime('%Y-%m-%dT %H:%M:%SZ')\n\n            if self.options.perf:\n                text = text + \" | nextscan=%d, overdue=%d \" % (nextscan, int(time.time()) - nextscan)\n\n        elif self.options.query == \"user_size\":\n            data = self.getData(\"native/acct/uid.group\")\n            critical = Convert().h2b(self.options.critical)\n            warning = Convert().h2b(self.options.warning)\n            warn = {}\n            crit = {}\n            perf = []\n            for item in data:\n                if int(item[\"size\"]) > warning and int(item[\"size\"]) < critical:\n                    warn[item[\"uid\"]] = Convert().b2h(item[\"size\"])\n                elif int(item[\"size\"]) > critical:\n                    crit[item[\"uid\"]] = Convert().b2h(item[\"size\"])\n                if self.options.perf:\n                    perf.append(\"%s=%s\" % (item[\"uid\"], item[\"size\"]))\n\n            if (len(warn) > 0):\n                msg = \"WARNING: \"\n                code = 1\n                for k in warn:\n                    msg = msg + k + \"=\" + warn[k] + \" \"\n                text = text + msg + \" \"\n\n            if (len(crit) > 0):\n                msg = \"CRITICAL: \"\n                code = 2\n                for k in crit:\n                    msg = msg + k + \"=\" + crit[k] + \" \"\n                text = msg + text\n\n            if len(warn) == 0 and len(crit) == 0:\n                text = \"OK\"\n\n            if self.options.perf:\n                perf = \" | %s \" % (\", \".join(perf))\n                text = text + perf\n\n        return (text,code)\n\n\nInst = RobinHood()\nresult = Inst.runQuery()\n\nprint result[0]\nsys.exit(result[1])\n"
  },
  {
    "path": "web_gui/tests/nonreg.bash",
    "content": "#!/bin/bash\n\n# Static lint\nfor f in $(find ../ -name \"*.php\"); do\n       php -l $f\n       if [ $? -ne 0 ]; then\n               exit $?\n       fi;\ndone;\n\n# Advanced tests\nphp-cgi ./nonreg.php\nexit $?\n\n"
  },
  {
    "path": "web_gui/tests/nonreg.php",
    "content": "<?php\n/*\n * Copyright (C) 2016 CEA/DAM\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the CeCILL License.\n *\n * The fact that you are presently reading this means that you have had\n * knowledge of the CeCILL license (http://www.cecill.info) and that you\n * accept its terms.\n */\n\n\nfunction check_error() {\n        $err=error_get_last();\n        switch($err['type']) {\n        case E_ERROR: // 1 //\n        case E_WARNING: // 2 //\n                //Let's ignore the config_local warning\n                if (startsWith($err['message'],\"include(): Failed opening\"))\n                        return 0;\n        case E_PARSE: // 4 //\n        case E_NOTICE: // 8 //\n        case E_CORE_ERROR: // 16 //\n        case E_CORE_WARNING: // 32 //\n        case E_COMPILE_ERROR: // 64 //\n        case E_COMPILE_WARNING: // 128 //\n        case E_USER_ERROR: // 256 //\n        case E_USER_WARNING: // 512 //\n        case E_USER_NOTICE: // 1024 //\n        case E_STRICT: // 2048 //\n        case E_RECOVERABLE_ERROR: // 4096 //\n        case E_DEPRECATED: // 8192 //\n        case E_USER_DEPRECATED: // 16384 //\n                break;\n        }\n        print \"[ERROR] Someting goes wrong:\\n\";\n        print_r($err);\n        exit($err['type']); \n}\n\n$cwd = getcwd();\n\n\necho \"[NOTICE] include API\\n\";\nchdir(\"../gui_v3/api\");\nrequire_once(\"robinhood.php\");\ncheck_error();\n\nchdir(\"..\");\necho \"[NOTICE] include Common\\n\";\nrequire_once(\"common.php\");\ncheck_error();\n\necho \"[NOTICE] Process API\\n\";\n$API = new MyAPI(\"robinhood\");\n$result =  $API->processAPI();\nif ($result != '\"\\\"Fear not, my friends. 
This will be my greatest performance.\\\"\"') {\n        echo \"API is broken, return $result instead of status\";\n        echo \"[ERROR] API Failed\\n\";\n        exit(1);\n}\ncheck_error();\n/* Check index.php */\necho \"[NOTICE] Process index.php\\n\";\nob_start();\nrequire_once(\"index.php\");\n$data = ob_get_clean();\ncheck_error();\n\n/* TODO check database connection */\n/* TODO check native/vars        */\n\nchdir($cwd);\necho \"Success\\n\";\n?>\n"
  }
]